#!/bin/bash
# Demo driver: build/run the counting-consumer example and show its state.
# (Diff markers from a patch were left in this file; resolved to the
# post-patch state here.)

IMAGE=juplo/counting-consumer:1.0-SNAPSHOT

if [ "$1" = "cleanup" ]
then
# Wait until the broker answers (up to 60s); abort the demo otherwise.
docker-compose exec cli cub kafka-ready -b kafka:9092 1 60 > /dev/null 2>&1 || exit 1
docker-compose up setup
docker-compose up -d producer consumer

# Let producer and consumer start up and process some messages first.
sleep 10

# Sample the consumer's counting state five times, one second apart,
# so the growing per-key counters are visible in the output.
for i in 1 2 3 4 5
do
  http :8081/seen
  sleep 1
done

docker-compose stop producer consumer
docker-compose logs consumer
</parent>
<groupId>de.juplo.kafka</groupId>
- <artifactId>endless-consumer</artifactId>
+ <artifactId>counting-consumer</artifactId>
<version>1.0-SNAPSHOT</version>
- <name>Endless Consumer: a Simple Consumer-Group that reads and print the topic</name>
+  <name>Counting Consumer: a Simple Consumer-Group that reads and prints the topic and counts the received messages for each key by partition</name>
<dependencies>
<dependency>
package de.juplo.kafka;
import lombok.RequiredArgsConstructor;
+import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RestController;
+import java.util.Map;
import java.util.concurrent.ExecutionException;
{
consumer.stop();
}
+
+
+ @GetMapping("seen")
+ public Map<Integer, Map<String, Integer>> seen()
+ {
+ return consumer.getSeen();
+ }
}
import javax.annotation.PreDestroy;
import java.time.Duration;
import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
private KafkaConsumer<String, String> consumer = null;
private Future<?> future = null;
+ private Map<Integer, Map<String, Integer>> seen;
+
+
public EndlessConsumer(
ExecutorService executor,
String bootstrapServer,
log.info("{} - Subscribing to topic {}", id, topic);
consumer.subscribe(Arrays.asList(topic));
+ seen = new HashMap<>();
+
while (true)
{
ConsumerRecords<String, String> records =
record.key(),
record.value()
);
+
+ Integer partition = record.partition();
+ String key = record.key();
+
+ if (!seen.containsKey(partition))
+ seen.put(partition, new HashMap<>());
+
+ Map<String, Integer> byKey = seen.get(partition);
+
+ if (!byKey.containsKey(key))
+ byKey.put(key, 0);
+
+ int seenByKey = byKey.get(key);
+ seenByKey++;
+ byKey.put(key, seenByKey);
}
}
}
{
log.info("{} - Closing the KafkaConsumer", id);
consumer.close();
+
+ for (Integer partition : seen.keySet())
+ {
+ Map<String, Integer> byKey = seen.get(partition);
+ for (String key : byKey.keySet())
+ {
+ log.info(
+ "{} - Seen {} messages for partition={}|key={}",
+ id,
+ byKey.get(key),
+ partition,
+ key);
+ }
+ }
+ seen = null;
+
log.info("{} - Consumer-Thread exiting", id);
}
}
+ public Map<Integer, Map<String, Integer>> getSeen()
+ {
+ return seen;
+ }
public synchronized void start()
{