package de.juplo.kafka;
import lombok.extern.slf4j.Slf4j;
+import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.WakeupException;
import org.apache.kafka.common.serialization.StringDeserializer;
import javax.annotation.PreDestroy;
import java.time.Duration;
-import java.util.Arrays;
-import java.util.Properties;
+import java.util.*;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
// Consumer-group id passed to the Kafka consumer config.
private final String groupId;
// Client id; used as the prefix of every log message of this instance.
private final String id;
// Topic this consumer subscribes to.
private final String topic;
// Value for the "auto.offset.reset" consumer config (was hard-coded "earliest" before this change).
+ private final String autoOffsetReset;
// Running flag — presumably toggled by start()/stop(); TODO confirm, those methods are not fully visible in this chunk.
private AtomicBoolean running = new AtomicBoolean();
// Message counter — incremented elsewhere; not referenced in the visible part of this chunk.
private long consumed = 0;
// Created inside run(); null until the consumer thread has been started.
private KafkaConsumer<String, String> consumer = null;
// Handle of the submitted consumer task; null until start() has been called.
private Future<?> future = null;
// Statistics gathered by the consumer loop: partition -> (key -> number of messages seen).
// NOTE(review): plain HashMap mutated by the consumer thread and read via getSeen() — confirm callers tolerate the race.
+ private final Map<Integer, Map<String, Integer>> seen = new HashMap<>();
+
+
/**
 * Creates a consumer runner whose endless poll-loop is executed on the
 * given executor.
 *
 * @param executor        executor the consumer loop is submitted to
 * @param bootstrapServer Kafka bootstrap-server address
 * @param groupId         consumer-group id ({@code group.id})
 * @param clientId        client id ({@code client.id}), also used as log prefix
 * @param topic           topic to subscribe to
 * @param autoOffsetReset value for the {@code auto.offset.reset} consumer config
 */
public EndlessConsumer(
    ExecutorService executor,
    String bootstrapServer,
    String groupId,
    String clientId,
    String topic,
    String autoOffsetReset)
{
  // Plain field assignments — no I/O happens before start() is called.
  this.id = clientId;
  this.groupId = groupId;
  this.topic = topic;
  this.autoOffsetReset = autoOffsetReset;
  this.bootstrapServer = bootstrapServer;
  this.executor = executor;
}
// Consumer loop: builds the KafkaConsumer, subscribes with a rebalance
// listener that (re)initializes the per-partition "seen" statistics, and
// counts the consumed messages per partition and key until the loop is
// terminated by an exception (e.g. a wakeup).
// NOTE(review): this chunk is a patch fragment — the poll() call and the
// head of the record loop between "while (true)" and the per-record
// bookkeeping are not visible here.
@Override
public void run()
{
- Properties props = new Properties();
- props.put("bootstrap.servers", bootstrapServer);
- props.put("group.id", groupId);
- props.put("client.id", id);
- props.put("auto.offset.reset", "earliest");
- props.put("key.deserializer", StringDeserializer.class.getName());
- props.put("value.deserializer", StringDeserializer.class.getName());
-
- this.consumer = new KafkaConsumer<>(props);
-
try
{
// Consumer construction moved inside the try so that any failure during
// setup also runs the finally-block below.
+ Properties props = new Properties();
+ props.put("bootstrap.servers", bootstrapServer);
+ props.put("group.id", groupId);
+ props.put("client.id", id);
// auto.offset.reset is now configurable instead of hard-coded "earliest".
+ props.put("auto.offset.reset", autoOffsetReset);
// Short metadata refresh — presumably so topic/partition changes are
// picked up quickly; TODO confirm intent (1s is far below the default).
+ props.put("metadata.max.age.ms", "1000");
+ props.put("key.deserializer", StringDeserializer.class.getName());
+ props.put("value.deserializer", StringDeserializer.class.getName());
+
+ this.consumer = new KafkaConsumer<>(props);
+
log.info("{} - Subscribing to topic {}", id, topic);
// Subscribe with a rebalance listener that keeps "seen" in sync with the
// partitions currently assigned to this instance. The listener callbacks
// run on the consumer thread itself (inside poll()).
+ consumer.subscribe(Arrays.asList(topic), new ConsumerRebalanceListener()
+ {
// Drop the statistics of partitions that are taken away from this
// instance — another instance will count them from now on.
+ @Override
+ public void onPartitionsRevoked(Collection<TopicPartition> partitions)
+ {
+ partitions.forEach(tp -> seen.remove(tp.partition()));
+ }
+
// Start fresh statistics for every newly assigned partition.
+ @Override
+ public void onPartitionsAssigned(Collection<TopicPartition> partitions)
+ {
+ partitions.forEach(tp -> seen.put(tp.partition(), new HashMap<>()));
+ }
+ });
while (true)
{
record.key(),
record.value()
);
+
// Per-record bookkeeping: count messages per partition and key.
// Null keys are folded into the artificial key "NULL".
+ Integer partition = record.partition();
+ String key = record.key() == null ? "NULL" : record.key();
// The partition entry exists because onPartitionsAssigned created it
// before any record of that partition can be delivered.
+ Map<String, Integer> byKey = seen.get(partition);
+
+ if (!byKey.containsKey(key))
+ byKey.put(key, 0);
+
+ int seenByKey = byKey.get(key);
+ seenByKey++;
+ byKey.put(key, seenByKey);
}
}
}
}
catch(Exception e)
{
// Now logs the full stack trace (extra throwable argument) instead of
// only the exception's toString().
- log.error("{} - Unexpected error: {}", id, e.toString());
+ log.error("{} - Unexpected error: {}", id, e.toString(), e);
running.set(false); // Mark the instance as not running
}
finally
{
log.info("{} - Closing the KafkaConsumer", id);
consumer.close();
+
// Dump the collected statistics before the thread exits.
+ for (Integer partition : seen.keySet())
+ {
+ Map<String, Integer> byKey = seen.get(partition);
+ for (String key : byKey.keySet())
+ {
+ log.info(
+ "{} - Seen {} messages for partition={}|key={}",
+ id,
+ byKey.get(key),
+ partition,
+ key);
+ }
+ }
+
log.info("{} - Consumer-Thread exiting", id);
}
}
+ public Map<Integer, Map<String, Integer>> getSeen()
+ {
+ return seen;
+ }
public synchronized void start()
{