package de.juplo.kafka;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.WakeupException;
import org.apache.kafka.common.serialization.StringDeserializer;

import javax.annotation.PreDestroy;

import java.time.Duration;
import java.util.*;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
@Slf4j
-public class SimpleConsumer
+public class EndlessConsumer implements Runnable
{
+ private final ExecutorService executor;
+ private final String bootstrapServer;
+ private final String groupId;
+ private final String id;
+ private final String topic;
+ private final String autoOffsetReset;
+
+ private final Lock lock = new ReentrantLock();
+ private final Condition condition = lock.newCondition();
+ private boolean running = false;
+ private Exception exception;
private long consumed = 0;
- private KafkaConsumer<String, String> consumer;
- private Lock lock = new ReentrantLock();
- private Condition stopped = lock.newCondition();
+ private KafkaConsumer<String, String> consumer = null;
+
+
+ private final Map<Integer, Map<String, Long>> seen = new HashMap<>();
+ private final Map<Integer, Long> offsets = new HashMap<>();
- public SimpleConsumer()
+ public EndlessConsumer(
+ ExecutorService executor,
+ String bootstrapServer,
+ String groupId,
+ String clientId,
+ String topic,
+ String autoOffsetReset)
{
- // tag::create[]
- Properties props = new Properties();
- props.put("bootstrap.servers", ":9092");
- props.put("group.id", "my-consumer"); // << Used for Offset-Commits
- // end::create[]
- props.put("auto.offset.reset", "earliest");
- // tag::create[]
- props.put("key.deserializer", StringDeserializer.class.getName());
- props.put("value.deserializer", StringDeserializer.class.getName());
-
- KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
- // end::create[]
- this.consumer = consumer;
+ this.executor = executor;
+ this.bootstrapServer = bootstrapServer;
+ this.groupId = groupId;
+ this.id = clientId;
+ this.topic = topic;
+ this.autoOffsetReset = autoOffsetReset;
}
-
+ @Override
public void run()
{
- String id = "C";
-
try
{
- log.info("{} - Subscribing to topic test", id);
- consumer.subscribe(Arrays.asList("test"));
+ Properties props = new Properties();
+ props.put("bootstrap.servers", bootstrapServer);
+ props.put("group.id", groupId);
+ props.put("client.id", id);
+ props.put("auto.offset.reset", autoOffsetReset);
+ props.put("metadata.max.age.ms", "1000");
+ props.put("key.deserializer", StringDeserializer.class.getName());
+ props.put("value.deserializer", StringDeserializer.class.getName());
+
+ this.consumer = new KafkaConsumer<>(props);
+
+ log.info("{} - Subscribing to topic {}", id, topic);
+ consumer.subscribe(Arrays.asList(topic), new ConsumerRebalanceListener()
+ {
+ @Override
+ public void onPartitionsRevoked(Collection<TopicPartition> partitions)
+ {
+ partitions.forEach(tp ->
+ {
+ Integer partition = tp.partition();
+ Long newOffset = consumer.position(tp);
+ Long oldOffset = offsets.remove(partition);
+ log.info(
+ "{} - removing partition: {}, consumed {} records (offset {} -> {})",
+ id,
+ partition,
+ newOffset - oldOffset,
+ oldOffset,
+ newOffset);
+ Map<String, Long> removed = seen.remove(partition);
+ for (String key : removed.keySet())
+ {
+ log.info(
+ "{} - Seen {} messages for partition={}|key={}",
+ id,
+ removed.get(key),
+ partition,
+ key);
+ }
+ });
+ }
+
+ @Override
+ public void onPartitionsAssigned(Collection<TopicPartition> partitions)
+ {
+ partitions.forEach(tp ->
+ {
+ Integer partition = tp.partition();
+ Long offset = consumer.position(tp);
+ log.info("{} - adding partition: {}, offset={}", id, partition, offset);
+ offsets.put(partition, offset);
+ seen.put(partition, new HashMap<>());
+ });
+ }
+ });
- // tag::loop[]
while (true)
{
ConsumerRecords<String, String> records =
consumer.poll(Duration.ofSeconds(1));
// Do something with the data...
- // end::loop[]
log.info("{} - Received {} messages", id, records.count());
for (ConsumerRecord<String, String> record : records)
{
record.key(),
record.value()
);
+
+ Integer partition = record.partition();
+ String key = record.key() == null ? "NULL" : record.key();
+ Map<String, Long> byKey = seen.get(partition);
+
+ if (!byKey.containsKey(key))
+ byKey.put(key, 0l);
+
+ long seenByKey = byKey.get(key);
+ seenByKey++;
+ byKey.put(key, seenByKey);
}
- // tag::loop[]
}
- // end::loop[]
}
catch(WakeupException e)
{
log.info("{} - RIIING!", id);
+ shutdown();
}
catch(Exception e)
{
- log.error("{} - Unexpected error: {}", id, e.toString());
+ log.error("{} - Unexpected error: {}", id, e.toString(), e);
+ shutdown(e);
}
finally
{
- this.lock.lock();
- try
- {
- log.info("{} - Closing the KafkaConsumer", id);
- consumer.close();
- log.info("C - DONE!");
- stopped.signal();
- }
- finally
- {
- this.lock.unlock();
- log.info("{}: Consumed {} messages in total, exiting!", id, consumed);
- }
+ log.info("{} - Closing the KafkaConsumer", id);
+ consumer.close();
+ log.info("{} - Consumer-Thread exiting", id);
}
}
+ private void shutdown()
+ {
+ shutdown(null);
+ }
- public static void main(String[] args) throws Exception
+ private void shutdown(Exception e)
{
- SimpleConsumer instance = new SimpleConsumer();
+ lock.lock();
+ try
+ {
+ running = false;
+ exception = e;
+ condition.signal();
+ }
+ finally
+ {
+ lock.unlock();
+ }
+ }
- Runtime.getRuntime().addShutdownHook(new Thread(() ->
+ public Map<Integer, Map<String, Long>> getSeen()
+ {
+ return seen;
+ }
+
+ public void start()
+ {
+ lock.lock();
+ try
{
- instance.lock.lock();
- try
- {
- instance.consumer.wakeup();
- instance.stopped.await();
- }
- catch (InterruptedException e)
- {
- log.warn("Interrrupted while waiting for the consumer to stop!", e);
- }
- finally
- {
- instance.lock.unlock();
- }
- }));
+ if (running)
+ throw new IllegalStateException("Consumer instance " + id + " is already running!");
+
+ log.info("{} - Starting - consumed {} messages before", id, consumed);
+ running = true;
+ exception = null;
+ executor.submit(this);
+ }
+ finally
+ {
+ lock.unlock();
+ }
+ }
+
+ public synchronized void stop() throws ExecutionException, InterruptedException
+ {
+ lock.lock();
+ try
+ {
+ if (!running)
+ throw new IllegalStateException("Consumer instance " + id + " is not running!");
+
+ log.info("{} - Stopping", id);
+ consumer.wakeup();
+ condition.await();
+ log.info("{} - Stopped - consumed {} messages so far", id, consumed);
+ }
+ finally
+ {
+ lock.unlock();
+ }
+ }
- instance.run();
+ @PreDestroy
+ public void destroy() throws ExecutionException, InterruptedException
+ {
+ log.info("{} - Destroy!", id);
+ try
+ {
+ stop();
+ }
+ catch (IllegalStateException e)
+ {
+ log.info("{} - Was already stopped", id);
+ }
+ catch (Exception e)
+ {
+ log.error("{} - Unexpected exception while trying to stop the consumer", id, e);
+ }
+ finally
+ {
+ log.info("{}: Consumed {} messages in total, exiting!", id, consumed);
+ }
+ }
+
+ public boolean running()
+ {
+ lock.lock();
+ try
+ {
+ return running;
+ }
+ finally
+ {
+ lock.unlock();
+ }
+ }
+
+ public Optional<Exception> exitStatus()
+ {
+ lock.lock();
+ try
+ {
+ if (running)
+ throw new IllegalStateException("No exit-status available: Consumer instance " + id + " is running!");
+
+ return Optional.ofNullable(exception);
+ }
+ finally
+ {
+ lock.unlock();
+ }
}
}