package de.juplo.kafka;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.WakeupException;
import org.apache.kafka.common.serialization.StringDeserializer;

import javax.annotation.PreDestroy;
import java.time.Duration;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicBoolean;
23 public class EndlessConsumer implements Runnable
25 private final ExecutorService executor;
26 private final String bootstrapServer;
27 private final String groupId;
28 private final String id;
29 private final String topic;
30 private final String autoOffsetReset;
32 private AtomicBoolean running = new AtomicBoolean();
33 private long consumed = 0;
34 private KafkaConsumer<String, String> consumer = null;
35 private Future<?> future = null;
37 private Map<Integer, Map<String, Integer>> seen;
40 public EndlessConsumer(
41 ExecutorService executor,
42 String bootstrapServer,
46 String autoOffsetReset)
48 this.executor = executor;
49 this.bootstrapServer = bootstrapServer;
50 this.groupId = groupId;
53 this.autoOffsetReset = autoOffsetReset;
61 Properties props = new Properties();
62 props.put("bootstrap.servers", bootstrapServer);
63 props.put("group.id", groupId);
64 props.put("client.id", id);
65 props.put("auto.offset.reset", autoOffsetReset);
66 props.put("metadata.max.age.ms", "1000");
67 props.put("key.deserializer", StringDeserializer.class.getName());
68 props.put("value.deserializer", StringDeserializer.class.getName());
70 this.consumer = new KafkaConsumer<>(props);
72 log.info("{} - Subscribing to topic {}", id, topic);
73 consumer.subscribe(Arrays.asList(topic));
75 seen = new HashMap<>();
79 ConsumerRecords<String, String> records =
80 consumer.poll(Duration.ofSeconds(1));
82 // Do something with the data...
83 log.info("{} - Received {} messages", id, records.count());
84 for (ConsumerRecord<String, String> record : records)
88 "{} - {}: {}/{} - {}={}",
97 Integer partition = record.partition();
98 String key = record.key() == null ? "NULL" : record.key();
100 if (!seen.containsKey(partition))
101 seen.put(partition, new HashMap<>());
103 Map<String, Integer> byKey = seen.get(partition);
105 if (!byKey.containsKey(key))
108 int seenByKey = byKey.get(key);
110 byKey.put(key, seenByKey);
114 catch(WakeupException e)
116 log.info("{} - RIIING!", id);
120 log.error("{} - Unexpected error: {}", id, e.toString(), e);
121 running.set(false); // Mark the instance as not running
125 log.info("{} - Closing the KafkaConsumer", id);
128 for (Integer partition : seen.keySet())
130 Map<String, Integer> byKey = seen.get(partition);
131 for (String key : byKey.keySet())
134 "{} - Seen {} messages for partition={}|key={}",
143 log.info("{} - Consumer-Thread exiting", id);
147 public Map<Integer, Map<String, Integer>> getSeen()
152 public synchronized void start()
154 boolean stateChanged = running.compareAndSet(false, true);
156 throw new RuntimeException("Consumer instance " + id + " is already running!");
158 log.info("{} - Starting - consumed {} messages before", id, consumed);
159 future = executor.submit(this);
162 public synchronized void stop() throws ExecutionException, InterruptedException
164 boolean stateChanged = running.compareAndSet(true, false);
166 throw new RuntimeException("Consumer instance " + id + " is not running!");
168 log.info("{} - Stopping", id);
171 log.info("{} - Stopped - consumed {} messages so far", id, consumed);
175 public void destroy() throws ExecutionException, InterruptedException
177 log.info("{} - Destroy!", id);
182 catch (IllegalStateException e)
184 log.info("{} - Was already stopped", id);
188 log.info("{}: Consumed {} messages in total, exiting!", id, consumed);