Merge branch 'rebalance-listener' into counting-consumer
[demos/kafka/training] / src / main / java / de / juplo / kafka / EndlessConsumer.java
1 package de.juplo.kafka;
2
3 import lombok.extern.slf4j.Slf4j;
4 import org.apache.kafka.clients.consumer.ConsumerRecord;
5 import org.apache.kafka.clients.consumer.ConsumerRecords;
6 import org.apache.kafka.clients.consumer.KafkaConsumer;
7 import org.apache.kafka.common.errors.WakeupException;
8 import org.apache.kafka.common.serialization.StringDeserializer;
9
10 import javax.annotation.PreDestroy;
11 import java.time.Duration;
12 import java.util.Arrays;
13 import java.util.HashMap;
14 import java.util.Map;
15 import java.util.Properties;
16 import java.util.concurrent.ExecutionException;
17 import java.util.concurrent.ExecutorService;
18 import java.util.concurrent.Future;
19 import java.util.concurrent.atomic.AtomicBoolean;
20
21
22 @Slf4j
23 public class EndlessConsumer implements Runnable
24 {
25   private final ExecutorService executor;
26   private final String bootstrapServer;
27   private final String groupId;
28   private final String id;
29   private final String topic;
30   private final String autoOffsetReset;
31
32   private AtomicBoolean running = new AtomicBoolean();
33   private long consumed = 0;
34   private KafkaConsumer<String, String> consumer = null;
35   private Future<?> future = null;
36
37   private Map<Integer, Map<String, Integer>> seen;
38
39
40   public EndlessConsumer(
41       ExecutorService executor,
42       String bootstrapServer,
43       String groupId,
44       String clientId,
45       String topic,
46       String autoOffsetReset)
47   {
48     this.executor = executor;
49     this.bootstrapServer = bootstrapServer;
50     this.groupId = groupId;
51     this.id = clientId;
52     this.topic = topic;
53     this.autoOffsetReset = autoOffsetReset;
54   }
55
56   @Override
57   public void run()
58   {
59     try
60     {
61       Properties props = new Properties();
62       props.put("bootstrap.servers", bootstrapServer);
63       props.put("group.id", groupId);
64       props.put("client.id", id);
65       props.put("auto.offset.reset", autoOffsetReset);
66       props.put("metadata.max.age.ms", "1000");
67       props.put("key.deserializer", StringDeserializer.class.getName());
68       props.put("value.deserializer", StringDeserializer.class.getName());
69
70       this.consumer = new KafkaConsumer<>(props);
71
72       log.info("{} - Subscribing to topic {}", id, topic);
73       consumer.subscribe(Arrays.asList(topic));
74
75       seen = new HashMap<>();
76
77       while (true)
78       {
79         ConsumerRecords<String, String> records =
80             consumer.poll(Duration.ofSeconds(1));
81
82         // Do something with the data...
83         log.info("{} - Received {} messages", id, records.count());
84         for (ConsumerRecord<String, String> record : records)
85         {
86           consumed++;
87           log.info(
88               "{} - {}: {}/{} - {}={}",
89               id,
90               record.offset(),
91               record.topic(),
92               record.partition(),
93               record.key(),
94               record.value()
95           );
96
97           Integer partition = record.partition();
98           String key = record.key() == null ? "NULL" : record.key();
99
100           if (!seen.containsKey(partition))
101             seen.put(partition, new HashMap<>());
102
103           Map<String, Integer> byKey = seen.get(partition);
104
105           if (!byKey.containsKey(key))
106             byKey.put(key, 0);
107
108           int seenByKey = byKey.get(key);
109           seenByKey++;
110           byKey.put(key, seenByKey);
111         }
112       }
113     }
114     catch(WakeupException e)
115     {
116       log.info("{} - RIIING!", id);
117     }
118     catch(Exception e)
119     {
120       log.error("{} - Unexpected error: {}", id, e.toString(), e);
121       running.set(false); // Mark the instance as not running
122     }
123     finally
124     {
125       log.info("{} - Closing the KafkaConsumer", id);
126       consumer.close();
127
128       for (Integer partition : seen.keySet())
129       {
130         Map<String, Integer> byKey = seen.get(partition);
131         for (String key : byKey.keySet())
132         {
133           log.info(
134               "{} - Seen {} messages for partition={}|key={}",
135               id,
136               byKey.get(key),
137               partition,
138               key);
139         }
140       }
141       seen = null;
142
143       log.info("{} - Consumer-Thread exiting", id);
144     }
145   }
146
147   public Map<Integer, Map<String, Integer>> getSeen()
148   {
149     return seen;
150   }
151
152   public synchronized void start()
153   {
154     boolean stateChanged = running.compareAndSet(false, true);
155     if (!stateChanged)
156       throw new RuntimeException("Consumer instance " + id + " is already running!");
157
158     log.info("{} - Starting - consumed {} messages before", id, consumed);
159     future = executor.submit(this);
160   }
161
162   public synchronized void stop() throws ExecutionException, InterruptedException
163   {
164     boolean stateChanged = running.compareAndSet(true, false);
165     if (!stateChanged)
166       throw new RuntimeException("Consumer instance " + id + " is not running!");
167
168     log.info("{} - Stopping", id);
169     consumer.wakeup();
170     future.get();
171     log.info("{} - Stopped - consumed {} messages so far", id, consumed);
172   }
173
174   @PreDestroy
175   public void destroy() throws ExecutionException, InterruptedException
176   {
177     log.info("{} - Destroy!", id);
178     try
179     {
180       stop();
181     }
182     catch (IllegalStateException e)
183     {
184       log.info("{} - Was already stopped", id);
185     }
186     finally
187     {
188       log.info("{}: Consumed {} messages in total, exiting!", id, consumed);
189     }
190   }
191 }