package de.juplo.kafka;

import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.RecordDeserializationException;
import org.apache.kafka.common.errors.WakeupException;

import javax.annotation.PreDestroy;
import java.time.Duration;
import java.util.*;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
@Slf4j
-public class EndlessConsumer implements Runnable
+@RequiredArgsConstructor
+public class EndlessConsumer<K, V> implements Runnable
{
private final ExecutorService executor;
- private final String bootstrapServer;
- private final String groupId;
private final String id;
private final String topic;
- private final String autoOffsetReset;
-
- private AtomicBoolean running = new AtomicBoolean();
+ private final Consumer<K, V> consumer;
+ private final PollIntervalAwareConsumerRebalanceListener pollIntervalAwareRebalanceListener;
+ private final RecordHandler<K, V> handler;
+
+ private final Lock lock = new ReentrantLock();
+ private final Condition condition = lock.newCondition();
+ private boolean running = false;
+ private Exception exception;
private long consumed = 0;
- private KafkaConsumer<String, String> consumer = null;
- private Future<?> future = null;
-
- private Map<Integer, Map<String, Integer>> seen;
- public EndlessConsumer(
- ExecutorService executor,
- String bootstrapServer,
- String groupId,
- String clientId,
- String topic,
- String autoOffsetReset)
- {
- this.executor = executor;
- this.bootstrapServer = bootstrapServer;
- this.groupId = groupId;
- this.id = clientId;
- this.topic = topic;
- this.autoOffsetReset = autoOffsetReset;
- }
@Override
public void run()
{
try
{
- Properties props = new Properties();
- props.put("bootstrap.servers", bootstrapServer);
- props.put("group.id", groupId);
- props.put("client.id", id);
- props.put("auto.offset.reset", autoOffsetReset);
- props.put("key.deserializer", StringDeserializer.class.getName());
- props.put("value.deserializer", StringDeserializer.class.getName());
-
- this.consumer = new KafkaConsumer<>(props);
-
log.info("{} - Subscribing to topic {}", id, topic);
- consumer.subscribe(Arrays.asList(topic));
-
- seen = new HashMap<>();
+ consumer.subscribe(Arrays.asList(topic), pollIntervalAwareRebalanceListener);
while (true)
{
- ConsumerRecords<String, String> records =
+ ConsumerRecords<K, V> records =
consumer.poll(Duration.ofSeconds(1));
// Do something with the data...
log.info("{} - Received {} messages", id, records.count());
- for (ConsumerRecord<String, String> record : records)
+ for (ConsumerRecord<K, V> record : records)
{
- consumed++;
log.info(
"{} - {}: {}/{} - {}={}",
id,
record.value()
);
- Integer partition = record.partition();
- String key = record.key();
+ handler.accept(record);
- if (!seen.containsKey(partition))
- seen.put(partition, new HashMap<>());
-
- Map<String, Integer> byKey = seen.get(partition);
-
- if (!byKey.containsKey(key))
- byKey.put(key, 0);
-
- int seenByKey = byKey.get(key);
- seenByKey++;
- byKey.put(key, seenByKey);
+ consumed++;
}
+
+ pollIntervalAwareRebalanceListener.beforeNextPoll();
}
}
catch(WakeupException e)
{
- log.info("{} - RIIING!", id);
+ log.info("{} - RIIING! Request to stop consumption - commiting current offsets!", id);
+ shutdown();
+ }
+ catch(RecordDeserializationException e)
+ {
+ TopicPartition tp = e.topicPartition();
+ long offset = e.offset();
+ log.error(
+ "{} - Could not deserialize message on topic {} with offset={}: {}",
+ id,
+ tp,
+ offset,
+ e.getCause().toString());
+
+ shutdown(e);
}
catch(Exception e)
{
log.error("{} - Unexpected error: {}", id, e.toString(), e);
- running.set(false); // Mark the instance as not running
+ shutdown(e);
}
finally
{
- log.info("{} - Closing the KafkaConsumer", id);
- consumer.close();
-
- for (Integer partition : seen.keySet())
- {
- Map<String, Integer> byKey = seen.get(partition);
- for (String key : byKey.keySet())
- {
- log.info(
- "{} - Seen {} messages for partition={}|key={}",
- id,
- byKey.get(key),
- partition,
- key);
- }
- }
- seen = null;
-
log.info("{} - Consumer-Thread exiting", id);
}
}
- public Map<Integer, Map<String, Integer>> getSeen()
+ private void shutdown()
{
- return seen;
+ shutdown(null);
+ }
+
+ private void shutdown(Exception e)
+ {
+ lock.lock();
+ try
+ {
+ try
+ {
+ log.info("{} - Unsubscribing from topic {}", id, topic);
+ consumer.unsubscribe();
+ }
+ catch (Exception ue)
+ {
+ log.error(
+ "{} - Error while unsubscribing from topic {}: {}",
+ id,
+ topic,
+ ue.toString());
+ }
+ finally
+ {
+ running = false;
+ exception = e;
+ condition.signal();
+ }
+ }
+ finally
+ {
+ lock.unlock();
+ }
}
- public synchronized void start()
+ public void start()
{
- boolean stateChanged = running.compareAndSet(false, true);
- if (!stateChanged)
- throw new RuntimeException("Consumer instance " + id + " is already running!");
+ lock.lock();
+ try
+ {
+ if (running)
+ throw new IllegalStateException("Consumer instance " + id + " is already running!");
- log.info("{} - Starting - consumed {} messages before", id, consumed);
- future = executor.submit(this);
+ log.info("{} - Starting - consumed {} messages before", id, consumed);
+ running = true;
+ exception = null;
+ executor.submit(this);
+ }
+ finally
+ {
+ lock.unlock();
+ }
}
- public synchronized void stop() throws ExecutionException, InterruptedException
+ public synchronized void stop() throws InterruptedException
{
- boolean stateChanged = running.compareAndSet(true, false);
- if (!stateChanged)
- throw new RuntimeException("Consumer instance " + id + " is not running!");
-
- log.info("{} - Stopping", id);
- consumer.wakeup();
- future.get();
- log.info("{} - Stopped - consumed {} messages so far", id, consumed);
+ lock.lock();
+ try
+ {
+ if (!running)
+ throw new IllegalStateException("Consumer instance " + id + " is not running!");
+
+ log.info("{} - Stopping", id);
+ consumer.wakeup();
+ condition.await();
+ log.info("{} - Stopped - consumed {} messages so far", id, consumed);
+ }
+ finally
+ {
+ lock.unlock();
+ }
}
@PreDestroy
public void destroy() throws ExecutionException, InterruptedException
{
log.info("{} - Destroy!", id);
+ log.info("{}: Consumed {} messages in total, exiting!", id, consumed);
+ }
+
+ public boolean running()
+ {
+ lock.lock();
try
{
- stop();
+ return running;
}
- catch (IllegalStateException e)
+ finally
{
- log.info("{} - Was already stopped", id);
+ lock.unlock();
+ }
+ }
+
+ public Optional<Exception> exitStatus()
+ {
+ lock.lock();
+ try
+ {
+ if (running)
+ throw new IllegalStateException("No exit-status available: Consumer instance " + id + " is running!");
+
+ return Optional.ofNullable(exception);
}
finally
{
- log.info("{}: Consumed {} messages in total, exiting!", id, consumed);
+ lock.unlock();
}
}
}