package de.juplo.kafka;

import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.TopicPartition;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.config.KafkaListenerEndpointRegistry;
import org.springframework.kafka.listener.ConsumerAwareRebalanceListener;
import org.springframework.stereotype.Component;

import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Consumer;
+@Component
@Slf4j
-public class EndlessConsumer implements Runnable
+@RequiredArgsConstructor
+public class EndlessConsumer<K, V> implements ConsumerAwareRebalanceListener
{
- private final ExecutorService executor;
- private final String bootstrapServer;
- private final String groupId;
- private final String id;
- private final String topic;
- private final String autoOffsetReset;
-
- private AtomicBoolean running = new AtomicBoolean();
+ @Autowired
+ private KafkaListenerEndpointRegistry registry;
+ @Value("${spring.kafka.consumer.client-id}")
+ String id;
+ @Autowired
+ Consumer<ConsumerRecord<K, V>> handler;
+
private long consumed = 0;
- private KafkaConsumer<String, String> consumer = null;
- private Future<?> future = null;
-
- public EndlessConsumer(
- ExecutorService executor,
- String bootstrapServer,
- String groupId,
- String clientId,
- String topic,
- String autoOffsetReset)
+
+ private final Map<Integer, Map<String, Long>> seen = new HashMap<>();
+ private final Map<Integer, Long> offsets = new HashMap<>();
+
+
+ @Override
+ public void onPartitionsRevokedBeforeCommit(
+ org.apache.kafka.clients.consumer.Consumer<?, ?> consumer,
+ Collection<TopicPartition> partitions)
{
- this.executor = executor;
- this.bootstrapServer = bootstrapServer;
- this.groupId = groupId;
- this.id = clientId;
- this.topic = topic;
- this.autoOffsetReset = autoOffsetReset;
+ partitions.forEach(tp ->
+ {
+ Integer partition = tp.partition();
+ Long newOffset = consumer.position(tp);
+ Long oldOffset = offsets.remove(partition);
+ log.info(
+ "{} - removing partition: {}, consumed {} records (offset {} -> {})",
+ id,
+ partition,
+ newOffset - oldOffset,
+ oldOffset,
+ newOffset);
+ Map<String, Long> removed = seen.remove(partition);
+ for (String key : removed.keySet())
+ {
+ log.info(
+ "{} - Seen {} messages for partition={}|key={}",
+ id,
+ removed.get(key),
+ partition,
+ key);
+ }
+ });
}
@Override
- public void run()
+ public void onPartitionsAssigned(
+ org.apache.kafka.clients.consumer.Consumer<?, ?> consumer,
+ Collection<TopicPartition> partitions)
{
- try
+ partitions.forEach(tp ->
{
- Properties props = new Properties();
- props.put("bootstrap.servers", bootstrapServer);
- props.put("group.id", groupId);
- props.put("client.id", id);
- props.put("auto.offset.reset", autoOffsetReset);
- props.put("key.deserializer", StringDeserializer.class.getName());
- props.put("value.deserializer", StringDeserializer.class.getName());
-
- this.consumer = new KafkaConsumer<>(props);
+ Integer partition = tp.partition();
+ Long offset = consumer.position(tp);
+ log.info("{} - adding partition: {}, offset={}", id, partition, offset);
+ offsets.put(partition, offset);
+ seen.put(partition, new HashMap<>());
+ });
+ }
- log.info("{} - Subscribing to topic {}", id, topic);
- consumer.subscribe(Arrays.asList(topic));
- while (true)
- {
- ConsumerRecords<String, String> records =
- consumer.poll(Duration.ofSeconds(1));
-
- // Do something with the data...
- log.info("{} - Received {} messages", id, records.count());
- for (ConsumerRecord<String, String> record : records)
- {
- consumed++;
- log.info(
- "{} - {}: {}/{} - {}={}",
- id,
- record.offset(),
- record.topic(),
- record.partition(),
- record.key(),
- record.value()
- );
- }
- }
- }
- catch(WakeupException e)
- {
- log.info("{} - RIIING!", id);
- }
- catch(Exception e)
- {
- log.error("{} - Unexpected error: {}", id, e.toString(), e);
- running.set(false); // Mark the instance as not running
- }
- finally
- {
- log.info("{} - Closing the KafkaConsumer", id);
- consumer.close();
- log.info("{} - Consumer-Thread exiting", id);
- }
+ @KafkaListener(
+ id = "${spring.kafka.consumer.client-id}",
+ idIsGroup = false,
+ topics = "${consumer.topic}",
+ autoStartup = "false")
+ public void receive(ConsumerRecord<K, V> record)
+ {
+ log.info(
+ "{} - {}: {}/{} - {}={}",
+ id,
+ record.offset(),
+ record.topic(),
+ record.partition(),
+ record.key(),
+ record.value()
+ );
+
+ handler.accept(record);
+
+ consumed++;
}
public synchronized void start()
{
- boolean stateChanged = running.compareAndSet(false, true);
- if (!stateChanged)
- throw new RuntimeException("Consumer instance " + id + " is already running!");
+ if (registry.getListenerContainer(id).isChildRunning())
+ throw new IllegalStateException("Consumer instance " + id + " is already running!");
log.info("{} - Starting - consumed {} messages before", id, consumed);
- future = executor.submit(this);
+ registry.getListenerContainer(id).start();
}
- public synchronized void stop() throws ExecutionException, InterruptedException
+ public synchronized void stop()
{
- boolean stateChanged = running.compareAndSet(true, false);
- if (!stateChanged)
- throw new RuntimeException("Consumer instance " + id + " is not running!");
+ if (!registry.getListenerContainer(id).isChildRunning())
+ throw new IllegalStateException("Consumer instance " + id + " is not running!");
log.info("{} - Stopping", id);
- consumer.wakeup();
- future.get();
+ registry.getListenerContainer(id).stop();
log.info("{} - Stopped - consumed {} messages so far", id, consumed);
}
- @PreDestroy
- public void destroy() throws ExecutionException, InterruptedException
+ public synchronized boolean isRunning()
{
- log.info("{} - Destroy!", id);
- try
- {
- stop();
- }
- catch (IllegalStateException e)
- {
- log.info("{} - Was already stopped", id);
- }
- finally
- {
- log.info("{}: Consumed {} messages in total, exiting!", id, consumed);
- }
+ return registry.getListenerContainer(id).isChildRunning();
}
}