Springify: Start/Stop checks whether the container is already/still running
[demos/kafka/training] src/main/java/de/juplo/kafka/EndlessConsumer.java
index 9aa8152..a5a5ce6 100644
 package de.juplo.kafka;
 
+import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
-import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.clients.consumer.ConsumerRecords;
-import org.apache.kafka.clients.consumer.KafkaConsumer;
-import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.errors.WakeupException;
-import org.apache.kafka.common.serialization.StringDeserializer;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.kafka.annotation.KafkaListener;
+import org.springframework.kafka.config.KafkaListenerEndpointRegistry;
+import org.springframework.stereotype.Component;
 
 import javax.annotation.PreDestroy;
-import java.time.Duration;
-import java.util.*;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
-import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.List;
+import java.util.function.Consumer;
 
 
+@Component
 @Slf4j
-public class EndlessConsumer implements Runnable
+@RequiredArgsConstructor
+public class EndlessConsumer<K, V>
 {
-  private final ExecutorService executor;
-  private final String bootstrapServer;
-  private final String groupId;
-  private final String id;
-  private final String topic;
-  private final String autoOffsetReset;
+  @Autowired
+  private KafkaListenerEndpointRegistry registry;
+  @Value("${consumer.client-id}")
+  String id;
+  @Autowired
+  Consumer<ConsumerRecord<K, V>> handler;
 
-  private AtomicBoolean running = new AtomicBoolean();
   private long consumed = 0;
-  private KafkaConsumer<String, String> consumer = null;
-  private Future<?> future = null;
 
-  private final Map<Integer, Map<String, Integer>> seen = new HashMap<>();
-
-
-  public EndlessConsumer(
-      ExecutorService executor,
-      String bootstrapServer,
-      String groupId,
-      String clientId,
-      String topic,
-      String autoOffsetReset)
+  @KafkaListener(
+      id = "${consumer.client-id}",
+      idIsGroup = false,
+      topics = "${consumer.topic}",
+      containerFactory = "batchFactory",
+      autoStartup = "false")
+  public void receive(List<ConsumerRecord<K, V>> records)
   {
-    this.executor = executor;
-    this.bootstrapServer = bootstrapServer;
-    this.groupId = groupId;
-    this.id = clientId;
-    this.topic = topic;
-    this.autoOffsetReset = autoOffsetReset;
-  }
-
-  @Override
-  public void run()
-  {
-    try
+    // Do something with the data...
+    log.info("{} - Received {} messages", id, records.size());
+    for (ConsumerRecord<K, V> record : records)
     {
-      Properties props = new Properties();
-      props.put("bootstrap.servers", bootstrapServer);
-      props.put("group.id", groupId);
-      props.put("client.id", id);
-      props.put("auto.offset.reset", autoOffsetReset);
-      props.put("metadata.max.age.ms", "1000");
-      props.put("key.deserializer", StringDeserializer.class.getName());
-      props.put("value.deserializer", StringDeserializer.class.getName());
-
-      this.consumer = new KafkaConsumer<>(props);
-
-      log.info("{} - Subscribing to topic {}", id, topic);
-      consumer.subscribe(Arrays.asList(topic), new ConsumerRebalanceListener()
-      {
-        @Override
-        public void onPartitionsRevoked(Collection<TopicPartition> partitions)
-        {
-          partitions.forEach(tp -> seen.remove(tp.partition()));
-        }
-
-        @Override
-        public void onPartitionsAssigned(Collection<TopicPartition> partitions)
-        {
-          partitions.forEach(tp -> seen.put(tp.partition(), new HashMap<>()));
-        }
-      });
-
-      while (true)
-      {
-        ConsumerRecords<String, String> records =
-            consumer.poll(Duration.ofSeconds(1));
-
-        // Do something with the data...
-        log.info("{} - Received {} messages", id, records.count());
-        for (ConsumerRecord<String, String> record : records)
-        {
-          consumed++;
-          log.info(
-              "{} - {}: {}/{} - {}={}",
-              id,
-              record.offset(),
-              record.topic(),
-              record.partition(),
-              record.key(),
-              record.value()
-          );
-
-          Integer partition = record.partition();
-          String key = record.key() == null ? "NULL" : record.key();
-          Map<String, Integer> byKey = seen.get(partition);
-
-          if (!byKey.containsKey(key))
-            byKey.put(key, 0);
-
-          int seenByKey = byKey.get(key);
-          seenByKey++;
-          byKey.put(key, seenByKey);
-        }
-      }
-    }
-    catch(WakeupException e)
-    {
-      log.info("{} - RIIING!", id);
-    }
-    catch(Exception e)
-    {
-      log.error("{} - Unexpected error: {}", id, e.toString(), e);
-      running.set(false); // Mark the instance as not running
-    }
-    finally
-    {
-      log.info("{} - Closing the KafkaConsumer", id);
-      consumer.close();
-
-      for (Integer partition : seen.keySet())
-      {
-        Map<String, Integer> byKey = seen.get(partition);
-        for (String key : byKey.keySet())
-        {
-          log.info(
-              "{} - Seen {} messages for partition={}|key={}",
-              id,
-              byKey.get(key),
-              partition,
-              key);
-        }
-      }
-
-      log.info("{} - Consumer-Thread exiting", id);
+      log.info(
+          "{} - {}: {}/{} - {}={}",
+          id,
+          record.offset(),
+          record.topic(),
+          record.partition(),
+          record.key(),
+          record.value()
+      );
+
+      handler.accept(record);
+
+      consumed++;
     }
   }
 
-  public Map<Integer, Map<String, Integer>> getSeen()
-  {
-    return seen;
-  }
 
   public synchronized void start()
   {
-    boolean stateChanged = running.compareAndSet(false, true);
-    if (!stateChanged)
-      throw new RuntimeException("Consumer instance " + id + " is already running!");
+    if (registry.getListenerContainer(id).isChildRunning())
+      throw new IllegalStateException("Consumer instance " + id + " is already running!");
 
     log.info("{} - Starting - consumed {} messages before", id, consumed);
-    future = executor.submit(this);
+    registry.getListenerContainer(id).start();
   }
 
-  public synchronized void stop() throws ExecutionException, InterruptedException
+  public synchronized void stop()
   {
-    boolean stateChanged = running.compareAndSet(true, false);
-    if (!stateChanged)
-      throw new RuntimeException("Consumer instance " + id + " is not running!");
+    if (!registry.getListenerContainer(id).isChildRunning())
+      throw new IllegalStateException("Consumer instance " + id + " is not running!");
 
     log.info("{} - Stopping", id);
-    consumer.wakeup();
-    future.get();
+    registry.getListenerContainer(id).stop();
     log.info("{} - Stopped - consumed {} messages so far", id, consumed);
   }
 
   @PreDestroy
-  public void destroy() throws ExecutionException, InterruptedException
+  public void destroy()
   {
     log.info("{} - Destroy!", id);
     try
@@ -188,6 +88,10 @@ public class EndlessConsumer implements Runnable
     {
       log.info("{} - Was already stopped", id);
     }
+    catch (Exception e)
+    {
+      log.error("{} - Unexpected exception while trying to stop the consumer", id, e);
+    }
     finally
     {
       log.info("{}: Consumed {} messages in total, exiting!", id, consumed);