Refaktorisierung für Tests - KafkaConsumer als eigenständige Bean
[demos/kafka/training] / src / main / java / de / juplo / kafka / EndlessConsumer.java
index adebff1..0bf5925 100644 (file)
@@ -1,17 +1,14 @@
 package de.juplo.kafka;
 
+import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.clients.consumer.ConsumerRecords;
-import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.clients.consumer.*;
+import org.apache.kafka.common.TopicPartition;
 import org.apache.kafka.common.errors.WakeupException;
-import org.apache.kafka.common.serialization.StringDeserializer;
 
 import javax.annotation.PreDestroy;
 import java.time.Duration;
-import java.util.Arrays;
-import java.util.Optional;
-import java.util.Properties;
+import java.util.*;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.locks.Condition;
@@ -20,56 +17,73 @@ import java.util.concurrent.locks.ReentrantLock;
 
 
 @Slf4j
+@RequiredArgsConstructor
 public class EndlessConsumer implements Runnable
 {
   private final ExecutorService executor;
-  private final String bootstrapServer;
-  private final String groupId;
   private final String id;
   private final String topic;
-  private final String autoOffsetReset;
+  private final Consumer<String, String> consumer;
 
   private final Lock lock = new ReentrantLock();
   private final Condition condition = lock.newCondition();
   private boolean running = false;
   private Exception exception;
   private long consumed = 0;
-  private KafkaConsumer<String, String> consumer = null;
 
+  private final Map<Integer, Map<String, Long>> seen = new HashMap<>();
+  private final Map<Integer, Long> offsets = new HashMap<>();
 
-  public EndlessConsumer(
-      ExecutorService executor,
-      String bootstrapServer,
-      String groupId,
-      String clientId,
-      String topic,
-      String autoOffsetReset)
-  {
-    this.executor = executor;
-    this.bootstrapServer = bootstrapServer;
-    this.groupId = groupId;
-    this.id = clientId;
-    this.topic = topic;
-    this.autoOffsetReset = autoOffsetReset;
-  }
 
   @Override
   public void run()
   {
     try
     {
-      Properties props = new Properties();
-      props.put("bootstrap.servers", bootstrapServer);
-      props.put("group.id", groupId);
-      props.put("client.id", id);
-      props.put("auto.offset.reset", autoOffsetReset);
-      props.put("key.deserializer", StringDeserializer.class.getName());
-      props.put("value.deserializer", StringDeserializer.class.getName());
-
-      this.consumer = new KafkaConsumer<>(props);
-
       log.info("{} - Subscribing to topic {}", id, topic);
-      consumer.subscribe(Arrays.asList(topic));
+      consumer.subscribe(Arrays.asList(topic), new ConsumerRebalanceListener()
+      {
+        @Override
+        public void onPartitionsRevoked(Collection<TopicPartition> partitions)
+        {
+          partitions.forEach(tp ->
+          {
+            Integer partition = tp.partition();
+            Long newOffset = consumer.position(tp);
+            Long oldOffset = offsets.remove(partition);
+            log.info(
+                "{} - removing partition: {}, consumed {} records (offset {} -> {})",
+                id,
+                partition,
+                newOffset - oldOffset,
+                oldOffset,
+                newOffset);
+            Map<String, Long> removed = seen.remove(partition);
+            for (String key : removed.keySet())
+            {
+              log.info(
+                  "{} - Seen {} messages for partition={}|key={}",
+                  id,
+                  removed.get(key),
+                  partition,
+                  key);
+            }
+          });
+        }
+
+        @Override
+        public void onPartitionsAssigned(Collection<TopicPartition> partitions)
+        {
+          partitions.forEach(tp ->
+          {
+            Integer partition = tp.partition();
+            Long offset = consumer.position(tp);
+            log.info("{} - adding partition: {}, offset={}", id, partition, offset);
+            offsets.put(partition, offset);
+            seen.put(partition, new HashMap<>());
+          });
+        }
+      });
 
       while (true)
       {
@@ -90,12 +104,24 @@ public class EndlessConsumer implements Runnable
               record.key(),
               record.value()
           );
+
+          Integer partition = record.partition();
+          String key = record.key() == null ? "NULL" : record.key();
+          Map<String, Long> byKey = seen.get(partition);
+
+          if (!byKey.containsKey(key))
+            byKey.put(key, 0l);
+
+          long seenByKey = byKey.get(key);
+          seenByKey++;
+          byKey.put(key, seenByKey);
         }
       }
     }
     catch(WakeupException e)
     {
-      log.info("{} - RIIING!", id);
+      log.info("{} - RIIING! Request to stop consumption - committing current offsets!", id);
+      consumer.commitSync();
       shutdown();
     }
     catch(Exception e)
@@ -105,8 +131,6 @@ public class EndlessConsumer implements Runnable
     }
     finally
     {
-      log.info("{} - Closing the KafkaConsumer", id);
-      consumer.close();
       log.info("{} - Consumer-Thread exiting", id);
     }
   }
@@ -121,9 +145,25 @@ public class EndlessConsumer implements Runnable
     lock.lock();
     try
     {
-      running = false;
-      exception = e;
-      condition.signal();
+      try
+      {
+        log.info("{} - Unsubscribing from topic {}", id, topic);
+        consumer.unsubscribe();
+      }
+      catch (Exception ue)
+      {
+        log.error(
+            "{} - Error while unsubscribing from topic {}: {}",
+            id,
+            topic,
+            ue.toString());
+      }
+      finally
+      {
+        running = false;
+        exception = e;
+        condition.signal();
+      }
     }
     finally
     {
@@ -131,6 +171,11 @@ public class EndlessConsumer implements Runnable
     }
   }
 
+  public Map<Integer, Map<String, Long>> getSeen()
+  {
+    return seen;
+  }
+
   public void start()
   {
     lock.lock();