Wordcount-Implementierung mit Kafka-Boardmitteln und MongoDB als Storage
[demos/kafka/training] / src / main / java / de / juplo / kafka / EndlessConsumer.java
index b152310..01f9057 100644 (file)
 package de.juplo.kafka;
 
+import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
-import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.clients.consumer.ConsumerRecords;
-import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.clients.consumer.*;
 import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.errors.RecordDeserializationException;
 import org.apache.kafka.common.errors.WakeupException;
-import org.apache.kafka.common.serialization.StringDeserializer;
 
 import javax.annotation.PreDestroy;
+import java.time.Clock;
 import java.time.Duration;
+import java.time.Instant;
 import java.util.*;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
-import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.regex.Pattern;
 
 
 @Slf4j
-public class EndlessConsumer implements Runnable
+@RequiredArgsConstructor
+public class EndlessConsumer implements ConsumerRebalanceListener, Runnable
 {
+  final static Pattern PATTERN = Pattern.compile("\\W+");
+
+
   private final ExecutorService executor;
   private final PartitionStatisticsRepository repository;
-  private final String bootstrapServer;
-  private final String groupId;
   private final String id;
   private final String topic;
-  private final String autoOffsetReset;
+  private final Clock clock;
+  private final Duration commitInterval;
+  private final Consumer<String, String> consumer;
 
-  private AtomicBoolean running = new AtomicBoolean();
+  private final Lock lock = new ReentrantLock();
+  private final Condition condition = lock.newCondition();
+  private boolean running = false;
+  private Exception exception;
   private long consumed = 0;
-  private KafkaConsumer<String, String> consumer = null;
-  private Future<?> future = null;
 
-  private final Map<TopicPartition, PartitionStatistics> seen = new HashMap<>();
+  private final Map<Integer, Map<String, Map<String, Long>>> seen = new HashMap<>();
 
 
-  public EndlessConsumer(
-      ExecutorService executor,
-      PartitionStatisticsRepository repository,
-      String bootstrapServer,
-      String groupId,
-      String clientId,
-      String topic,
-      String autoOffsetReset)
+  @Override
+  public void onPartitionsRevoked(Collection<TopicPartition> partitions)
   {
-    this.executor = executor;
-    this.repository = repository;
-    this.bootstrapServer = bootstrapServer;
-    this.groupId = groupId;
-    this.id = clientId;
-    this.topic = topic;
-    this.autoOffsetReset = autoOffsetReset;
+    partitions.forEach(tp ->
+    {
+      Integer partition = tp.partition();
+      Long newOffset = consumer.position(tp);
+      log.info(
+          "{} - removing partition: {}, offset of next message {}",
+          id,
+          partition,
+          newOffset);
+      Map<String, Map<String, Long>> removed = seen.remove(partition);
+      repository.save(new StatisticsDocument(partition, removed, newOffset));
+    });
   }
 
   @Override
-  public void run()
+  public void onPartitionsAssigned(Collection<TopicPartition> partitions)
   {
-    try
+    partitions.forEach(tp ->
     {
-      Properties props = new Properties();
-      props.put("bootstrap.servers", bootstrapServer);
-      props.put("group.id", groupId);
-      props.put("client.id", id);
-      props.put("enable.auto.commit", false);
-      props.put("auto.offset.reset", autoOffsetReset);
-      props.put("metadata.max.age.ms", "1000");
-      props.put("key.deserializer", StringDeserializer.class.getName());
-      props.put("value.deserializer", StringDeserializer.class.getName());
+      Integer partition = tp.partition();
+      Long offset = consumer.position(tp);
+      log.info("{} - adding partition: {}, offset={}", id, partition, offset);
+      StatisticsDocument document =
+          repository
+              .findById(Integer.toString(partition))
+              .orElse(new StatisticsDocument(partition));
+      if (document.offset >= 0)
+      {
+        // Only seek, if a stored offset was found
+        // Otherwise: Use initial offset, generated by Kafka
+        consumer.seek(tp, document.offset);
+      }
+      seen.put(partition, document.statistics);
+    });
+  }
 
-      this.consumer = new KafkaConsumer<>(props);
 
+  @Override
+  public void run()
+  {
+    try
+    {
       log.info("{} - Subscribing to topic {}", id, topic);
-      consumer.subscribe(Arrays.asList(topic), new ConsumerRebalanceListener()
-      {
-        @Override
-        public void onPartitionsRevoked(Collection<TopicPartition> partitions)
-        {
-          partitions.forEach(tp ->
-          {
-            log.info("{} - removing partition: {}", id, tp);
-            PartitionStatistics removed = seen.remove(tp);
-            for (KeyCounter counter : removed.getStatistics())
-            {
-              log.info(
-                  "{} - Seen {} messages for partition={}|key={}",
-                  id,
-                  counter.getResult(),
-                  removed.getPartition(),
-                  counter.getKey());
-            }
-            repository.save(new StatisticsDocument(removed, consumer.position(tp)));
-          });
-        }
+      consumer.subscribe(Arrays.asList(topic), this);
 
-        @Override
-        public void onPartitionsAssigned(Collection<TopicPartition> partitions)
-        {
-          partitions.forEach(tp ->
-          {
-            log.info("{} - adding partition: {}", id, tp);
-            StatisticsDocument document =
-                repository
-                    .findById(tp.toString())
-                    .orElse(new StatisticsDocument(tp));
-            consumer.seek(tp, document.offset);
-            seen.put(tp, new PartitionStatistics(document));
-          });
-        }
-      });
+      Instant lastCommit = clock.instant();
 
       while (true)
       {
@@ -120,7 +103,6 @@ public class EndlessConsumer implements Runnable
         log.info("{} - Received {} messages", id, records.count());
         for (ConsumerRecord<String, String> record : records)
         {
-          consumed++;
           log.info(
               "{} - {}: {}/{} - {}={}",
               id,
@@ -131,73 +113,187 @@ public class EndlessConsumer implements Runnable
               record.value()
           );
 
-          TopicPartition partition = new TopicPartition(record.topic(), record.partition());
-          String key = record.key() == null ? "NULL" : record.key();
-          seen.get(partition).increment(key);
+          consumed++;
+
+          Integer partition = record.partition();
+          String user = record.key();
+          Map<String, Map<String, Long>> users = seen.get(partition);
+
+          Map<String, Long> words = users.get(user);
+          if (words == null)
+          {
+            words = new HashMap<>();
+            users.put(user, words);
+          }
+
+          for (String word : PATTERN.split(record.value()))
+          {
+            Long num = words.get(word);
+            if (num == null)
+            {
+              num = 1L;
+            }
+            else
+            {
+              num++;
+            }
+            words.put(word, num);
+          }
         }
 
-        seen.forEach((tp, statistics) -> repository.save(new StatisticsDocument(statistics, consumer.position(tp))));
+        if (lastCommit.plus(commitInterval).isBefore(clock.instant()))
+        {
+          log.debug("Storing data and offsets, last commit: {}", lastCommit);
+          seen.forEach((partition, statistics) -> repository.save(
+              new StatisticsDocument(
+                  partition,
+                  statistics,
+                  consumer.position(new TopicPartition(topic, partition)))));
+          lastCommit = clock.instant();
+        }
       }
     }
     catch(WakeupException e)
     {
-      log.info("{} - RIIING!", id);
+      log.info("{} - RIIING! Request to stop consumption - committing current offsets!", id);
+      shutdown();
+    }
+    catch(RecordDeserializationException e)
+    {
+      TopicPartition tp = e.topicPartition();
+      long offset = e.offset();
+      log.error(
+          "{} - Could not deserialize message on topic {} with offset={}: {}",
+          id,
+          tp,
+          offset,
+          e.getCause().toString());
+
+      shutdown(e);
     }
     catch(Exception e)
     {
       log.error("{} - Unexpected error: {}", id, e.toString(), e);
-      running.set(false); // Mark the instance as not running
+      shutdown(e);
     }
     finally
     {
-      log.info("{} - Closing the KafkaConsumer", id);
-      consumer.close();
       log.info("{} - Consumer-Thread exiting", id);
     }
   }
 
-  public Map<TopicPartition, PartitionStatistics> getSeen()
+  private void shutdown()
+  {
+    shutdown(null);
+  }
+
+  private void shutdown(Exception e)
+  {
+    lock.lock();
+    try
+    {
+      try
+      {
+        log.info("{} - Unsubscribing from topic {}", id, topic);
+        consumer.unsubscribe();
+      }
+      catch (Exception ue)
+      {
+        log.error(
+            "{} - Error while unsubscribing from topic {}: {}",
+            id,
+            topic,
+            ue.toString());
+      }
+      finally
+      {
+        running = false;
+        exception = e;
+        condition.signal();
+      }
+    }
+    finally
+    {
+      lock.unlock();
+    }
+  }
+
+  public Map<Integer, Map<String, Map<String, Long>>> getSeen()
   {
     return seen;
   }
 
-  public synchronized void start()
+  public void start()
   {
-    boolean stateChanged = running.compareAndSet(false, true);
-    if (!stateChanged)
-      throw new RuntimeException("Consumer instance " + id + " is already running!");
+    lock.lock();
+    try
+    {
+      if (running)
+        throw new IllegalStateException("Consumer instance " + id + " is already running!");
 
-    log.info("{} - Starting - consumed {} messages before", id, consumed);
-    future = executor.submit(this);
+      log.info("{} - Starting - consumed {} messages before", id, consumed);
+      running = true;
+      exception = null;
+      executor.submit(this);
+    }
+    finally
+    {
+      lock.unlock();
+    }
   }
 
-  public synchronized void stop() throws ExecutionException, InterruptedException
+  public synchronized void stop() throws InterruptedException
   {
-    boolean stateChanged = running.compareAndSet(true, false);
-    if (!stateChanged)
-      throw new RuntimeException("Consumer instance " + id + " is not running!");
-
-    log.info("{} - Stopping", id);
-    consumer.wakeup();
-    future.get();
-    log.info("{} - Stopped - consumed {} messages so far", id, consumed);
+    lock.lock();
+    try
+    {
+      if (!running)
+        throw new IllegalStateException("Consumer instance " + id + " is not running!");
+
+      log.info("{} - Stopping", id);
+      consumer.wakeup();
+      condition.await();
+      log.info("{} - Stopped - consumed {} messages so far", id, consumed);
+    }
+    finally
+    {
+      lock.unlock();
+    }
   }
 
   @PreDestroy
   public void destroy() throws ExecutionException, InterruptedException
   {
     log.info("{} - Destroy!", id);
+    log.info("{}: Consumed {} messages in total, exiting!", id, consumed);
+  }
+
+  public boolean running()
+  {
+    lock.lock();
     try
     {
-      stop();
+      return running;
+    }
+    finally
+    {
+      lock.unlock();
     }
-    catch (IllegalStateException e)
+  }
+
+  public Optional<Exception> exitStatus()
+  {
+    lock.lock();
+    try
     {
-      log.info("{} - Was already stopped", id);
+      if (running)
+        throw new IllegalStateException("No exit-status available: Consumer instance " + id + " is running!");
+
+      return Optional.ofNullable(exception);
     }
     finally
     {
-      log.info("{}: Consumed {} messages in total, exiting!", id, consumed);
+      lock.unlock();
     }
   }
 }