Verbesserungen und Fachlogik-Test aus 'sumup-adder' gemerged
authorKai Moritz <kai@juplo.de>
Tue, 16 Aug 2022 16:58:10 +0000 (18:58 +0200)
committerKai Moritz <kai@juplo.de>
Wed, 17 Aug 2022 20:42:42 +0000 (22:42 +0200)
src/main/java/de/juplo/kafka/ApplicationConfiguration.java
src/main/java/de/juplo/kafka/ApplicationRebalanceListener.java
src/main/java/de/juplo/kafka/EndlessConsumer.java
src/main/java/de/juplo/kafka/PollIntervalAwareConsumerRebalanceListener.java
src/main/java/de/juplo/kafka/StateDocument.java
src/test/java/de/juplo/kafka/ApplicationIT.java
src/test/java/de/juplo/kafka/GenericApplicationTests.java

index f83661e..b58295f 100644 (file)
@@ -1,6 +1,5 @@
 package de.juplo.kafka;
 
-import org.apache.kafka.clients.consumer.Consumer;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
 import org.apache.kafka.common.serialization.StringDeserializer;
 import org.springframework.boot.context.properties.EnableConfigurationProperties;
@@ -34,7 +33,6 @@ public class ApplicationConfiguration
       ApplicationRecordHandler recordHandler,
       AdderResults adderResults,
       StateRepository stateRepository,
-      Consumer<String, String> consumer,
       ApplicationProperties properties)
   {
     return new ApplicationRebalanceListener(
@@ -42,10 +40,8 @@ public class ApplicationConfiguration
         adderResults,
         stateRepository,
         properties.getClientId(),
-        properties.getTopic(),
         Clock.systemDefaultZone(),
-        properties.getCommitInterval(),
-        consumer);
+        properties.getCommitInterval());
   }
 
   @Bean
@@ -81,8 +77,8 @@ public class ApplicationConfiguration
     props.put("partition.assignment.strategy", "org.apache.kafka.clients.consumer.CooperativeStickyAssignor");
     props.put("group.id", properties.getGroupId());
     props.put("client.id", properties.getClientId());
-    props.put("enable.auto.commit", false);
     props.put("auto.offset.reset", properties.getAutoOffsetReset());
+    props.put("auto.commit.interval.ms", (int)properties.getCommitInterval().toMillis());
     props.put("metadata.max.age.ms", "1000");
     props.put("key.deserializer", StringDeserializer.class.getName());
     props.put("value.deserializer", StringDeserializer.class.getName());
index cd9da64..32e14e8 100644 (file)
@@ -2,7 +2,6 @@ package de.juplo.kafka;
 
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
-import org.apache.kafka.clients.consumer.Consumer;
 import org.apache.kafka.common.TopicPartition;
 
 import java.time.Clock;
@@ -19,15 +18,12 @@ public class ApplicationRebalanceListener implements PollIntervalAwareConsumerRe
   private final AdderResults adderResults;
   private final StateRepository stateRepository;
   private final String id;
-  private final String topic;
   private final Clock clock;
   private final Duration commitInterval;
-  private final Consumer<String, String> consumer;
 
   private final Set<Integer> partitions = new HashSet<>();
 
   private Instant lastCommit = Instant.EPOCH;
-  private boolean commitsEnabled = true;
 
   @Override
   public void onPartitionsAssigned(Collection<TopicPartition> partitions)
@@ -35,18 +31,12 @@ public class ApplicationRebalanceListener implements PollIntervalAwareConsumerRe
     partitions.forEach(tp ->
     {
       Integer partition = tp.partition();
+      log.info("{} - adding partition: {}", id, partition);
       this.partitions.add(partition);
       StateDocument document =
           stateRepository
               .findById(Integer.toString(partition))
               .orElse(new StateDocument(partition));
-      log.info("{} - adding partition: {}, offset={}", id, partition, document.offset);
-      if (document.offset >= 0)
-      {
-        // Only seek, if a stored offset was found
-        // Otherwise: Use initial offset, generated by Kafka
-        consumer.seek(tp, document.offset);
-      }
       recordHandler.addPartition(partition, document.state);
       adderResults.addPartition(partition, document.results);
     });
@@ -58,23 +48,20 @@ public class ApplicationRebalanceListener implements PollIntervalAwareConsumerRe
     partitions.forEach(tp ->
     {
       Integer partition = tp.partition();
+      log.info("{} - removing partition: {}", id, partition);
       this.partitions.remove(partition);
-      Long offset = consumer.position(tp);
-      log.info(
-          "{} - removing partition: {}, offset of next message {})",
-          id,
-          partition,
-          offset);
-      if (commitsEnabled)
-      {
-        Map<String, AdderResult> state = recordHandler.removePartition(partition);
-        Map<String, List<AdderResult>> results = adderResults.removePartition(partition);
-        stateRepository.save(new StateDocument(partition, state, results, offset));
-      }
-      else
+      Map<String, AdderResult> state = recordHandler.removePartition(partition);
+      for (String key : state.keySet())
       {
-        log.info("Offset commits are disabled! Last commit: {}", lastCommit);
+        log.info(
+            "{} - Seen {} messages for partition={}|key={}",
+            id,
+            state.get(key),
+            partition,
+            key);
       }
+      Map<String, List<AdderResult>> results = adderResults.removePartition(partition);
+      stateRepository.save(new StateDocument(partition, state, results));
     });
   }
 
@@ -82,34 +69,15 @@ public class ApplicationRebalanceListener implements PollIntervalAwareConsumerRe
   @Override
   public void beforeNextPoll()
   {
-    if (!commitsEnabled)
-    {
-      log.info("Offset commits are disabled! Last commit: {}", lastCommit);
-      return;
-    }
-
     if (lastCommit.plus(commitInterval).isBefore(clock.instant()))
     {
-      log.debug("Storing data and offsets, last commit: {}", lastCommit);
+      log.debug("Storing data, last commit: {}", lastCommit);
       partitions.forEach(partition -> stateRepository.save(
           new StateDocument(
               partition,
               recordHandler.getState(partition).getState(),
-              adderResults.getState(partition),
-              consumer.position(new TopicPartition(topic, partition)))));
+              adderResults.getState(partition))));
       lastCommit = clock.instant();
     }
   }
-
-  @Override
-  public void enableCommits()
-  {
-    commitsEnabled = true;
-  }
-
-  @Override
-  public void disableCommits()
-  {
-    commitsEnabled = false;
-  }
 }
index 58374f4..0238521 100644 (file)
@@ -42,7 +42,6 @@ public class EndlessConsumer<K, V> implements Runnable
     try
     {
       log.info("{} - Subscribing to topic {}", id, topic);
-      rebalanceListener.enableCommits();
       consumer.subscribe(Arrays.asList(topic), rebalanceListener);
 
       while (true)
@@ -75,6 +74,7 @@ public class EndlessConsumer<K, V> implements Runnable
     catch(WakeupException e)
     {
       log.info("{} - RIIING! Request to stop consumption - commiting current offsets!", id);
+      consumer.commitSync();
       shutdown();
     }
     catch(RecordDeserializationException e)
@@ -88,12 +88,12 @@ public class EndlessConsumer<K, V> implements Runnable
           offset,
           e.getCause().toString());
 
+      consumer.commitSync();
       shutdown(e);
     }
     catch(Exception e)
     {
-      log.error("{} - Unexpected error: {}, disabling commits", id, e.toString(), e);
-      rebalanceListener.disableCommits();
+      log.error("{} - Unexpected error: {}", id, e.toString(), e);
       shutdown(e);
     }
     finally
index c59418c..8abec12 100644 (file)
@@ -6,7 +6,4 @@ import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
 public interface PollIntervalAwareConsumerRebalanceListener extends ConsumerRebalanceListener
 {
   default void beforeNextPoll() {}
-
-  default void enableCommits() {}
-  default void disableCommits() {}
 }
index c10a50c..ae8eb51 100644 (file)
@@ -15,7 +15,6 @@ public class StateDocument
 {
   @Id
   public String id;
-  public long offset = -1l;
   public Map<String, AdderResult> state;
   public Map<String, List<AdderResult>> results;
 
@@ -33,12 +32,10 @@ public class StateDocument
   public StateDocument(
       Integer partition,
       Map<String, AdderResult> state,
-      Map<String, List<AdderResult>> results,
-      long offset)
+      Map<String, List<AdderResult>> results)
   {
     this.id = Integer.toString(partition);
     this.state = state;
     this.results = results;
-    this.offset = offset;
   }
 }
index cded0ee..d1d8e50 100644 (file)
@@ -32,7 +32,7 @@ public class ApplicationIT
 
 
   @Test
-  public void testApplicationStartup()
+  public void   testApplicationStartup()
   {
     restTemplate.getForObject(
         "http://localhost:" + port + "/actuator/health",
index 9a6f812..449c389 100644 (file)
@@ -1,5 +1,6 @@
 package de.juplo.kafka;
 
+import com.mongodb.client.MongoClient;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
@@ -12,6 +13,7 @@ import org.apache.kafka.common.utils.Bytes;
 import org.junit.jupiter.api.*;
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
+import org.springframework.boot.autoconfigure.mongo.MongoProperties;
 import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
 import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
 import org.springframework.boot.test.context.TestConfiguration;
@@ -60,7 +62,9 @@ abstract class GenericApplicationTests<K, V>
        @Autowired
        ExecutorService executor;
        @Autowired
-       StateRepository stateRepository;
+       MongoClient mongoClient;
+       @Autowired
+       MongoProperties mongoProperties;
        @Autowired
        PollIntervalAwareConsumerRebalanceListener rebalanceListener;
        @Autowired
@@ -227,29 +231,23 @@ abstract class GenericApplicationTests<K, V>
        void seekToEnd()
        {
                offsetConsumer.assign(partitions());
+               offsetConsumer.seekToEnd(partitions());
                partitions().forEach(tp ->
                {
+                       // seekToEnd() works lazily: it only takes effect on poll()/position()
                        Long offset = offsetConsumer.position(tp);
                        log.info("New position for {}: {}", tp, offset);
-                       Integer partition = tp.partition();
-                       StateDocument document =
-                                       stateRepository
-                                                       .findById(partition.toString())
-                                                       .orElse(new StateDocument(partition));
-                       document.offset = offset;
-                       stateRepository.save(document);
                });
+               // The new positions must be committed!
+               offsetConsumer.commitSync();
                offsetConsumer.unsubscribe();
        }
 
        void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
        {
-               partitions().forEach(tp ->
-               {
-                       String partition = Integer.toString(tp.partition());
-                       Optional<Long> offset = stateRepository.findById(partition).map(document -> document.offset);
-                       consumer.accept(tp, offset.orElse(0l));
-               });
+               offsetConsumer.assign(partitions());
+               partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
+               offsetConsumer.unsubscribe();
        }
 
        List<TopicPartition> partitions()
@@ -354,6 +352,8 @@ abstract class GenericApplicationTests<K, V>
                                        }
                                };
 
+               mongoClient.getDatabase(mongoProperties.getDatabase()).drop();
+
                endlessConsumer =
                                new EndlessConsumer<>(
                                                executor,