ROT: Merged the improvements from 'deserialization' into 'sumup-adder'
author Kai Moritz <kai@juplo.de>
Sun, 14 Aug 2022 16:09:17 +0000 (18:09 +0200)
committer Kai Moritz <kai@juplo.de>
Sun, 14 Aug 2022 16:09:17 +0000 (18:09 +0200)
* Along the way: used the improvements from 'deserialization' to
  implement a customized `RecordGenerator` in `ApplicationTests`.
* Since the service currently works with `String` for key and message,
  no poison pill can be generated: null messages do not lead to a
  `DeserializationException`, and anything else can be converted into
  a - possibly semantically meaningless - String (see the first sketch
  below).
* The test for logic errors fails, because it uncovers a bug in the
  implementation!
* All previous versions of `EndlessConsumer` that store their offsets
  alongside the state in MongoDB perform an offset commit on a
  `DeserializationException`, when the subsequent `unsubscribe()`
  revokes their partitions (see the second sketch below).
* That is: until now, messages were lost in this situation!
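
A minimal sketch (not part of this commit; the class name
`PoisonPillSketch` and the topic name "FOO" are made up for
illustration) of why the `String` serde rules out poison pills:
`StringDeserializer` turns any byte sequence into a String and maps
null to null, whereas a `LongDeserializer` rejects anything that is
not exactly 8 bytes:

    import org.apache.kafka.common.errors.SerializationException;
    import org.apache.kafka.common.serialization.LongDeserializer;
    import org.apache.kafka.common.serialization.StringDeserializer;

    public class PoisonPillSketch
    {
      public static void main(String[] args)
      {
        byte[] garbage = "BOOM (Poison-Pill)!".getBytes();

        // A StringDeserializer converts arbitrary bytes into a (possibly
        // semantically meaningless) String and maps null to null:
        StringDeserializer stringDeserializer = new StringDeserializer();
        System.out.println(stringDeserializer.deserialize("FOO", garbage));
        System.out.println(stringDeserializer.deserialize("FOO", null));

        // A LongDeserializer rejects any value that is not exactly
        // 8 bytes long - only such a serde mismatch turns a record
        // into a poison pill:
        LongDeserializer longDeserializer = new LongDeserializer();
        try
        {
          longDeserializer.deserialize("FOO", garbage);
        }
        catch (SerializationException e)
        {
          System.out.println("Poison pill: " + e);
        }
      }
    }

With `String` on both sides every record deserializes successfully, so
the `poisonPills` flag cannot be honoured - which is why the
`RecordGenerator` in `ApplicationTests` overrides
`canGeneratePoisonPill()` to return `false`.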

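And a hedged sketch of the message loss described in the last two
points - the class `OffsetSavingRebalanceListener` and the helper
`saveOffsetInMongoDb()` are illustrative, not the actual
`PollIntervalAwareConsumerRebalanceListener` from this repository, but
they capture the problematic pattern of saving the current position
whenever partitions are revoked:

    import java.util.Collection;
    import org.apache.kafka.clients.consumer.Consumer;
    import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
    import org.apache.kafka.common.TopicPartition;

    public class OffsetSavingRebalanceListener implements ConsumerRebalanceListener
    {
      private final Consumer<?, ?> consumer;

      public OffsetSavingRebalanceListener(Consumer<?, ?> consumer)
      {
        this.consumer = consumer;
      }

      @Override
      public void onPartitionsAssigned(Collection<TopicPartition> partitions)
      {
        // Seek each partition to the offset stored in MongoDB (omitted)
      }

      @Override
      public void onPartitionsRevoked(Collection<TopicPartition> partitions)
      {
        // PROBLEM: this callback also fires when a DeserializationException
        // aborts the consumer and the subsequent unsubscribe() revokes the
        // partitions - so an offset is committed for records that were
        // never processed successfully, and the affected messages are
        // skipped on the next start: message loss!
        partitions.forEach(tp -> saveOffsetInMongoDb(tp, consumer.position(tp)));
      }

      private void saveOffsetInMongoDb(TopicPartition tp, long offset)
      {
        // Store the offset in the StateDocument for the partition (omitted)
      }
    }

The new test `commitsOffsetOfErrorForReprocessingOnDeserializationError`
in `GenericApplicationTests` pins down the expected behaviour instead:
the offset of the failed record must be committed, so that the record
is reprocessed after a restart.
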
pom.xml
src/test/java/de/juplo/kafka/ApplicationTests.java
src/test/java/de/juplo/kafka/GenericApplicationTests.java

diff --cc pom.xml
+++ b/pom.xml
    </parent>
  
    <groupId>de.juplo.kafka</groupId>
 -  <artifactId>endless-consumer</artifactId>
 +  <artifactId>sumup-adder</artifactId>
    <version>1.0-SNAPSHOT</version>
 -  <name>Endless Consumer: a Simple Consumer-Group that reads and prints the topic and counts the received messages for each key by topic</name>
 +  <name>SumUp Adder</name>
 +  <description>Calculates the sum for the sent messages</description>
  
+   <properties>
+     <java.version>11</java.version>
+   </properties>
    <dependencies>
      <dependency>
        <groupId>org.springframework.boot</groupId>
diff --cc src/test/java/de/juplo/kafka/ApplicationTests.java
+++ b/src/test/java/de/juplo/kafka/ApplicationTests.java
  package de.juplo.kafka;
  
- import lombok.extern.slf4j.Slf4j;
--import org.apache.kafka.clients.consumer.ConsumerRecord;
- import org.apache.kafka.clients.consumer.KafkaConsumer;
- import org.apache.kafka.clients.producer.KafkaProducer;
  import org.apache.kafka.clients.producer.ProducerRecord;
- import org.apache.kafka.common.TopicPartition;
- import org.apache.kafka.common.serialization.*;
 -import org.apache.kafka.common.serialization.LongSerializer;
+ import org.apache.kafka.common.serialization.StringSerializer;
  import org.apache.kafka.common.utils.Bytes;
- import org.junit.jupiter.api.*;
- import org.springframework.beans.factory.annotation.Autowired;
- import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
- import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
- import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
--import org.springframework.boot.test.context.TestConfiguration;
--import org.springframework.context.annotation.Bean;
- import org.springframework.context.annotation.Import;
- import org.springframework.kafka.test.context.EmbeddedKafka;
- import org.springframework.test.context.TestPropertySource;
- import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
 -import org.springframework.context.annotation.Primary;
 -import org.springframework.test.context.ContextConfiguration;
  
- import java.time.Duration;
- import java.util.*;
- import java.util.concurrent.ExecutionException;
- import java.util.concurrent.ExecutorService;
- import java.util.function.BiConsumer;
- import java.util.stream.Collectors;
 -import java.util.Set;
+ import java.util.function.Consumer;
 +import java.util.stream.IntStream;
  
- import static de.juplo.kafka.ApplicationTests.PARTITIONS;
- import static de.juplo.kafka.ApplicationTests.TOPIC;
- import static org.assertj.core.api.Assertions.*;
- import static org.awaitility.Awaitility.*;
  
- @SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
- @TestMethodOrder(MethodOrderer.OrderAnnotation.class)
- @TestPropertySource(
-               properties = {
-                               "sumup.adder.bootstrap-server=${spring.embedded.kafka.brokers}",
-                               "sumup.adder.topic=" + TOPIC,
-                               "sumup.adder.commit-interval=1s",
-                               "spring.mongodb.embedded.version=4.4.13" })
- @EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
- @EnableAutoConfiguration
- @AutoConfigureDataMongo
- @Slf4j
- class ApplicationTests
 -@ContextConfiguration(classes = ApplicationTests.Configuration.class)
 -public class ApplicationTests extends GenericApplicationTests<String, Long>
++public class ApplicationTests extends GenericApplicationTests<String, String>
  {
-       public static final String TOPIC = "FOO";
-       public static final int PARTITIONS = 10;
-       StringSerializer stringSerializer = new StringSerializer();
-       @Autowired
-       Serializer valueSerializer;
-       @Autowired
-       KafkaProducer<String, Bytes> kafkaProducer;
-       @Autowired
-       KafkaConsumer<String, String> kafkaConsumer;
-       @Autowired
-       KafkaConsumer<Bytes, Bytes> offsetConsumer;
-       @Autowired
-       PartitionStatisticsRepository partitionStatisticsRepository;
-       @Autowired
-       ApplicationProperties properties;
-       @Autowired
-       ExecutorService executor;
-       @Autowired
-       PartitionStatisticsRepository repository;
-       @Autowired
-       AdderRebalanceListener adderRebalanceListener;
-       @Autowired
-       AdderRecordHandler adderRecordHandler;
-       EndlessConsumer<String, String> endlessConsumer;
-       Map<TopicPartition, Long> oldOffsets;
-       Map<TopicPartition, Long> newOffsets;
-       Set<ConsumerRecord<String, String>> receivedRecords;
-       /** Tests methods */
-       @Test
-       @Disabled("Vorübergehend deaktivert, bis der Testfall angepasst ist")
-       void commitsCurrentOffsetsOnSuccess() throws ExecutionException, InterruptedException
-       {
-               send100Messages((partition, key, counter) ->
-               {
-                       Bytes value = new Bytes(valueSerializer.serialize(TOPIC, counter));
-                       return new ProducerRecord<>(TOPIC, partition, key, value);
-               });
-               await("100 records received")
-                               .atMost(Duration.ofSeconds(30))
-                               .pollInterval(Duration.ofSeconds(1))
-                               .until(() -> receivedRecords.size() >= 100);
-               await("Offsets committed")
-                               .atMost(Duration.ofSeconds(10))
-                               .pollInterval(Duration.ofSeconds(1))
-                               .untilAsserted(() ->
-                               {
-                                       checkSeenOffsetsForProgress();
-                                       compareToCommitedOffsets(newOffsets);
-                               });
-               assertThatExceptionOfType(IllegalStateException.class)
-                               .isThrownBy(() -> endlessConsumer.exitStatus())
-                               .describedAs("Consumer should still be running");
-       }
-       /** Helper methods for the verification of expectations */
-       void compareToCommitedOffsets(Map<TopicPartition, Long> offsetsToCheck)
-       {
-               doForCurrentOffsets((tp, offset) ->
-               {
-                       Long expected = offsetsToCheck.get(tp) + 1;
-                       log.debug("Checking, if the offset for {} is {}", tp, expected);
-                       assertThat(offset)
-                                       .describedAs("Committed offset corresponds to the offset of the consumer")
-                                       .isEqualTo(expected);
-               });
-       }
-       void checkSeenOffsetsForProgress()
-       {
-               // Be sure, that some messages were consumed...!
-               Set<TopicPartition> withProgress = new HashSet<>();
-               partitions().forEach(tp ->
-               {
-                       Long oldOffset = oldOffsets.get(tp) + 1;
-                       Long newOffset = newOffsets.get(tp) + 1;
-                       if (!oldOffset.equals(newOffset))
-                       {
-                               log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
-                               withProgress.add(tp);
-                       }
-               });
-               assertThat(withProgress)
-                               .describedAs("Some offsets must have changed, compared to the old offset-positions")
-                               .isNotEmpty();
-       }
-       /** Helper methods for setting up and running the tests */
-       void seekToEnd()
-       {
-               offsetConsumer.assign(partitions());
-               partitions().forEach(tp ->
-               {
-                       Long offset = offsetConsumer.position(tp);
-                       log.info("New position for {}: {}", tp, offset);
-                       Integer partition = tp.partition();
-                       StateDocument document =
-                                       partitionStatisticsRepository
-                                                       .findById(partition.toString())
-                                                       .orElse(new StateDocument(partition));
-                       document.offset = offset;
-                       partitionStatisticsRepository.save(document);
-               });
-               offsetConsumer.unsubscribe();
-       }
-       void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
-       {
-               partitions().forEach(tp ->
-               {
-                       String partition = Integer.toString(tp.partition());
-                       Optional<Long> offset = partitionStatisticsRepository.findById(partition).map(document -> document.offset);
-                       consumer.accept(tp, offset.orElse(0l));
-               });
-       }
-       List<TopicPartition> partitions()
-       {
-               return
-                               IntStream
-                                               .range(0, PARTITIONS)
-                                               .mapToObj(partition -> new TopicPartition(TOPIC, partition))
-                                               .collect(Collectors.toList());
-       }
-       public interface RecordGenerator<K, V>
-       {
-               public ProducerRecord<String, Bytes> generate(int partition, String key, long counter);
-       }
-       void send100Messages(RecordGenerator recordGenerator)
-       {
-               long i = 0;
-               for (int partition = 0; partition < 10; partition++)
-               {
-                       for (int key = 0; key < 10; key++)
-                       {
-                               ProducerRecord<String, Bytes> record =
-                                               recordGenerator.generate(partition, Integer.toString(partition*10+key%2), ++i);
-                               kafkaProducer.send(record, (metadata, e) ->
-                               {
-                                       if (metadata != null)
-                                       {
-                                               log.debug(
-                                                               "{}|{} - {}={}",
-                                                               metadata.partition(),
-                                                               metadata.offset(),
-                                                               record.key(),
-                                                               record.value());
-                                       }
-                                       else
-                                       {
-                                               log.warn(
-                                                               "Exception for {}={}: {}",
-                                                               record.key(),
-                                                               record.value(),
-                                                               e.toString());
-                                       }
-                               });
-                       }
-               }
-       }
-       @BeforeEach
-       public void init()
-       {
-               seekToEnd();
-               oldOffsets = new HashMap<>();
-               newOffsets = new HashMap<>();
-               receivedRecords = new HashSet<>();
-               doForCurrentOffsets((tp, offset) ->
-               {
-                       oldOffsets.put(tp, offset - 1);
-                       newOffsets.put(tp, offset - 1);
-               });
-               TestRecordHandler<String, String> captureOffsetAndExecuteTestHandler =
-                               new TestRecordHandler<String, String>(adderRecordHandler) {
-                                       @Override
-                                       public void onNewRecord(ConsumerRecord<String, String> record)
-                                       {
-                                               newOffsets.put(
-                                                               new TopicPartition(record.topic(), record.partition()),
-                                                               record.offset());
-                                               receivedRecords.add(record);
-                                       }
-                               };
-               endlessConsumer =
-                               new EndlessConsumer<>(
-                                               executor,
-                                               properties.getClientId(),
-                                               properties.getTopic(),
-                                               kafkaConsumer,
-                                               adderRebalanceListener,
-                                               captureOffsetAndExecuteTestHandler);
-               endlessConsumer.start();
-       }
-       @AfterEach
-       public void deinit()
-       {
-               try
-               {
-                       endlessConsumer.stop();
-               }
-               catch (Exception e)
-               {
-                       log.info("Exception while stopping the consumer: {}", e.toString());
-               }
-       }
-       @TestConfiguration
-       @Import(ApplicationConfiguration.class)
-       public static class Configuration
-       {
-               @Bean
-               Serializer<Long> serializer()
-               {
-                       return new LongSerializer();
-               }
-               @Bean
-               KafkaProducer<String, Bytes> kafkaProducer(ApplicationProperties properties)
-               {
-                       Properties props = new Properties();
-                       props.put("bootstrap.servers", properties.getBootstrapServer());
-                       props.put("linger.ms", 100);
-                       props.put("key.serializer", StringSerializer.class.getName());
-                       props.put("value.serializer", BytesSerializer.class.getName());
-                       return new KafkaProducer<>(props);
-               }
-               @Bean
-               KafkaConsumer<Bytes, Bytes> offsetConsumer(ApplicationProperties properties)
-               {
-                       Properties props = new Properties();
-                       props.put("bootstrap.servers", properties.getBootstrapServer());
-                       props.put("client.id", "OFFSET-CONSUMER");
-                       props.put("enable.auto.commit", false);
-                       props.put("auto.offset.reset", "latest");
-                       props.put("key.deserializer", BytesDeserializer.class.getName());
-                       props.put("value.deserializer", BytesDeserializer.class.getName());
-                       return new KafkaConsumer<>(props);
-               }
-       }
+   public ApplicationTests()
+   {
+     super(
+         new RecordGenerator()
+         {
++          final int[] numbers = { 1, 7, 3, 2, 33, 6, 11 };
++          final String[] dieWilden13 =
++              IntStream
++                  .range(1,14)
++                  .mapToObj(i -> "seeräuber-" + i)
++                  .toArray(i -> new String[i]);
+           final StringSerializer stringSerializer = new StringSerializer();
 -          final LongSerializer longSerializer = new LongSerializer();
++          final Bytes startMessage = new Bytes(stringSerializer.serialize(TOPIC, "START"));
++          final Bytes endMessage = new Bytes(stringSerializer.serialize(TOPIC, "END"));
++
++          int counter = 0;
+           @Override
+           public int generate(
+               boolean poisonPills,
+               boolean logicErrors,
+               Consumer<ProducerRecord<Bytes, Bytes>> messageSender)
+           {
 -            int i = 0;
++            counter = 0;
 -            for (int partition = 0; partition < 10; partition++)
++            for (int i = 0; i < 33; i++)
+             {
 -              for (int key = 0; key < 10; key++)
++              String seeräuber = dieWilden13[i%13];
++              int number = numbers[i%7];
++
++              Bytes key = new Bytes(stringSerializer.serialize(TOPIC, seeräuber));
++
++              send(key, startMessage, logicErrors, messageSender);
++              for (int message = 1; message <= number; message++)
+               {
 -                i++;
++                Bytes value = new Bytes(stringSerializer.serialize(TOPIC, Integer.toString(message)));
++                send(key, value, logicErrors, messageSender);
++              }
++              send(key, endMessage, logicErrors, messageSender);
++            }
 -                Bytes value = new Bytes(longSerializer.serialize(TOPIC, (long)i));
 -                if (i == 77)
 -                {
 -                  if (logicErrors)
 -                  {
 -                    value = new Bytes(longSerializer.serialize(TOPIC, Long.MIN_VALUE));
 -                  }
 -                  if (poisonPills)
 -                  {
 -                    value = new Bytes(stringSerializer.serialize(TOPIC, "BOOM (Poison-Pill)!"));
 -                  }
 -                }
++            return counter;
++          }
 -                ProducerRecord<Bytes, Bytes> record =
 -                    new ProducerRecord<>(
 -                        TOPIC,
 -                        partition,
 -                        new Bytes(stringSerializer.serialize(TOPIC,Integer.toString(partition*10+key%2))),
 -                        value);
++          void send(
++              Bytes key,
++              Bytes value,
++              boolean logicErrors,
++              Consumer<ProducerRecord<Bytes, Bytes>> messageSender)
++          {
++            counter++;
 -                messageSender.accept(record);
++            if (counter == 77)
++            {
++              if (logicErrors)
++              {
++                value = value.equals(startMessage) ? endMessage : startMessage;
+               }
+             }
 -            return i;
++            messageSender.accept(new ProducerRecord<>(TOPIC, key, value));
+           }
 -        });
 -  }
 -
 -  @TestConfiguration
 -  public static class Configuration
 -  {
 -    @Primary
 -    @Bean
 -    public Consumer<ConsumerRecord<String, Long>> consumer()
 -    {
 -      return (record) ->
 -      {
 -        if (record.value() == Long.MIN_VALUE)
 -          throw new RuntimeException("BOOM (Logic-Error)!");
 -      };
 -    }
++          @Override
++          public boolean canGeneratePoisonPill()
++          {
++            return false;
++          }
++        });
+   }
  }
diff --cc src/test/java/de/juplo/kafka/GenericApplicationTests.java
index 0000000,ebad5a8..711a44a
mode 000000,100644..100644
--- /dev/null
+++ b/src/test/java/de/juplo/kafka/GenericApplicationTests.java
@@@ -1,0 -1,369 +1,390 @@@
 -                              "consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
 -                              "consumer.topic=" + TOPIC,
 -                              "consumer.commit-interval=1s" })
+ package de.juplo.kafka;
+ import lombok.extern.slf4j.Slf4j;
+ import org.apache.kafka.clients.consumer.ConsumerRecord;
+ import org.apache.kafka.clients.consumer.KafkaConsumer;
+ import org.apache.kafka.clients.producer.KafkaProducer;
+ import org.apache.kafka.clients.producer.ProducerRecord;
+ import org.apache.kafka.common.TopicPartition;
+ import org.apache.kafka.common.errors.RecordDeserializationException;
+ import org.apache.kafka.common.serialization.*;
+ import org.apache.kafka.common.utils.Bytes;
+ import org.junit.jupiter.api.*;
+ import org.springframework.beans.factory.annotation.Autowired;
++import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
++import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
+ import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
+ import org.springframework.boot.test.context.TestConfiguration;
+ import org.springframework.context.annotation.Import;
+ import org.springframework.kafka.test.context.EmbeddedKafka;
+ import org.springframework.test.context.TestPropertySource;
+ import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
+ import java.time.Duration;
+ import java.util.*;
+ import java.util.concurrent.ExecutorService;
+ import java.util.function.BiConsumer;
+ import java.util.function.Consumer;
+ import java.util.stream.Collectors;
+ import java.util.stream.IntStream;
+ import static de.juplo.kafka.GenericApplicationTests.PARTITIONS;
+ import static de.juplo.kafka.GenericApplicationTests.TOPIC;
+ import static org.assertj.core.api.Assertions.*;
+ import static org.awaitility.Awaitility.*;
+ @SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
+ @TestPropertySource(
+               properties = {
 -              offsetConsumer.seekToEnd(partitions());
++                              "sumup.adder.bootstrap-server=${spring.embedded.kafka.brokers}",
++                              "sumup.adder.topic=" + TOPIC,
++                              "sumup.adder.commit-interval=1s",
++                              "spring.mongodb.embedded.version=4.4.13" })
+ @EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
++@EnableAutoConfiguration
++@AutoConfigureDataMongo
+ @Slf4j
+ abstract class GenericApplicationTests<K, V>
+ {
+       public static final String TOPIC = "FOO";
+       public static final int PARTITIONS = 10;
+       @Autowired
+       KafkaConsumer<K, V> kafkaConsumer;
+       @Autowired
+       Consumer<ConsumerRecord<K, V>> consumer;
+       @Autowired
+       ApplicationProperties properties;
+       @Autowired
+       ExecutorService executor;
++      @Autowired
++      PartitionStatisticsRepository partitionStatisticsRepository;
++      @Autowired
++      PollIntervalAwareConsumerRebalanceListener rebalanceListener;
++      @Autowired
++      RecordHandler<K, V> recordHandler;
+       KafkaProducer<Bytes, Bytes> testRecordProducer;
+       KafkaConsumer<Bytes, Bytes> offsetConsumer;
+       EndlessConsumer<K, V> endlessConsumer;
+       Map<TopicPartition, Long> oldOffsets;
+       Map<TopicPartition, Long> newOffsets;
+       Set<ConsumerRecord<K, V>> receivedRecords;
+       final RecordGenerator recordGenerator;
+       final Consumer<ProducerRecord<Bytes, Bytes>> messageSender;
+       public GenericApplicationTests(RecordGenerator recordGenerator)
+       {
+               this.recordGenerator = recordGenerator;
+               this.messageSender = (record) -> sendMessage(record);
+       }
+       /** Tests methods */
+       @Test
+       void commitsCurrentOffsetsOnSuccess()
+       {
+               int numberOfGeneratedMessages =
+                               recordGenerator.generate(false, false, messageSender);
+               await(numberOfGeneratedMessages + " records received")
+                               .atMost(Duration.ofSeconds(30))
+                               .pollInterval(Duration.ofSeconds(1))
+                               .until(() -> receivedRecords.size() >= numberOfGeneratedMessages);
+               await("Offsets committed")
+                               .atMost(Duration.ofSeconds(10))
+                               .pollInterval(Duration.ofSeconds(1))
+                               .untilAsserted(() ->
+                               {
+                                       checkSeenOffsetsForProgress();
+                                       compareToCommitedOffsets(newOffsets);
+                               });
+               assertThatExceptionOfType(IllegalStateException.class)
+                               .isThrownBy(() -> endlessConsumer.exitStatus())
+                               .describedAs("Consumer should still be running");
+               recordGenerator.assertBusinessLogic();
+       }
+       @Test
+       @SkipWhenErrorCannotBeGenerated(poisonPill = true)
+       void commitsOffsetOfErrorForReprocessingOnDeserializationError()
+       {
+               int numberOfGeneratedMessages =
+                               recordGenerator.generate(true, false, messageSender);
+               await("Consumer failed")
+                               .atMost(Duration.ofSeconds(30))
+                               .pollInterval(Duration.ofSeconds(1))
+                               .until(() -> !endlessConsumer.running());
+               checkSeenOffsetsForProgress();
+               compareToCommitedOffsets(newOffsets);
+               endlessConsumer.start();
+               await("Consumer failed")
+                               .atMost(Duration.ofSeconds(30))
+                               .pollInterval(Duration.ofSeconds(1))
+                               .until(() -> !endlessConsumer.running());
+               checkSeenOffsetsForProgress();
+               compareToCommitedOffsets(newOffsets);
+               assertThat(receivedRecords.size())
+                               .describedAs("Received not all sent events")
+                               .isLessThan(numberOfGeneratedMessages);
+               assertThatNoException()
+                               .describedAs("Consumer should not be running")
+                               .isThrownBy(() -> endlessConsumer.exitStatus());
+               assertThat(endlessConsumer.exitStatus())
+                               .describedAs("Consumer should have exited abnormally")
+                               .containsInstanceOf(RecordDeserializationException.class);
+               recordGenerator.assertBusinessLogic();
+       }
+       @Test
+       @SkipWhenErrorCannotBeGenerated(logicError = true)
+       void doesNotCommitOffsetsOnLogicError()
+       {
+               int numberOfGeneratedMessages =
+                               recordGenerator.generate(false, true, messageSender);
+               await("Consumer failed")
+                               .atMost(Duration.ofSeconds(30))
+                               .pollInterval(Duration.ofSeconds(1))
+                               .until(() -> !endlessConsumer.running());
+               checkSeenOffsetsForProgress();
+               compareToCommitedOffsets(oldOffsets);
+               endlessConsumer.start();
+               await("Consumer failed")
+                               .atMost(Duration.ofSeconds(30))
+                               .pollInterval(Duration.ofSeconds(1))
+                               .until(() -> !endlessConsumer.running());
+               checkSeenOffsetsForProgress();
+               compareToCommitedOffsets(oldOffsets);
+               assertThat(receivedRecords.size())
+                               .describedAs("Received not all sent events")
+                               .isLessThan(numberOfGeneratedMessages);
+               assertThatNoException()
+                               .describedAs("Consumer should not be running")
+                               .isThrownBy(() -> endlessConsumer.exitStatus());
+               assertThat(endlessConsumer.exitStatus())
+                               .describedAs("Consumer should have exited abnormally")
+                               .containsInstanceOf(RuntimeException.class);
+               recordGenerator.assertBusinessLogic();
+       }
+       /** Helper methods for the verification of expectations */
+       void compareToCommitedOffsets(Map<TopicPartition, Long> offsetsToCheck)
+       {
+               doForCurrentOffsets((tp, offset) ->
+               {
+                       Long expected = offsetsToCheck.get(tp) + 1;
+                       log.debug("Checking, if the offset for {} is {}", tp, expected);
+                       assertThat(offset)
+                                       .describedAs("Committed offset corresponds to the offset of the consumer")
+                                       .isEqualTo(expected);
+               });
+       }
+       void checkSeenOffsetsForProgress()
+       {
+               // Be sure, that some messages were consumed...!
+               Set<TopicPartition> withProgress = new HashSet<>();
+               partitions().forEach(tp ->
+               {
+                       Long oldOffset = oldOffsets.get(tp) + 1;
+                       Long newOffset = newOffsets.get(tp) + 1;
+                       if (!oldOffset.equals(newOffset))
+                       {
+                               log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
+                               withProgress.add(tp);
+                       }
+               });
+               assertThat(withProgress)
+                               .describedAs("Some offsets must have changed, compared to the old offset-positions")
+                               .isNotEmpty();
+       }
+       /** Helper methods for setting up and running the tests */
+       void seekToEnd()
+       {
+               offsetConsumer.assign(partitions());
 -                      // seekToEnd() works lazily: it only takes effect on poll()/position()
+               partitions().forEach(tp ->
+               {
 -              // The new positions must be commited!
 -              offsetConsumer.commitSync();
+                       Long offset = offsetConsumer.position(tp);
+                       log.info("New position for {}: {}", tp, offset);
++                      Integer partition = tp.partition();
++                      StateDocument document =
++                                      partitionStatisticsRepository
++                                                      .findById(partition.toString())
++                                                      .orElse(new StateDocument(partition));
++                      document.offset = offset;
++                      partitionStatisticsRepository.save(document);
+               });
 -              offsetConsumer.assign(partitions());
 -              partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
 -              offsetConsumer.unsubscribe();
+               offsetConsumer.unsubscribe();
+       }
+       void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
+       {
 -              Consumer<ConsumerRecord<K, V>> captureOffsetAndExecuteTestHandler =
 -                              record ->
++              partitions().forEach(tp ->
++              {
++                      String partition = Integer.toString(tp.partition());
++                      Optional<Long> offset = partitionStatisticsRepository.findById(partition).map(document -> document.offset);
++                      consumer.accept(tp, offset.orElse(0l));
++              });
+       }
+       List<TopicPartition> partitions()
+       {
+               return
+                               IntStream
+                                               .range(0, PARTITIONS)
+                                               .mapToObj(partition -> new TopicPartition(TOPIC, partition))
+                                               .collect(Collectors.toList());
+       }
+       public interface RecordGenerator
+       {
+               int generate(
+                               boolean poisonPills,
+                               boolean logicErrors,
+                               Consumer<ProducerRecord<Bytes, Bytes>> messageSender);
+               default boolean canGeneratePoisonPill()
+               {
+                       return true;
+               }
+               default boolean canGenerateLogicError()
+               {
+                       return true;
+               }
+               default void assertBusinessLogic()
+               {
+                       log.debug("No business-logic to assert");
+               }
+       }
+       void sendMessage(ProducerRecord<Bytes, Bytes> record)
+       {
+               testRecordProducer.send(record, (metadata, e) ->
+               {
+                       if (metadata != null)
+                       {
+                               log.debug(
+                                               "{}|{} - {}={}",
+                                               metadata.partition(),
+                                               metadata.offset(),
+                                               record.key(),
+                                               record.value());
+                       }
+                       else
+                       {
+                               log.warn(
+                                               "Exception for {}={}: {}",
+                                               record.key(),
+                                               record.value(),
+                                               e.toString());
+                       }
+               });
+       }
+       @BeforeEach
+       public void init()
+       {
+               Properties props;
+               props = new Properties();
+               props.put("bootstrap.servers", properties.getBootstrapServer());
+               props.put("linger.ms", 100);
+               props.put("key.serializer", BytesSerializer.class.getName());
+               props.put("value.serializer", BytesSerializer.class.getName());
+               testRecordProducer = new KafkaProducer<>(props);
+               props = new Properties();
+               props.put("bootstrap.servers", properties.getBootstrapServer());
+               props.put("client.id", "OFFSET-CONSUMER");
+               props.put("group.id", properties.getGroupId());
+               props.put("key.deserializer", BytesDeserializer.class.getName());
+               props.put("value.deserializer", BytesDeserializer.class.getName());
+               offsetConsumer = new KafkaConsumer<>(props);
+               seekToEnd();
+               oldOffsets = new HashMap<>();
+               newOffsets = new HashMap<>();
+               receivedRecords = new HashSet<>();
+               doForCurrentOffsets((tp, offset) ->
+               {
+                       oldOffsets.put(tp, offset - 1);
+                       newOffsets.put(tp, offset - 1);
+               });
 -                                      newOffsets.put(
 -                                                      new TopicPartition(record.topic(), record.partition()),
 -                                                      record.offset());
 -                                      receivedRecords.add(record);
 -                                      consumer.accept(record);
++              TestRecordHandler<K, V> captureOffsetAndExecuteTestHandler =
++                              new TestRecordHandler<K, V>(recordHandler)
+                               {
++                                      @Override
++                                      public void onNewRecord(ConsumerRecord<K, V> record)
++                                      {
++                                              newOffsets.put(
++                                                              new TopicPartition(record.topic(), record.partition()),
++                                                              record.offset());
++                                              receivedRecords.add(record);
++                                      }
+                               };
+               endlessConsumer =
+                               new EndlessConsumer<>(
+                                               executor,
+                                               properties.getClientId(),
+                                               properties.getTopic(),
+                                               kafkaConsumer,
++                                              rebalanceListener,
+                                               captureOffsetAndExecuteTestHandler);
+               endlessConsumer.start();
+       }
+       @AfterEach
+       public void deinit()
+       {
+               try
+               {
+                       endlessConsumer.stop();
+                       testRecordProducer.close();
+                       offsetConsumer.close();
+               }
+               catch (Exception e)
+               {
+                       log.info("Exception while stopping the consumer: {}", e.toString());
+               }
+       }
+       @TestConfiguration
+       @Import(ApplicationConfiguration.class)
+       public static class Configuration
+       {
+       }
+ }