--- /dev/null
 -import java.time.Clock;
+ package de.juplo.kafka;
+ 
+ import org.apache.kafka.clients.consumer.KafkaConsumer;
+ import org.apache.kafka.common.serialization.LongDeserializer;
+ import org.apache.kafka.common.serialization.StringDeserializer;
+ import org.springframework.boot.context.properties.EnableConfigurationProperties;
+ import org.springframework.context.annotation.Bean;
+ import org.springframework.context.annotation.Configuration;
+ 
 -      PartitionStatisticsRepository repository,
+ import java.util.Properties;
+ import java.util.concurrent.ExecutorService;
+ import java.util.concurrent.Executors;
+ 
+ 
+ @Configuration
+ @EnableConfigurationProperties(ApplicationProperties.class)
+ public class ApplicationConfiguration
+ {
+   @Bean
+   public KeyCountingRecordHandler keyCountingRecordHandler()
+   {
+     return new KeyCountingRecordHandler();
+   }
+ 
+   @Bean
+   public KeyCountingRebalanceListener keyCountingRebalanceListener(
+       KeyCountingRecordHandler keyCountingRecordHandler,
 -        repository,
 -        properties.getClientId(),
 -        Clock.systemDefaultZone(),
 -        properties.getCommitInterval());
+       ApplicationProperties properties)
+   {
+     return new KeyCountingRebalanceListener(
+         keyCountingRecordHandler,
++        properties.getClientId());
+   }
+ 
+   @Bean
+   public EndlessConsumer<String, Long> endlessConsumer(
+       KafkaConsumer<String, Long> kafkaConsumer,
+       ExecutorService executor,
+       KeyCountingRebalanceListener keyCountingRebalanceListener,
+       KeyCountingRecordHandler keyCountingRecordHandler,
+       ApplicationProperties properties)
+   {
+     return
+         new EndlessConsumer<>(
+             executor,
+             properties.getClientId(),
+             properties.getTopic(),
+             kafkaConsumer,
+             keyCountingRebalanceListener,
+             keyCountingRecordHandler);
+   }
+ 
+   @Bean
+   public ExecutorService executor()
+   {
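+     // A single thread suffices: EndlessConsumer.run() occupies it for the whole lifetime of the consumer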
+     return Executors.newSingleThreadExecutor();
+   }
+ 
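+   // destroyMethod = "close": Spring closes the KafkaConsumer when the application context shuts down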
+   @Bean(destroyMethod = "close")
+   public KafkaConsumer<String, Long> kafkaConsumer(ApplicationProperties properties)
+   {
+     Properties props = new Properties();
+ 
+     props.put("bootstrap.servers", properties.getBootstrapServer());
+     props.put("partition.assignment.strategy", "org.apache.kafka.clients.consumer.CooperativeStickyAssignor");
+     props.put("group.id", properties.getGroupId());
+     props.put("client.id", properties.getClientId());
+     props.put("auto.offset.reset", properties.getAutoOffsetReset());
+     props.put("auto.commit.interval.ms", (int)properties.getCommitInterval().toMillis());
+     props.put("metadata.max.age.ms", "1000");
+     props.put("key.deserializer", StringDeserializer.class.getName());
+     props.put("value.deserializer", LongDeserializer.class.getName());
+ 
+     return new KafkaConsumer<>(props);
+   }
+ }
 
  
  
  @Slf4j
- public class EndlessConsumer implements Runnable
+ @RequiredArgsConstructor
+ public class EndlessConsumer<K, V> implements Runnable
  {
    private final ExecutorService executor;
-   private final String bootstrapServer;
-   private final String groupId;
    private final String id;
    private final String topic;
-   private final String autoOffsetReset;
+   private final Consumer<K, V> consumer;
 -  private final PollIntervalAwareConsumerRebalanceListener pollIntervalAwareRebalanceListener;
++  private final ConsumerRebalanceListener consumerRebalanceListener;
+   private final RecordHandler<K, V> handler;
  
    private final Lock lock = new ReentrantLock();
    private final Condition condition = lock.newCondition();
    {
      try
      {
-       Properties props = new Properties();
-       props.put("bootstrap.servers", bootstrapServer);
-       props.put("group.id", groupId);
-       props.put("client.id", id);
-       props.put("auto.offset.reset", autoOffsetReset);
-       props.put("metadata.max.age.ms", "1000");
-       props.put("key.deserializer", StringDeserializer.class.getName());
-       props.put("value.deserializer", StringDeserializer.class.getName());
- 
-       this.consumer = new KafkaConsumer<>(props);
- 
        log.info("{} - Subscribing to topic {}", id, topic);
-       consumer.subscribe(Arrays.asList(topic), new ConsumerRebalanceListener()
-       {
-         @Override
-         public void onPartitionsRevoked(Collection<TopicPartition> partitions)
-         {
-           partitions.forEach(tp ->
-           {
-             log.info("{} - removing partition: {}", id, tp);
-             Map<String, Integer> removed = seen.remove(tp.partition());
-             for (String key : removed.keySet())
-             {
-               log.info(
-                   "{} - Seen {} messages for partition={}|key={}",
-                   id,
-                   removed.get(key),
-                   tp.partition(),
-                   key);
-             }
-           });
-         }
- 
-         @Override
-         public void onPartitionsAssigned(Collection<TopicPartition> partitions)
-         {
-           partitions.forEach(tp ->
-           {
-             log.info("{} - adding partition: {}", id, tp);
-             seen.put(tp.partition(), new HashMap<>());
-           });
-         }
-       });
 -      consumer.subscribe(Arrays.asList(topic), pollIntervalAwareRebalanceListener);
++      consumer.subscribe(Arrays.asList(topic), consumerRebalanceListener);
  
        while (true)
        {
                record.value()
            );
  
-           Integer partition = record.partition();
-           String key = record.key() == null ? "NULL" : record.key();
-           Map<String, Integer> byKey = seen.get(partition);
+           handler.accept(record);
  
-           if (!byKey.containsKey(key))
-             byKey.put(key, 0);
- 
-           int seenByKey = byKey.get(key);
-           seenByKey++;
-           byKey.put(key, seenByKey);
+           consumed++;
          }
 -
 -        pollIntervalAwareRebalanceListener.beforeNextPoll();
        }
      }
      catch(WakeupException e)
 
--- /dev/null
 -import java.time.Clock;
 -import java.time.Duration;
 -import java.time.Instant;
+ package de.juplo.kafka;
+ 
+ import lombok.RequiredArgsConstructor;
+ import lombok.extern.slf4j.Slf4j;
++import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
+ import org.apache.kafka.common.TopicPartition;
+ 
 -public class KeyCountingRebalanceListener implements PollIntervalAwareConsumerRebalanceListener
+ import java.util.Collection;
++import java.util.HashMap;
+ import java.util.Map;
+ 
+ 
+ @RequiredArgsConstructor
+ @Slf4j
 -  private final PartitionStatisticsRepository repository;
++public class KeyCountingRebalanceListener implements ConsumerRebalanceListener
+ {
+   private final KeyCountingRecordHandler handler;
 -  private final Clock clock;
 -  private final Duration commitInterval;
 -
 -  private Instant lastCommit = Instant.EPOCH;
+   private final String id;
 -      StatisticsDocument document =
 -          repository
 -              .findById(Integer.toString(partition))
 -              .orElse(new StatisticsDocument(partition));
 -      handler.addPartition(partition, document.statistics);
+ 
+   @Override
+   public void onPartitionsAssigned(Collection<TopicPartition> partitions)
+   {
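+     // Begin counting keys for every newly assigned partition from scratch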
+     partitions.forEach(tp ->
+     {
+       Integer partition = tp.partition();
+       log.info("{} - adding partition: {}", id, partition);
 -      repository.save(new StatisticsDocument(partition, removed));
++      handler.addPartition(partition, new HashMap<>());
+     });
+   }
+ 
+   @Override
+   public void onPartitionsRevoked(Collection<TopicPartition> partitions)
+   {
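+     // Log the per-key statistics of every revoked partition and drop them from the handler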
+     partitions.forEach(tp ->
+     {
+       Integer partition = tp.partition();
+       log.info("{} - removing partition: {}", id, partition);
+       Map<String, Long> removed = handler.removePartition(partition);
+       for (String key : removed.keySet())
+       {
+         log.info(
+             "{} - Seen {} messages for partition={}|key={}",
+             id,
+             removed.get(key),
+             partition,
+             key);
+       }
 -
 -
 -  @Override
 -  public void beforeNextPoll()
 -  {
 -    if (lastCommit.plus(commitInterval).isBefore(clock.instant()))
 -    {
 -      log.debug("Storing data, last commit: {}", lastCommit);
 -      handler.getSeen().forEach((partiton, statistics) -> repository.save(
 -          new StatisticsDocument(
 -              partiton,
 -              statistics)));
 -      lastCommit = clock.instant();
 -    }
 -  }
+     });
+   }
+ }
 
--- /dev/null
 -import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
+ package de.juplo.kafka;
+ 
+ import lombok.extern.slf4j.Slf4j;
+ import org.apache.kafka.clients.consumer.ConsumerRecord;
+ import org.apache.kafka.clients.consumer.KafkaConsumer;
+ import org.apache.kafka.clients.producer.KafkaProducer;
+ import org.apache.kafka.clients.producer.ProducerRecord;
+ import org.apache.kafka.common.TopicPartition;
+ import org.apache.kafka.common.errors.RecordDeserializationException;
+ import org.apache.kafka.common.serialization.*;
+ import org.apache.kafka.common.utils.Bytes;
+ import org.junit.jupiter.api.*;
+ import org.springframework.beans.factory.annotation.Autowired;
+ import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
 -                              "consumer.commit-interval=1s",
 -                              "spring.mongodb.embedded.version=4.4.13" })
+ import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
+ import org.springframework.boot.test.context.TestConfiguration;
+ import org.springframework.context.annotation.Bean;
+ import org.springframework.context.annotation.Import;
+ import org.springframework.kafka.test.context.EmbeddedKafka;
+ import org.springframework.test.context.TestPropertySource;
+ import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
+ 
+ import java.time.Duration;
+ import java.util.*;
+ import java.util.concurrent.ExecutionException;
+ import java.util.concurrent.ExecutorService;
+ import java.util.function.BiConsumer;
+ import java.util.stream.Collectors;
+ import java.util.stream.IntStream;
+ 
+ import static de.juplo.kafka.ApplicationTests.PARTITIONS;
+ import static de.juplo.kafka.ApplicationTests.TOPIC;
+ import static org.assertj.core.api.Assertions.*;
+ import static org.awaitility.Awaitility.*;
+ 
+ 
+ @SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
+ @TestMethodOrder(MethodOrderer.OrderAnnotation.class)
+ @TestPropertySource(
+               properties = {
+                               "consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
+                               "consumer.topic=" + TOPIC,
 -@AutoConfigureDataMongo
++                              "consumer.commit-interval=1s" })
+ @EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
+ @EnableAutoConfiguration
+ @Slf4j
+ class ApplicationTests
+ {
+       public static final String TOPIC = "FOO";
+       public static final int PARTITIONS = 10;
+ 
+ 
+       StringSerializer stringSerializer = new StringSerializer();
+ 
+       @Autowired
+       Serializer<Long> valueSerializer;
+       @Autowired
+       KafkaProducer<String, Bytes> kafkaProducer;
+       @Autowired
+       KafkaConsumer<String, Long> kafkaConsumer;
+       @Autowired
+       KafkaConsumer<Bytes, Bytes> offsetConsumer;
+       @Autowired
+       ApplicationProperties properties;
+       @Autowired
+       ExecutorService executor;
+       @Autowired
+       KeyCountingRebalanceListener keyCountingRebalanceListener;
+       @Autowired
+       KeyCountingRecordHandler keyCountingRecordHandler;
+ 
+       EndlessConsumer<String, Long> endlessConsumer;
+       Map<TopicPartition, Long> oldOffsets;
+       Map<TopicPartition, Long> newOffsets;
+       Set<ConsumerRecord<String, Long>> receivedRecords;
+ 
+ 
+       /** Test methods */
+ 
+       @Test
+       void commitsCurrentOffsetsOnSuccess() throws ExecutionException, InterruptedException
+       {
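+               // All values are valid Longs, so all 100 records should be consumed without an error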
+               send100Messages((partition, key, counter) ->
+               {
+                       Bytes value = new Bytes(valueSerializer.serialize(TOPIC, counter));
+                       return new ProducerRecord<>(TOPIC, partition, key, value);
+               });
+ 
+               await("100 records received")
+                               .atMost(Duration.ofSeconds(30))
+                               .pollInterval(Duration.ofSeconds(1))
+                               .until(() -> receivedRecords.size() >= 100);
+ 
+               await("Offsets committed")
+                               .atMost(Duration.ofSeconds(10))
+                               .pollInterval(Duration.ofSeconds(1))
+                               .untilAsserted(() ->
+                               {
+                                       checkSeenOffsetsForProgress();
+                                       compareToCommittedOffsets(newOffsets);
+                               });
+ 
+               assertThatExceptionOfType(IllegalStateException.class)
+                               .describedAs("Consumer should still be running")
+                               .isThrownBy(() -> endlessConsumer.exitStatus());
+       }
+ 
+       @Test
+       void commitsOffsetOfErrorForReprocessingOnDeserializationError()
+       {
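+               // Poison the 77th record with a String value that the LongDeserializer cannot decode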
+               send100Messages((partition, key, counter) ->
+               {
+                       Bytes value = counter == 77
+                                       ? new Bytes(stringSerializer.serialize(TOPIC, "BOOM!"))
+                                       : new Bytes(valueSerializer.serialize(TOPIC, counter));
+                       return new ProducerRecord<>(TOPIC, partition, key, value);
+               });
+ 
+               await("Consumer failed")
+                               .atMost(Duration.ofSeconds(30))
+                               .pollInterval(Duration.ofSeconds(1))
+                               .until(() -> !endlessConsumer.running());
+ 
+               checkSeenOffsetsForProgress();
+               compareToCommittedOffsets(newOffsets);
+ 
+               endlessConsumer.start();
+               await("Consumer failed again")
+                               .atMost(Duration.ofSeconds(30))
+                               .pollInterval(Duration.ofSeconds(1))
+                               .until(() -> !endlessConsumer.running());
+ 
+               checkSeenOffsetsForProgress();
+               compareToCommittedOffsets(newOffsets);
+               assertThat(receivedRecords.size())
+                               .describedAs("Not all sent events should have been received")
+                               .isLessThan(100);
+ 
+               assertThatNoException()
+                               .describedAs("Consumer should not be running")
+                               .isThrownBy(() -> endlessConsumer.exitStatus());
+               assertThat(endlessConsumer.exitStatus())
+                               .describedAs("Consumer should have exited abnormally")
+                               .containsInstanceOf(RecordDeserializationException.class);
+       }
+ 
+ 
+       /** Helper methods for verifying expectations */
+ 
+       void compareToCommittedOffsets(Map<TopicPartition, Long> offsetsToCheck)
+       {
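+               // A committed offset denotes the next record to be read, hence the +1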
+               doForCurrentOffsets((tp, offset) ->
+               {
+                       Long expected = offsetsToCheck.get(tp) + 1;
+                       log.debug("Checking, if the offset for {} is {}", tp, expected);
+                       assertThat(offset)
+                                       .describedAs("Committed offset corresponds to the offset of the consumer")
+                                       .isEqualTo(expected);
+               });
+       }
+ 
+       void checkSeenOffsetsForProgress()
+       {
+               // Make sure that some messages were actually consumed!
+               Set<TopicPartition> withProgress = new HashSet<>();
+               partitions().forEach(tp ->
+               {
+                       Long oldOffset = oldOffsets.get(tp) + 1;
+                       Long newOffset = newOffsets.get(tp) + 1;
+                       if (!oldOffset.equals(newOffset))
+                       {
+                               log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
+                               withProgress.add(tp);
+                       }
+               });
+               assertThat(withProgress)
+                               .describedAs("Some offsets must have changed, compared to the old offset-positions")
+                               .isNotEmpty();
+       }
+ 
+ 
+       /** Helper methods for setting up and running the tests */
+ 
+       void seekToEnd()
+       {
+               offsetConsumer.assign(partitions());
+               offsetConsumer.seekToEnd(partitions());
+               partitions().forEach(tp ->
+               {
+                       // seekToEnd() works lazily: it only takes effect on poll()/position()
+                       Long offset = offsetConsumer.position(tp);
+                       log.info("New position for {}: {}", tp, offset);
+               });
+               // The new positions must be committed!
+               offsetConsumer.commitSync();
+               offsetConsumer.unsubscribe();
+       }
+ 
+       void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
+       {
+               offsetConsumer.assign(partitions());
+               partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
+               offsetConsumer.unsubscribe();
+       }
+ 
+       List<TopicPartition> partitions()
+       {
+               return
+                               IntStream
+                                               .range(0, PARTITIONS)
+                                               .mapToObj(partition -> new TopicPartition(TOPIC, partition))
+                                               .collect(Collectors.toList());
+       }
+ 
+ 
+       public interface RecordGenerator
+       {
+               ProducerRecord<String, Bytes> generate(int partition, String key, long counter);
+       }
+ 
+       void send100Messages(RecordGenerator recordGenerator)
+       {
+               long i = 0;
+ 
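+               // Ten records per partition; because % binds more tightly than +, the keys
+               // alternate between partition*10 and partition*10+1 (two distinct keys per partition)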
+               for (int partition = 0; partition < PARTITIONS; partition++)
+               {
+                       for (int key = 0; key < 10; key++)
+                       {
+                               ProducerRecord<String, Bytes> record =
+                                               recordGenerator.generate(partition, Integer.toString(partition*10+key%2), ++i);
+ 
+                               kafkaProducer.send(record, (metadata, e) ->
+                               {
+                                       if (metadata != null)
+                                       {
+                                               log.debug(
+                                                               "{}|{} - {}={}",
+                                                               metadata.partition(),
+                                                               metadata.offset(),
+                                                               record.key(),
+                                                               record.value());
+                                       }
+                                       else
+                                       {
+                                               log.warn(
+                                                               "Exception for {}={}: {}",
+                                                               record.key(),
+                                                               record.value(),
+                                                               e.toString());
+                                       }
+                               });
+                       }
+               }
+       }
+ 
+ 
+       @BeforeEach
+       public void init()
+       {
+               seekToEnd();
+ 
+               oldOffsets = new HashMap<>();
+               newOffsets = new HashMap<>();
+               receivedRecords = new HashSet<>();
+ 
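+               // position() points at the next record to be read, so offset - 1 is the last record written so far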
+               doForCurrentOffsets((tp, offset) ->
+               {
+                       oldOffsets.put(tp, offset - 1);
+                       newOffsets.put(tp, offset - 1);
+               });
+ 
+               TestRecordHandler<String, Long> captureOffsetAndExecuteTestHandler =
+                               new TestRecordHandler<String, Long>(keyCountingRecordHandler) {
+                                       @Override
+                                       public void onNewRecord(ConsumerRecord<String, Long> record)
+                                       {
+                                               newOffsets.put(
+                                                               new TopicPartition(record.topic(), record.partition()),
+                                                               record.offset());
+                                               receivedRecords.add(record);
+                                       }
+                               };
+ 
+               endlessConsumer =
+                               new EndlessConsumer<>(
+                                               executor,
+                                               properties.getClientId(),
+                                               properties.getTopic(),
+                                               kafkaConsumer,
+                                               keyCountingRebalanceListener,
+                                               captureOffsetAndExecuteTestHandler);
+ 
+               endlessConsumer.start();
+       }
+ 
+       @AfterEach
+       public void deinit()
+       {
+               try
+               {
+                       endlessConsumer.stop();
+               }
+               catch (Exception e)
+               {
+                       log.info("Exception while stopping the consumer: {}", e.toString());
+               }
+       }
+ 
+ 
+       @TestConfiguration
+       @Import(ApplicationConfiguration.class)
+       public static class Configuration
+       {
+               @Bean
+               Serializer<Long> serializer()
+               {
+                       return new LongSerializer();
+               }
+ 
+               @Bean
+               KafkaProducer<String, Bytes> kafkaProducer(ApplicationProperties properties)
+               {
+                       Properties props = new Properties();
+                       props.put("bootstrap.servers", properties.getBootstrapServer());
+                       props.put("linger.ms", 100);
+                       props.put("key.serializer", StringSerializer.class.getName());
+                       props.put("value.serializer", BytesSerializer.class.getName());
+ 
+                       return new KafkaProducer<>(props);
+               }
+ 
+               @Bean
+               KafkaConsumer<Bytes, Bytes> offsetConsumer(ApplicationProperties properties)
+               {
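+                       // Shares the group-id with the application, so that position() reveals the offsets committed by the consumer under test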
+                       Properties props = new Properties();
+                       props.put("bootstrap.servers", properties.getBootstrapServer());
+                       props.put("client.id", "OFFSET-CONSUMER");
+                       props.put("group.id", properties.getGroupId());
+                       props.put("key.deserializer", BytesDeserializer.class.getName());
+                       props.put("value.deserializer", BytesDeserializer.class.getName());
+ 
+                       return new KafkaConsumer<>(props);
+               }
+       }
+ }