Merge branch 'deserialization' into sumup-adder--ohne--stored-offsets
diff --git a/src/test/java/de/juplo/kafka/GenericApplicationTests.java b/src/test/java/de/juplo/kafka/GenericApplicationTests.java
index 711a44a..e16aea7 100644
--- a/src/test/java/de/juplo/kafka/GenericApplicationTests.java
+++ b/src/test/java/de/juplo/kafka/GenericApplicationTests.java
@@ -1,5 +1,6 @@
 package de.juplo.kafka;
 
+import com.mongodb.client.MongoClient;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
@@ -12,9 +13,11 @@ import org.apache.kafka.common.utils.Bytes;
 import org.junit.jupiter.api.*;
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
+import org.springframework.boot.autoconfigure.mongo.MongoProperties;
 import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
 import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
 import org.springframework.boot.test.context.TestConfiguration;
+import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Import;
 import org.springframework.kafka.test.context.EmbeddedKafka;
 import org.springframework.test.context.TestPropertySource;
@@ -22,7 +25,6 @@ import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
 
 import java.time.Duration;
 import java.util.*;
-import java.util.concurrent.ExecutorService;
 import java.util.function.BiConsumer;
 import java.util.function.Consumer;
 import java.util.stream.Collectors;
@@ -39,7 +41,7 @@ import static org.awaitility.Awaitility.*;
                properties = {
                                "sumup.adder.bootstrap-server=${spring.embedded.kafka.brokers}",
                                "sumup.adder.topic=" + TOPIC,
-                               "sumup.adder.commit-interval=1s",
+                               "sumup.adder.commit-interval=500ms",
                                "spring.mongodb.embedded.version=4.4.13" })
 @EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
 @EnableAutoConfiguration
@@ -52,26 +54,25 @@ abstract class GenericApplicationTests<K, V>
 
 
        @Autowired
-       KafkaConsumer<K, V> kafkaConsumer;
+       org.apache.kafka.clients.consumer.Consumer<K, V> kafkaConsumer;
        @Autowired
        Consumer<ConsumerRecord<K, V>> consumer;
        @Autowired
-       ApplicationProperties properties;
+       ApplicationProperties applicationProperties;
        @Autowired
-       ExecutorService executor;
+       MongoClient mongoClient;
        @Autowired
-       PartitionStatisticsRepository partitionStatisticsRepository;
+       MongoProperties mongoProperties;
        @Autowired
-       PollIntervalAwareConsumerRebalanceListener rebalanceListener;
+       RebalanceListener rebalanceListener;
        @Autowired
-       RecordHandler<K, V> recordHandler;
+       TestRecordHandler<K, V> recordHandler;
+       @Autowired
+       EndlessConsumer<K, V> endlessConsumer;
 
        KafkaProducer<Bytes, Bytes> testRecordProducer;
        KafkaConsumer<Bytes, Bytes> offsetConsumer;
-       EndlessConsumer<K, V> endlessConsumer;
        Map<TopicPartition, Long> oldOffsets;
-       Map<TopicPartition, Long> newOffsets;
-       Set<ConsumerRecord<K, V>> receivedRecords;
 
 
        final RecordGenerator recordGenerator;
@@ -87,7 +88,7 @@ abstract class GenericApplicationTests<K, V>
        /** Tests methods */
 
        @Test
-       void commitsCurrentOffsetsOnSuccess()
+       void commitsCurrentOffsetsOnSuccess() throws Exception
        {
                int numberOfGeneratedMessages =
                                recordGenerator.generate(false, false, messageSender);
@@ -95,7 +96,7 @@ abstract class GenericApplicationTests<K, V>
                await(numberOfGeneratedMessages + " records received")
                                .atMost(Duration.ofSeconds(30))
                                .pollInterval(Duration.ofSeconds(1))
-                               .until(() -> receivedRecords.size() >= numberOfGeneratedMessages);
+                               .until(() -> recordHandler.receivedRecords.size() >= numberOfGeneratedMessages);
 
                await("Offsets committed")
                                .atMost(Duration.ofSeconds(10))
@@ -103,13 +104,14 @@ abstract class GenericApplicationTests<K, V>
                                .untilAsserted(() ->
                                {
                                        checkSeenOffsetsForProgress();
-                                       compareToCommitedOffsets(newOffsets);
+                                       assertSeenOffsetsEqualCommittedOffsets(recordHandler.seenOffsets);
                                });
 
                assertThatExceptionOfType(IllegalStateException.class)
                                .isThrownBy(() -> endlessConsumer.exitStatus())
                                .describedAs("Consumer should still be running");
 
+               endlessConsumer.stop();
                recordGenerator.assertBusinessLogic();
        }
 
@@ -126,7 +128,7 @@ abstract class GenericApplicationTests<K, V>
                                .until(() -> !endlessConsumer.running());
 
                checkSeenOffsetsForProgress();
-               compareToCommitedOffsets(newOffsets);
+               assertSeenOffsetsEqualCommittedOffsets(recordHandler.seenOffsets);
 
                endlessConsumer.start();
                await("Consumer failed")
@@ -135,8 +137,8 @@ abstract class GenericApplicationTests<K, V>
                                .until(() -> !endlessConsumer.running());
 
                checkSeenOffsetsForProgress();
-               compareToCommitedOffsets(newOffsets);
-               assertThat(receivedRecords.size())
+               assertSeenOffsetsEqualCommittedOffsets(recordHandler.seenOffsets);
+               assertThat(recordHandler.receivedRecords.size())
                                .describedAs("Received not all sent events")
                                .isLessThan(numberOfGeneratedMessages);
 
@@ -163,7 +165,7 @@ abstract class GenericApplicationTests<K, V>
                                .until(() -> !endlessConsumer.running());
 
                checkSeenOffsetsForProgress();
-               compareToCommitedOffsets(oldOffsets);
+               assertSeenOffsetsAreBehindCommittedOffsets(recordHandler.seenOffsets);
 
                endlessConsumer.start();
                await("Consumer failed")
@@ -171,11 +173,7 @@ abstract class GenericApplicationTests<K, V>
                                .pollInterval(Duration.ofSeconds(1))
                                .until(() -> !endlessConsumer.running());
 
-               checkSeenOffsetsForProgress();
-               compareToCommitedOffsets(oldOffsets);
-               assertThat(receivedRecords.size())
-                               .describedAs("Received not all sent events")
-                               .isLessThan(numberOfGeneratedMessages);
+               assertSeenOffsetsAreBehindCommittedOffsets(recordHandler.seenOffsets);
 
                assertThatNoException()
                                .describedAs("Consumer should not be running")
@@ -190,18 +188,37 @@ abstract class GenericApplicationTests<K, V>
 
        /** Helper methods for the verification of expectations */
 
-       void compareToCommitedOffsets(Map<TopicPartition, Long> offsetsToCheck)
+       void assertSeenOffsetsEqualCommittedOffsets(Map<TopicPartition, Long> offsetsToCheck)
        {
                doForCurrentOffsets((tp, offset) ->
                {
                        Long expected = offsetsToCheck.get(tp) + 1;
-                       log.debug("Checking, if the offset for {} is {}", tp, expected);
+                       log.debug("Checking, if the offset {} for {} is exactly {}", offset, tp, expected);
                        assertThat(offset)
                                        .describedAs("Committed offset corresponds to the offset of the consumer")
                                        .isEqualTo(expected);
                });
        }
 
+       void assertSeenOffsetsAreBehindCommittedOffsets(Map<TopicPartition, Long> offsetsToCheck)
+       {
+               List<Boolean> isOffsetBehindSeen = new LinkedList<>();
+
+               doForCurrentOffsets((tp, offset) ->
+               {
+                       Long expected = offsetsToCheck.get(tp) + 1;
+                       log.debug("Checking, if the offset {} for {} is at most {}", offset, tp, expected);
+                       assertThat(offset)
+                                       .describedAs("Committed offset must be at most equal to the offset of the consumer")
+                                       .isLessThanOrEqualTo(expected);
+                       isOffsetBehindSeen.add(offset < expected);
+               });
+
+               assertThat(isOffsetBehindSeen.stream().reduce(false, (result, next) -> result | next))
+                               .describedAs("Committed offsets are behind seen offsets")
+                               .isTrue();
+       }
+
        void checkSeenOffsetsForProgress()
        {
                // Be sure that some messages were consumed...!
@@ -209,7 +226,7 @@ abstract class GenericApplicationTests<K, V>
                partitions().forEach(tp ->
                {
                        Long oldOffset = oldOffsets.get(tp) + 1;
-                       Long newOffset = newOffsets.get(tp) + 1;
+                       Long newOffset = recordHandler.seenOffsets.get(tp) + 1;
                        if (!oldOffset.equals(newOffset))
                        {
                                log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
@@ -227,29 +244,23 @@ abstract class GenericApplicationTests<K, V>
        void seekToEnd()
        {
                offsetConsumer.assign(partitions());
+               offsetConsumer.seekToEnd(partitions());
                partitions().forEach(tp ->
                {
+                       // seekToEnd() works lazily: it only takes effect on poll()/position()
                        Long offset = offsetConsumer.position(tp);
                        log.info("New position for {}: {}", tp, offset);
-                       Integer partition = tp.partition();
-                       StateDocument document =
-                                       partitionStatisticsRepository
-                                                       .findById(partition.toString())
-                                                       .orElse(new StateDocument(partition));
-                       document.offset = offset;
-                       partitionStatisticsRepository.save(document);
                });
+               // The new positions must be committed!
+               offsetConsumer.commitSync();
                offsetConsumer.unsubscribe();
        }
 
        void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
        {
-               partitions().forEach(tp ->
-               {
-                       String partition = Integer.toString(tp.partition());
-                       Optional<Long> offset = partitionStatisticsRepository.findById(partition).map(document -> document.offset);
-                       consumer.accept(tp, offset.orElse(0l));
-               });
+               offsetConsumer.assign(partitions());
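+               // No seek happened, so position() resolves to the group's committed offset per partition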
+               partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
+               offsetConsumer.unsubscribe();
        }
 
        List<TopicPartition> partitions()
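The rewritten seekToEnd() relies on two KafkaConsumer details that the new inline comments hint at: seekToEnd() is lazy and only takes effect once position() (or poll()) forces the lookup, and the fetched positions only become the group's committed offsets after an explicit commitSync(). A minimal, self-contained sketch of this reset pattern (broker address, topic and group id are placeholders, not values taken from the tests):

import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.BytesDeserializer;
import org.apache.kafka.common.utils.Bytes;

public class OffsetResetSketch
{
	public static void main(String[] args)
	{
		Properties props = new Properties();
		props.put("bootstrap.servers", "localhost:9092"); // placeholder
		props.put("group.id", "offset-reset-example");    // placeholder
		props.put("key.deserializer", BytesDeserializer.class.getName());
		props.put("value.deserializer", BytesDeserializer.class.getName());

		try (KafkaConsumer<Bytes, Bytes> consumer = new KafkaConsumer<>(props))
		{
			List<TopicPartition> partitions = List.of(new TopicPartition("test", 0)); // placeholder topic
			consumer.assign(partitions);    // manual assignment, no subscribe()
			consumer.seekToEnd(partitions); // lazy: only records the intent to seek
			for (TopicPartition tp : partitions)
			{
				long offset = consumer.position(tp); // forces the actual end-offset lookup
				System.out.println("New position for " + tp + ": " + offset);
			}
			consumer.commitSync(); // persists the new positions as the group's committed offsets
		}
	}
}
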
@@ -315,54 +326,33 @@ abstract class GenericApplicationTests<K, V>
        {
                Properties props;
                props = new Properties();
-               props.put("bootstrap.servers", properties.getBootstrapServer());
+               props.put("bootstrap.servers", applicationProperties.getBootstrapServer());
                props.put("linger.ms", 100);
                props.put("key.serializer", BytesSerializer.class.getName());
                props.put("value.serializer", BytesSerializer.class.getName());
                testRecordProducer = new KafkaProducer<>(props);
 
                props = new Properties();
-               props.put("bootstrap.servers", properties.getBootstrapServer());
+               props.put("bootstrap.servers", applicationProperties.getBootstrapServer());
                props.put("client.id", "OFFSET-CONSUMER");
-               props.put("group.id", properties.getGroupId());
+               props.put("group.id", applicationProperties.getGroupId());
                props.put("key.deserializer", BytesDeserializer.class.getName());
                props.put("value.deserializer", BytesDeserializer.class.getName());
                offsetConsumer = new KafkaConsumer<>(props);
 
+               mongoClient.getDatabase(mongoProperties.getDatabase()).drop();
                seekToEnd();
 
                oldOffsets = new HashMap<>();
-               newOffsets = new HashMap<>();
-               receivedRecords = new HashSet<>();
+               recordHandler.seenOffsets = new HashMap<>();
+               recordHandler.receivedRecords = new HashSet<>();
 
                doForCurrentOffsets((tp, offset) ->
                {
                        oldOffsets.put(tp, offset - 1);
-                       newOffsets.put(tp, offset - 1);
+                       recordHandler.seenOffsets.put(tp, offset - 1);
                });
 
-               TestRecordHandler<K, V> captureOffsetAndExecuteTestHandler =
-                               new TestRecordHandler<K, V>(recordHandler)
-                               {
-                                       @Override
-                                       public void onNewRecord(ConsumerRecord<K, V> record)
-                                       {
-                                               newOffsets.put(
-                                                               new TopicPartition(record.topic(), record.partition()),
-                                                               record.offset());
-                                               receivedRecords.add(record);
-                                       }
-                               };
-
-               endlessConsumer =
-                               new EndlessConsumer<>(
-                                               executor,
-                                               properties.getClientId(),
-                                               properties.getTopic(),
-                                               kafkaConsumer,
-                                               rebalanceListener,
-                                               captureOffsetAndExecuteTestHandler);
-
                endlessConsumer.start();
        }
 
@@ -372,6 +362,14 @@ abstract class GenericApplicationTests<K, V>
                try
                {
                        endlessConsumer.stop();
+               }
+               catch (Exception e)
+               {
+                       log.debug("{}", e.toString());
+               }
+
+               try
+               {
                        testRecordProducer.close();
                        offsetConsumer.close();
                }
@@ -386,5 +384,10 @@ abstract class GenericApplicationTests<K, V>
        @Import(ApplicationConfiguration.class)
        public static class Configuration
        {
+               @Bean
+               public RecordHandler recordHandler(RecordHandler applicationRecordHandler)
+               {
+                       return new TestRecordHandler(applicationRecordHandler);
+               }
        }
 }
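
The new Configuration bean swaps the application's RecordHandler for a test decorator. That is why the tests can reach into recordHandler.seenOffsets and recordHandler.receivedRecords directly, and why the anonymous capture handler and the hand-built EndlessConsumer could be deleted from setUp(). TestRecordHandler itself is not part of this diff; the following is only a plausible sketch of such a delegating decorator, assuming RecordHandler extends Consumer<ConsumerRecord<K, V>> (the public field names match the usage above, everything else is an assumption):

import java.util.Map;
import java.util.Set;
import java.util.function.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.TopicPartition;

// Hypothetical sketch; the real class lives elsewhere in the repository
public class TestRecordHandler<K, V> implements Consumer<ConsumerRecord<K, V>>
{
	private final Consumer<ConsumerRecord<K, V>> delegate;

	// Re-initialized and inspected directly by the tests (see setUp() above)
	public Map<TopicPartition, Long> seenOffsets;
	public Set<ConsumerRecord<K, V>> receivedRecords;

	public TestRecordHandler(Consumer<ConsumerRecord<K, V>> delegate)
	{
		this.delegate = delegate;
	}

	@Override
	public void accept(ConsumerRecord<K, V> record)
	{
		// Capture the offset and the record, then run the real business logic
		seenOffsets.put(
				new TopicPartition(record.topic(), record.partition()),
				record.offset());
		receivedRecords.add(record);
		delegate.accept(record);
	}
}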