Wordcount implementation using Kafka's built-in tools and MongoDB as storage
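
The change below switches the test from verifying committed offsets through a second Kafka consumer to looking them up in MongoDB via a Spring Data repository. For orientation, here is a minimal sketch of the repository and document type the test code relies on; the names are taken from the diff itself, but the exact field layout of StatisticsDocument is an assumption, not necessarily the project's actual class:

import org.springframework.data.annotation.Id;
import org.springframework.data.mongodb.core.mapping.Document;
import org.springframework.data.mongodb.repository.MongoRepository;

// Assumed shape, derived from the usage in doForCurrentOffsets():
// findById() takes the partition number as a string, and the document
// exposes the last stored offset as a plain field.
@Document(collection = "statistics")
class StatisticsDocument
{
  @Id
  public String id;    // partition number, stored as a string
  public Long offset;  // offset to resume from for this partition
}

interface PartitionStatisticsRepository
    extends MongoRepository<StatisticsDocument, String>
{
}
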
[demos/kafka/training] src/test/java/de/juplo/kafka/ApplicationTests.java
index 6f58180..aa6dd4d 100644
@@ -6,13 +6,12 @@ import org.apache.kafka.clients.consumer.KafkaConsumer;
 import org.apache.kafka.clients.producer.KafkaProducer;
 import org.apache.kafka.clients.producer.ProducerRecord;
 import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.serialization.BytesDeserializer;
-import org.apache.kafka.common.serialization.BytesSerializer;
-import org.apache.kafka.common.serialization.LongSerializer;
-import org.apache.kafka.common.serialization.StringSerializer;
+import org.apache.kafka.common.serialization.*;
 import org.apache.kafka.common.utils.Bytes;
 import org.junit.jupiter.api.*;
 import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
+import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
 import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
 import org.springframework.boot.test.context.TestConfiguration;
 import org.springframework.context.annotation.Bean;
@@ -21,6 +20,7 @@ import org.springframework.kafka.test.context.EmbeddedKafka;
 import org.springframework.test.context.TestPropertySource;
 import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
 
+import java.time.Clock;
 import java.time.Duration;
 import java.util.*;
 import java.util.concurrent.ExecutionException;
@@ -33,7 +33,7 @@ import java.util.stream.IntStream;
 
 import static de.juplo.kafka.ApplicationTests.PARTITIONS;
 import static de.juplo.kafka.ApplicationTests.TOPIC;
-import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.*;
 import static org.awaitility.Awaitility.*;
 
 
@@ -42,8 +42,12 @@ import static org.awaitility.Awaitility.*;
 @TestPropertySource(
                properties = {
                                "consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
-                               "consumer.topic=" + TOPIC })
+                               "consumer.topic=" + TOPIC,
+                               "consumer.commit-interval=1s",
+                               "spring.mongodb.embedded.version=4.4.13" })
 @EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
+@EnableAutoConfiguration
+@AutoConfigureDataMongo
 @Slf4j
 class ApplicationTests
 {
@@ -52,39 +56,39 @@ class ApplicationTests
 
 
        StringSerializer stringSerializer = new StringSerializer();
-       LongSerializer longSerializer = new LongSerializer();
 
+       @Autowired
+       Serializer<Long> valueSerializer;
        @Autowired
        KafkaProducer<String, Bytes> kafkaProducer;
        @Autowired
-       KafkaConsumer<String, Long> kafkaConsumer;
+       KafkaConsumer<String, String> kafkaConsumer;
        @Autowired
-       KafkaConsumer<Bytes, Bytes> offsetConsumer;
+       PartitionStatisticsRepository partitionStatisticsRepository;
        @Autowired
        ApplicationProperties properties;
        @Autowired
        ExecutorService executor;
+       @Autowired
+       PartitionStatisticsRepository repository;
 
-       Consumer<ConsumerRecord<String, Long>> testHandler;
-       EndlessConsumer<String, Long> endlessConsumer;
+       Consumer<ConsumerRecord<String, String>> testHandler;
+       EndlessConsumer endlessConsumer;
        Map<TopicPartition, Long> oldOffsets;
        Map<TopicPartition, Long> newOffsets;
+       Set<ConsumerRecord<String, String>> receivedRecords;
 
 
        /** Test methods */
 
        @Test
-       @Order(1) // << The poison pill is not skipped. Hence, this test must run first
        void commitsCurrentOffsetsOnSuccess() throws ExecutionException, InterruptedException
        {
-               send100Messages(i ->  new Bytes(longSerializer.serialize(TOPIC, i)));
-
-               Set<ConsumerRecord<String, Long>> received = new HashSet<>();
-               testHandler = record -> received.add(record);
+               send100Messages(i ->  new Bytes(valueSerializer.serialize(TOPIC, i)));
 
                await("100 records received")
                                .atMost(Duration.ofSeconds(30))
-                               .until(() -> received.size() >= 100);
+                               .until(() -> receivedRecords.size() >= 100);
 
                await("Offsets committed")
                                .atMost(Duration.ofSeconds(10))
@@ -93,31 +97,10 @@ class ApplicationTests
                                        checkSeenOffsetsForProgress();
                                        compareToCommitedOffsets(newOffsets);
                                });
-       }
-
-       @Test
-       @Order(2)
-       void commitsOffsetOfErrorForReprocessingOnError()
-       {
-               send100Messages(counter ->
-                               counter == 77
-                                               ? new Bytes(stringSerializer.serialize(TOPIC, "BOOM!"))
-                                               : new Bytes(longSerializer.serialize(TOPIC, counter)));
 
-               await("Consumer failed")
-                               .atMost(Duration.ofSeconds(30))
-                               .until(() -> !endlessConsumer.running());
-
-               checkSeenOffsetsForProgress();
-               compareToCommitedOffsets(newOffsets);
-
-               endlessConsumer.start();
-               await("Consumer failed")
-                               .atMost(Duration.ofSeconds(30))
-                               .until(() -> !endlessConsumer.running());
-
-               checkSeenOffsetsForProgress();
-               compareToCommitedOffsets(newOffsets);
+               assertThatExceptionOfType(IllegalStateException.class)
+                               .describedAs("Consumer should still be running")
+                               .isThrownBy(() -> endlessConsumer.exitStatus());
        }
 
 
@@ -129,7 +112,9 @@ class ApplicationTests
                {
                        Long expected = offsetsToCheck.get(tp) + 1;
                        log.debug("Checking, if the offset for {} is {}", tp, expected);
-                       assertThat(offset).isEqualTo(expected);
+                       assertThat(offset)
+                                       .describedAs("Committed offset corresponds to the offset of the consumer")
+                                       .isEqualTo(expected);
                });
        }
 
@@ -147,7 +132,9 @@ class ApplicationTests
                                withProgress.add(tp);
                        }
                });
-               assertThat(withProgress).isNotEmpty().describedAs("Found no partitions with any offset-progress");
+               assertThat(withProgress)
+                               .describedAs("Some offsets must have changed, compared to the old offset-positions")
+                               .isNotEmpty();
        }
 
 
@@ -155,9 +142,12 @@ class ApplicationTests
 
        void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
        {
-               offsetConsumer.assign(partitions());
-               partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
-               offsetConsumer.unsubscribe();
+               partitions().forEach(tp ->
+               {
+                       String partition = Integer.toString(tp.partition());
+                       Optional<Long> offset = partitionStatisticsRepository.findById(partition).map(document -> document.offset);
+                       consumer.accept(tp, offset.orElse(0L));
+               });
        }
 
        List<TopicPartition> partitions()
@@ -219,6 +209,7 @@ class ApplicationTests
 
                oldOffsets = new HashMap<>();
                newOffsets = new HashMap<>();
+               receivedRecords = new HashSet<>();
 
                doForCurrentOffsets((tp, offset) ->
                {
@@ -226,22 +217,25 @@ class ApplicationTests
                        newOffsets.put(tp, offset - 1);
                });
 
-               Consumer<ConsumerRecord<String, Long>> captureOffsetAndExecuteTestHandler =
+               Consumer<ConsumerRecord<String, String>> captureOffsetAndExecuteTestHandler =
                                record ->
                                {
                                        newOffsets.put(
                                                        new TopicPartition(record.topic(), record.partition()),
                                                        record.offset());
+                                       receivedRecords.add(record);
                                        testHandler.accept(record);
                                };
 
                endlessConsumer =
-                               new EndlessConsumer<>(
+                               new EndlessConsumer(
                                                executor,
+                                               repository,
                                                properties.getClientId(),
                                                properties.getTopic(),
-                                               kafkaConsumer,
-                                               captureOffsetAndExecuteTestHandler);
+                                               Clock.systemDefaultZone(),
+                                               properties.getCommitInterval(),
+                                               kafkaConsumer);
 
                endlessConsumer.start();
        }
@@ -264,6 +258,12 @@ class ApplicationTests
        @Import(ApplicationConfiguration.class)
        public static class Configuration
        {
+               @Bean
+               Serializer<Long> serializer()
+               {
+                       return new LongSerializer();
+               }
+
                @Bean
                KafkaProducer<String, Bytes> kafkaProducer(ApplicationProperties properties)
                {
@@ -275,18 +275,5 @@ class ApplicationTests
 
                        return new KafkaProducer<>(props);
                }
-
-               @Bean
-               KafkaConsumer<Bytes, Bytes> offsetConsumer(ApplicationProperties properties)
-               {
-                       Properties props = new Properties();
-                       props.put("bootstrap.servers", properties.getBootstrapServer());
-                       props.put("client.id", "OFFSET-CONSUMER");
-                       props.put("group.id", properties.getGroupId());
-                       props.put("key.deserializer", BytesDeserializer.class.getName());
-                       props.put("value.deserializer", BytesDeserializer.class.getName());
-
-                       return new KafkaConsumer<>(props);
-               }
        }
 }
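
The assertion added to commitsCurrentOffsetsOnSuccess() uses EndlessConsumer.exitStatus() as a liveness probe: while the consumer is still running, no exit status exists yet, so querying it must fail. A sketch of the assumed contract (the real implementation lives in the main source tree; the field names here are illustrative only):

import java.util.Optional;

class EndlessConsumer
{
  private boolean running = false;
  private Exception exception = null;

  // Assumed contract: exitStatus() may only be queried after the
  // consumer has terminated; the test treats the IllegalStateException
  // raised here as proof that the consumer is still alive.
  public synchronized Optional<Exception> exitStatus()
  {
    if (running)
      throw new IllegalStateException("No exit-status available: consumer is still running");
    return Optional.ofNullable(exception);
  }
}
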