Merge of the EndlessConsumer refactoring (branch 'stored-state')
[demos/kafka/training] / src / test / java / de / juplo / kafka / ApplicationTests.java
diff --git a/src/test/java/de/juplo/kafka/ApplicationTests.java b/src/test/java/de/juplo/kafka/ApplicationTests.java
new file mode 100644 (file)
index 0000000..4b7ef36
--- /dev/null
@@ -0,0 +1,313 @@
+package de.juplo.kafka;
+
+import lombok.extern.slf4j.Slf4j;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.errors.RecordDeserializationException;
+import org.apache.kafka.common.serialization.*;
+import org.apache.kafka.common.utils.Bytes;
+import org.junit.jupiter.api.*;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
+import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
+import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
+import org.springframework.boot.test.context.TestConfiguration;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Import;
+import org.springframework.kafka.test.context.EmbeddedKafka;
+import org.springframework.test.context.TestPropertySource;
+import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
+
+import java.time.Duration;
+import java.util.*;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.function.BiConsumer;
+import java.util.function.Consumer;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import static de.juplo.kafka.ApplicationTests.PARTITIONS;
+import static de.juplo.kafka.ApplicationTests.TOPIC;
+import static org.assertj.core.api.Assertions.*;
+import static org.awaitility.Awaitility.*;
+
+
+@SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
+@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
+@TestPropertySource(
+               properties = {
+                               "consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
+                               "consumer.topic=" + TOPIC,
+                               "spring.mongodb.embedded.version=4.4.13" })
+@EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
+@EnableAutoConfiguration
+@AutoConfigureDataMongo
+@Slf4j
+class ApplicationTests
+{
+       public static final String TOPIC = "FOO";
+       public static final int PARTITIONS = 10;
+
+
+       StringSerializer stringSerializer = new StringSerializer();
+
+       @Autowired
+       Serializer<Long> valueSerializer;
+       @Autowired
+       KafkaProducer<String, Bytes> kafkaProducer;
+       @Autowired
+       KafkaConsumer<String, Long> kafkaConsumer;
+       @Autowired
+       PartitionStatisticsRepository partitionStatisticsRepository;
+       @Autowired
+       ApplicationProperties properties;
+       @Autowired
+       ExecutorService executor;
+
+       Consumer<ConsumerRecord<String, Long>> testHandler;
+       EndlessConsumer<String, Long> endlessConsumer;
+       Map<TopicPartition, Long> oldOffsets;
+       Map<TopicPartition, Long> newOffsets;
+       Set<ConsumerRecord<String, Long>> receivedRecords;
+
+
+       /** Test methods */
+
+       @Test
+       @Order(1) // << The poison pill is not skipped. Hence, this test must run first
+       void commitsCurrentOffsetsOnSuccess() throws ExecutionException, InterruptedException
+       {
+               send100Messages(i -> new Bytes(valueSerializer.serialize(TOPIC, i)));
+
+               await("100 records received")
+                               .atMost(Duration.ofSeconds(30))
+                               .until(() -> receivedRecords.size() >= 100);
+
+               await("Offsets committed")
+                               .atMost(Duration.ofSeconds(10))
+                               .untilAsserted(() ->
+                               {
+                                       checkSeenOffsetsForProgress();
+                                       compareToCommittedOffsets(newOffsets);
+                               });
+
+               assertThatExceptionOfType(IllegalStateException.class)
+                               .describedAs("Consumer should still be running")
+                               .isThrownBy(() -> endlessConsumer.exitStatus());
+       }
+
+       @Test
+       @Order(2)
+       void commitsOffsetOfErrorForReprocessingOnError()
+       {
+               send100Messages(counter ->
+                               counter == 77
+                                               ? new Bytes(stringSerializer.serialize(TOPIC, "BOOM!"))
+                                               : new Bytes(valueSerializer.serialize(TOPIC, counter)));
+
+               await("Consumer failed")
+                               .atMost(Duration.ofSeconds(30))
+                               .until(() -> !endlessConsumer.running());
+
+               checkSeenOffsetsForProgress();
+               compareToCommittedOffsets(newOffsets);
+
+               endlessConsumer.start();
+               await("Consumer failed again")
+                               .atMost(Duration.ofSeconds(30))
+                               .until(() -> !endlessConsumer.running());
+
+               checkSeenOffsetsForProgress();
+               compareToCommittedOffsets(newOffsets);
+               assertThat(receivedRecords.size())
+                               .describedAs("Not all sent events should have been received")
+                               .isLessThan(100);
+
+               assertThatNoException()
+                               .describedAs("Consumer should not be running")
+                               .isThrownBy(() -> endlessConsumer.exitStatus());
+               assertThat(endlessConsumer.exitStatus())
+                               .describedAs("Consumer should have exited abnormally")
+                               .containsInstanceOf(RecordDeserializationException.class);
+       }
+
+
+       /** Helper methods for the verification of expectations */
+
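+       /**
+        * Asserts that, for each partition, the offset stored in the repository
+        * is exactly one above the corresponding last seen offset.
+        */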
+       void compareToCommittedOffsets(Map<TopicPartition, Long> offsetsToCheck)
+       {
+               doForCurrentOffsets((tp, offset) ->
+               {
+                       Long expected = offsetsToCheck.get(tp) + 1;
+                       log.debug("Checking if the committed offset for {} is {}", tp, expected);
+                       assertThat(offset)
+                                       .describedAs("Committed offset must be one above the last seen offset")
+                                       .isEqualTo(expected);
+               });
+       }
+
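+       /**
+        * Asserts that the seen offsets have advanced on at least one partition
+        * since the consumer was started in init().
+        */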
+       void checkSeenOffsetsForProgress()
+       {
+               // Make sure that some messages were consumed at all!
+               Set<TopicPartition> withProgress = new HashSet<>();
+               partitions().forEach(tp ->
+               {
+                       Long oldOffset = oldOffsets.get(tp);
+                       Long newOffset = newOffsets.get(tp);
+                       if (!oldOffset.equals(newOffset))
+                       {
+                               log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
+                               withProgress.add(tp);
+                       }
+               });
+               assertThat(withProgress)
+                               .describedAs("Some offsets must have changed, compared to the old offset-positions")
+                               .isNotEmpty();
+       }
+
+
+       /** Helper methods for setting up and running the tests */
+
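+       /**
+        * Reads the stored offset for each partition from the MongoDB-backed
+        * repository and passes it to the given callback (0 if no offset is
+        * stored yet).
+        */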
+       void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
+       {
+               partitions().forEach(tp ->
+               {
+                       String partition = Integer.toString(tp.partition());
+                       Optional<Long> offset = partitionStatisticsRepository.findById(partition).map(document -> document.offset);
+                       consumer.accept(tp, offset.orElse(0L));
+               });
+       }
+
+       List<TopicPartition> partitions()
+       {
+               return
+                               IntStream
+                                               .range(0, PARTITIONS)
+                                               .mapToObj(partition -> new TopicPartition(TOPIC, partition))
+                                               .collect(Collectors.toList());
+       }
+
+
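+       /**
+        * Sends 10 messages to each of the 10 partitions (100 in total). The
+        * value of each message is created by the given generator from the
+        * running message number, so that single messages can be replaced by a
+        * poison pill.
+        */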
+       void send100Messages(Function<Long, Bytes> messageGenerator)
+       {
+               long i = 0;
+
+               for (int partition = 0; partition < PARTITIONS; partition++)
+               {
+                       for (int key = 0; key < 10; key++)
+                       {
+                               Bytes value = messageGenerator.apply(++i);
+
+                               ProducerRecord<String, Bytes> record =
+                                               new ProducerRecord<>(
+                                                               TOPIC,
+                                                               partition,
+                                                               Integer.toString(key % 2),
+                                                               value);
+
+                               kafkaProducer.send(record, (metadata, e) ->
+                               {
+                                       if (metadata != null)
+                                       {
+                                               log.debug(
+                                                               "{}|{} - {}={}",
+                                                               metadata.partition(),
+                                                               metadata.offset(),
+                                                               record.key(),
+                                                               record.value());
+                                       }
+                                       else
+                                       {
+                                               log.warn(
+                                                               "Exception for {}={}: {}",
+                                                               record.key(),
+                                                               record.value(),
+                                                               e.toString());
+                                       }
+                               });
+                       }
+               }
+       }
+
+
+       @BeforeEach
+       public void init()
+       {
+               testHandler = record -> {};
+
+               oldOffsets = new HashMap<>();
+               newOffsets = new HashMap<>();
+               receivedRecords = new HashSet<>();
+
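+               // Seed both maps with the last consumed offset (stored offset - 1),
+               // so that checkSeenOffsetsForProgress() can detect progress later on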
+               doForCurrentOffsets((tp, offset) ->
+               {
+                       oldOffsets.put(tp, offset - 1);
+                       newOffsets.put(tp, offset - 1);
+               });
+
+               Consumer<ConsumerRecord<String, Long>> captureOffsetAndExecuteTestHandler =
+                               record ->
+                               {
+                                       newOffsets.put(
+                                                       new TopicPartition(record.topic(), record.partition()),
+                                                       record.offset());
+                                       receivedRecords.add(record);
+                                       testHandler.accept(record);
+                               };
+
+               endlessConsumer =
+                               new EndlessConsumer<>(
+                                               executor,
+                                               partitionStatisticsRepository,
+                                               properties.getClientId(),
+                                               properties.getTopic(),
+                                               kafkaConsumer,
+                                               captureOffsetAndExecuteTestHandler);
+
+               endlessConsumer.start();
+       }
+
+       @AfterEach
+       public void deinit()
+       {
+               try
+               {
+                       endlessConsumer.stop();
+               }
+               catch (Exception e)
+               {
+                       log.info("Exception while stopping the consumer: {}", e.toString());
+               }
+       }
+
+
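+       /**
+        * Provides a serializer for valid Long values and a producer that sends
+        * raw Bytes, so that the tests can also inject undeserializable payloads
+        * (poison pills).
+        */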
+       @TestConfiguration
+       @Import(ApplicationConfiguration.class)
+       public static class Configuration
+       {
+               @Bean
+               Serializer<Long> serializer()
+               {
+                       return new LongSerializer();
+               }
+
+               @Bean
+               KafkaProducer<String, Bytes> kafkaProducer(ApplicationProperties properties)
+               {
+                       Properties props = new Properties();
+                       props.put("bootstrap.servers", properties.getBootstrapServer());
+                       props.put("linger.ms", 100);
+                       props.put("key.serializer", StringSerializer.class.getName());
+                       props.put("value.serializer", BytesSerializer.class.getName());
+
+                       return new KafkaProducer<>(props);
+               }
+       }
+}