Springify: Configured a shared DLQ for poison pills and business-logic errors
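
The shared dead-letter handling itself is wired up in ApplicationConfiguration,
which is not part of this diff. For orientation only (a minimal sketch of a
common Spring Kafka 2.8+ setup, not code taken from this commit): poison pills
are usually caught by wrapping the real deserializer in an
ErrorHandlingDeserializer, and a single DeadLetterPublishingRecoverer then
routes both deserialization failures and exceptions thrown by the business
logic into one DLQ:

    import org.springframework.context.annotation.Bean;
    import org.springframework.kafka.core.KafkaOperations;
    import org.springframework.kafka.listener.DeadLetterPublishingRecoverer;
    import org.springframework.kafka.listener.DefaultErrorHandler;
    import org.springframework.util.backoff.FixedBackOff;

    @Bean
    DefaultErrorHandler errorHandler(KafkaOperations<String, ClientMessage> template)
    {
        // By default the recoverer publishes failed records to "<topic>.DLT";
        // the same recoverer handles poison pills and business-logic errors
        DeadLetterPublishingRecoverer recoverer = new DeadLetterPublishingRecoverer(template);
        // FixedBackOff(0, 0): no retries, publish to the DLQ immediately
        return new DefaultErrorHandler(recoverer, new FixedBackOff(0L, 0L));
    }

    # application.properties: let the ErrorHandlingDeserializer delegate to JSON
    spring.kafka.consumer.value-deserializer=org.springframework.kafka.support.serializer.ErrorHandlingDeserializer
    spring.kafka.properties.spring.deserializer.value.delegate.class=org.springframework.kafka.support.serializer.JsonDeserializer

The tests below only assert the visible effect of such a setup: the consumer
keeps running and commits offsets past the failed records, for poison pills as
well as for exceptions thrown by the record handler.
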
[demos/kafka/training] src/test/java/de/juplo/kafka/ApplicationTests.java
index bf1cdb8..43a4f61 100644
@@ -6,42 +6,47 @@ import org.apache.kafka.clients.consumer.KafkaConsumer;
 import org.apache.kafka.clients.producer.KafkaProducer;
 import org.apache.kafka.clients.producer.ProducerRecord;
 import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.errors.WakeupException;
-import org.apache.kafka.common.serialization.BytesSerializer;
-import org.apache.kafka.common.serialization.LongSerializer;
-import org.apache.kafka.common.serialization.StringSerializer;
+import org.apache.kafka.common.serialization.*;
 import org.apache.kafka.common.utils.Bytes;
-import org.junit.jupiter.api.MethodOrderer;
-import org.junit.jupiter.api.Order;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.TestMethodOrder;
+import org.junit.jupiter.api.*;
 import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.autoconfigure.kafka.KafkaAutoConfiguration;
+import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
 import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
 import org.springframework.boot.test.context.TestConfiguration;
 import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Import;
+import org.springframework.context.annotation.Primary;
+import org.springframework.kafka.support.serializer.JsonSerializer;
 import org.springframework.kafka.test.context.EmbeddedKafka;
 import org.springframework.test.context.TestPropertySource;
 import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
 
+import java.time.Duration;
 import java.util.*;
-import java.util.concurrent.ExecutorService;
+import java.util.concurrent.ExecutionException;
 import java.util.function.BiConsumer;
+import java.util.function.BiFunction;
 import java.util.function.Consumer;
-import java.util.function.Function;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 
 import static de.juplo.kafka.ApplicationTests.PARTITIONS;
 import static de.juplo.kafka.ApplicationTests.TOPIC;
-import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.*;
+import static org.awaitility.Awaitility.*;
 
 
-@SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
-@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
+@SpringJUnitConfig(
+               initializers = ConfigDataApplicationContextInitializer.class,
+               classes = {
+                               EndlessConsumer.class,
+                               KafkaAutoConfiguration.class,
+                               ApplicationTests.Configuration.class })
 @TestPropertySource(
                properties = {
-                               "consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
+                               "spring.kafka.consumer.bootstrap-servers=${spring.embedded.kafka.brokers}",
+                               "spring.kafka.producer.bootstrap-servers=${spring.embedded.kafka.brokers}",
                                "consumer.topic=" + TOPIC })
 @EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
 @Slf4j
@@ -52,63 +57,131 @@ class ApplicationTests
 
 
        StringSerializer stringSerializer = new StringSerializer();
-       LongSerializer longSerializer = new LongSerializer();
 
+       @Autowired
+       Serializer<ClientMessage> valueSerializer;
        @Autowired
        KafkaProducer<String, Bytes> kafkaProducer;
        @Autowired
-       KafkaConsumer<String, Long> kafkaConsumer;
+       org.apache.kafka.clients.consumer.Consumer<String, ClientMessage> kafkaConsumer;
+       @Autowired
+       KafkaConsumer<Bytes, Bytes> offsetConsumer;
+       @Autowired
+       ApplicationProperties applicationProperties;
+       @Autowired
+       KafkaProperties kafkaProperties;
        @Autowired
-       ApplicationProperties properties;
+       EndlessConsumer endlessConsumer;
        @Autowired
-       ExecutorService executor;
+       RecordHandler recordHandler;
 
+       Map<TopicPartition, Long> oldOffsets;
+       Map<TopicPartition, Long> newOffsets;
+       Set<ConsumerRecord<String, ClientMessage>> receivedRecords;
+
+
+       /** Test methods */
 
        @Test
-       @Order(1) // << The poistion pill is not skipped. Hence, this test must run first
-       void commitsCurrentOffsetsOnSuccess()
+       void commitsCurrentOffsetsOnSuccess() throws ExecutionException, InterruptedException
        {
-               send100Messages(i ->  new Bytes(longSerializer.serialize(TOPIC, i)));
+               send100Messages((key, counter) -> serialize(key, counter));
 
-               Map<TopicPartition, Long> oldOffsets = new HashMap<>();
-               doForCurrentOffsets((tp, offset) -> oldOffsets.put(tp, offset -1));
-               Set<ConsumerRecord<String, Long>> received = new HashSet<>();
-               Map<TopicPartition, Long> newOffsets = runEndlessConsumer(record ->
-               {
-                       received.add(record);
-                       if (received.size() == 100)
-                               throw new WakeupException();
-               });
+               await("100 records received")
+                               .atMost(Duration.ofSeconds(30))
+                               .until(() -> receivedRecords.size() == 100);
 
-               Set<TopicPartition> withProgress = new HashSet<>();
-               partitions().forEach(tp ->
-               {
-                       Long oldOffset = oldOffsets.get(tp);
-                       Long newOffset = newOffsets.get(tp);
-                       if (!oldOffset.equals(newOffset))
-                       {
-                               log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
-                               withProgress.add(tp);
-                       }
-               });
-               assertThat(withProgress).isNotEmpty().describedAs("Found no partitions with any offset-progress");
+               await("Offsets committed")
+                               .atMost(Duration.ofSeconds(10))
+                               .untilAsserted(() ->
+                               {
+                                       checkSeenOffsetsForProgress();
+                                       compareToCommittedOffsets(newOffsets);
+                               });
 
-               check(newOffsets);
+               assertThat(endlessConsumer.isRunning())
+                               .describedAs("Consumer should still be running")
+                               .isTrue();
        }
 
        @Test
-       @Order(2)
-       void commitsNoOffsetsOnError()
+       void commitsCurrentOffsetsOnDeserializationError()
        {
-               send100Messages(counter ->
+               send100Messages((key, counter) ->
                                counter == 77
                                                ? new Bytes(stringSerializer.serialize(TOPIC, "BOOM!"))
-                                               : new Bytes(longSerializer.serialize(TOPIC, counter)));
+                                               : serialize(key, counter));
 
-               Map<TopicPartition, Long> oldOffsets = new HashMap<>();
-               doForCurrentOffsets((tp, offset) -> oldOffsets.put(tp, offset -1));
-               Map<TopicPartition, Long> newOffsets = runEndlessConsumer((record) -> {});
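+               // Only 99 records can arrive: the poison pill never reaches the handler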
+               await("99 records received")
+                               .atMost(Duration.ofSeconds(30))
+                               .until(() -> receivedRecords.size() == 99);
 
+               await("Offsets committed")
+                               .atMost(Duration.ofSeconds(10))
+                               .untilAsserted(() ->
+                               {
+                                       // NOT IDEAL:
+                                       // This only works because, after the message that
+                                       // triggers the deserialization error, valid messages
+                                       // are still being read.
+                                       // REASON:
+                                       // The MessageHandler never sees the offset of the
+                                       // broken message!
+                                       checkSeenOffsetsForProgress();
+                                       compareToCommittedOffsets(newOffsets);
+                               });
+
+               assertThat(endlessConsumer.isRunning())
+                               .describedAs("Consumer should still be running")
+                               .isTrue();
+       }
+
+       @Test
+       void commitsOffsetOnProgramLogicErrorFoo()
+       {
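+               // Let the business logic fail for every 10th message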
+               recordHandler.testHandler = (record) ->
+               {
+                       if (Integer.parseInt(record.value().message) % 10 == 0)
+                               throw new RuntimeException("BOOM: " + record.value().message + "%10 == 0");
+               };
+
+               send100Messages((key, counter) -> serialize(key, counter));
+
+               await("100 records received")
+                               .atMost(Duration.ofSeconds(30))
+                               .until(() -> receivedRecords.size() == 100);
+
+               await("Offsets committed")
+                               .atMost(Duration.ofSeconds(10))
+                               .untilAsserted(() ->
+                               {
+                                       checkSeenOffsetsForProgress();
+                                       compareToCommittedOffsets(newOffsets);
+                               });
+
+               assertThat(endlessConsumer.isRunning())
+                               .describedAs("Consumer should still be running")
+                               .isTrue();
+       }
+
+
+       /** Helper methods for the verification of expectations */
+
+       void compareToCommittedOffsets(Map<TopicPartition, Long> offsetsToCheck)
+       {
+               doForCurrentOffsets((tp, offset) ->
+               {
+                       Long expected = offsetsToCheck.get(tp) + 1;
+                       log.debug("TEST: Comparing the expected offset of {} for {} to {}", expected, tp, offset);
+                       assertThat(offset)
+                                       .describedAs("Committed offset corresponds to the offset of the consumer")
+                                       .isEqualTo(expected);
+               });
+       }
+
+       void checkSeenOffsetsForProgress()
+       {
+               // Make sure that some messages were actually consumed!
                Set<TopicPartition> withProgress = new HashSet<>();
                partitions().forEach(tp ->
                {
@@ -116,17 +189,37 @@ class ApplicationTests
                        Long newOffset = newOffsets.get(tp);
                        if (!oldOffset.equals(newOffset))
                        {
-                               log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
+                               log.debug("TEST: Progress for {}: {} -> {}", tp, oldOffset, newOffset);
                                withProgress.add(tp);
                        }
                });
-               assertThat(withProgress).isNotEmpty().describedAs("Found no partitions with any offset-progress");
+               log.debug("TEST: Offsets with progress: {}", withProgress);
+               assertThat(withProgress)
+                               .describedAs("Some offsets must have changed, compared to the old offset-positions")
+                               .isNotEmpty();
+       }
+
+
+       /** Helper methods for setting up and running the tests */
 
-               check(oldOffsets);
+       void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
+       {
+               offsetConsumer.assign(partitions());
+               partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
+               offsetConsumer.unsubscribe();
        }
 
+       List<TopicPartition> partitions()
+       {
+               return
+                               IntStream
+                                               .range(0, PARTITIONS)
+                                               .mapToObj(partition -> new TopicPartition(TOPIC, partition))
+                                               .collect(Collectors.toList());
+       }
 
-       void send100Messages(Function<Long, Bytes> messageGenerator)
+
+       void send100Messages(BiFunction<Integer, Long, Bytes> messageGenerator)
        {
                long i = 0;
 
@@ -134,7 +227,7 @@ class ApplicationTests
                {
                        for (int key = 0; key < 10; key++)
                        {
-                               Bytes value = messageGenerator.apply(++i);
+                               Bytes value = messageGenerator.apply(key, ++i);
 
                                ProducerRecord<String, Bytes> record =
                                                new ProducerRecord<>(
@@ -143,12 +236,13 @@ class ApplicationTests
                                                                Integer.toString(key%2),
                                                                value);
 
+                               record.headers().add("__TypeId__", "message".getBytes());
                                kafkaProducer.send(record, (metadata, e) ->
                                {
                                        if (metadata != null)
                                        {
                                                log.debug(
-                                                               "{}|{} - {}={}",
+                                                               "TEST: Sent partition={}, offset={} - {}={}",
                                                                metadata.partition(),
                                                                metadata.offset(),
                                                                record.key(),
@@ -157,7 +251,7 @@ class ApplicationTests
                                        else
                                        {
                                                log.warn(
-                                                               "Exception for {}={}: {}",
+                                                               "TEST: Exception for {}={}: {}",
                                                                record.key(),
                                                                record.value(),
                                                                e.toString());
@@ -167,69 +261,111 @@ class ApplicationTests
                }
        }
 
-       Map<TopicPartition, Long> runEndlessConsumer(Consumer<ConsumerRecord<String, Long>> consumer)
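+       /** Builds the JSON-serialized ClientMessage payload the application expects. */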
+       Bytes serialize(Integer key, Long value)
        {
-               Map<TopicPartition, Long> offsets = new HashMap<>();
-               doForCurrentOffsets((tp, offset) -> offsets.put(tp, offset -1));
-               Consumer<ConsumerRecord<String, Long>> captureOffset =
-                               record ->
-                                               offsets.put(
-                                                               new TopicPartition(record.topic(), record.partition()),
-                                                               record.offset());
-               EndlessConsumer<String, Long> endlessConsumer =
-                               new EndlessConsumer<>(
-                                               executor,
-                                               properties.getClientId(),
-                                               properties.getTopic(),
-                                               kafkaConsumer,
-                                               captureOffset.andThen(consumer));
-
-               endlessConsumer.run();
-
-               return offsets;
+               ClientMessage message = new ClientMessage();
+               message.setClient(key.toString());
+               message.setMessage(value.toString());
+               return new Bytes(valueSerializer.serialize(TOPIC, message));
        }
 
-       List<TopicPartition> partitions()
+
+       @BeforeEach
+       public void init()
        {
-               return
-                               IntStream
-                                               .range(0, PARTITIONS)
-                                               .mapToObj(partition -> new TopicPartition(TOPIC, partition))
-                                               .collect(Collectors.toList());
+               recordHandler.testHandler = (record) -> {};
+
+               oldOffsets = new HashMap<>();
+               newOffsets = new HashMap<>();
+               receivedRecords = new HashSet<>();
+
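+               // Seed both offset maps with the current position minus one: the
+               // committed offset always points at the next record to be read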
+               doForCurrentOffsets((tp, offset) ->
+               {
+                       oldOffsets.put(tp, offset - 1);
+                       newOffsets.put(tp, offset - 1);
+               });
+
+               recordHandler.captureOffsets =
+                               record ->
+                               {
+                                       receivedRecords.add(record);
+                                       log.debug("TEST: Processing record #{}: {}", receivedRecords.size(), record.value());
+                                       newOffsets.put(
+                                                       new TopicPartition(record.topic(), record.partition()),
+                                                       record.offset());
+                               };
+
+               endlessConsumer.start();
        }
 
-       void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
+       @AfterEach
+       public void deinit()
        {
-               kafkaConsumer.assign(partitions());
-               partitions().forEach(tp -> consumer.accept(tp, kafkaConsumer.position(tp)));
-               kafkaConsumer.unsubscribe();
+               try
+               {
+                       endlessConsumer.stop();
+               }
+               catch (Exception e)
+               {
+                       log.info("TEST: Exception while stopping the consumer: {}", e.toString());
+               }
        }
 
-       void check(Map<TopicPartition, Long> offsets)
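+       /**
+        * Splits record handling in two: captureOffsets records every received
+        * message for the assertions, while testHandler lets individual tests
+        * inject failing business logic.
+        */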
+       public static class RecordHandler implements Consumer<ConsumerRecord<String, ClientMessage>>
        {
-               doForCurrentOffsets((tp, offset) ->
+               Consumer<ConsumerRecord<String, ClientMessage>> captureOffsets;
+               Consumer<ConsumerRecord<String, ClientMessage>> testHandler;
+
+
+               @Override
+               public void accept(ConsumerRecord<String, ClientMessage> record)
                {
-                       Long expected = offsets.get(tp) + 1;
-                       log.debug("Checking, if the offset for {} is {}", tp, expected);
-                       assertThat(offset).isEqualTo(expected);
-               });
+                       captureOffsets
+                                       .andThen(testHandler)
+                                       .accept(record);
+               }
        }
 
-
        @TestConfiguration
        @Import(ApplicationConfiguration.class)
        public static class Configuration
        {
+               @Primary
                @Bean
-               KafkaProducer<String, Bytes> kafkaProducer(ApplicationProperties properties)
+               public Consumer<ConsumerRecord<String, ClientMessage>> testHandler()
+               {
+                       return new RecordHandler();
+               }
+
+               @Bean
+               Serializer<ClientMessage> serializer()
+               {
+                       return new JsonSerializer<>();
+               }
+
+               @Bean
+               KafkaProducer<String, Bytes> kafkaProducer(KafkaProperties properties)
                {
                        Properties props = new Properties();
-                       props.put("bootstrap.servers", properties.getBootstrapServer());
+                       props.put("bootstrap.servers", properties.getConsumer().getBootstrapServers());
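+                       // Reuse the consumer's bootstrap servers; both point at the
+                       // embedded broker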
                        props.put("linger.ms", 100);
                        props.put("key.serializer", StringSerializer.class.getName());
                        props.put("value.serializer", BytesSerializer.class.getName());
 
                        return new KafkaProducer<>(props);
                }
+
+               @Bean
+               KafkaConsumer<Bytes, Bytes> offsetConsumer(KafkaProperties properties)
+               {
+                       Properties props = new Properties();
+                       props.put("bootstrap.servers", properties.getConsumer().getBootstrapServers());
+                       props.put("client.id", "OFFSET-CONSUMER");
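+                       // Join the application's consumer group, so that position()
+                       // reports the offsets committed by the consumer under test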
+                       props.put("group.id", properties.getConsumer().getGroupId());
+                       props.put("key.deserializer", BytesDeserializer.class.getName());
+                       props.put("value.deserializer", BytesDeserializer.class.getName());
+
+                       return new KafkaConsumer<>(props);
+               }
        }
 }