Merge branch 'deserialization' into springified-consumer--serialization
[demos/kafka/training] src/test/java/de/juplo/kafka/ApplicationTests.java
index bf38b05..6c25bcd 100644
@@ -6,10 +6,8 @@ import org.apache.kafka.clients.consumer.KafkaConsumer;
 import org.apache.kafka.clients.producer.KafkaProducer;
 import org.apache.kafka.clients.producer.ProducerRecord;
 import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.serialization.BytesDeserializer;
-import org.apache.kafka.common.serialization.BytesSerializer;
-import org.apache.kafka.common.serialization.LongSerializer;
-import org.apache.kafka.common.serialization.StringSerializer;
+import org.apache.kafka.common.errors.RecordDeserializationException;
+import org.apache.kafka.common.serialization.*;
 import org.apache.kafka.common.utils.Bytes;
 import org.junit.jupiter.api.*;
 import org.springframework.beans.factory.annotation.Autowired;
@@ -17,6 +15,7 @@ import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
 import org.springframework.boot.test.context.TestConfiguration;
 import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Import;
+import org.springframework.kafka.support.serializer.JsonSerializer;
 import org.springframework.kafka.test.context.EmbeddedKafka;
 import org.springframework.test.context.TestPropertySource;
 import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
@@ -26,14 +25,14 @@ import java.util.*;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.function.BiConsumer;
+import java.util.function.BiFunction;
 import java.util.function.Consumer;
-import java.util.function.Function;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 
 import static de.juplo.kafka.ApplicationTests.PARTITIONS;
 import static de.juplo.kafka.ApplicationTests.TOPIC;
-import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.*;
 import static org.awaitility.Awaitility.*;
 
 
@@ -52,12 +51,13 @@ class ApplicationTests
 
 
        StringSerializer stringSerializer = new StringSerializer();
-       LongSerializer longSerializer = new LongSerializer();
 
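+       // Injected from the test configuration below; used to produce valid ClientMessage records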
+       @Autowired
+       Serializer<ClientMessage> valueSerializer;
        @Autowired
        KafkaProducer<String, Bytes> kafkaProducer;
        @Autowired
-       KafkaConsumer<String, Long> kafkaConsumer;
+       KafkaConsumer<String, ClientMessage> kafkaConsumer;
        @Autowired
        KafkaConsumer<Bytes, Bytes> offsetConsumer;
        @Autowired
@@ -65,50 +65,128 @@ class ApplicationTests
        @Autowired
        ExecutorService executor;
 
-       Consumer<ConsumerRecord<String, Long>> testHandler;
-       EndlessConsumer<String, Long> endlessConsumer;
+       Consumer<ConsumerRecord<String, ClientMessage>> testHandler;
+       EndlessConsumer<String, ClientMessage> endlessConsumer;
        Map<TopicPartition, Long> oldOffsets;
        Map<TopicPartition, Long> newOffsets;
+       Set<ConsumerRecord<String, ClientMessage>> receivedRecords;
+
 
+       /** Test methods */
 
        @Test
        @Order(1) // << The poison pill is not skipped. Hence, this test must run first
        void commitsCurrentOffsetsOnSuccess() throws ExecutionException, InterruptedException
        {
-               send100Messages(i ->  new Bytes(longSerializer.serialize(TOPIC, i)));
-
-               Set<ConsumerRecord<String, Long>> received = new HashSet<>();
-               testHandler = record -> received.add(record);
+               send100Messages(this::serialize);
 
                await("100 records received")
                                .atMost(Duration.ofSeconds(30))
-                               .until(() -> received.size() >= 100);
+                               .until(() -> receivedRecords.size() >= 100);
 
-               endlessConsumer.stop();
+               await("Offsets committed")
+                               .atMost(Duration.ofSeconds(10))
+                               .untilAsserted(() ->
+                               {
+                                       checkSeenOffsetsForProgress();
+                                       compareToCommitedOffsets(newOffsets);
+                               });
 
-               checkSeenOffsetsForProgress();
-               compareToCommitedOffsets(newOffsets);
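+               // exitStatus() must throw as long as the consumer is still running: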
+               Throwable thrown = catchThrowable(() -> endlessConsumer.exitStatus());
+               assertThat(thrown)
+                               .describedAs("Consumer should still be running")
+                               .isInstanceOf(IllegalStateException.class);
        }
 
        @Test
        @Order(2)
-       void commitsNoOffsetsOnError()
+       void commitsOffsetOfErrorForReprocessingOnError()
        {
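+               // Message no. 77 is sent as a plain string that cannot be deserialized into a ClientMessage (a "poison pill")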
-               send100Messages(counter ->
+               send100Messages((key, counter) ->
                                counter == 77
                                                ? new Bytes(stringSerializer.serialize(TOPIC, "BOOM!"))
-                                               : new Bytes(longSerializer.serialize(TOPIC, counter)));
+                                               : serialize(key, counter));
+
+               await("Consumer failed")
+                               .atMost(Duration.ofSeconds(30))
+                               .until(() -> !endlessConsumer.running());
+
+               checkSeenOffsetsForProgress();
+               compareToCommitedOffsets(newOffsets);
 
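+               // The offset of the failed record was committed for reprocessing, so the
+               // restarted consumer must hit the poison pill and fail again: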
+               endlessConsumer.start();
                await("Consumer failed")
                                .atMost(Duration.ofSeconds(30))
                                .until(() -> !endlessConsumer.running());
 
                checkSeenOffsetsForProgress();
-               compareToCommitedOffsets(oldOffsets);
+               compareToCommitedOffsets(newOffsets);
+               assertThat(receivedRecords.size())
+                               .describedAs("Consumer must not have received all sent events")
+                               .isLessThan(100);
+
+               assertThatNoException()
+                               .describedAs("The stopped consumer must expose its exit status without throwing")
+                               .isThrownBy(() -> endlessConsumer.exitStatus());
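+               // The exposed exit status must be the deserialization failure: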
+               assertThat(endlessConsumer.exitStatus())
+                               .describedAs("Consumer should have exited abnormally")
+                               .containsInstanceOf(RecordDeserializationException.class);
+       }
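
Note: both tests rely on EndlessConsumer.running() and EndlessConsumer.exitStatus(),
which are not part of this diff. A minimal sketch of the assumed contract inside
EndlessConsumer (the field names 'running' and 'exception' are assumptions):

	public synchronized Optional<Exception> exitStatus()
	{
		// Sketch only: while the consumer loop is alive, there is no exit status yet
		if (running)
			throw new IllegalStateException("No exit-status available: Consumer is running!");

		// The exception (if any) that terminated the consumer loop
		return Optional.ofNullable(exception);
	}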
+
+
+       /** Helper methods for verifying expectations */
+
+       void compareToCommitedOffsets(Map<TopicPartition, Long> offsetsToCheck)
+       {
+               doForCurrentOffsets((tp, offset) ->
+               {
+                       Long expected = offsetsToCheck.get(tp) + 1;
+                       log.debug("Checking if the offset for {} is {}", tp, expected);
+                       assertThat(offset)
+                                       .describedAs("Committed offset corresponds to the offset of the consumer")
+                                       .isEqualTo(expected);
+               });
+       }
+
+       void checkSeenOffsetsForProgress()
+       {
+               // Make sure that some messages were actually consumed!
+               Set<TopicPartition> withProgress = new HashSet<>();
+               partitions().forEach(tp ->
+               {
+                       Long oldOffset = oldOffsets.get(tp);
+                       Long newOffset = newOffsets.get(tp);
+                       if (!oldOffset.equals(newOffset))
+                       {
+                               log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
+                               withProgress.add(tp);
+                       }
+               });
+               assertThat(withProgress)
+                               .describedAs("Some offsets must have changed compared to the old offset-positions")
+                               .isNotEmpty();
+       }
+
+
+       /** Helper methods for setting up and running the tests */
+
+       void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
+       {
+               offsetConsumer.assign(partitions());
+               partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
+               offsetConsumer.unsubscribe();
+       }
+
+       List<TopicPartition> partitions()
+       {
+               return
+                               IntStream
+                                               .range(0, PARTITIONS)
+                                               .mapToObj(partition -> new TopicPartition(TOPIC, partition))
+                                               .collect(Collectors.toList());
        }
 
 
-       void send100Messages(Function<Long, Bytes> messageGenerator)
+       void send100Messages(BiFunction<Integer, Long, Bytes> messageGenerator)
        {
                long i = 0;
 
@@ -116,7 +194,7 @@ class ApplicationTests
                {
                        for (int key = 0; key < 10; key++)
                        {
-                               Bytes value = messageGenerator.apply(++i);
+                               Bytes value = messageGenerator.apply(key, ++i);
 
                                ProducerRecord<String, Bytes> record =
                                                new ProducerRecord<>(
@@ -125,6 +203,7 @@ class ApplicationTests
                                                                Integer.toString(key%2),
                                                                value);
 
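+                               // Type information for Spring's JsonDeserializer; the id "message" is
+                               // presumably mapped to ClientMessage in the application's configuration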
+                               record.headers().add("__TypeId__", "message".getBytes());
                                kafkaProducer.send(record, (metadata, e) ->
                                {
                                        if (metadata != null)
@@ -149,6 +228,15 @@ class ApplicationTests
                }
        }
 
+       Bytes serialize(Integer key, Long value)
+       {
+               ClientMessage message = new ClientMessage();
+               message.setClient(key.toString());
+               message.setMessage(value.toString());
+               return new Bytes(valueSerializer.serialize(TOPIC, message));
+       }
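
Note: assuming ClientMessage is a plain bean with client and message properties
(as suggested by the setClient()/setMessage() calls above), serialize(1, 42L)
should yield JSON along the lines of:

	{"client":"1","message":"42"}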
+
+
        @BeforeEach
        public void init()
        {
@@ -156,6 +244,7 @@ class ApplicationTests
 
                oldOffsets = new HashMap<>();
                newOffsets = new HashMap<>();
+               receivedRecords = new HashSet<>();
 
                doForCurrentOffsets((tp, offset) ->
                {
@@ -163,12 +252,13 @@ class ApplicationTests
                        newOffsets.put(tp, offset - 1);
                });
 
-               Consumer<ConsumerRecord<String, Long>> captureOffsetAndExecuteTestHandler =
+               Consumer<ConsumerRecord<String, ClientMessage>> captureOffsetAndExecuteTestHandler =
                                record ->
                                {
                                        newOffsets.put(
                                                        new TopicPartition(record.topic(), record.partition()),
                                                        record.offset());
+                                       receivedRecords.add(record);
                                        testHandler.accept(record);
                                };
 
@@ -183,50 +273,6 @@ class ApplicationTests
                endlessConsumer.start();
        }
 
-       List<TopicPartition> partitions()
-       {
-               return
-                               IntStream
-                                               .range(0, PARTITIONS)
-                                               .mapToObj(partition -> new TopicPartition(TOPIC, partition))
-                                               .collect(Collectors.toList());
-       }
-
-       void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
-       {
-               offsetConsumer.assign(partitions());
-               partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
-               offsetConsumer.unsubscribe();
-       }
-
-       void checkSeenOffsetsForProgress()
-       {
-               // Be sure, that some messages were consumed...!
-               Set<TopicPartition> withProgress = new HashSet<>();
-               partitions().forEach(tp ->
-               {
-                       Long oldOffset = oldOffsets.get(tp);
-                       Long newOffset = newOffsets.get(tp);
-                       if (!oldOffset.equals(newOffset))
-                       {
-                               log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
-                               withProgress.add(tp);
-                       }
-               });
-               assertThat(withProgress).isNotEmpty().describedAs("Found no partitions with any offset-progress");
-       }
-
-       void compareToCommitedOffsets(Map<TopicPartition, Long> offsetsToCheck)
-       {
-               doForCurrentOffsets((tp, offset) ->
-               {
-                       Long expected = offsetsToCheck.get(tp) + 1;
-                       log.debug("Checking, if the offset for {} is {}", tp, expected);
-                       assertThat(offset).isEqualTo(expected);
-               });
-       }
-
-
        @AfterEach
        public void deinit()
        {
@@ -240,10 +286,17 @@ class ApplicationTests
                }
        }
 
+
        @TestConfiguration
        @Import(ApplicationConfiguration.class)
        public static class Configuration
        {
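+               // JSON serializer used by the tests to produce valid ClientMessage records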
+               @Bean
+               Serializer<ClientMessage> serializer()
+               {
+                       return new JsonSerializer<>();
+               }
+
                @Bean
                KafkaProducer<String, Bytes> kafkaProducer(ApplicationProperties properties)
                {