RED: Uncovered an error in the test logic
[demos/kafka/training] src/test/java/de/juplo/kafka/ApplicationTests.java
index 24d3a9e..1e73040 100644
 package de.juplo.kafka;
 
-import lombok.extern.slf4j.Slf4j;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.clients.consumer.KafkaConsumer;
-import org.apache.kafka.clients.producer.KafkaProducer;
 import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.errors.RecordDeserializationException;
-import org.apache.kafka.common.serialization.*;
+import org.apache.kafka.common.serialization.LongSerializer;
+import org.apache.kafka.common.serialization.StringSerializer;
 import org.apache.kafka.common.utils.Bytes;
-import org.junit.jupiter.api.*;
-import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
 import org.springframework.boot.test.context.TestConfiguration;
 import org.springframework.context.annotation.Bean;
-import org.springframework.context.annotation.Import;
-import org.springframework.kafka.support.serializer.JsonSerializer;
-import org.springframework.kafka.test.context.EmbeddedKafka;
-import org.springframework.test.context.TestPropertySource;
-import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
+import org.springframework.context.annotation.Primary;
+import org.springframework.test.context.ContextConfiguration;
 
-import java.time.Duration;
-import java.time.LocalDateTime;
-import java.util.*;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.function.BiConsumer;
+import java.util.Set;
 import java.util.function.Consumer;
-import java.util.stream.Collectors;
-import java.util.stream.IntStream;
 
-import static de.juplo.kafka.ApplicationTests.PARTITIONS;
-import static de.juplo.kafka.ApplicationTests.TOPIC;
-import static org.assertj.core.api.Assertions.*;
-import static org.awaitility.Awaitility.*;
 
-
-@SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
-@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
-@TestPropertySource(
-               properties = {
-                               "consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
-                               "consumer.topic=" + TOPIC,
-                               "consumer.commit-interval=1s" })
-@EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
-@Slf4j
-class ApplicationTests
+@ContextConfiguration(classes = ApplicationTests.Configuration.class)
+public class ApplicationTests extends GenericApplicationTests<String, Long>
 {
-       public static final String TOPIC = "FOO";
-       public static final int PARTITIONS = 10;
-
-
-       StringSerializer stringSerializer = new StringSerializer();
-
-       @Autowired
-       Serializer valueSerializer;
-       @Autowired
-       KafkaProducer<String, Bytes> kafkaProducer;
-       @Autowired
-       KafkaConsumer<String, ValidMessage> kafkaConsumer;
-       @Autowired
-       KafkaConsumer<Bytes, Bytes> offsetConsumer;
-       @Autowired
-       ApplicationProperties properties;
-       @Autowired
-       ExecutorService executor;
-
-       Consumer<ConsumerRecord<String, ValidMessage>> testHandler;
-       EndlessConsumer<String, ValidMessage> endlessConsumer;
-       Map<TopicPartition, Long> oldOffsets;
-       Map<TopicPartition, Long> newOffsets;
-       Set<ConsumerRecord<String, ValidMessage>> receivedRecords;
-
-
-       /** Tests methods */
-
-       @Test
-       void commitsCurrentOffsetsOnSuccess() throws ExecutionException, InterruptedException
-       {
-               send100Messages((partition, key, counter) ->
-               {
-                       Bytes value;
-                       String type;
-
-                       if (counter%3 != 0)
-                       {
-                               value = serializeClientMessage(key, counter);
-                               type = "message";
-                       }
-                       else {
-                               value = serializeGreeting(key, counter);
-                               type = "greeting";
-                       }
-
-                       return toRecord(partition, key, value, type);
-               });
-
-               await("100 records received")
-                               .atMost(Duration.ofSeconds(30))
-                               .pollInterval(Duration.ofSeconds(1))
-                               .until(() -> receivedRecords.size() >= 100);
-
-               await("Offsets committed")
-                               .atMost(Duration.ofSeconds(10))
-                               .pollInterval(Duration.ofSeconds(1))
-                               .untilAsserted(() ->
-                               {
-                                       checkSeenOffsetsForProgress();
-                                       compareToCommitedOffsets(newOffsets);
-                               });
-
-               assertThatExceptionOfType(IllegalStateException.class)
-                               .isThrownBy(() -> endlessConsumer.exitStatus())
-                               .describedAs("Consumer should still be running");
-       }
-
-       @Test
-       void commitsOffsetOfErrorForReprocessingOnDeserializationError()
-       {
-               send100Messages((partition, key, counter) ->
-               {
-                       Bytes value;
-                       String type;
-
-                       if (counter == 77)
-                       {
-                               value = serializeFooMessage(key, counter);
-                               type = "foo";
-                       }
-                       else
-                       {
-                               if (counter%3 != 0)
-                               {
-                                       value = serializeClientMessage(key, counter);
-                                       type = "message";
-                               }
-                               else {
-                                       value = serializeGreeting(key, counter);
-                                       type = "greeting";
-                               }
-                       }
-
-                       return toRecord(partition, key, value, type);
-               });
-
-               await("Consumer failed")
-                               .atMost(Duration.ofSeconds(30))
-                               .pollInterval(Duration.ofSeconds(1))
-                               .until(() -> !endlessConsumer.running());
-
-               checkSeenOffsetsForProgress();
-               compareToCommitedOffsets(newOffsets);
-
-               endlessConsumer.start();
-               await("Consumer failed")
-                               .atMost(Duration.ofSeconds(30))
-                               .pollInterval(Duration.ofSeconds(1))
-                               .until(() -> !endlessConsumer.running());
-
-               checkSeenOffsetsForProgress();
-               compareToCommitedOffsets(newOffsets);
-               assertThat(receivedRecords.size())
-                               .describedAs("Received not all sent events")
-                               .isLessThan(100);
-
-               assertThatNoException()
-                               .describedAs("Consumer should not be running")
-                               .isThrownBy(() -> endlessConsumer.exitStatus());
-               assertThat(endlessConsumer.exitStatus())
-                               .describedAs("Consumer should have exited abnormally")
-                               .containsInstanceOf(RecordDeserializationException.class);
-       }
-
-
-       /** Helper methods for the verification of expectations */
-
-       void compareToCommitedOffsets(Map<TopicPartition, Long> offsetsToCheck)
-       {
-               doForCurrentOffsets((tp, offset) ->
-               {
-                       Long expected = offsetsToCheck.get(tp) + 1;
-                       log.debug("Checking, if the offset for {} is {}", tp, expected);
-                       assertThat(offset)
-                                       .describedAs("Committed offset corresponds to the offset of the consumer")
-                                       .isEqualTo(expected);
-               });
-       }
-
-       void checkSeenOffsetsForProgress()
-       {
-               // Be sure, that some messages were consumed...!
-               Set<TopicPartition> withProgress = new HashSet<>();
-               partitions().forEach(tp ->
-               {
-                       Long oldOffset = oldOffsets.get(tp) + 1;
-                       Long newOffset = newOffsets.get(tp) + 1;
-                       if (!oldOffset.equals(newOffset))
-                       {
-                               log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
-                               withProgress.add(tp);
-                       }
-               });
-               assertThat(withProgress)
-                               .describedAs("Some offsets must have changed, compared to the old offset-positions")
-                               .isNotEmpty();
-       }
-
-
-       /** Helper methods for setting up and running the tests */
-
-       void seekToEnd()
-       {
-               offsetConsumer.assign(partitions());
-               offsetConsumer.seekToEnd(partitions());
-               partitions().forEach(tp ->
-               {
-                       // seekToEnd() works lazily: it only takes effect on poll()/position()
-                       Long offset = offsetConsumer.position(tp);
-                       log.info("New position for {}: {}", tp, offset);
-               });
-               // The new positions must be commited!
-               offsetConsumer.commitSync();
-               offsetConsumer.unsubscribe();
-       }
-
-       void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
-       {
-               offsetConsumer.assign(partitions());
-               partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
-               offsetConsumer.unsubscribe();
-       }
-
-       List<TopicPartition> partitions()
-       {
-               return
-                               IntStream
-                                               .range(0, PARTITIONS)
-                                               .mapToObj(partition -> new TopicPartition(TOPIC, partition))
-                                               .collect(Collectors.toList());
-       }
-
-
-       public interface RecordGenerator<K, V>
-       {
-               public ProducerRecord<String, Bytes> generate(int partition, String key, long counter);
-       }
-
-       void send100Messages(RecordGenerator recordGenerator)
-       {
-               long i = 0;
-
-               for (int partition = 0; partition < 10; partition++)
-               {
-                       for (int key = 0; key < 10; key++)
-                       {
-                               ProducerRecord<String, Bytes> record =
-                                               recordGenerator.generate(partition, Integer.toString(partition*10+key%2), ++i);
-
-                               kafkaProducer.send(record, (metadata, e) ->
-                               {
-                                       if (metadata != null)
-                                       {
-                                               log.debug(
-                                                               "{}|{} - {}={}",
-                                                               metadata.partition(),
-                                                               metadata.offset(),
-                                                               record.key(),
-                                                               record.value());
-                                       }
-                                       else
-                                       {
-                                               log.warn(
-                                                               "Exception for {}={}: {}",
-                                                               record.key(),
-                                                               record.value(),
-                                                               e.toString());
-                                       }
-                               });
-                       }
-               }
-       }
-
-       ProducerRecord<String, Bytes> toRecord(int partition, String key, Bytes value, String type)
-       {
-               ProducerRecord<String, Bytes> record =
-                               new ProducerRecord<>(TOPIC, partition, key, value);
-               record.headers().add("__TypeId__", type.getBytes());
-               return record;
-       }
-
-       Bytes serializeClientMessage(String key, Long value)
-       {
-               TestClientMessage message = new TestClientMessage(key, value.toString());
-               return new Bytes(valueSerializer.serialize(TOPIC, message));
-       }
-
-       Bytes serializeGreeting(String key, Long value)
-       {
-               TestGreeting message = new TestGreeting(key, LocalDateTime.now());
-               return new Bytes(valueSerializer.serialize(TOPIC, message));
-       }
-
-       Bytes serializeFooMessage(String key, Long value)
-       {
-               TestFooMessage message = new TestFooMessage(key, value);
-               return new Bytes(valueSerializer.serialize(TOPIC, message));
-       }
-
-       @BeforeEach
-       public void init()
-       {
-               testHandler = record -> {} ;
-
-               seekToEnd();
-
-               oldOffsets = new HashMap<>();
-               newOffsets = new HashMap<>();
-               receivedRecords = new HashSet<>();
-
-               doForCurrentOffsets((tp, offset) ->
-               {
-                       oldOffsets.put(tp, offset - 1);
-                       newOffsets.put(tp, offset - 1);
-               });
-
-               Consumer<ConsumerRecord<String, ValidMessage>> captureOffsetAndExecuteTestHandler =
-                               record ->
-                               {
-                                       newOffsets.put(
-                                                       new TopicPartition(record.topic(), record.partition()),
-                                                       record.offset());
-                                       receivedRecords.add(record);
-                                       testHandler.accept(record);
-                               };
-
-               endlessConsumer =
-                               new EndlessConsumer<>(
-                                               executor,
-                                               properties.getClientId(),
-                                               properties.getTopic(),
-                                               kafkaConsumer,
-                                               captureOffsetAndExecuteTestHandler);
-
-               endlessConsumer.start();
-       }
-
-       @AfterEach
-       public void deinit()
-       {
-               try
-               {
-                       endlessConsumer.stop();
-               }
-               catch (Exception e)
-               {
-                       log.info("Exception while stopping the consumer: {}", e.toString());
-               }
-       }
-
-
-       @TestConfiguration
-       @Import(ApplicationConfiguration.class)
-       public static class Configuration
-       {
-               @Bean
-               Serializer<ValidMessage> serializer()
-               {
-                       return new JsonSerializer<>();
-               }
-
-               @Bean
-               KafkaProducer<String, Bytes> kafkaProducer(ApplicationProperties properties)
-               {
-                       Properties props = new Properties();
-                       props.put("bootstrap.servers", properties.getBootstrapServer());
-                       props.put("linger.ms", 100);
-                       props.put("key.serializer", StringSerializer.class.getName());
-                       props.put("value.serializer", BytesSerializer.class.getName());
-
-                       return new KafkaProducer<>(props);
-               }
-
-               @Bean
-               KafkaConsumer<Bytes, Bytes> offsetConsumer(ApplicationProperties properties)
-               {
-                       Properties props = new Properties();
-                       props.put("bootstrap.servers", properties.getBootstrapServer());
-                       props.put("client.id", "OFFSET-CONSUMER");
-                       props.put("group.id", properties.getGroupId());
-                       props.put("key.deserializer", BytesDeserializer.class.getName());
-                       props.put("value.deserializer", BytesDeserializer.class.getName());
-
-                       return new KafkaConsumer<>(props);
-               }
-       }
+  public ApplicationTests()
+  {
+    super(
+        new RecordGenerator()
+        {
+          final StringSerializer stringSerializer = new StringSerializer();
+          final LongSerializer longSerializer = new LongSerializer();
+
+
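+          // Generates 100,000 records (10 partitions with 10,000 keys each),
+          // hands each one to messageSender and returns the total count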
+          @Override
+          public int generate(
+              boolean poisonPills,
+              boolean logicErrors,
+              Consumer<ProducerRecord<Bytes, Bytes>> messageSender)
+          {
+            int i = 0;
+
+            for (int partition = 0; partition < 10; partition++)
+            {
+              for (int key = 0; key < 10000; key++)
+              {
+                i++;
+
+                Bytes value = new Bytes(longSerializer.serialize(TOPIC, (long)i));
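+                // Record number 99977 is turned into the requested error (logic error and/or poison pill)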
+                if (i == 99977)
+                {
+                  if (logicErrors)
+                  {
+                    value = new Bytes(longSerializer.serialize(TOPIC, Long.MIN_VALUE));
+                  }
+                  if (poisonPills)
+                  {
+                    value = new Bytes(stringSerializer.serialize(TOPIC, "BOOM (Poison-Pill)!"));
+                  }
+                }
+
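+                // The record key takes only two distinct values per partition: partition*10 and partition*10+1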
+                ProducerRecord<Bytes, Bytes> record =
+                    new ProducerRecord<>(
+                        TOPIC,
+                        partition,
+                        new Bytes(stringSerializer.serialize(TOPIC, Integer.toString(partition*10+key%2))),
+                        value);
+
+                messageSender.accept(record);
+              }
+            }
+
+            return i;
+          }
+        });
+  }
+
+
+  @TestConfiguration
+  public static class Configuration
+  {
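+    // @Primary lets this handler take precedence over the application's default record handler;
+    // it throws on the logic-error marker value injected by the RecordGenerator above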
+    @Primary
+    @Bean
+    public Consumer<ConsumerRecord<String, Long>> consumer()
+    {
+      return (record) ->
+      {
+        if (record.value() == Long.MIN_VALUE)
+          throw new RuntimeException("BOOM (Logic-Error)!");
+      };
+    }
+  }
 }