Merged tests from springified-consumer--serialization -> deserialization
author Kai Moritz <kai@juplo.de>
Sun, 14 Aug 2022 10:06:01 +0000 (12:06 +0200)
committer Kai Moritz <kai@juplo.de>
Sun, 14 Aug 2022 10:55:12 +0000 (12:55 +0200)
* Only the newly added test was taken over.
* The added test was adapted to the behaviour on a logic error, which
  differs from Spring Kafka: Kafka performs neither seeks nor a commit
  automatically. Since `EndlessConsumer` explicitly calls `unsubscribe()`
  on a logic error, no offset commit is performed, so the old offset
  positions remain valid (see the sketch after this list).
* The test was renamed accordingly.
* `RecordGenerator` was extended with an additional set of integers,
  through which the indices of the logic errors to be generated can be
  specified.
* The added test was switched over to the reworked method for generating
  the test messages.
* `ApplicationTest` was extended so that it generates the logic error
  required by the added test.
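
A minimal sketch of the error handling described above. `EndlessConsumer`
itself is not touched by this diff, so the class structure, names, and poll
loop below are illustrative assumptions rather than the actual implementation:

package de.juplo.kafka;

import java.time.Duration;
import java.util.Arrays;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;

// Illustrative sketch only: shows how a logic error in the record handler
// leads to an explicit unsubscribe() without an offset commit.
class EndlessConsumerSketch<K, V>
{
  final KafkaConsumer<K, V> consumer;
  final java.util.function.Consumer<ConsumerRecord<K, V>> handler;

  EndlessConsumerSketch(
      KafkaConsumer<K, V> consumer,
      java.util.function.Consumer<ConsumerRecord<K, V>> handler)
  {
    this.consumer = consumer;
    this.handler = handler;
  }

  void run(String topic)
  {
    try
    {
      consumer.subscribe(Arrays.asList(topic));
      while (true)
      {
        for (ConsumerRecord<K, V> record : consumer.poll(Duration.ofSeconds(1)))
          handler.accept(record); // a logic error surfaces here as a RuntimeException
      }
    }
    catch (RuntimeException e)
    {
      // Leaving the group via unsubscribe() does not commit offsets, so the
      // previously committed offset positions stay valid -- exactly what the
      // renamed test doesNotCommitOffsetsOnLogicError asserts by comparing
      // against oldOffsets instead of newOffsets.
      consumer.unsubscribe();
      throw e;
    }
  }
}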

13 files changed:
README.sh
docker-compose.yml
pom.xml
src/main/java/de/juplo/kafka/ApplicationConfiguration.java
src/main/java/de/juplo/kafka/ClientMessage.java [deleted file]
src/main/java/de/juplo/kafka/Greeting.java [deleted file]
src/main/java/de/juplo/kafka/ValidMessage.java [deleted file]
src/test/java/de/juplo/kafka/ApplicationTest.java [new file with mode: 0644]
src/test/java/de/juplo/kafka/ApplicationTests.java [deleted file]
src/test/java/de/juplo/kafka/GenericApplicationTest.java [new file with mode: 0644]
src/test/java/de/juplo/kafka/TestClientMessage.java [deleted file]
src/test/java/de/juplo/kafka/TestFooMessage.java [deleted file]
src/test/java/de/juplo/kafka/TestGreeting.java [deleted file]

diff --git a/README.sh b/README.sh
index 72f0c60..2a1e5d8 100755 (executable)
--- a/README.sh
+++ b/README.sh
@@ -25,22 +25,44 @@ fi
 echo "Waiting for the Kafka-Cluster to become ready..."
 docker-compose exec cli cub kafka-ready -b kafka:9092 1 60 > /dev/null 2>&1 || exit 1
 docker-compose up setup
-docker-compose up -d producer consumer
+docker-compose up -d
+
+while ! [[ $(http 0:8081/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "Waiting for consumer-1..."; sleep 1; done
+while ! [[ $(http 0:8082/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "Waiting for consumer-2..."; sleep 1; done
+while ! [[ $(http 0:8083/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "Waiting for consumer-3..."; sleep 1; done
+while ! [[ $(http 0:8084/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "Waiting for consumer-4..."; sleep 1; done
+while ! [[ $(http 0:8085/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "Waiting for consumer-5..."; sleep 1; done
+
 sleep 5
+
 docker-compose exec -T cli bash << 'EOF'
 echo "Writing poison pill into topic test..."
 # tag::poisonpill[]
 echo 'BOOM!' | kafkacat -P -b kafka:9092 -t test
 # end::poisonpill[]
 EOF
-while [[ $(http 0:8081/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "Consumer is still running..."; sleep 1; done
+
+while [[ $(http 0:8081/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "consumer-1 is still running..."; sleep 1; done
+while [[ $(http 0:8082/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "consumer-2 is still running..."; sleep 1; done
+while [[ $(http 0:8083/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "consumer-3 is still running..."; sleep 1; done
+while [[ $(http 0:8084/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "consumer-4 is still running..."; sleep 1; done
+while [[ $(http 0:8085/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "consumer-5 is still running..."; sleep 1; done
+
 http -v :8081/actuator/health
-echo "Restarting consumer"
+echo "Restarting consumer-1"
 http -v post :8081/start
-while ! [[ $(http 0:8081/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "Waiting for consumer..."; sleep 1; done
-while [[ $(http 0:8081/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "Consumer is still running..."; sleep 1; done
+
+echo "Waiting for consumer-1 to come up"
+while ! [[ $(http 0:8081/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "Waiting for consumer-1..."; sleep 1; done
+http -v :8081/actuator/health
+
+echo "Waiting for consumer-1 to crash"
+while [[ $(http 0:8081/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "consumer-1 is still running..."; sleep 1; done
 http -v :8081/actuator/health
-http -v post :8081/actuator/shutdown
+
 docker-compose stop producer
-docker-compose ps
-docker-compose logs --tail=100 consumer
+docker-compose logs --tail=10 consumer-1
+docker-compose logs --tail=10 consumer-2
+docker-compose logs --tail=10 consumer-3
+docker-compose logs --tail=10 consumer-4
+docker-compose logs --tail=10 consumer-5
diff --git a/docker-compose.yml b/docker-compose.yml
index 159f9cb..d36e851 100644 (file)
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -37,7 +37,7 @@ services:
     command: sleep infinity
 
   producer:
-    image: juplo/endless-producer:1.0-SNAPSHOT
+    image: juplo/endless-long-producer:1.0-SNAPSHOT
     ports:
       - 8080:8080
     environment:
@@ -48,13 +48,47 @@ services:
       producer.throttle-ms: 200
 
 
-  consumer:
+  consumer-1:
     image: juplo/endless-consumer:1.0-SNAPSHOT
     ports:
       - 8081:8080
     environment:
       server.port: 8080
       consumer.bootstrap-server: kafka:9092
-      consumer.client-id: my-group
-      consumer.client-id: consumer
-      consumer.topic: test
+      consumer.client-id: consumer-1
+
+  consumer-2:
+    image: juplo/endless-consumer:1.0-SNAPSHOT
+    ports:
+      - 8082:8080
+    environment:
+      server.port: 8080
+      consumer.bootstrap-server: kafka:9092
+      consumer.client-id: consumer-2
+
+  consumer-3:
+    image: juplo/endless-consumer:1.0-SNAPSHOT
+    ports:
+      - 8083:8080
+    environment:
+      server.port: 8080
+      consumer.bootstrap-server: kafka:9092
+      consumer.client-id: consumer-3
+
+  consumer-4:
+    image: juplo/endless-consumer:1.0-SNAPSHOT
+    ports:
+      - 8084:8080
+    environment:
+      server.port: 8080
+      consumer.bootstrap-server: kafka:9092
+      consumer.client-id: consumer-4
+
+  consumer-5:
+    image: juplo/endless-consumer:1.0-SNAPSHOT
+    ports:
+      - 8085:8080
+    environment:
+      server.port: 8080
+      consumer.bootstrap-server: kafka:9092
+      consumer.client-id: consumer-5
diff --git a/pom.xml b/pom.xml
index 0889d23..6fd5d5f 100644 (file)
--- a/pom.xml
+++ b/pom.xml
   <version>1.0-SNAPSHOT</version>
   <name>Endless Consumer: a Simple Consumer-Group that reads and prints the topic and counts the received messages for each key by topic</name>
 
+  <properties>
+    <java.version>11</java.version>
+  </properties>
+
   <dependencies>
     <dependency>
       <groupId>org.springframework.boot</groupId>
       <groupId>org.apache.kafka</groupId>
       <artifactId>kafka-clients</artifactId>
     </dependency>
-    <dependency>
-      <groupId>org.springframework.kafka</groupId>
-      <artifactId>spring-kafka</artifactId>
-    </dependency>
     <dependency>
       <groupId>org.projectlombok</groupId>
       <artifactId>lombok</artifactId>
diff --git a/src/main/java/de/juplo/kafka/ApplicationConfiguration.java b/src/main/java/de/juplo/kafka/ApplicationConfiguration.java
index 9fc0c70..766740b 100644 (file)
--- a/src/main/java/de/juplo/kafka/ApplicationConfiguration.java
+++ b/src/main/java/de/juplo/kafka/ApplicationConfiguration.java
@@ -2,11 +2,11 @@ package de.juplo.kafka;
 
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.common.serialization.LongDeserializer;
 import org.apache.kafka.common.serialization.StringDeserializer;
 import org.springframework.boot.context.properties.EnableConfigurationProperties;
 import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Configuration;
-import org.springframework.kafka.support.serializer.JsonDeserializer;
 
 import java.util.Properties;
 import java.util.concurrent.ExecutorService;
@@ -19,7 +19,7 @@ import java.util.function.Consumer;
 public class ApplicationConfiguration
 {
   @Bean
-  public Consumer<ConsumerRecord<String, ValidMessage>> consumer()
+  public Consumer<ConsumerRecord<String, Long>> consumer()
   {
     return (record) ->
     {
@@ -28,10 +28,10 @@ public class ApplicationConfiguration
   }
 
   @Bean
-  public EndlessConsumer<String, ValidMessage> endlessConsumer(
-      KafkaConsumer<String, ValidMessage> kafkaConsumer,
+  public EndlessConsumer<String, Long> endlessConsumer(
+      KafkaConsumer<String, Long> kafkaConsumer,
       ExecutorService executor,
-      Consumer<ConsumerRecord<String, ValidMessage>> handler,
+      Consumer<ConsumerRecord<String, Long>> handler,
       ApplicationProperties properties)
   {
     return
@@ -50,7 +50,7 @@ public class ApplicationConfiguration
   }
 
   @Bean(destroyMethod = "close")
-  public KafkaConsumer<String, ValidMessage> kafkaConsumer(ApplicationProperties properties)
+  public KafkaConsumer<String, Long> kafkaConsumer(ApplicationProperties properties)
   {
     Properties props = new Properties();
 
@@ -61,11 +61,7 @@ public class ApplicationConfiguration
     props.put("auto.commit.interval.ms", (int)properties.getCommitInterval().toMillis());
     props.put("metadata.max.age.ms", "1000");
     props.put("key.deserializer", StringDeserializer.class.getName());
-    props.put("value.deserializer", JsonDeserializer.class.getName());
-    props.put(JsonDeserializer.TYPE_MAPPINGS,
-        "message:" + ClientMessage.class.getName() + "," +
-        "greeting:" + Greeting.class.getName());
-    props.put(JsonDeserializer.TRUSTED_PACKAGES, "de.juplo.kafka");
+    props.put("value.deserializer", LongDeserializer.class.getName());
 
     return new KafkaConsumer<>(props);
   }
diff --git a/src/main/java/de/juplo/kafka/ClientMessage.java b/src/main/java/de/juplo/kafka/ClientMessage.java
deleted file mode 100644 (file)
index a158907..0000000
--- a/src/main/java/de/juplo/kafka/ClientMessage.java
+++ /dev/null
@@ -1,22 +0,0 @@
-package de.juplo.kafka;
-
-import lombok.EqualsAndHashCode;
-import lombok.Getter;
-import lombok.Setter;
-import lombok.ToString;
-
-
-@Getter
-@Setter
-@EqualsAndHashCode
-@ToString
-public class ClientMessage extends ValidMessage
-{
-  String client;
-  String message;
-
-  ClientMessage()
-  {
-    super(Type.CLIENT_MESSAGE);
-  }
-}
diff --git a/src/main/java/de/juplo/kafka/Greeting.java b/src/main/java/de/juplo/kafka/Greeting.java
deleted file mode 100644 (file)
index 4421a50..0000000
--- a/src/main/java/de/juplo/kafka/Greeting.java
+++ /dev/null
@@ -1,21 +0,0 @@
-package de.juplo.kafka;
-
-import lombok.*;
-
-import java.time.LocalDateTime;
-
-
-@Getter
-@Setter
-@EqualsAndHashCode
-@ToString
-public class Greeting extends ValidMessage
-{
-  String name;
-  LocalDateTime when;
-
-  public Greeting()
-  {
-    super(Type.GREETING);
-  }
-}
diff --git a/src/main/java/de/juplo/kafka/ValidMessage.java b/src/main/java/de/juplo/kafka/ValidMessage.java
deleted file mode 100644 (file)
index 217d8f3..0000000
--- a/src/main/java/de/juplo/kafka/ValidMessage.java
+++ /dev/null
@@ -1,14 +0,0 @@
-package de.juplo.kafka;
-
-import lombok.Getter;
-import lombok.RequiredArgsConstructor;
-
-
-@RequiredArgsConstructor
-public abstract class ValidMessage
-{
-  enum Type { CLIENT_MESSAGE, GREETING }
-
-  @Getter
-  private final Type type;
-}
diff --git a/src/test/java/de/juplo/kafka/ApplicationTest.java b/src/test/java/de/juplo/kafka/ApplicationTest.java
new file mode 100644 (file)
index 0000000..ed93a21
--- /dev/null
+++ b/src/test/java/de/juplo/kafka/ApplicationTest.java
@@ -0,0 +1,84 @@
+package de.juplo.kafka;
+
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.common.serialization.LongSerializer;
+import org.apache.kafka.common.serialization.StringSerializer;
+import org.apache.kafka.common.utils.Bytes;
+import org.springframework.boot.test.context.TestConfiguration;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Primary;
+import org.springframework.test.context.ContextConfiguration;
+
+import java.util.Set;
+import java.util.function.Consumer;
+
+
+@ContextConfiguration(classes = ApplicationTest.Configuration.class)
+public class ApplicationTest extends GenericApplicationTest<String, Long>
+{
+  public ApplicationTest()
+  {
+    super(
+        new RecordGenerator()
+        {
+          final StringSerializer stringSerializer = new StringSerializer();
+          final LongSerializer longSerializer = new LongSerializer();
+
+
+          @Override
+          public void generate(
+              int numberOfMessagesToGenerate,
+              Set<Integer> poisonPills,
+              Set<Integer> logicErrors,
+              Consumer<ProducerRecord<Bytes, Bytes>> messageSender)
+          {
+            int i = 0;
+
+            for (int partition = 0; partition < 10; partition++)
+            {
+              for (int key = 0; key < 10; key++)
+              {
+                if (++i > numberOfMessagesToGenerate)
+                  return;
+
+                Bytes value = new Bytes(longSerializer.serialize(TOPIC, (long)i));
+                if (logicErrors.contains(i))
+                {
+                  value = new Bytes(longSerializer.serialize(TOPIC, Long.MIN_VALUE));
+                }
+                if (poisonPills.contains(i))
+                {
+                  value = new Bytes(stringSerializer.serialize(TOPIC, "BOOM (Poison-Pill)!"));
+                }
+
+                ProducerRecord<Bytes, Bytes> record =
+                    new ProducerRecord<>(
+                        TOPIC,
+                        partition,
+                        new Bytes(stringSerializer.serialize(TOPIC,Integer.toString(partition*10+key%2))),
+                        value);
+
+                messageSender.accept(record);
+              }
+            }
+          }
+        });
+  }
+
+
+  @TestConfiguration
+  public static class Configuration
+  {
+    @Primary
+    @Bean
+    public Consumer<ConsumerRecord<String, Long>> consumer()
+    {
+      return (record) ->
+      {
+        if (record.value() == Long.MIN_VALUE)
+          throw new RuntimeException("BOOM (Logic-Error)!");
+      };
+    }
+  }
+}
diff --git a/src/test/java/de/juplo/kafka/ApplicationTests.java b/src/test/java/de/juplo/kafka/ApplicationTests.java
deleted file mode 100644 (file)
index b5644b6..0000000
--- a/src/test/java/de/juplo/kafka/ApplicationTests.java
+++ /dev/null
@@ -1,455 +0,0 @@
-package de.juplo.kafka;
-
-import lombok.extern.slf4j.Slf4j;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.clients.consumer.KafkaConsumer;
-import org.apache.kafka.clients.producer.KafkaProducer;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.errors.RecordDeserializationException;
-import org.apache.kafka.common.serialization.*;
-import org.apache.kafka.common.utils.Bytes;
-import org.junit.jupiter.api.*;
-import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
-import org.springframework.boot.test.context.TestConfiguration;
-import org.springframework.context.annotation.Bean;
-import org.springframework.context.annotation.Import;
-import org.springframework.kafka.support.serializer.JsonSerializer;
-import org.springframework.kafka.test.context.EmbeddedKafka;
-import org.springframework.test.context.TestPropertySource;
-import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
-
-import java.time.Duration;
-import java.time.LocalDateTime;
-import java.util.*;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.function.BiConsumer;
-import java.util.function.Consumer;
-import java.util.stream.Collectors;
-import java.util.stream.IntStream;
-
-import static de.juplo.kafka.ApplicationTests.PARTITIONS;
-import static de.juplo.kafka.ApplicationTests.TOPIC;
-import static org.assertj.core.api.Assertions.*;
-import static org.awaitility.Awaitility.*;
-
-
-@SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
-@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
-@TestPropertySource(
-               properties = {
-                               "consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
-                               "consumer.topic=" + TOPIC,
-                               "consumer.commit-interval=1s" })
-@EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
-@Slf4j
-class ApplicationTests
-{
-       public static final String TOPIC = "FOO";
-       public static final int PARTITIONS = 10;
-
-
-       StringSerializer stringSerializer = new StringSerializer();
-
-       @Autowired
-       Serializer valueSerializer;
-       @Autowired
-       KafkaProducer<String, Bytes> kafkaProducer;
-       @Autowired
-       KafkaConsumer<String, ValidMessage> kafkaConsumer;
-       @Autowired
-       KafkaConsumer<Bytes, Bytes> offsetConsumer;
-       @Autowired
-       ApplicationProperties properties;
-       @Autowired
-       ExecutorService executor;
-
-       Consumer<ConsumerRecord<String, ValidMessage>> testHandler;
-       EndlessConsumer<String, ValidMessage> endlessConsumer;
-       Map<TopicPartition, Long> oldOffsets;
-       Map<TopicPartition, Long> newOffsets;
-       Set<ConsumerRecord<String, ValidMessage>> receivedRecords;
-
-
-       /** Tests methods */
-
-       @Test
-       void commitsCurrentOffsetsOnSuccess()
-       {
-               send100Messages((partition, key, counter) ->
-               {
-                       Bytes value;
-                       String type;
-
-                       if (counter%3 != 0)
-                       {
-                               value = serializeClientMessage(key, counter);
-                               type = "message";
-                       }
-                       else {
-                               value = serializeGreeting(key);
-                               type = "greeting";
-                       }
-
-                       return toRecord(partition, key, value, Optional.of(type));
-               });
-
-               await("100 records received")
-                               .atMost(Duration.ofSeconds(30))
-                               .pollInterval(Duration.ofSeconds(1))
-                               .until(() -> receivedRecords.size() >= 100);
-
-               await("Offsets committed")
-                               .atMost(Duration.ofSeconds(10))
-                               .pollInterval(Duration.ofSeconds(1))
-                               .untilAsserted(() ->
-                               {
-                                       checkSeenOffsetsForProgress();
-                                       compareToCommitedOffsets(newOffsets);
-                               });
-
-               assertThatExceptionOfType(IllegalStateException.class)
-                               .isThrownBy(() -> endlessConsumer.exitStatus())
-                               .describedAs("Consumer should still be running");
-       }
-
-       @Test
-       void commitsOffsetOfErrorForReprocessingOnDeserializationErrorInvalidMessage()
-       {
-               send100Messages((partition, key, counter) ->
-               {
-                       Bytes value;
-                       String type;
-
-                       if (counter == 77)
-                       {
-                               value = serializeFooMessage(key, counter);
-                               type = null;
-                       }
-                       else
-                       {
-                               if (counter%3 != 0)
-                               {
-                                       value = serializeClientMessage(key, counter);
-                                       type = "message";
-                               }
-                               else {
-                                       value = serializeGreeting(key);
-                                       type = "greeting";
-                               }
-                       }
-
-                       return toRecord(partition, key, value, Optional.ofNullable(type));
-               });
-
-               await("Consumer failed")
-                               .atMost(Duration.ofSeconds(30))
-                               .pollInterval(Duration.ofSeconds(1))
-                               .until(() -> !endlessConsumer.running());
-
-               checkSeenOffsetsForProgress();
-               compareToCommitedOffsets(newOffsets);
-
-               endlessConsumer.start();
-               await("Consumer failed")
-                               .atMost(Duration.ofSeconds(30))
-                               .pollInterval(Duration.ofSeconds(1))
-                               .until(() -> !endlessConsumer.running());
-
-               checkSeenOffsetsForProgress();
-               compareToCommitedOffsets(newOffsets);
-               assertThat(receivedRecords.size())
-                               .describedAs("Received not all sent events")
-                               .isLessThan(100);
-
-               assertThatNoException()
-                               .describedAs("Consumer should not be running")
-                               .isThrownBy(() -> endlessConsumer.exitStatus());
-               assertThat(endlessConsumer.exitStatus())
-                               .describedAs("Consumer should have exited abnormally")
-                               .containsInstanceOf(RecordDeserializationException.class);
-       }
-
-       @Test
-       void commitsOffsetOfErrorForReprocessingOnDeserializationErrorOnUnknownMessage()
-       {
-               send100Messages((partition, key, counter) ->
-               {
-                       Bytes value;
-                       String type;
-
-                       if (counter == 77)
-                       {
-                               value = serializeFooMessage(key, counter);
-                               type = "foo";
-                       }
-                       else
-                       {
-                               if (counter%3 != 0)
-                               {
-                                       value = serializeClientMessage(key, counter);
-                                       type = "message";
-                               }
-                               else {
-                                       value = serializeGreeting(key);
-                                       type = "greeting";
-                               }
-                       }
-
-                       return toRecord(partition, key, value, Optional.of(type));
-               });
-
-               await("Consumer failed")
-                               .atMost(Duration.ofSeconds(30))
-                               .pollInterval(Duration.ofSeconds(1))
-                               .until(() -> !endlessConsumer.running());
-
-               checkSeenOffsetsForProgress();
-               compareToCommitedOffsets(newOffsets);
-
-               endlessConsumer.start();
-               await("Consumer failed")
-                               .atMost(Duration.ofSeconds(30))
-                               .pollInterval(Duration.ofSeconds(1))
-                               .until(() -> !endlessConsumer.running());
-
-               checkSeenOffsetsForProgress();
-               compareToCommitedOffsets(newOffsets);
-               assertThat(receivedRecords.size())
-                               .describedAs("Received not all sent events")
-                               .isLessThan(100);
-
-               assertThatNoException()
-                               .describedAs("Consumer should not be running")
-                               .isThrownBy(() -> endlessConsumer.exitStatus());
-               assertThat(endlessConsumer.exitStatus())
-                               .describedAs("Consumer should have exited abnormally")
-                               .containsInstanceOf(RecordDeserializationException.class);
-       }
-
-
-       /** Helper methods for the verification of expectations */
-
-       void compareToCommitedOffsets(Map<TopicPartition, Long> offsetsToCheck)
-       {
-               doForCurrentOffsets((tp, offset) ->
-               {
-                       Long expected = offsetsToCheck.get(tp) + 1;
-                       log.debug("Checking, if the offset for {} is {}", tp, expected);
-                       assertThat(offset)
-                                       .describedAs("Committed offset corresponds to the offset of the consumer")
-                                       .isEqualTo(expected);
-               });
-       }
-
-       void checkSeenOffsetsForProgress()
-       {
-               // Be sure, that some messages were consumed...!
-               Set<TopicPartition> withProgress = new HashSet<>();
-               partitions().forEach(tp ->
-               {
-                       Long oldOffset = oldOffsets.get(tp) + 1;
-                       Long newOffset = newOffsets.get(tp) + 1;
-                       if (!oldOffset.equals(newOffset))
-                       {
-                               log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
-                               withProgress.add(tp);
-                       }
-               });
-               assertThat(withProgress)
-                               .describedAs("Some offsets must have changed, compared to the old offset-positions")
-                               .isNotEmpty();
-       }
-
-
-       /** Helper methods for setting up and running the tests */
-
-       void seekToEnd()
-       {
-               offsetConsumer.assign(partitions());
-               offsetConsumer.seekToEnd(partitions());
-               partitions().forEach(tp ->
-               {
-                       // seekToEnd() works lazily: it only takes effect on poll()/position()
-                       Long offset = offsetConsumer.position(tp);
-                       log.info("New position for {}: {}", tp, offset);
-               });
-               // The new positions must be commited!
-               offsetConsumer.commitSync();
-               offsetConsumer.unsubscribe();
-       }
-
-       void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
-       {
-               offsetConsumer.assign(partitions());
-               partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
-               offsetConsumer.unsubscribe();
-       }
-
-       List<TopicPartition> partitions()
-       {
-               return
-                               IntStream
-                                               .range(0, PARTITIONS)
-                                               .mapToObj(partition -> new TopicPartition(TOPIC, partition))
-                                               .collect(Collectors.toList());
-       }
-
-
-       public interface RecordGenerator<K, V>
-       {
-               public ProducerRecord<String, Bytes> generate(int partition, String key, int counter);
-       }
-
-       void send100Messages(RecordGenerator recordGenerator)
-       {
-               int i = 0;
-
-               for (int partition = 0; partition < 10; partition++)
-               {
-                       for (int key = 0; key < 10; key++)
-                       {
-                               ProducerRecord<String, Bytes> record =
-                                               recordGenerator.generate(partition, Integer.toString(partition*10+key%2), ++i);
-
-                               kafkaProducer.send(record, (metadata, e) ->
-                               {
-                                       if (metadata != null)
-                                       {
-                                               log.debug(
-                                                               "{}|{} - {}={}",
-                                                               metadata.partition(),
-                                                               metadata.offset(),
-                                                               record.key(),
-                                                               record.value());
-                                       }
-                                       else
-                                       {
-                                               log.warn(
-                                                               "Exception for {}={}: {}",
-                                                               record.key(),
-                                                               record.value(),
-                                                               e.toString());
-                                       }
-                               });
-                       }
-               }
-       }
-
-       ProducerRecord<String, Bytes> toRecord(int partition, String key, Bytes value, Optional<String> type)
-               {
-               ProducerRecord<String, Bytes> record =
-                               new ProducerRecord<>(TOPIC, partition, key, value);
-
-               type.ifPresent(typeId -> record.headers().add("__TypeId__", typeId.getBytes()));
-               return record;
-       }
-
-       Bytes serializeClientMessage(String key, int value)
-       {
-               TestClientMessage message = new TestClientMessage(key, Integer.toString(value));
-               return new Bytes(valueSerializer.serialize(TOPIC, message));
-       }
-
-       Bytes serializeGreeting(String key)
-       {
-               TestGreeting message = new TestGreeting(key, LocalDateTime.now());
-               return new Bytes(valueSerializer.serialize(TOPIC, message));
-       }
-
-       Bytes serializeFooMessage(String key, int value)
-       {
-               TestFooMessage message = new TestFooMessage(key, (long)value);
-               return new Bytes(valueSerializer.serialize(TOPIC, message));
-       }
-
-       @BeforeEach
-       public void init()
-       {
-               testHandler = record -> {} ;
-
-               seekToEnd();
-
-               oldOffsets = new HashMap<>();
-               newOffsets = new HashMap<>();
-               receivedRecords = new HashSet<>();
-
-               doForCurrentOffsets((tp, offset) ->
-               {
-                       oldOffsets.put(tp, offset - 1);
-                       newOffsets.put(tp, offset - 1);
-               });
-
-               Consumer<ConsumerRecord<String, ValidMessage>> captureOffsetAndExecuteTestHandler =
-                               record ->
-                               {
-                                       newOffsets.put(
-                                                       new TopicPartition(record.topic(), record.partition()),
-                                                       record.offset());
-                                       receivedRecords.add(record);
-                                       testHandler.accept(record);
-                               };
-
-               endlessConsumer =
-                               new EndlessConsumer<>(
-                                               executor,
-                                               properties.getClientId(),
-                                               properties.getTopic(),
-                                               kafkaConsumer,
-                                               captureOffsetAndExecuteTestHandler);
-
-               endlessConsumer.start();
-       }
-
-       @AfterEach
-       public void deinit()
-       {
-               try
-               {
-                       endlessConsumer.stop();
-               }
-               catch (Exception e)
-               {
-                       log.info("Exception while stopping the consumer: {}", e.toString());
-               }
-       }
-
-
-       @TestConfiguration
-       @Import(ApplicationConfiguration.class)
-       public static class Configuration
-       {
-               @Bean
-               Serializer<ValidMessage> serializer()
-               {
-                       return new JsonSerializer<>();
-               }
-
-               @Bean
-               KafkaProducer<String, Bytes> kafkaProducer(ApplicationProperties properties)
-               {
-                       Properties props = new Properties();
-                       props.put("bootstrap.servers", properties.getBootstrapServer());
-                       props.put("linger.ms", 100);
-                       props.put("key.serializer", StringSerializer.class.getName());
-                       props.put("value.serializer", BytesSerializer.class.getName());
-
-                       return new KafkaProducer<>(props);
-               }
-
-               @Bean
-               KafkaConsumer<Bytes, Bytes> offsetConsumer(ApplicationProperties properties)
-               {
-                       Properties props = new Properties();
-                       props.put("bootstrap.servers", properties.getBootstrapServer());
-                       props.put("client.id", "OFFSET-CONSUMER");
-                       props.put("group.id", properties.getGroupId());
-                       props.put("key.deserializer", BytesDeserializer.class.getName());
-                       props.put("value.deserializer", BytesDeserializer.class.getName());
-
-                       return new KafkaConsumer<>(props);
-               }
-       }
-}
diff --git a/src/test/java/de/juplo/kafka/GenericApplicationTest.java b/src/test/java/de/juplo/kafka/GenericApplicationTest.java
new file mode 100644 (file)
index 0000000..a6d6aa1
--- /dev/null
+++ b/src/test/java/de/juplo/kafka/GenericApplicationTest.java
@@ -0,0 +1,344 @@
+package de.juplo.kafka;
+
+import lombok.extern.slf4j.Slf4j;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.errors.RecordDeserializationException;
+import org.apache.kafka.common.serialization.*;
+import org.apache.kafka.common.utils.Bytes;
+import org.junit.jupiter.api.*;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
+import org.springframework.boot.test.context.TestConfiguration;
+import org.springframework.context.annotation.Import;
+import org.springframework.kafka.test.context.EmbeddedKafka;
+import org.springframework.test.context.TestPropertySource;
+import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
+
+import java.time.Duration;
+import java.util.*;
+import java.util.concurrent.ExecutorService;
+import java.util.function.BiConsumer;
+import java.util.function.Consumer;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import static de.juplo.kafka.GenericApplicationTest.PARTITIONS;
+import static de.juplo.kafka.GenericApplicationTest.TOPIC;
+import static org.assertj.core.api.Assertions.*;
+import static org.awaitility.Awaitility.*;
+
+
+@SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
+@TestPropertySource(
+               properties = {
+                               "consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
+                               "consumer.topic=" + TOPIC,
+                               "consumer.commit-interval=1s" })
+@EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
+@Slf4j
+abstract class GenericApplicationTest<K, V>
+{
+       public static final String TOPIC = "FOO";
+       public static final int PARTITIONS = 10;
+
+
+       @Autowired
+       KafkaConsumer<K, V> kafkaConsumer;
+       @Autowired
+       Consumer<ConsumerRecord<K, V>> consumer;
+       @Autowired
+       ApplicationProperties properties;
+       @Autowired
+       ExecutorService executor;
+
+       KafkaProducer<Bytes, Bytes> testRecordProducer;
+       KafkaConsumer<Bytes, Bytes> offsetConsumer;
+       EndlessConsumer<K, V> endlessConsumer;
+       Map<TopicPartition, Long> oldOffsets;
+       Map<TopicPartition, Long> newOffsets;
+       Set<ConsumerRecord<K, V>> receivedRecords;
+
+
+       final RecordGenerator recordGenerator;
+       final Consumer<ProducerRecord<Bytes, Bytes>> messageSender;
+
+       public GenericApplicationTest(RecordGenerator recordGenerator)
+       {
+               this.recordGenerator = recordGenerator;
+               this.messageSender = (record) -> sendMessage(record);
+       }
+
+
+       /** Tests methods */
+
+       @Test
+       void commitsCurrentOffsetsOnSuccess()
+       {
+               recordGenerator.generate(100, Set.of(), Set.of(), messageSender);
+
+               await("100 records received")
+                               .atMost(Duration.ofSeconds(30))
+                               .pollInterval(Duration.ofSeconds(1))
+                               .until(() -> receivedRecords.size() >= 100);
+
+               await("Offsets committed")
+                               .atMost(Duration.ofSeconds(10))
+                               .pollInterval(Duration.ofSeconds(1))
+                               .untilAsserted(() ->
+                               {
+                                       checkSeenOffsetsForProgress();
+                                       compareToCommitedOffsets(newOffsets);
+                               });
+
+               assertThatExceptionOfType(IllegalStateException.class)
+                               .isThrownBy(() -> endlessConsumer.exitStatus())
+                               .describedAs("Consumer should still be running");
+       }
+
+       @Test
+       void commitsOffsetOfErrorForReprocessingOnDeserializationError()
+       {
+               recordGenerator.generate(100, Set.of(77), Set.of(), messageSender);
+
+               await("Consumer failed")
+                               .atMost(Duration.ofSeconds(30))
+                               .pollInterval(Duration.ofSeconds(1))
+                               .until(() -> !endlessConsumer.running());
+
+               checkSeenOffsetsForProgress();
+               compareToCommitedOffsets(newOffsets);
+
+               endlessConsumer.start();
+               await("Consumer failed")
+                               .atMost(Duration.ofSeconds(30))
+                               .pollInterval(Duration.ofSeconds(1))
+                               .until(() -> !endlessConsumer.running());
+
+               checkSeenOffsetsForProgress();
+               compareToCommitedOffsets(newOffsets);
+               assertThat(receivedRecords.size())
+                               .describedAs("Received not all sent events")
+                               .isLessThan(100);
+
+               assertThatNoException()
+                               .describedAs("Consumer should not be running")
+                               .isThrownBy(() -> endlessConsumer.exitStatus());
+               assertThat(endlessConsumer.exitStatus())
+                               .describedAs("Consumer should have exited abnormally")
+                               .containsInstanceOf(RecordDeserializationException.class);
+       }
+
+       @Test
+       void doesNotCommitOffsetsOnLogicError()
+       {
+               recordGenerator.generate(100, Set.of(), Set.of(77), messageSender);
+
+               await("Consumer failed")
+                               .atMost(Duration.ofSeconds(30))
+                               .pollInterval(Duration.ofSeconds(1))
+                               .until(() -> !endlessConsumer.running());
+
+               checkSeenOffsetsForProgress();
+               compareToCommitedOffsets(oldOffsets);
+
+               endlessConsumer.start();
+               await("Consumer failed")
+                               .atMost(Duration.ofSeconds(30))
+                               .pollInterval(Duration.ofSeconds(1))
+                               .until(() -> !endlessConsumer.running());
+
+               checkSeenOffsetsForProgress();
+               compareToCommitedOffsets(oldOffsets);
+               assertThat(receivedRecords.size())
+                               .describedAs("Received not all sent events")
+                               .isLessThan(100);
+
+               assertThatNoException()
+                               .describedAs("Consumer should not be running")
+                               .isThrownBy(() -> endlessConsumer.exitStatus());
+               assertThat(endlessConsumer.exitStatus())
+                               .describedAs("Consumer should have exited abnormally")
+                               .containsInstanceOf(RuntimeException.class);
+       }
+
+
+       /** Helper methods for the verification of expectations */
+
+       void compareToCommitedOffsets(Map<TopicPartition, Long> offsetsToCheck)
+       {
+               doForCurrentOffsets((tp, offset) ->
+               {
+                       Long expected = offsetsToCheck.get(tp) + 1;
+                       log.debug("Checking, if the offset for {} is {}", tp, expected);
+                       assertThat(offset)
+                                       .describedAs("Committed offset corresponds to the offset of the consumer")
+                                       .isEqualTo(expected);
+               });
+       }
+
+       void checkSeenOffsetsForProgress()
+       {
+               // Be sure, that some messages were consumed...!
+               Set<TopicPartition> withProgress = new HashSet<>();
+               partitions().forEach(tp ->
+               {
+                       Long oldOffset = oldOffsets.get(tp) + 1;
+                       Long newOffset = newOffsets.get(tp) + 1;
+                       if (!oldOffset.equals(newOffset))
+                       {
+                               log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
+                               withProgress.add(tp);
+                       }
+               });
+               assertThat(withProgress)
+                               .describedAs("Some offsets must have changed, compared to the old offset-positions")
+                               .isNotEmpty();
+       }
+
+
+       /** Helper methods for setting up and running the tests */
+
+       void seekToEnd()
+       {
+               offsetConsumer.assign(partitions());
+               offsetConsumer.seekToEnd(partitions());
+               partitions().forEach(tp ->
+               {
+                       // seekToEnd() works lazily: it only takes effect on poll()/position()
+                       Long offset = offsetConsumer.position(tp);
+                       log.info("New position for {}: {}", tp, offset);
+               });
+               // The new positions must be commited!
+               offsetConsumer.commitSync();
+               offsetConsumer.unsubscribe();
+       }
+
+       void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
+       {
+               offsetConsumer.assign(partitions());
+               partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
+               offsetConsumer.unsubscribe();
+       }
+
+       List<TopicPartition> partitions()
+       {
+               return
+                               IntStream
+                                               .range(0, PARTITIONS)
+                                               .mapToObj(partition -> new TopicPartition(TOPIC, partition))
+                                               .collect(Collectors.toList());
+       }
+
+
+       public interface RecordGenerator
+       {
+               void generate(
+                               int numberOfMessagesToGenerate,
+                               Set<Integer> poisonPills,
+                               Set<Integer> logicErrors,
+                               Consumer<ProducerRecord<Bytes, Bytes>> messageSender);
+       }
+
+       void sendMessage(ProducerRecord<Bytes, Bytes> record)
+       {
+               testRecordProducer.send(record, (metadata, e) ->
+               {
+                       if (metadata != null)
+                       {
+                               log.debug(
+                                               "{}|{} - {}={}",
+                                               metadata.partition(),
+                                               metadata.offset(),
+                                               record.key(),
+                                               record.value());
+                       }
+                       else
+                       {
+                               log.warn(
+                                               "Exception for {}={}: {}",
+                                               record.key(),
+                                               record.value(),
+                                               e.toString());
+                       }
+               });
+       }
+
+
+       @BeforeEach
+       public void init()
+       {
+               Properties props;
+               props = new Properties();
+               props.put("bootstrap.servers", properties.getBootstrapServer());
+               props.put("linger.ms", 100);
+               props.put("key.serializer", BytesSerializer.class.getName());
+               props.put("value.serializer", BytesSerializer.class.getName());
+               testRecordProducer = new KafkaProducer<>(props);
+
+               props = new Properties();
+               props.put("bootstrap.servers", properties.getBootstrapServer());
+               props.put("client.id", "OFFSET-CONSUMER");
+               props.put("group.id", properties.getGroupId());
+               props.put("key.deserializer", BytesDeserializer.class.getName());
+               props.put("value.deserializer", BytesDeserializer.class.getName());
+               offsetConsumer = new KafkaConsumer<>(props);
+
+               seekToEnd();
+
+               oldOffsets = new HashMap<>();
+               newOffsets = new HashMap<>();
+               receivedRecords = new HashSet<>();
+
+               doForCurrentOffsets((tp, offset) ->
+               {
+                       oldOffsets.put(tp, offset - 1);
+                       newOffsets.put(tp, offset - 1);
+               });
+
+               Consumer<ConsumerRecord<K, V>> captureOffsetAndExecuteTestHandler =
+                               record ->
+                               {
+                                       newOffsets.put(
+                                                       new TopicPartition(record.topic(), record.partition()),
+                                                       record.offset());
+                                       receivedRecords.add(record);
+                                       consumer.accept(record);
+                               };
+
+               endlessConsumer =
+                               new EndlessConsumer<>(
+                                               executor,
+                                               properties.getClientId(),
+                                               properties.getTopic(),
+                                               kafkaConsumer,
+                                               captureOffsetAndExecuteTestHandler);
+
+               endlessConsumer.start();
+       }
+
+       @AfterEach
+       public void deinit()
+       {
+               try
+               {
+                       endlessConsumer.stop();
+                       testRecordProducer.close();
+                       offsetConsumer.close();
+               }
+               catch (Exception e)
+               {
+                       log.info("Exception while stopping the consumer: {}", e.toString());
+               }
+       }
+
+
+       @TestConfiguration
+       @Import(ApplicationConfiguration.class)
+       public static class Configuration
+       {
+       }
+}
diff --git a/src/test/java/de/juplo/kafka/TestClientMessage.java b/src/test/java/de/juplo/kafka/TestClientMessage.java
deleted file mode 100644 (file)
index 0072121..0000000
--- a/src/test/java/de/juplo/kafka/TestClientMessage.java
+++ /dev/null
@@ -1,12 +0,0 @@
-package de.juplo.kafka;
-
-
-import lombok.Value;
-
-
-@Value
-public class TestClientMessage
-{
-  private final String client;
-  private final String message;
-}
diff --git a/src/test/java/de/juplo/kafka/TestFooMessage.java b/src/test/java/de/juplo/kafka/TestFooMessage.java
deleted file mode 100644 (file)
index d8f4c65..0000000
--- a/src/test/java/de/juplo/kafka/TestFooMessage.java
+++ /dev/null
@@ -1,12 +0,0 @@
-package de.juplo.kafka;
-
-
-import lombok.Value;
-
-
-@Value
-public class TestFooMessage
-{
-  private final String client;
-  private final Long timestamp;
-}
diff --git a/src/test/java/de/juplo/kafka/TestGreeting.java b/src/test/java/de/juplo/kafka/TestGreeting.java
deleted file mode 100644 (file)
index 446e877..0000000
--- a/src/test/java/de/juplo/kafka/TestGreeting.java
+++ /dev/null
@@ -1,13 +0,0 @@
-package de.juplo.kafka;
-
-import lombok.Value;
-
-import java.time.LocalDateTime;
-
-
-@Value
-public class TestGreeting
-{
-  private final String name;
-  private final LocalDateTime when;
-}