Merged test case improvements (branch 'deserialization')
author    Kai Moritz <kai@juplo.de>    Tue, 26 Jul 2022 14:11:45 +0000 (16:11 +0200)
committer Kai Moritz <kai@juplo.de>    Tue, 26 Jul 2022 14:11:45 +0000 (16:11 +0200)
src/main/java/de/juplo/kafka/ApplicationConfiguration.java
src/main/java/de/juplo/kafka/ApplicationProperties.java
src/main/resources/application.yml
src/test/java/de/juplo/kafka/ApplicationTests.java

diff --git a/src/main/java/de/juplo/kafka/ApplicationConfiguration.java b/src/main/java/de/juplo/kafka/ApplicationConfiguration.java
index ddeb9d5..9fc0c70 100644
@@ -58,6 +58,7 @@ public class ApplicationConfiguration
     props.put("group.id", properties.getGroupId());
     props.put("client.id", properties.getClientId());
     props.put("auto.offset.reset", properties.getAutoOffsetReset());
+    props.put("auto.commit.interval.ms", (int)properties.getCommitInterval().toMillis());
     props.put("metadata.max.age.ms", "1000");
     props.put("key.deserializer", StringDeserializer.class.getName());
     props.put("value.deserializer", JsonDeserializer.class.getName());
diff --git a/src/main/java/de/juplo/kafka/ApplicationProperties.java b/src/main/java/de/juplo/kafka/ApplicationProperties.java
index fa731c5..14e928f 100644
@@ -7,6 +7,7 @@ import org.springframework.validation.annotation.Validated;
 
 import javax.validation.constraints.NotEmpty;
 import javax.validation.constraints.NotNull;
+import java.time.Duration;
 
 
 @ConfigurationProperties(prefix = "consumer")
@@ -30,4 +31,6 @@ public class ApplicationProperties
   @NotNull
   @NotEmpty
   private String autoOffsetReset;
+  @NotNull
+  private Duration commitInterval;
 }
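
The new commitInterval field works without a custom converter because Spring Boot's property binding turns duration strings such as 5s or 500ms into java.time.Duration automatically; the @NotNull annotation additionally makes the new property mandatory. A small sketch of that conversion, using the DurationStyle helper that ships with Spring Boot (the value "5s" mirrors the default added to application.yml below):

// Sketch: how a configuration value like 'commit-interval: 5s' becomes a
// java.time.Duration. DurationStyle is the parser Spring Boot applies for
// this simple duration format during relaxed binding.
import java.time.Duration;
import org.springframework.boot.convert.DurationStyle;

public class DurationBindingSketch
{
  public static void main(String[] args)
  {
    Duration commitInterval = DurationStyle.detectAndParse("5s");
    System.out.println(commitInterval.toMillis()); // prints 5000
  }
}
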
diff --git a/src/main/resources/application.yml b/src/main/resources/application.yml
index 9f3cb81..f8bfe7e 100644
@@ -4,6 +4,7 @@ consumer:
   client-id: DEV
   topic: test
   auto-offset-reset: earliest
+  commit-interval: 5s
 management:
   endpoint:
     shutdown:
diff --git a/src/test/java/de/juplo/kafka/ApplicationTests.java b/src/test/java/de/juplo/kafka/ApplicationTests.java
index 9169de0..24d3a9e 100644
@@ -41,7 +41,8 @@ import static org.awaitility.Awaitility.*;
 @TestPropertySource(
                properties = {
                                "consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
-                               "consumer.topic=" + TOPIC })
+                               "consumer.topic=" + TOPIC,
+                               "consumer.commit-interval=1s" })
 @EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
 @Slf4j
 class ApplicationTests
@@ -75,7 +76,6 @@ class ApplicationTests
        /** Tests methods */
 
        @Test
-       @Order(1) // << The poison pill is not skipped. Hence, this test must run first
        void commitsCurrentOffsetsOnSuccess() throws ExecutionException, InterruptedException
        {
                send100Messages((partition, key, counter) ->
@@ -98,10 +98,12 @@ class ApplicationTests
 
                await("100 records received")
                                .atMost(Duration.ofSeconds(30))
+                               .pollInterval(Duration.ofSeconds(1))
                                .until(() -> receivedRecords.size() >= 100);
 
                await("Offsets committed")
                                .atMost(Duration.ofSeconds(10))
+                               .pollInterval(Duration.ofSeconds(1))
                                .untilAsserted(() ->
                                {
                                        checkSeenOffsetsForProgress();
@@ -114,8 +116,7 @@ class ApplicationTests
        }
 
        @Test
-       @Order(2)
-       void commitsOffsetOfErrorForReprocessingOnError()
+       void commitsOffsetOfErrorForReprocessingOnDeserializationError()
        {
                send100Messages((partition, key, counter) ->
                {
@@ -145,6 +146,7 @@ class ApplicationTests
 
                await("Consumer failed")
                                .atMost(Duration.ofSeconds(30))
+                               .pollInterval(Duration.ofSeconds(1))
                                .until(() -> !endlessConsumer.running());
 
                checkSeenOffsetsForProgress();
@@ -153,6 +155,7 @@ class ApplicationTests
                endlessConsumer.start();
                await("Consumer failed")
                                .atMost(Duration.ofSeconds(30))
+                               .pollInterval(Duration.ofSeconds(1))
                                .until(() -> !endlessConsumer.running());
 
                checkSeenOffsetsForProgress();
@@ -190,8 +193,8 @@ class ApplicationTests
                Set<TopicPartition> withProgress = new HashSet<>();
                partitions().forEach(tp ->
                {
-                       Long oldOffset = oldOffsets.get(tp);
-                       Long newOffset = newOffsets.get(tp);
+                       Long oldOffset = oldOffsets.get(tp) + 1;
+                       Long newOffset = newOffsets.get(tp) + 1;
                        if (!oldOffset.equals(newOffset))
                        {
                                log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
@@ -206,6 +209,21 @@ class ApplicationTests
 
        /** Helper methods for setting up and running the tests */
 
+       void seekToEnd()
+       {
+               offsetConsumer.assign(partitions());
+               offsetConsumer.seekToEnd(partitions());
+               partitions().forEach(tp ->
+               {
+                       // seekToEnd() works lazily: it only takes effect on poll()/position()
+                       Long offset = offsetConsumer.position(tp);
+                       log.info("New position for {}: {}", tp, offset);
+               });
+               // The new positions must be committed!
+               offsetConsumer.commitSync();
+               offsetConsumer.unsubscribe();
+       }
+
        void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
        {
                offsetConsumer.assign(partitions());
@@ -294,6 +312,8 @@ class ApplicationTests
        {
                testHandler = record -> {} ;
 
+               seekToEnd();
+
                oldOffsets = new HashMap<>();
                newOffsets = new HashMap<>();
                receivedRecords = new HashSet<>();
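
The seekToEnd() helper introduced above is what allows the @Order annotations to be dropped: setUp() now resets the committed offsets of the consumer group to the current log end before every test, so no test depends on what an earlier test left behind. commitSync() without arguments commits the consumer's current positions, which the preceding position() calls have just forced to the log end. For illustration, here is a self-contained sketch of the same pattern that commits an explicit offset map instead; topic name, group id and broker address are assumptions, not values taken from this commit.

// Sketch: reset a consumer group's committed offsets to the current log end,
// committing the new positions explicitly instead of relying on the
// parameterless commitSync() used in the test helper above.
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.stream.Collectors;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

public class SeekToEndSketch
{
  public static void main(String[] args)
  {
    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092"); // assumption
    props.put("group.id", "my-group");                // assumption
    props.put("key.deserializer", StringDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());

    try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props))
    {
      List<TopicPartition> partitions = consumer
          .partitionsFor("test") // assumed topic name
          .stream()
          .map(info -> new TopicPartition(info.topic(), info.partition()))
          .collect(Collectors.toList());

      consumer.assign(partitions);
      consumer.seekToEnd(partitions); // lazy: only resolved by poll()/position()

      Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
      for (TopicPartition tp : partitions)
      {
        // position() forces the pending seek and returns the real end offset
        offsets.put(tp, new OffsetAndMetadata(consumer.position(tp)));
      }
      consumer.commitSync(offsets);
    }
  }
}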