X-Git-Url: https://juplo.de/gitweb/?a=blobdiff_plain;f=src%2Ftest%2Fjava%2Fde%2Fjuplo%2Fkafka%2FApplicationTests.java;h=6aaff523c7071232bd2264fb0f6bca1b1c44cd0f;hb=475d3b4149a49568680ee1f6d6f4a1b7a45845df;hp=21d1668240d41d39481178821173d6a5f7e4bbc1;hpb=fe867d6d14fd90aab95bdd7ba9374a585c268d3f;p=demos%2Fkafka%2Ftraining diff --git a/src/test/java/de/juplo/kafka/ApplicationTests.java b/src/test/java/de/juplo/kafka/ApplicationTests.java index 21d1668..6aaff52 100644 --- a/src/test/java/de/juplo/kafka/ApplicationTests.java +++ b/src/test/java/de/juplo/kafka/ApplicationTests.java @@ -11,7 +11,10 @@ import org.apache.kafka.common.serialization.BytesSerializer; import org.apache.kafka.common.serialization.LongSerializer; import org.apache.kafka.common.serialization.StringSerializer; import org.apache.kafka.common.utils.Bytes; +import org.junit.jupiter.api.MethodOrderer; +import org.junit.jupiter.api.Order; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestMethodOrder; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer; import org.springframework.boot.test.context.TestConfiguration; @@ -35,6 +38,7 @@ import static org.assertj.core.api.Assertions.assertThat; @SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class) +@TestMethodOrder(MethodOrderer.OrderAnnotation.class) @TestPropertySource( properties = { "consumer.bootstrap-server=${spring.embedded.kafka.brokers}", @@ -59,24 +63,30 @@ class ApplicationTests @Autowired ExecutorService executor; + Map oldOffsets; + Map newOffsets; + @Test + @Order(1) // << The poison pill is not skipped.
Hence, this test must run first void commitsCurrentOffsetsOnSuccess() { send100Messages(i -> new Bytes(longSerializer.serialize(TOPIC, i))); Set> received = new HashSet<>(); - Map offsets = runEndlessConsumer(record -> + runEndlessConsumer(record -> { received.add(record); if (received.size() == 100) throw new WakeupException(); }); - check(offsets); + checkSeenOffsetsForProgress(); + compareToCommitedOffsets(newOffsets); } @Test + @Order(2) void commitsNoOffsetsOnError() { send100Messages(counter -> @@ -84,11 +94,10 @@ class ApplicationTests ? new Bytes(stringSerializer.serialize(TOPIC, "BOOM!")) : new Bytes(longSerializer.serialize(TOPIC, counter))); - Map oldOffsets = new HashMap<>(); - doForCurrentOffsets((tp, offset) -> oldOffsets.put(tp.partition(), offset -1)); - Map newOffsets = runEndlessConsumer((record) -> {}); + runEndlessConsumer((record) -> {}); - check(oldOffsets); + checkSeenOffsetsForProgress(); + compareToCommitedOffsets(oldOffsets); } @@ -133,11 +142,23 @@ class ApplicationTests } } - Map runEndlessConsumer(Consumer> consumer) + EndlessConsumer runEndlessConsumer(Consumer> consumer) { - Map offsets = new HashMap<>(); - doForCurrentOffsets((tp, offset) -> offsets.put(tp.partition(), offset -1)); - Consumer> captureOffset = record -> offsets.put(record.partition(), record.offset()); + oldOffsets = new HashMap<>(); + newOffsets = new HashMap<>(); + + doForCurrentOffsets((tp, offset) -> + { + oldOffsets.put(tp, offset - 1); + newOffsets.put(tp, offset - 1); + }); + + Consumer> captureOffset = + record -> + newOffsets.put( + new TopicPartition(record.topic(), record.partition()), + record.offset()); + EndlessConsumer endlessConsumer = new EndlessConsumer<>( executor, @@ -148,7 +169,7 @@ class ApplicationTests endlessConsumer.run(); - return offsets; + return endlessConsumer; } List partitions() @@ -167,11 +188,28 @@ class ApplicationTests kafkaConsumer.unsubscribe(); } - void check(Map offsets) + void checkSeenOffsetsForProgress() + { + // Be sure,
that some messages were consumed...! + Set withProgress = new HashSet<>(); + partitions().forEach(tp -> + { + Long oldOffset = oldOffsets.get(tp); + Long newOffset = newOffsets.get(tp); + if (!oldOffset.equals(newOffset)) + { + log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset); + withProgress.add(tp); + } + }); + assertThat(withProgress).isNotEmpty().describedAs("Found no partitions with any offset-progress"); + } + + void compareToCommitedOffsets(Map offsetsToCheck) { doForCurrentOffsets((tp, offset) -> { - Long expected = offsets.get(tp.partition()) + 1; + Long expected = offsetsToCheck.get(tp) + 1; log.debug("Checking, if the offset for {} is {}", tp, expected); assertThat(offset).isEqualTo(expected); });