X-Git-Url: http://juplo.de/gitweb/?a=blobdiff_plain;f=src%2Ftest%2Fjava%2Fde%2Fjuplo%2Fkafka%2FApplicationTests.java;h=92074fffbbe12df59266f038af1be8424e27c645;hb=1206d7938f995eed3353d2bd3e26241f30b88e44;hp=21d1668240d41d39481178821173d6a5f7e4bbc1;hpb=fe867d6d14fd90aab95bdd7ba9374a585c268d3f;p=demos%2Fkafka%2Ftraining

diff --git a/src/test/java/de/juplo/kafka/ApplicationTests.java b/src/test/java/de/juplo/kafka/ApplicationTests.java
index 21d1668..92074ff 100644
--- a/src/test/java/de/juplo/kafka/ApplicationTests.java
+++ b/src/test/java/de/juplo/kafka/ApplicationTests.java
@@ -6,12 +6,10 @@ import org.apache.kafka.clients.consumer.KafkaConsumer;
 import org.apache.kafka.clients.producer.KafkaProducer;
 import org.apache.kafka.clients.producer.ProducerRecord;
 import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.errors.WakeupException;
-import org.apache.kafka.common.serialization.BytesSerializer;
-import org.apache.kafka.common.serialization.LongSerializer;
-import org.apache.kafka.common.serialization.StringSerializer;
+import org.apache.kafka.common.errors.RecordDeserializationException;
+import org.apache.kafka.common.serialization.*;
 import org.apache.kafka.common.utils.Bytes;
-import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.*;
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
 import org.springframework.boot.test.context.TestConfiguration;
@@ -21,7 +19,9 @@ import org.springframework.kafka.test.context.EmbeddedKafka;
 import org.springframework.test.context.TestPropertySource;
 import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
 
+import java.time.Duration;
 import java.util.*;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.function.BiConsumer;
 import java.util.function.Consumer;
@@ -32,9 +32,13 @@ import java.util.stream.IntStream;
 import static de.juplo.kafka.ApplicationTests.PARTITIONS;
 import static de.juplo.kafka.ApplicationTests.TOPIC;
 import static org.assertj.core.api.Assertions.assertThat;
+import static org.awaitility.Awaitility.*;
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertThrows;
 
 
 @SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
+@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
 @TestPropertySource(
 		properties = {
 				"consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
@@ -48,47 +52,139 @@ class ApplicationTests
 	StringSerializer stringSerializer = new StringSerializer();
 
-	LongSerializer longSerializer = new LongSerializer();
+	@Autowired
+	Serializer<Long> valueSerializer;
 	@Autowired
 	KafkaProducer<String, Bytes> kafkaProducer;
 	@Autowired
 	KafkaConsumer<String, Long> kafkaConsumer;
 	@Autowired
+	KafkaConsumer<Bytes, Bytes> offsetConsumer;
+	@Autowired
 	ApplicationProperties properties;
 	@Autowired
 	ExecutorService executor;
 
+	Consumer<ConsumerRecord<String, Long>> testHandler;
+	EndlessConsumer<String, Long> endlessConsumer;
+	Map<TopicPartition, Long> oldOffsets;
+	Map<TopicPartition, Long> newOffsets;
+	Set<ConsumerRecord<String, Long>> receivedRecords;
+
+
+	/** Test methods */
+
 	@Test
-	void commitsCurrentOffsetsOnSuccess()
+	@Order(1) // << The poison pill is not skipped. Hence, this test must run first
+	void commitsCurrentOffsetsOnSuccess() throws ExecutionException, InterruptedException
 	{
-		send100Messages(i -> new Bytes(longSerializer.serialize(TOPIC, i)));
+		send100Messages(i -> new Bytes(valueSerializer.serialize(TOPIC, i)));
 
-		Set<ConsumerRecord<String, Long>> received = new HashSet<>();
-		Map<Integer, Long> offsets = runEndlessConsumer(record ->
-		{
-			received.add(record);
-			if (received.size() == 100)
-				throw new WakeupException();
-		});
+		await("100 records received")
+				.atMost(Duration.ofSeconds(30))
+				.until(() -> receivedRecords.size() >= 100);
 
-		check(offsets);
+		await("Offsets committed")
+				.atMost(Duration.ofSeconds(10))
+				.untilAsserted(() ->
+				{
+					checkSeenOffsetsForProgress();
+					compareToCommitedOffsets(newOffsets);
+				});
+
+		assertThrows(
+				IllegalStateException.class,
+				() -> endlessConsumer.exitStatus(),
+				"Consumer should still be running");
 	}
 
 	@Test
-	void commitsNoOffsetsOnError()
+	@Order(2)
+	void commitsOffsetOfErrorForReprocessingOnError()
 	{
 		send100Messages(counter ->
 				counter == 77
 						? new Bytes(stringSerializer.serialize(TOPIC, "BOOM!"))
-						: new Bytes(longSerializer.serialize(TOPIC, counter)));
+						: new Bytes(valueSerializer.serialize(TOPIC, counter)));
+
+		await("Consumer failed")
+				.atMost(Duration.ofSeconds(30))
+				.until(() -> !endlessConsumer.running());
+
+		checkSeenOffsetsForProgress();
+		compareToCommitedOffsets(newOffsets);
+
+		endlessConsumer.start();
+		await("Consumer failed again")
+				.atMost(Duration.ofSeconds(30))
+				.until(() -> !endlessConsumer.running());
+
+		checkSeenOffsetsForProgress();
+		compareToCommitedOffsets(newOffsets);
+		assertThat(receivedRecords.size())
+				.describedAs("Not all sent events were received")
+				.isLessThan(100);
+
+		assertDoesNotThrow(
+				() -> endlessConsumer.exitStatus(),
+				"Consumer should not be running");
+		assertThat(endlessConsumer.exitStatus())
+				.describedAs("Consumer should have exited abnormally")
+				.containsInstanceOf(RecordDeserializationException.class);
+	}
+
+
+	/** Helper methods for the verification of expectations */
+
+	void compareToCommitedOffsets(Map<TopicPartition, Long> offsetsToCheck)
+	{
+		doForCurrentOffsets((tp, offset) ->
+		{
+			Long expected = offsetsToCheck.get(tp) + 1;
+			log.debug("Checking if the offset for {} is {}", tp, expected);
+			assertThat(offset)
+					.describedAs("Committed offset corresponds to the offset of the consumer")
+					.isEqualTo(expected);
+		});
+	}
+
+	void checkSeenOffsetsForProgress()
+	{
+		// Be sure that some messages were consumed...!
+		Set<TopicPartition> withProgress = new HashSet<>();
+		partitions().forEach(tp ->
+		{
+			Long oldOffset = oldOffsets.get(tp);
+			Long newOffset = newOffsets.get(tp);
+			if (!oldOffset.equals(newOffset))
+			{
+				log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
+				withProgress.add(tp);
+			}
+		});
+		assertThat(withProgress)
+				.describedAs("Some offsets must have changed, compared to the old offset-positions")
+				.isNotEmpty();
+	}
+
 
-		Map<Integer, Long> oldOffsets = new HashMap<>();
-		doForCurrentOffsets((tp, offset) -> oldOffsets.put(tp.partition(), offset - 1));
-		Map<Integer, Long> newOffsets = runEndlessConsumer((record) -> {});
+	/** Helper methods for setting up and running the tests */
 
-		check(oldOffsets);
+	void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
+	{
+		offsetConsumer.assign(partitions());
+		partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
+		offsetConsumer.unsubscribe();
+	}
+
+	List<TopicPartition> partitions()
+	{
+		return
+				IntStream
+						.range(0, PARTITIONS)
+						.mapToObj(partition -> new TopicPartition(TOPIC, partition))
+						.collect(Collectors.toList());
 	}
 
 
@@ -133,48 +229,54 @@ class ApplicationTests
 		}
 	}
 
-	Map<Integer, Long> runEndlessConsumer(Consumer<ConsumerRecord<String, Long>> consumer)
+
+	@BeforeEach
+	public void init()
 	{
-		Map<Integer, Long> offsets = new HashMap<>();
-		doForCurrentOffsets((tp, offset) -> offsets.put(tp.partition(), offset - 1));
-		Consumer<ConsumerRecord<String, Long>> captureOffset = record -> offsets.put(record.partition(), record.offset());
-		EndlessConsumer<String, Long> endlessConsumer =
+		testHandler = record -> {};
+
+		oldOffsets = new HashMap<>();
+		newOffsets = new HashMap<>();
+		receivedRecords = new HashSet<>();
+
+		doForCurrentOffsets((tp, offset) ->
+		{
+			oldOffsets.put(tp, offset - 1);
+			newOffsets.put(tp, offset - 1);
+		});
+
+		Consumer<ConsumerRecord<String, Long>> captureOffsetAndExecuteTestHandler =
+				record ->
+				{
+					newOffsets.put(
+							new TopicPartition(record.topic(), record.partition()),
+							record.offset());
+					receivedRecords.add(record);
+					testHandler.accept(record);
+				};
+
+		endlessConsumer =
 			new EndlessConsumer<>(
 					executor,
 					properties.getClientId(),
 					properties.getTopic(),
 					kafkaConsumer,
-					captureOffset.andThen(consumer));
-
-		endlessConsumer.run();
+					captureOffsetAndExecuteTestHandler);
 
-		return offsets;
+		endlessConsumer.start();
 	}
 
-	List<TopicPartition> partitions()
-	{
-		return
-			IntStream
-				.range(0, PARTITIONS)
-				.mapToObj(partition -> new TopicPartition(TOPIC, partition))
-				.collect(Collectors.toList());
-	}
-
-	void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
+	@AfterEach
+	public void deinit()
 	{
-		kafkaConsumer.assign(partitions());
-		partitions().forEach(tp -> consumer.accept(tp, kafkaConsumer.position(tp)));
-		kafkaConsumer.unsubscribe();
-	}
-
-	void check(Map<Integer, Long> offsets)
-	{
-		doForCurrentOffsets((tp, offset) ->
+		try
 		{
-			Long expected = offsets.get(tp.partition()) + 1;
-			log.debug("Checking, if the offset for {} is {}", tp, expected);
-			assertThat(offset).isEqualTo(expected);
-		});
+			endlessConsumer.stop();
+		}
+		catch (Exception e)
+		{
+			log.info("Exception while stopping the consumer: {}", e.toString());
+		}
 	}
 
 
@@ -182,6 +284,12 @@ class ApplicationTests
 	@Import(ApplicationConfiguration.class)
 	public static class Configuration
 	{
+		@Bean
+		Serializer<Long> serializer()
+		{
+			return new LongSerializer();
+		}
+
 		@Bean
 		KafkaProducer<String, Bytes> kafkaProducer(ApplicationProperties properties)
 		{
@@ -193,5 +301,18 @@ class ApplicationTests
 
 			return new KafkaProducer<>(props);
 		}
+
+		@Bean
+		KafkaConsumer<Bytes, Bytes> offsetConsumer(ApplicationProperties properties)
+		{
+			Properties props = new Properties();
+			props.put("bootstrap.servers", properties.getBootstrapServer());
+			props.put("client.id", "OFFSET-CONSUMER");
+			props.put("group.id", properties.getGroupId());
+			props.put("key.deserializer", BytesDeserializer.class.getName());
+			props.put("value.deserializer", BytesDeserializer.class.getName());
+
+			return new KafkaConsumer<>(props);
+		}
 	}
 }
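
The offsetConsumer bean added above is worth a note: it shares the group.id of the consumer under test, but only ever calls assign() and position() without polling anything, so position() falls back to the offset last committed for that group (or to the auto.offset.reset position, if nothing has been committed yet). That is what lets the test's doForCurrentOffsets() read the committed offsets. The following standalone sketch illustrates just this mechanism; the broker address, group id and topic name are placeholders, not values taken from the code under test.

	import org.apache.kafka.clients.consumer.KafkaConsumer;
	import org.apache.kafka.common.TopicPartition;
	import org.apache.kafka.common.utils.Bytes;

	import java.util.List;
	import java.util.Properties;

	public class CommittedOffsetProbe
	{
		public static void main(String[] args)
		{
			Properties props = new Properties();
			props.put("bootstrap.servers", "localhost:9092"); // placeholder broker
			props.put("group.id", "my-group");                // placeholder group to inspect
			props.put("key.deserializer", "org.apache.kafka.common.serialization.BytesDeserializer");
			props.put("value.deserializer", "org.apache.kafka.common.serialization.BytesDeserializer");

			try (KafkaConsumer<Bytes, Bytes> consumer = new KafkaConsumer<>(props))
			{
				TopicPartition tp = new TopicPartition("test", 0); // placeholder topic
				// assign() neither joins the consumer group nor triggers a
				// rebalance, and since nothing is polled, position() reports
				// the offset the group committed last (or the position chosen
				// by auto.offset.reset, if no offset was committed yet)
				consumer.assign(List.of(tp));
				System.out.println(tp + " -> " + consumer.position(tp));
			}
		}
	}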