X-Git-Url: http://juplo.de/gitweb/?a=blobdiff_plain;f=src%2Ftest%2Fjava%2Fde%2Fjuplo%2Fkafka%2FApplicationTests.java;h=43a4f6124d8fa931b0791149f84227959352ad2d;hb=1709f0e4f41be7e3b955d19769697a517633827d;hp=1d3546c1fb55f3e6e8f24f09348f1482be3c5294;hpb=95ada5445e5db63f53f9c36d55ba862459ea923e;p=demos%2Fkafka%2Ftraining

diff --git a/src/test/java/de/juplo/kafka/ApplicationTests.java b/src/test/java/de/juplo/kafka/ApplicationTests.java
index 1d3546c..43a4f61 100644
--- a/src/test/java/de/juplo/kafka/ApplicationTests.java
+++ b/src/test/java/de/juplo/kafka/ApplicationTests.java
@@ -6,18 +6,18 @@ import org.apache.kafka.clients.consumer.KafkaConsumer;
 import org.apache.kafka.clients.producer.KafkaProducer;
 import org.apache.kafka.clients.producer.ProducerRecord;
 import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.errors.RecordDeserializationException;
 import org.apache.kafka.common.serialization.*;
 import org.apache.kafka.common.utils.Bytes;
-import org.assertj.core.api.OptionalAssert;
 import org.junit.jupiter.api.*;
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.boot.autoconfigure.kafka.KafkaAutoConfiguration;
+import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
 import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
 import org.springframework.boot.test.context.TestConfiguration;
 import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Import;
 import org.springframework.context.annotation.Primary;
+import org.springframework.kafka.support.serializer.JsonSerializer;
 import org.springframework.kafka.test.context.EmbeddedKafka;
 import org.springframework.test.context.TestPropertySource;
 import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
@@ -26,8 +26,8 @@ import java.time.Duration;
 import java.util.*;
 import java.util.concurrent.ExecutionException;
 import java.util.function.BiConsumer;
+import java.util.function.BiFunction;
 import java.util.function.Consumer;
-import java.util.function.Function;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
@@ -39,14 +39,14 @@ import static org.awaitility.Awaitility.*;
 
 @SpringJUnitConfig(
 		initializers = ConfigDataApplicationContextInitializer.class,
-		classes = {
+		classes = {
 				EndlessConsumer.class,
 				KafkaAutoConfiguration.class,
 				ApplicationTests.Configuration.class })
-@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
 @TestPropertySource(
 		properties = {
-				"consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
+				"spring.kafka.consumer.bootstrap-servers=${spring.embedded.kafka.brokers}",
+				"spring.kafka.producer.bootstrap-servers=${spring.embedded.kafka.brokers}",
 				"consumer.topic=" + TOPIC })
 @EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
 @Slf4j
@@ -63,9 +63,13 @@ class ApplicationTests
 	@Autowired
 	KafkaProducer<String, Bytes> kafkaProducer;
 	@Autowired
+	org.apache.kafka.clients.consumer.Consumer<String, ClientMessage> kafkaConsumer;
+	@Autowired
 	KafkaConsumer<Bytes, Bytes> offsetConsumer;
 	@Autowired
-	ApplicationProperties properties;
+	ApplicationProperties applicationProperties;
+	@Autowired
+	KafkaProperties kafkaProperties;
 	@Autowired
 	EndlessConsumer endlessConsumer;
 	@Autowired
@@ -73,20 +77,19 @@ class ApplicationTests
 
 	Map<TopicPartition, Long> oldOffsets;
 	Map<TopicPartition, Long> newOffsets;
-	Set<ConsumerRecord<String, Long>> receivedRecords;
+	Set<ConsumerRecord<String, ClientMessage>> receivedRecords;
 
 
 	/** Test methods */
 
 	@Test
-	@Order(1) // << The poison pill is not skipped. Hence, this test must run first
 	void commitsCurrentOffsetsOnSuccess() throws ExecutionException, InterruptedException
 	{
-		send100Messages(i -> new Bytes(valueSerializer.serialize(TOPIC, i)));
+		send100Messages((key, counter) -> serialize(key, counter));
 
 		await("100 records received")
 				.atMost(Duration.ofSeconds(30))
-				.until(() -> receivedRecords.size() >= 100);
+				.until(() -> receivedRecords.size() == 100);
 
 		await("Offsets committed")
 				.atMost(Duration.ofSeconds(10))
@@ -96,35 +99,69 @@ class ApplicationTests
 					compareToCommitedOffsets(newOffsets);
 				});
 
-		assertThatExceptionOfType(IllegalStateException.class)
-				.isThrownBy(() -> endlessConsumer.exitStatus())
-				.describedAs("Consumer should still be running");
+		assertThat(endlessConsumer.isRunning())
+				.describedAs("Consumer should still be running")
+				.isTrue();
 	}
 
 	@Test
-	@Order(2)
-	void commitsOffsetOfErrorForReprocessingOnError()
+	void commitsCurrentOffsetsOnDeserializationError()
 	{
-		send100Messages(counter ->
+		send100Messages((key, counter) ->
 				counter == 77
 						? new Bytes(stringSerializer.serialize(TOPIC, "BOOM!"))
-						: new Bytes(valueSerializer.serialize(TOPIC, counter)));
+						: serialize(key, counter));
 
-		await("Consumer failed")
+		await("99 records received")
 				.atMost(Duration.ofSeconds(30))
-				.untilAsserted(() -> checkSeenOffsetsForProgress());
-
-		compareToCommitedOffsets(newOffsets);
-		assertThat(receivedRecords.size())
-				.describedAs("Received not all sent events")
-				.isLessThan(100);
-
-		assertThatNoException()
-				.describedAs("Consumer should not be running")
-				.isThrownBy(() -> endlessConsumer.exitStatus());
-		((OptionalAssert<Exception>)assertThat(endlessConsumer.exitStatus()))
-				.describedAs("Consumer should have exited abnormally")
-				.containsInstanceOf(RecordDeserializationException.class);
+				.until(() -> receivedRecords.size() == 99);
+
+		await("Offsets committed")
+				.atMost(Duration.ofSeconds(10))
+				.untilAsserted(() ->
+				{
+					// UGLY:
+					// This only works because valid messages are still read
+					// after the message that triggers the deserialization
+					// error.
+					// REASON:
+					// The MessageHandler does not see the offset of the
+					// faulty message!
+					checkSeenOffsetsForProgress();
+					compareToCommitedOffsets(newOffsets);
+				});
+
+		assertThat(endlessConsumer.isRunning())
+				.describedAs("Consumer should still be running")
+				.isTrue();
+	}
+
+	@Test
+	void commitsOffsetOnProgramLogicErrorFoo()
+	{
+		recordHandler.testHandler = (record) ->
+		{
+			if (Integer.parseInt(record.value().message) % 10 == 0)
+				throw new RuntimeException("BOOM: " + record.value().message + "%10 == 0");
+		};
+
+		send100Messages((key, counter) -> serialize(key, counter));
+
+		await("100 records received")
+				.atMost(Duration.ofSeconds(30))
+				.until(() -> receivedRecords.size() == 100);
+
+		await("Offsets committed")
+				.atMost(Duration.ofSeconds(10))
+				.untilAsserted(() ->
+				{
+					checkSeenOffsetsForProgress();
+					compareToCommitedOffsets(newOffsets);
+				});
+
+		assertThat(endlessConsumer.isRunning())
+				.describedAs("Consumer should still be running")
+				.isTrue();
 	}
 
 
@@ -135,7 +172,7 @@
 		doForCurrentOffsets((tp, offset) ->
 		{
 			Long expected = offsetsToCheck.get(tp) + 1;
-			log.debug("Checking, if the offset for {} is {}", tp, expected);
+			log.debug("TEST: Comparing the expected offset of {} for {} to {}", expected, tp, offset);
 			assertThat(offset)
 					.describedAs("Committed offset corresponds to the offset of the consumer")
 					.isEqualTo(expected);
@@ -152,10 +189,11 @@
 			Long newOffset = newOffsets.get(tp);
 			if (!oldOffset.equals(newOffset))
 			{
-				log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
+				log.debug("TEST: Progress for {}: {} -> {}", tp, oldOffset, newOffset);
 				withProgress.add(tp);
 			}
 		});
+		log.debug("TEST: Offsets with progress: {}", withProgress);
 		assertThat(withProgress)
 				.describedAs("Some offsets must have changed, compared to the old offset-positions")
 				.isNotEmpty();
@@ -181,7 +219,7 @@
 	}
 
 
-	void send100Messages(Function<Long, Bytes> messageGenerator)
+	void send100Messages(BiFunction<Integer, Long, Bytes> messageGenerator)
 	{
 		long i = 0;
 
@@ -189,7 +227,7 @@
 		{
 			for (int key = 0; key < 10; key++)
 			{
-				Bytes value = messageGenerator.apply(++i);
+				Bytes value = messageGenerator.apply(key, ++i);
 
 				ProducerRecord<String, Bytes> record =
 						new ProducerRecord<>(
@@ -198,12 +236,13 @@
 								Integer.toString(key%2),
 								value);
 
+				record.headers().add("__TypeId__", "message".getBytes());
 				kafkaProducer.send(record, (metadata, e) ->
 				{
 					if (metadata != null)
 					{
 						log.debug(
-								"{}|{} - {}={}",
+								"TEST: Sending partition={}, offset={} - {}={}",
 								metadata.partition(),
 								metadata.offset(),
 								record.key(),
@@ -212,7 +251,7 @@
 					else
 					{
 						log.warn(
-								"Exception for {}={}: {}",
+								"TEST: Exception for {}={}: {}",
 								record.key(),
 								record.value(),
 								e.toString());
@@ -222,6 +261,14 @@
 		}
 	}
 
+	Bytes serialize(Integer key, Long value)
+	{
+		ClientMessage message = new ClientMessage();
+		message.setClient(key.toString());
+		message.setMessage(value.toString());
+		return new Bytes(valueSerializer.serialize(TOPIC, message));
+	}
+
 
 	@BeforeEach
 	public void init()
@@ -242,6 +289,7 @@
 				record ->
 				{
 					receivedRecords.add(record);
+					log.debug("TEST: Processing record #{}: {}", receivedRecords.size(), record.value());
 					newOffsets.put(
 							new TopicPartition(record.topic(), record.partition()),
 							record.offset());
@@ -259,18 +307,18 @@
 		}
 		catch (Exception e)
 		{
-			log.info("Exception while stopping the consumer: {}", e.toString());
+			log.info("TEST: Exception while stopping the consumer: {}", e.toString());
 		}
 	}
 
 
-	public static class RecordHandler implements Consumer<ConsumerRecord<String, Long>>
+	public static class RecordHandler implements Consumer<ConsumerRecord<String, ClientMessage>>
 	{
-		Consumer<ConsumerRecord<String, Long>> captureOffsets;
-		Consumer<ConsumerRecord<String, Long>> testHandler;
+		Consumer<ConsumerRecord<String, ClientMessage>> captureOffsets;
+		Consumer<ConsumerRecord<String, ClientMessage>> testHandler;
 
 
 		@Override
-		public void accept(ConsumerRecord<String, Long> record)
+		public void accept(ConsumerRecord<String, ClientMessage> record)
 		{
 			captureOffsets
 					.andThen(testHandler)
@@ -284,22 +332,22 @@
 	{
 		@Primary
 		@Bean
-		public Consumer<ConsumerRecord<String, Long>> testHandler()
+		public Consumer<ConsumerRecord<String, ClientMessage>> testHandler()
 		{
 			return new RecordHandler();
 		}
 
 		@Bean
-		Serializer<Long> serializer()
+		Serializer<ClientMessage> serializer()
 		{
-			return new LongSerializer();
+			return new JsonSerializer<>();
 		}
 
 		@Bean
-		KafkaProducer<String, Bytes> kafkaProducer(ApplicationProperties properties)
+		KafkaProducer<String, Bytes> kafkaProducer(KafkaProperties properties)
 		{
 			Properties props = new Properties();
-			props.put("bootstrap.servers", properties.getBootstrapServer());
+			props.put("bootstrap.servers", properties.getConsumer().getBootstrapServers());
 			props.put("linger.ms", 100);
 			props.put("key.serializer", StringSerializer.class.getName());
 			props.put("value.serializer", BytesSerializer.class.getName());
@@ -308,12 +356,12 @@
 		}
 
 		@Bean
-		KafkaConsumer<Bytes, Bytes> offsetConsumer(ApplicationProperties properties)
+		KafkaConsumer<Bytes, Bytes> offsetConsumer(KafkaProperties properties)
 		{
 			Properties props = new Properties();
-			props.put("bootstrap.servers", properties.getBootstrapServer());
+			props.put("bootstrap.servers", properties.getConsumer().getBootstrapServers());
 			props.put("client.id", "OFFSET-CONSUMER");
-			props.put("group.id", properties.getGroupId());
+			props.put("group.id", properties.getConsumer().getGroupId());
 			props.put("key.deserializer", BytesDeserializer.class.getName());
 			props.put("value.deserializer", BytesDeserializer.class.getName());
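
Note on the "__TypeId__" header added in send100Messages(): the test produces pre-serialized Bytes values, so it has to set by hand the type header that Spring Kafka's JsonSerializer would otherwise add automatically. On the consuming side, Spring Kafka's JsonDeserializer can resolve the logical type id "message" back to a concrete class via a type mapping. The following sketch only illustrates that mechanism; the broker address, group id, and the mapping to de.juplo.kafka.ClientMessage are illustrative assumptions, not taken from this diff:

	import java.util.Properties;
	import org.apache.kafka.clients.consumer.KafkaConsumer;
	import org.apache.kafka.common.serialization.StringDeserializer;
	import org.springframework.kafka.support.serializer.JsonDeserializer;
	import de.juplo.kafka.ClientMessage;

	public class TypeMappingExample
	{
		public static KafkaConsumer<String, ClientMessage> buildConsumer()
		{
			Properties props = new Properties();
			props.put("bootstrap.servers", "localhost:9092"); // assumed broker address
			props.put("group.id", "type-mapping-example");    // assumed group id
			props.put("key.deserializer", StringDeserializer.class.getName());
			props.put("value.deserializer", JsonDeserializer.class.getName());
			// Map the logical type id carried in the __TypeId__ header ("message")
			// to the target class -- mirrors the header set in send100Messages():
			props.put(JsonDeserializer.TYPE_MAPPINGS, "message:de.juplo.kafka.ClientMessage");
			// JsonDeserializer only instantiates classes from trusted packages:
			props.put(JsonDeserializer.TRUSTED_PACKAGES, "de.juplo.kafka");
			return new KafkaConsumer<>(props);
		}
	}

With Spring Boot's auto-configuration, the same mapping can instead be supplied as a consumer property, e.g. spring.kafka.consumer.properties.spring.json.type.mapping=message:de.juplo.kafka.ClientMessage.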