X-Git-Url: http://juplo.de/gitweb/?a=blobdiff_plain;f=src%2Ftest%2Fjava%2Fde%2Fjuplo%2Fkafka%2FApplicationTests.java;h=43a4f6124d8fa931b0791149f84227959352ad2d;hb=1709f0e4f41be7e3b955d19769697a517633827d;hp=bf1cdb8515a30053b64bfec64662809c8fa69d43;hpb=f6212abfac1d872979d2a27f5a6bf4708b643db6;p=demos%2Fkafka%2Ftraining

diff --git a/src/test/java/de/juplo/kafka/ApplicationTests.java b/src/test/java/de/juplo/kafka/ApplicationTests.java
index bf1cdb8..43a4f61 100644
--- a/src/test/java/de/juplo/kafka/ApplicationTests.java
+++ b/src/test/java/de/juplo/kafka/ApplicationTests.java
@@ -6,42 +6,47 @@ import org.apache.kafka.clients.consumer.KafkaConsumer;
 import org.apache.kafka.clients.producer.KafkaProducer;
 import org.apache.kafka.clients.producer.ProducerRecord;
 import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.errors.WakeupException;
-import org.apache.kafka.common.serialization.BytesSerializer;
-import org.apache.kafka.common.serialization.LongSerializer;
-import org.apache.kafka.common.serialization.StringSerializer;
+import org.apache.kafka.common.serialization.*;
 import org.apache.kafka.common.utils.Bytes;
-import org.junit.jupiter.api.MethodOrderer;
-import org.junit.jupiter.api.Order;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.TestMethodOrder;
+import org.junit.jupiter.api.*;
 import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.autoconfigure.kafka.KafkaAutoConfiguration;
+import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
 import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
 import org.springframework.boot.test.context.TestConfiguration;
 import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Import;
+import org.springframework.context.annotation.Primary;
+import org.springframework.kafka.support.serializer.JsonSerializer;
 import org.springframework.kafka.test.context.EmbeddedKafka;
 import org.springframework.test.context.TestPropertySource;
 import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
 
+import java.time.Duration;
 import java.util.*;
-import java.util.concurrent.ExecutorService;
+import java.util.concurrent.ExecutionException;
 import java.util.function.BiConsumer;
+import java.util.function.BiFunction;
 import java.util.function.Consumer;
-import java.util.function.Function;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 
 import static de.juplo.kafka.ApplicationTests.PARTITIONS;
 import static de.juplo.kafka.ApplicationTests.TOPIC;
-import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.*;
+import static org.awaitility.Awaitility.*;
 
 
-@SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
-@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
+@SpringJUnitConfig(
+    initializers = ConfigDataApplicationContextInitializer.class,
+    classes = {
+        EndlessConsumer.class,
+        KafkaAutoConfiguration.class,
+        ApplicationTests.Configuration.class })
 @TestPropertySource(
     properties = {
-        "consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
+        "spring.kafka.consumer.bootstrap-servers=${spring.embedded.kafka.brokers}",
+        "spring.kafka.producer.bootstrap-servers=${spring.embedded.kafka.brokers}",
         "consumer.topic=" + TOPIC })
 @EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
 @Slf4j
@@ -52,63 +57,131 @@ class ApplicationTests
 
   StringSerializer stringSerializer = new StringSerializer();
-  LongSerializer longSerializer = new LongSerializer();
 
+  @Autowired
+  Serializer<ClientMessage> valueSerializer;
   @Autowired
   KafkaProducer<String, Bytes> kafkaProducer;
   @Autowired
-  KafkaConsumer<String, Long> kafkaConsumer;
+  org.apache.kafka.clients.consumer.Consumer kafkaConsumer;
+  @Autowired
+  KafkaConsumer<Bytes, Bytes> offsetConsumer;
+  @Autowired
+  ApplicationProperties applicationProperties;
+  @Autowired
+  KafkaProperties kafkaProperties;
   @Autowired
-  ApplicationProperties properties;
+  EndlessConsumer endlessConsumer;
   @Autowired
-  ExecutorService executor;
+  RecordHandler recordHandler;
+
+  Map<TopicPartition, Long> oldOffsets;
+  Map<TopicPartition, Long> newOffsets;
+  Set<ConsumerRecord<String, ClientMessage>> receivedRecords;
+
+
+  /** Test methods */
 
   @Test
-  @Order(1) // << The poistion pill is not skipped. Hence, this test must run first
-  void commitsCurrentOffsetsOnSuccess()
+  void commitsCurrentOffsetsOnSuccess() throws ExecutionException, InterruptedException
   {
-    send100Messages(i -> new Bytes(longSerializer.serialize(TOPIC, i)));
+    send100Messages((key, counter) -> serialize(key, counter));
 
-    Map<TopicPartition, Long> oldOffsets = new HashMap<>();
-    doForCurrentOffsets((tp, offset) -> oldOffsets.put(tp, offset -1));
-    Set<ConsumerRecord<String, Long>> received = new HashSet<>();
-    Map<TopicPartition, Long> newOffsets = runEndlessConsumer(record ->
-    {
-      received.add(record);
-      if (received.size() == 100)
-        throw new WakeupException();
-    });
+    await("100 records received")
+        .atMost(Duration.ofSeconds(30))
+        .until(() -> receivedRecords.size() == 100);
 
-    Set<TopicPartition> withProgress = new HashSet<>();
-    partitions().forEach(tp ->
-    {
-      Long oldOffset = oldOffsets.get(tp);
-      Long newOffset = newOffsets.get(tp);
-      if (!oldOffset.equals(newOffset))
-      {
-        log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
-        withProgress.add(tp);
-      }
-    });
-    assertThat(withProgress).isNotEmpty().describedAs("Found no partitions with any offset-progress");
+    await("Offsets committed")
+        .atMost(Duration.ofSeconds(10))
+        .untilAsserted(() ->
+        {
+          checkSeenOffsetsForProgress();
+          compareToCommittedOffsets(newOffsets);
+        });
 
-    check(newOffsets);
+    assertThat(endlessConsumer.isRunning())
+        .describedAs("Consumer should still be running")
+        .isTrue();
   }
 
   @Test
-  @Order(2)
-  void commitsNoOffsetsOnError()
+  void commitsCurrentOffsetsOnDeserializationError()
   {
-    send100Messages(counter ->
+    send100Messages((key, counter) ->
         counter == 77
             ? new Bytes(stringSerializer.serialize(TOPIC, "BOOM!"))
-            : new Bytes(longSerializer.serialize(TOPIC, counter)));
+            : serialize(key, counter));
 
-    Map<TopicPartition, Long> oldOffsets = new HashMap<>();
-    doForCurrentOffsets((tp, offset) -> oldOffsets.put(tp, offset -1));
-    Map<TopicPartition, Long> newOffsets = runEndlessConsumer((record) -> {});
+    await("99 records received")
+        .atMost(Duration.ofSeconds(30))
+        .until(() -> receivedRecords.size() == 99);
 
+    await("Offsets committed")
+        .atMost(Duration.ofSeconds(10))
+        .untilAsserted(() ->
+        {
+          // UGLY:
+          // This only works because valid messages are still read
+          // after the message that triggers the deserialization error.
+          // REASON:
+          // The MessageHandler never sees the offset of the
+          // faulty message!
+          checkSeenOffsetsForProgress();
+          compareToCommittedOffsets(newOffsets);
+        });
+
+    assertThat(endlessConsumer.isRunning())
+        .describedAs("Consumer should still be running")
+        .isTrue();
+  }
+
+  @Test
+  void commitsOffsetOnProgramLogicError()
+  {
+    recordHandler.testHandler = (record) ->
+    {
+      if (Integer.parseInt(record.value().message) % 10 == 0)
+        throw new RuntimeException("BOOM: " + record.value().message + " % 10 == 0");
+    };
+
+    send100Messages((key, counter) -> serialize(key, counter));
+
+    await("100 records received")
+        .atMost(Duration.ofSeconds(30))
+        .until(() -> receivedRecords.size() == 100);
+
+    await("Offsets committed")
+        .atMost(Duration.ofSeconds(10))
+        .untilAsserted(() ->
+        {
+          checkSeenOffsetsForProgress();
+          compareToCommittedOffsets(newOffsets);
+        });
+
+    assertThat(endlessConsumer.isRunning())
+        .describedAs("Consumer should still be running")
+        .isTrue();
+  }
+
+
+  /** Helper methods for the verification of expectations */
+
+  void compareToCommittedOffsets(Map<TopicPartition, Long> offsetsToCheck)
+  {
+    doForCurrentOffsets((tp, offset) ->
+    {
+      Long expected = offsetsToCheck.get(tp) + 1;
+      log.debug("TEST: Comparing the expected offset of {} for {} to {}", expected, tp, offset);
+      assertThat(offset)
+          .describedAs("Committed offset corresponds to the offset of the consumer")
+          .isEqualTo(expected);
+    });
+  }
+
+  void checkSeenOffsetsForProgress()
+  {
+    // Be sure that some messages were consumed...!
     Set<TopicPartition> withProgress = new HashSet<>();
     partitions().forEach(tp ->
     {
@@ -116,17 +189,37 @@ class ApplicationTests
       Long newOffset = newOffsets.get(tp);
       if (!oldOffset.equals(newOffset))
       {
-        log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
+        log.debug("TEST: Progress for {}: {} -> {}", tp, oldOffset, newOffset);
         withProgress.add(tp);
       }
     });
-    assertThat(withProgress).isNotEmpty().describedAs("Found no partitions with any offset-progress");
+    log.debug("TEST: Offsets with progress: {}", withProgress);
+    assertThat(withProgress)
+        .describedAs("Some offsets must have changed, compared to the old offset-positions")
+        .isNotEmpty();
+  }
+
+
+  /** Helper methods for setting up and running the tests */
 
-    check(oldOffsets);
+  void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
+  {
+    offsetConsumer.assign(partitions());
+    partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
+    offsetConsumer.unsubscribe();
   }
 
+  List<TopicPartition> partitions()
+  {
+    return
+        IntStream
+            .range(0, PARTITIONS)
+            .mapToObj(partition -> new TopicPartition(TOPIC, partition))
+            .collect(Collectors.toList());
+  }
 
-  void send100Messages(Function<Long, Bytes> messageGenerator)
+
+  void send100Messages(BiFunction<Integer, Long, Bytes> messageGenerator)
   {
     long i = 0;
 
@@ -134,7 +227,7 @@
     {
       for (int key = 0; key < 10; key++)
       {
-        Bytes value = messageGenerator.apply(++i);
+        Bytes value = messageGenerator.apply(key, ++i);
 
         ProducerRecord<String, Bytes> record =
             new ProducerRecord<>(
@@ -143,12 +236,13 @@
                 Integer.toString(key%2),
                 value);
 
+        record.headers().add("__TypeId__", "message".getBytes());
         kafkaProducer.send(record, (metadata, e) ->
         {
           if (metadata != null)
           {
             log.debug(
-                "{}|{} - {}={}",
+                "TEST: Sending partition={}, offset={} - {}={}",
                 metadata.partition(),
                 metadata.offset(),
                 record.key(),
@@ -157,7 +251,7 @@
           else
           {
             log.warn(
-                "Exception for {}={}: {}",
+                "TEST: Exception for {}={}: {}",
                 record.key(),
                 record.value(),
                 e.toString());
@@ -167,69 +261,111 @@
     }
   }
 
-  Map<TopicPartition, Long> runEndlessConsumer(Consumer<ConsumerRecord<String, Long>> consumer)
+  Bytes serialize(Integer key, Long value)
   {
-    Map<TopicPartition, Long> offsets = new HashMap<>();
-    doForCurrentOffsets((tp, offset) -> offsets.put(tp, offset -1));
-    Consumer<ConsumerRecord<String, Long>> captureOffset =
-        record ->
-            offsets.put(
-                new TopicPartition(record.topic(), record.partition()),
-                record.offset());
-    EndlessConsumer<String, Long> endlessConsumer =
-        new EndlessConsumer<>(
-            executor,
-            properties.getClientId(),
-            properties.getTopic(),
-            kafkaConsumer,
-            captureOffset.andThen(consumer));
-
-    endlessConsumer.run();
-
-    return offsets;
+    ClientMessage message = new ClientMessage();
+    message.setClient(key.toString());
+    message.setMessage(value.toString());
+    return new Bytes(valueSerializer.serialize(TOPIC, message));
   }
 
-  List<TopicPartition> partitions()
+
+  @BeforeEach
+  public void init()
   {
-    return
-        IntStream
-            .range(0, PARTITIONS)
-            .mapToObj(partition -> new TopicPartition(TOPIC, partition))
-            .collect(Collectors.toList());
+    recordHandler.testHandler = (record) -> {};
+
+    oldOffsets = new HashMap<>();
+    newOffsets = new HashMap<>();
+    receivedRecords = new HashSet<>();
+
+    doForCurrentOffsets((tp, offset) ->
+    {
+      oldOffsets.put(tp, offset - 1);
+      newOffsets.put(tp, offset - 1);
+    });
+
+    recordHandler.captureOffsets =
+        record ->
+        {
+          receivedRecords.add(record);
+          log.debug("TEST: Processing record #{}: {}", receivedRecords.size(), record.value());
+          newOffsets.put(
+              new TopicPartition(record.topic(), record.partition()),
+              record.offset());
+        };
+
+    endlessConsumer.start();
   }
 
-  void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
+  @AfterEach
+  public void deinit()
   {
-    kafkaConsumer.assign(partitions());
-    partitions().forEach(tp -> consumer.accept(tp, kafkaConsumer.position(tp)));
-    kafkaConsumer.unsubscribe();
+    try
+    {
+      endlessConsumer.stop();
+    }
+    catch (Exception e)
+    {
+      log.info("TEST: Exception while stopping the consumer: {}", e.toString());
+    }
   }
 
-  void check(Map<TopicPartition, Long> offsets)
+  public static class RecordHandler implements Consumer<ConsumerRecord<String, ClientMessage>>
   {
-    doForCurrentOffsets((tp, offset) ->
+    Consumer<ConsumerRecord<String, ClientMessage>> captureOffsets;
+    Consumer<ConsumerRecord<String, ClientMessage>> testHandler;
+
+
+    @Override
+    public void accept(ConsumerRecord<String, ClientMessage> record)
     {
-      Long expected = offsets.get(tp) + 1;
-      log.debug("Checking, if the offset for {} is {}", tp, expected);
-      assertThat(offset).isEqualTo(expected);
-    });
+      captureOffsets
+          .andThen(testHandler)
+          .accept(record);
+    }
   }
 
-
   @TestConfiguration
   @Import(ApplicationConfiguration.class)
   public static class Configuration
   {
+    @Primary
     @Bean
-    KafkaProducer<String, Bytes> kafkaProducer(ApplicationProperties properties)
+    public Consumer<ConsumerRecord<String, ClientMessage>> testHandler()
+    {
+      return new RecordHandler();
+    }
+
+    @Bean
+    Serializer<ClientMessage> serializer()
+    {
+      return new JsonSerializer<>();
+    }
+
+    @Bean
+    KafkaProducer<String, Bytes> kafkaProducer(KafkaProperties properties)
    {
      Properties props = new Properties();
-      props.put("bootstrap.servers", properties.getBootstrapServer());
+      props.put("bootstrap.servers", properties.getConsumer().getBootstrapServers());
      props.put("linger.ms", 100);
      props.put("key.serializer", StringSerializer.class.getName());
      props.put("value.serializer", BytesSerializer.class.getName());
      return new KafkaProducer<>(props);
    }
+
+    @Bean
+    KafkaConsumer<Bytes, Bytes> offsetConsumer(KafkaProperties properties)
+    {
+      Properties props = new Properties();
+      props.put("bootstrap.servers", properties.getConsumer().getBootstrapServers());
+      props.put("client.id", "OFFSET-CONSUMER");
+      props.put("group.id", properties.getConsumer().getGroupId());
+      props.put("key.deserializer", BytesDeserializer.class.getName());
+      props.put("value.deserializer", BytesDeserializer.class.getName());
+
+      return new KafkaConsumer<>(props);
+    }
   }
 }
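
Notes on the change:

The rewritten tests no longer drive the consumer inline and abort it with a
WakeupException; instead, the EndlessConsumer is started in @BeforeEach and the
tests poll for the expected state with Awaitility. The following is a minimal,
self-contained sketch of that polling pattern — not part of the commit above.
It assumes only the org.awaitility:awaitility test dependency; the queue and
the class name are stand-ins for the receivedRecords set in the real test:

    import static org.awaitility.Awaitility.await;

    import java.time.Duration;
    import java.util.Queue;
    import java.util.concurrent.ConcurrentLinkedQueue;

    public class AwaitilitySketch
    {
      public static void main(String[] args)
      {
        Queue<String> received = new ConcurrentLinkedQueue<>();

        // A background thread fills the queue, like the consumer fills
        // receivedRecords in the test above.
        new Thread(() ->
        {
          for (int i = 0; i < 100; i++)
            received.add("record-" + i);
        }).start();

        // Poll until the condition holds, instead of sleeping for a fixed
        // time; fails with a ConditionTimeoutException after 30 seconds.
        await("100 records received")
            .atMost(Duration.ofSeconds(30))
            .until(() -> received.size() == 100);
      }
    }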
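The "+ 1" in compareToCommittedOffsets() reflects that Kafka commits the offset
of the next record to be consumed, not of the last record seen. The test reads
committed offsets indirectly, by assigning a separate offsetConsumer in the
same group and calling position(): a freshly assigned consumer that has neither
polled nor seeked resolves position() to the committed offset (or to the
auto.offset.reset position if nothing was committed). A sketch of the more
direct committed() API that could serve the same purpose — bootstrap server,
group id, and topic name are placeholder assumptions:

    import java.util.Map;
    import java.util.Properties;
    import java.util.Set;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.clients.consumer.OffsetAndMetadata;
    import org.apache.kafka.common.TopicPartition;
    import org.apache.kafka.common.serialization.BytesDeserializer;

    public class CommittedOffsetsSketch
    {
      public static void main(String[] args)
      {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumption
        props.put("group.id", "my-group");                // assumption
        props.put("key.deserializer", BytesDeserializer.class.getName());
        props.put("value.deserializer", BytesDeserializer.class.getName());

        try (KafkaConsumer<?, ?> consumer = new KafkaConsumer<>(props))
        {
          TopicPartition tp = new TopicPartition("test", 0); // assumption
          // committed() asks the group coordinator directly and does not
          // touch the consumer's assignment, unlike assign() + position().
          Map<TopicPartition, OffsetAndMetadata> committed =
              consumer.committed(Set.of(tp));
          OffsetAndMetadata offset = committed.get(tp);
          // The committed offset is the offset of the NEXT record to read,
          // hence the "last seen offset + 1" comparison in the test.
          System.out.println(tp + ": " + (offset == null ? "none" : offset.offset()));
        }
      }
    }

Finally, the hand-added "__TypeId__" header on each ProducerRecord mirrors what
spring-kafka's JsonSerializer would normally add itself: the JsonDeserializer
on the consumer side uses this header to pick the target class — presumably
matched here by a spring.json.type.mapping entry that maps "message" to
ClientMessage in the application configuration.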