From: Kai Moritz
Date: Sun, 14 Aug 2022 10:06:01 +0000 (+0200)
Subject: Tests merged over from springified-consumer--serialization -> deserialization
X-Git-Tag: sumup-adder---lvm-2-tage~9^2~7
X-Git-Url: https://juplo.de/gitweb/?a=commitdiff_plain;h=657bf71b6c1c99065f26cccf0c3d2a1f30bc9407;hp=b7a418d6c90c25187d6a00ba769aec895e5b7396;p=demos%2Fkafka%2Ftraining

Tests merged over from springified-consumer--serialization -> deserialization

* Only the newly added test was taken over.
* The added test was adapted to the behaviour on a logic error, which
  differs from Spring Kafka: plain Kafka performs neither automatic seeks
  nor an automatic commit. Since `EndlessConsumer` explicitly calls
  `unsubscribe()` on a logic error, no offset commit is carried out, so
  the old offset positions remain valid (a sketch of this behaviour
  follows after the diff).
* The test was renamed accordingly.
* `RecordGenerator` was extended with a second Integer-Set, through which
  the indices of the logic errors to be generated can be specified.
* The added test was switched over to the reworked method for generating
  the test messages.
* `ApplicationTest` was extended so that the logic error needed by the
  added test is produced.
---

diff --git a/README.sh b/README.sh
index 72f0c60..2a1e5d8 100755
--- a/README.sh
+++ b/README.sh
@@ -25,22 +25,44 @@ fi
 echo "Waiting for the Kafka-Cluster to become ready..."
 docker-compose exec cli cub kafka-ready -b kafka:9092 1 60 > /dev/null 2>&1 || exit 1
 docker-compose up setup
-docker-compose up -d producer consumer
+docker-compose up -d
+
+while ! [[ $(http 0:8081/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "Waiting for consumer-1..."; sleep 1; done
+while ! [[ $(http 0:8082/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "Waiting for consumer-2..."; sleep 1; done
+while ! [[ $(http 0:8083/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "Waiting for consumer-3..."; sleep 1; done
+while ! [[ $(http 0:8084/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "Waiting for consumer-4..."; sleep 1; done
+while ! [[ $(http 0:8085/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "Waiting for consumer-5..."; sleep 1; done
+
 sleep 5
+
 docker-compose exec -T cli bash << 'EOF'
 echo "Writing poison pill into topic test..."
 # tag::poisonpill[]
 echo 'BOOM!' | kafkacat -P -b kafka:9092 -t test
 # end::poisonpill[]
 EOF
-while [[ $(http 0:8081/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "Consumer is still running..."; sleep 1; done
+
+while [[ $(http 0:8081/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "consumer-1 is still running..."; sleep 1; done
+while [[ $(http 0:8082/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "consumer-2 is still running..."; sleep 1; done
+while [[ $(http 0:8083/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "consumer-3 is still running..."; sleep 1; done
+while [[ $(http 0:8084/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "consumer-4 is still running..."; sleep 1; done
+while [[ $(http 0:8085/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "consumer-5 is still running..."; sleep 1; done
+
 http -v :8081/actuator/health
-echo "Restarting consumer"
+echo "Restarting consumer-1"
 http -v post :8081/start
-while ! [[ $(http 0:8081/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "Waiting for consumer..."; sleep 1; done
-while [[ $(http 0:8081/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "Consumer is still running..."; sleep 1; done
+
+echo "Waiting for consumer-1 to come up"
+while ! [[ $(http 0:8081/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "Waiting for consumer-1..."; sleep 1; done
+http -v :8081/actuator/health
+
+echo "Waiting for consumer-1 to crash"
+while [[ $(http 0:8081/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "consumer-1 is still running..."; sleep 1; done
 http -v :8081/actuator/health
-http -v post :8081/actuator/shutdown
+
 docker-compose stop producer
-docker-compose ps
-docker-compose logs --tail=100 consumer
+docker-compose logs --tail=10 consumer-1
+docker-compose logs --tail=10 consumer-2
+docker-compose logs --tail=10 consumer-3
+docker-compose logs --tail=10 consumer-4
+docker-compose logs --tail=10 consumer-5
diff --git a/docker-compose.yml b/docker-compose.yml
index 159f9cb..d36e851 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -37,7 +37,7 @@ services:
     command: sleep infinity
 
   producer:
-    image: juplo/endless-producer:1.0-SNAPSHOT
+    image: juplo/endless-long-producer:1.0-SNAPSHOT
     ports:
       - 8080:8080
     environment:
@@ -48,13 +48,47 @@
       producer.throttle-ms: 200
 
-  consumer:
+  consumer-1:
     image: juplo/endless-consumer:1.0-SNAPSHOT
     ports:
       - 8081:8080
     environment:
       server.port: 8080
       consumer.bootstrap-server: kafka:9092
-      consumer.client-id: my-group
-      consumer.client-id: consumer
-      consumer.topic: test
+      consumer.client-id: consumer-1
+
+  consumer-2:
+    image: juplo/endless-consumer:1.0-SNAPSHOT
+    ports:
+      - 8082:8080
+    environment:
+      server.port: 8080
+      consumer.bootstrap-server: kafka:9092
+      consumer.client-id: consumer-2
+
+  consumer-3:
+    image: juplo/endless-consumer:1.0-SNAPSHOT
+    ports:
+      - 8083:8080
+    environment:
+      server.port: 8080
+      consumer.bootstrap-server: kafka:9092
+      consumer.client-id: consumer-3
+
+  consumer-4:
+    image: juplo/endless-consumer:1.0-SNAPSHOT
+    ports:
+      - 8084:8080
+    environment:
+      server.port: 8080
+      consumer.bootstrap-server: kafka:9092
+      consumer.client-id: consumer-4
+
+  consumer-5:
+    image: juplo/endless-consumer:1.0-SNAPSHOT
+    ports:
+      - 8085:8080
+    environment:
+      server.port: 8080
+      consumer.bootstrap-server: kafka:9092
+      consumer.client-id: consumer-5
diff --git a/pom.xml b/pom.xml
index 0889d23..6fd5d5f 100644
--- a/pom.xml
+++ b/pom.xml
@@ -16,6 +16,10 @@
   <version>1.0-SNAPSHOT</version>
   <description>Endless Consumer: a Simple Consumer-Group that reads and prints the topic and counts the received messages for each key by topic</description>
 
+  <properties>
+    <java.version>11</java.version>
+  </properties>
+
   <dependencies>
     <dependency>
       <groupId>org.springframework.boot</groupId>
@@ -38,10 +42,6 @@
       <groupId>org.apache.kafka</groupId>
       <artifactId>kafka-clients</artifactId>
     </dependency>
-    <dependency>
-      <groupId>org.springframework.kafka</groupId>
-      <artifactId>spring-kafka</artifactId>
-    </dependency>
     <dependency>
       <groupId>org.projectlombok</groupId>
       <artifactId>lombok</artifactId>
diff --git a/src/main/java/de/juplo/kafka/ApplicationConfiguration.java b/src/main/java/de/juplo/kafka/ApplicationConfiguration.java
index 9fc0c70..766740b 100644
--- a/src/main/java/de/juplo/kafka/ApplicationConfiguration.java
+++ b/src/main/java/de/juplo/kafka/ApplicationConfiguration.java
@@ -2,11 +2,11 @@ package de.juplo.kafka;
 
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.common.serialization.LongDeserializer;
 import org.apache.kafka.common.serialization.StringDeserializer;
 import org.springframework.boot.context.properties.EnableConfigurationProperties;
 import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Configuration;
-import org.springframework.kafka.support.serializer.JsonDeserializer;
 
 import java.util.Properties;
 import java.util.concurrent.ExecutorService;
@@ -19,7 +19,7 @@ public class ApplicationConfiguration
 {
   @Bean
-  public Consumer<ConsumerRecord<String, ValidMessage>> consumer()
+  public Consumer<ConsumerRecord<String, Long>> consumer()
   {
     return (record) ->
     {
@@ -28,10 +28,10 @@ public class ApplicationConfiguration
   }
 
   @Bean
-  public EndlessConsumer<String, ValidMessage> endlessConsumer(
-      KafkaConsumer<String, ValidMessage> kafkaConsumer,
+  public EndlessConsumer<String, Long> endlessConsumer(
+      KafkaConsumer<String, Long> kafkaConsumer,
       ExecutorService executor,
-      Consumer<ConsumerRecord<String, ValidMessage>> handler,
+      Consumer<ConsumerRecord<String, Long>> handler,
       ApplicationProperties properties)
   {
     return
@@ -50,7 +50,7 @@ public class ApplicationConfiguration
   }
 
   @Bean(destroyMethod = "close")
-  public KafkaConsumer<String, ValidMessage> kafkaConsumer(ApplicationProperties properties)
+  public KafkaConsumer<String, Long> kafkaConsumer(ApplicationProperties properties)
   {
     Properties props = new Properties();
 
@@ -61,11 +61,7 @@ public class ApplicationConfiguration
     props.put("auto.commit.interval.ms", (int)properties.getCommitInterval().toMillis());
     props.put("metadata.max.age.ms", "1000");
     props.put("key.deserializer", StringDeserializer.class.getName());
-    props.put("value.deserializer", JsonDeserializer.class.getName());
-    props.put(JsonDeserializer.TYPE_MAPPINGS,
-        "message:" + ClientMessage.class.getName() + "," +
-        "greeting:" + Greeting.class.getName());
-    props.put(JsonDeserializer.TRUSTED_PACKAGES, "de.juplo.kafka");
+    props.put("value.deserializer", LongDeserializer.class.getName());
 
     return new KafkaConsumer<>(props);
   }
diff --git a/src/main/java/de/juplo/kafka/ClientMessage.java b/src/main/java/de/juplo/kafka/ClientMessage.java
deleted file mode 100644
index a158907..0000000
--- a/src/main/java/de/juplo/kafka/ClientMessage.java
+++ /dev/null
@@ -1,22 +0,0 @@
-package de.juplo.kafka;
-
-import lombok.EqualsAndHashCode;
-import lombok.Getter;
-import lombok.Setter;
-import lombok.ToString;
-
-
-@Getter
-@Setter
-@EqualsAndHashCode
-@ToString
-public class ClientMessage extends ValidMessage
-{
-  String client;
-  String message;
-
-  ClientMessage()
-  {
-    super(Type.CLIENT_MESSAGE);
-  }
-}
diff --git a/src/main/java/de/juplo/kafka/Greeting.java b/src/main/java/de/juplo/kafka/Greeting.java
deleted file mode 100644
index 4421a50..0000000
--- a/src/main/java/de/juplo/kafka/Greeting.java
+++ /dev/null
@@ -1,21 +0,0 @@
-package de.juplo.kafka;
-
-import lombok.*;
-
-import java.time.LocalDateTime;
-
-
-@Getter
-@Setter
-@EqualsAndHashCode
-@ToString
-public class Greeting extends ValidMessage
-{
-  String name;
-  LocalDateTime when;
-
-  public Greeting()
-  {
-    super(Type.GREETING);
-  }
-}
diff --git a/src/main/java/de/juplo/kafka/ValidMessage.java b/src/main/java/de/juplo/kafka/ValidMessage.java
deleted file mode 100644
index 217d8f3..0000000
--- a/src/main/java/de/juplo/kafka/ValidMessage.java
+++ /dev/null
@@ -1,14 +0,0 @@
-package de.juplo.kafka;
-
-import lombok.Getter;
-import lombok.RequiredArgsConstructor;
-
-
-@RequiredArgsConstructor
-public abstract class ValidMessage
-{
-  enum Type { CLIENT_MESSAGE, GREETING }
-
-  @Getter
-  private final Type type;
-}
diff --git a/src/test/java/de/juplo/kafka/ApplicationTest.java b/src/test/java/de/juplo/kafka/ApplicationTest.java
new file mode 100644
index 0000000..ed93a21
--- /dev/null
+++ b/src/test/java/de/juplo/kafka/ApplicationTest.java
@@ -0,0 +1,84 @@
+package de.juplo.kafka;
+
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.common.serialization.LongSerializer;
+import org.apache.kafka.common.serialization.StringSerializer;
+import org.apache.kafka.common.utils.Bytes;
+import org.springframework.boot.test.context.TestConfiguration;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Primary;
+import org.springframework.test.context.ContextConfiguration;
+
+import java.util.Set;
+import java.util.function.Consumer;
+
+
+@ContextConfiguration(classes = ApplicationTest.Configuration.class)
+public class ApplicationTest extends GenericApplicationTest<String, Long>
+{
+  public ApplicationTest()
+  {
+    super(
+        new RecordGenerator()
+        {
+          final StringSerializer stringSerializer = new StringSerializer();
+          final LongSerializer longSerializer = new LongSerializer();
+
+
+          @Override
+          public void generate(
+              int numberOfMessagesToGenerate,
+              Set<Integer> poisonPills,
+              Set<Integer> logicErrors,
+              Consumer<ProducerRecord<Bytes, Bytes>> messageSender)
+          {
+            int i = 0;
+
+            for (int partition = 0; partition < 10; partition++)
+            {
+              for (int key = 0; key < 10; key++)
+              {
+                if (++i > numberOfMessagesToGenerate)
+                  return;
+
+                Bytes value = new Bytes(longSerializer.serialize(TOPIC, (long)i));
+                if (logicErrors.contains(i))
+                {
+                  value = new Bytes(longSerializer.serialize(TOPIC, Long.MIN_VALUE));
+                }
+                if (poisonPills.contains(i))
+                {
+                  value = new Bytes(stringSerializer.serialize(TOPIC, "BOOM (Poison-Pill)!"));
+                }
+
+                ProducerRecord<Bytes, Bytes> record =
+                    new ProducerRecord<>(
+                        TOPIC,
+                        partition,
+                        new Bytes(stringSerializer.serialize(TOPIC,Integer.toString(partition*10+key%2))),
+                        value);
+
+                messageSender.accept(record);
+              }
+            }
+          }
+        });
+  }
+
+
+  @TestConfiguration
+  public static class Configuration
+  {
+    @Primary
+    @Bean
+    public Consumer<ConsumerRecord<String, Long>> consumer()
+    {
+      return (record) ->
+      {
+        if (record.value() == Long.MIN_VALUE)
+          throw new RuntimeException("BOOM (Logic-Error)!");
+      };
+    }
+  }
+}
diff --git a/src/test/java/de/juplo/kafka/ApplicationTests.java b/src/test/java/de/juplo/kafka/ApplicationTests.java
deleted file mode 100644
index b5644b6..0000000
--- a/src/test/java/de/juplo/kafka/ApplicationTests.java
+++ /dev/null
@@ -1,455 +0,0 @@
-package de.juplo.kafka;
-
-import lombok.extern.slf4j.Slf4j;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.clients.consumer.KafkaConsumer;
-import org.apache.kafka.clients.producer.KafkaProducer;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.errors.RecordDeserializationException;
-import org.apache.kafka.common.serialization.*;
-import org.apache.kafka.common.utils.Bytes;
-import org.junit.jupiter.api.*;
-import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
-import org.springframework.boot.test.context.TestConfiguration;
-import org.springframework.context.annotation.Bean;
-import org.springframework.context.annotation.Import;
-import org.springframework.kafka.support.serializer.JsonSerializer;
-import org.springframework.kafka.test.context.EmbeddedKafka;
-import org.springframework.test.context.TestPropertySource;
-import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
-
-import java.time.Duration;
-import java.time.LocalDateTime;
-import java.util.*;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.function.BiConsumer;
-import java.util.function.Consumer;
-import java.util.stream.Collectors;
-import java.util.stream.IntStream;
-
-import static de.juplo.kafka.ApplicationTests.PARTITIONS;
-import static de.juplo.kafka.ApplicationTests.TOPIC;
-import static org.assertj.core.api.Assertions.*;
-import static org.awaitility.Awaitility.*;
-
-
-@SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
-@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
-@TestPropertySource(
-    properties = {
-        "consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
-        "consumer.topic=" + TOPIC,
-        "consumer.commit-interval=1s" })
-@EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
-@Slf4j
-class ApplicationTests
-{
-  public static final String TOPIC = "FOO";
-  public static final int PARTITIONS = 10;
-
-
-  StringSerializer stringSerializer = new StringSerializer();
-
-  @Autowired
-  Serializer valueSerializer;
-  @Autowired
-  KafkaProducer<String, Bytes> kafkaProducer;
-  @Autowired
-  KafkaConsumer<String, ValidMessage> kafkaConsumer;
-  @Autowired
-  KafkaConsumer<Bytes, Bytes> offsetConsumer;
-  @Autowired
-  ApplicationProperties properties;
-  @Autowired
-  ExecutorService executor;
-
-  Consumer<ConsumerRecord<String, ValidMessage>> testHandler;
-  EndlessConsumer<String, ValidMessage> endlessConsumer;
-  Map<TopicPartition, Long> oldOffsets;
-  Map<TopicPartition, Long> newOffsets;
-  Set<ConsumerRecord<String, ValidMessage>> receivedRecords;
-
-
-  /** Tests methods */
-
-  @Test
-  void commitsCurrentOffsetsOnSuccess()
-  {
-    send100Messages((partition, key, counter) ->
-    {
-      Bytes value;
-      String type;
-
-      if (counter%3 != 0)
-      {
-        value = serializeClientMessage(key, counter);
-        type = "message";
-      }
-      else {
-        value = serializeGreeting(key);
-        type = "greeting";
-      }
-
-      return toRecord(partition, key, value, Optional.of(type));
-    });
-
-    await("100 records received")
-        .atMost(Duration.ofSeconds(30))
-        .pollInterval(Duration.ofSeconds(1))
-        .until(() -> receivedRecords.size() >= 100);
-
-    await("Offsets committed")
-        .atMost(Duration.ofSeconds(10))
-        .pollInterval(Duration.ofSeconds(1))
-        .untilAsserted(() ->
-        {
-          checkSeenOffsetsForProgress();
-          compareToCommitedOffsets(newOffsets);
-        });
-
-    assertThatExceptionOfType(IllegalStateException.class)
-        .isThrownBy(() -> endlessConsumer.exitStatus())
-        .describedAs("Consumer should still be running");
-  }
-
-  @Test
-  void commitsOffsetOfErrorForReprocessingOnDeserializationErrorInvalidMessage()
-  {
-    send100Messages((partition, key, counter) ->
-    {
-      Bytes value;
-      String type;
-
-      if (counter == 77)
-      {
-        value = serializeFooMessage(key, counter);
-        type = null;
-      }
-      else
-      {
-        if (counter%3 != 0)
-        {
-          value = serializeClientMessage(key, counter);
-          type = "message";
-        }
-        else {
-          value = serializeGreeting(key);
-          type = "greeting";
-        }
-      }
-
-      return toRecord(partition, key, value, Optional.ofNullable(type));
-    });
-
-    await("Consumer failed")
-        .atMost(Duration.ofSeconds(30))
-        .pollInterval(Duration.ofSeconds(1))
-        .until(() -> !endlessConsumer.running());
-
-    checkSeenOffsetsForProgress();
-    compareToCommitedOffsets(newOffsets);
-
-    endlessConsumer.start();
-    await("Consumer failed")
-        .atMost(Duration.ofSeconds(30))
-        .pollInterval(Duration.ofSeconds(1))
-        .until(() -> !endlessConsumer.running());
-
-    checkSeenOffsetsForProgress();
-    compareToCommitedOffsets(newOffsets);
-    assertThat(receivedRecords.size())
-        .describedAs("Received not all sent events")
-        .isLessThan(100);
-
-    assertThatNoException()
-        .describedAs("Consumer should not be running")
-        .isThrownBy(() -> endlessConsumer.exitStatus());
-    assertThat(endlessConsumer.exitStatus())
-        .describedAs("Consumer should have exited abnormally")
-        .containsInstanceOf(RecordDeserializationException.class);
-  }
-
-  @Test
-  void commitsOffsetOfErrorForReprocessingOnDeserializationErrorOnUnknownMessage()
-  {
-    send100Messages((partition, key, counter) ->
-    {
-      Bytes value;
-      String type;
-
-      if (counter == 77)
-      {
-        value = serializeFooMessage(key, counter);
-        type = "foo";
-      }
-      else
-      {
-        if (counter%3 != 0)
-        {
-          value = serializeClientMessage(key, counter);
-          type = "message";
-        }
-        else {
-          value = serializeGreeting(key);
-          type = "greeting";
-        }
-      }
-
-      return toRecord(partition, key, value, Optional.of(type));
-    });
-
-    await("Consumer failed")
-        .atMost(Duration.ofSeconds(30))
-        .pollInterval(Duration.ofSeconds(1))
-        .until(() -> !endlessConsumer.running());
-
-    checkSeenOffsetsForProgress();
-    compareToCommitedOffsets(newOffsets);
-
-    endlessConsumer.start();
-    await("Consumer failed")
-        .atMost(Duration.ofSeconds(30))
-        .pollInterval(Duration.ofSeconds(1))
-        .until(() -> !endlessConsumer.running());
-
-    checkSeenOffsetsForProgress();
-    compareToCommitedOffsets(newOffsets);
-    assertThat(receivedRecords.size())
-        .describedAs("Received not all sent events")
-        .isLessThan(100);
-
-    assertThatNoException()
-        .describedAs("Consumer should not be running")
-        .isThrownBy(() -> endlessConsumer.exitStatus());
-    assertThat(endlessConsumer.exitStatus())
-        .describedAs("Consumer should have exited abnormally")
-        .containsInstanceOf(RecordDeserializationException.class);
-  }
-
-
-  /** Helper methods for the verification of expectations */
-
-  void compareToCommitedOffsets(Map<TopicPartition, Long> offsetsToCheck)
-  {
-    doForCurrentOffsets((tp, offset) ->
-    {
-      Long expected = offsetsToCheck.get(tp) + 1;
-      log.debug("Checking, if the offset for {} is {}", tp, expected);
-      assertThat(offset)
-          .describedAs("Committed offset corresponds to the offset of the consumer")
-          .isEqualTo(expected);
-    });
-  }
-
-  void checkSeenOffsetsForProgress()
-  {
-    // Be sure, that some messages were consumed...!
-    Set<TopicPartition> withProgress = new HashSet<>();
-    partitions().forEach(tp ->
-    {
-      Long oldOffset = oldOffsets.get(tp) + 1;
-      Long newOffset = newOffsets.get(tp) + 1;
-      if (!oldOffset.equals(newOffset))
-      {
-        log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
-        withProgress.add(tp);
-      }
-    });
-    assertThat(withProgress)
-        .describedAs("Some offsets must have changed, compared to the old offset-positions")
-        .isNotEmpty();
-  }
-
-
-  /** Helper methods for setting up and running the tests */
-
-  void seekToEnd()
-  {
-    offsetConsumer.assign(partitions());
-    offsetConsumer.seekToEnd(partitions());
-    partitions().forEach(tp ->
-    {
-      // seekToEnd() works lazily: it only takes effect on poll()/position()
-      Long offset = offsetConsumer.position(tp);
-      log.info("New position for {}: {}", tp, offset);
-    });
-    // The new positions must be commited!
-    offsetConsumer.commitSync();
-    offsetConsumer.unsubscribe();
-  }
-
-  void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
-  {
-    offsetConsumer.assign(partitions());
-    partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
-    offsetConsumer.unsubscribe();
-  }
-
-  List<TopicPartition> partitions()
-  {
-    return
-        IntStream
-            .range(0, PARTITIONS)
-            .mapToObj(partition -> new TopicPartition(TOPIC, partition))
-            .collect(Collectors.toList());
-  }
-
-
-  public interface RecordGenerator
-  {
-    public ProducerRecord<String, Bytes> generate(int partition, String key, int counter);
-  }
-
-  void send100Messages(RecordGenerator recordGenerator)
-  {
-    int i = 0;
-
-    for (int partition = 0; partition < 10; partition++)
-    {
-      for (int key = 0; key < 10; key++)
-      {
-        ProducerRecord<String, Bytes> record =
-            recordGenerator.generate(partition, Integer.toString(partition*10+key%2), ++i);
-
-        kafkaProducer.send(record, (metadata, e) ->
-        {
-          if (metadata != null)
-          {
-            log.debug(
-                "{}|{} - {}={}",
-                metadata.partition(),
-                metadata.offset(),
-                record.key(),
-                record.value());
-          }
-          else
-          {
-            log.warn(
-                "Exception for {}={}: {}",
-                record.key(),
-                record.value(),
-                e.toString());
-          }
-        });
-      }
-    }
-  }
-
-  ProducerRecord<String, Bytes> toRecord(int partition, String key, Bytes value, Optional<String> type)
-  {
-    ProducerRecord<String, Bytes> record =
-        new ProducerRecord<>(TOPIC, partition, key, value);
-
-    type.ifPresent(typeId -> record.headers().add("__TypeId__", typeId.getBytes()));
-    return record;
-  }
-
-  Bytes serializeClientMessage(String key, int value)
-  {
-    TestClientMessage message = new TestClientMessage(key, Integer.toString(value));
-    return new Bytes(valueSerializer.serialize(TOPIC, message));
-  }
-
-  Bytes serializeGreeting(String key)
-  {
-    TestGreeting message = new TestGreeting(key, LocalDateTime.now());
-    return new Bytes(valueSerializer.serialize(TOPIC, message));
-  }
-
-  Bytes serializeFooMessage(String key, int value)
-  {
-    TestFooMessage message = new TestFooMessage(key, (long)value);
-    return new Bytes(valueSerializer.serialize(TOPIC, message));
-  }
-
-  @BeforeEach
-  public void init()
-  {
-    testHandler = record -> {} ;
-
-    seekToEnd();
-
-    oldOffsets = new HashMap<>();
-    newOffsets = new HashMap<>();
-    receivedRecords = new HashSet<>();
-
-    doForCurrentOffsets((tp, offset) ->
-    {
-      oldOffsets.put(tp, offset - 1);
-      newOffsets.put(tp, offset - 1);
-    });
-
-    Consumer<ConsumerRecord<String, ValidMessage>> captureOffsetAndExecuteTestHandler =
-        record ->
-        {
-          newOffsets.put(
-              new TopicPartition(record.topic(), record.partition()),
-              record.offset());
-          receivedRecords.add(record);
-          testHandler.accept(record);
-        };
-
-    endlessConsumer =
-        new EndlessConsumer<>(
-            executor,
-            properties.getClientId(),
-            properties.getTopic(),
-            kafkaConsumer,
-            captureOffsetAndExecuteTestHandler);
-
-    endlessConsumer.start();
-  }
-
-  @AfterEach
-  public void deinit()
-  {
-    try
-    {
-      endlessConsumer.stop();
-    }
-    catch (Exception e)
-    {
-      log.info("Exception while stopping the consumer: {}", e.toString());
-    }
-  }
-
-
-  @TestConfiguration
-  @Import(ApplicationConfiguration.class)
-  public static class Configuration
-  {
-    @Bean
-    Serializer serializer()
-    {
-      return new JsonSerializer<>();
-    }
-
-    @Bean
-    KafkaProducer<String, Bytes> kafkaProducer(ApplicationProperties properties)
-    {
-      Properties props = new Properties();
-      props.put("bootstrap.servers", properties.getBootstrapServer());
-      props.put("linger.ms", 100);
-      props.put("key.serializer", StringSerializer.class.getName());
-      props.put("value.serializer", BytesSerializer.class.getName());
-
-      return new KafkaProducer<>(props);
-    }
-
-    @Bean
-    KafkaConsumer<Bytes, Bytes> offsetConsumer(ApplicationProperties properties)
-    {
-      Properties props = new Properties();
-      props.put("bootstrap.servers", properties.getBootstrapServer());
-      props.put("client.id", "OFFSET-CONSUMER");
-      props.put("group.id", properties.getGroupId());
-      props.put("key.deserializer", BytesDeserializer.class.getName());
-      props.put("value.deserializer", BytesDeserializer.class.getName());
-
-      return new KafkaConsumer<>(props);
-    }
-  }
-}
diff --git a/src/test/java/de/juplo/kafka/GenericApplicationTest.java b/src/test/java/de/juplo/kafka/GenericApplicationTest.java
new file mode 100644
index 0000000..a6d6aa1
--- /dev/null
+++ b/src/test/java/de/juplo/kafka/GenericApplicationTest.java
@@ -0,0 +1,344 @@
+package de.juplo.kafka;
+
+import lombok.extern.slf4j.Slf4j;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.errors.RecordDeserializationException;
+import org.apache.kafka.common.serialization.*;
+import org.apache.kafka.common.utils.Bytes;
+import org.junit.jupiter.api.*;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
+import org.springframework.boot.test.context.TestConfiguration;
+import org.springframework.context.annotation.Import;
+import org.springframework.kafka.test.context.EmbeddedKafka;
+import org.springframework.test.context.TestPropertySource;
+import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
+
+import java.time.Duration;
+import java.util.*;
+import java.util.concurrent.ExecutorService;
+import java.util.function.BiConsumer;
+import java.util.function.Consumer;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import static de.juplo.kafka.GenericApplicationTest.PARTITIONS;
+import static de.juplo.kafka.GenericApplicationTest.TOPIC;
+import static org.assertj.core.api.Assertions.*;
+import static org.awaitility.Awaitility.*;
+
+
+@SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
+@TestPropertySource(
+    properties = {
+        "consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
+        "consumer.topic=" + TOPIC,
+        "consumer.commit-interval=1s" })
+@EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
+@Slf4j
+abstract class GenericApplicationTest<K, V>
+{
+  public static final String TOPIC = "FOO";
+  public static final int PARTITIONS = 10;
+
+
+  @Autowired
+  KafkaConsumer<K, V> kafkaConsumer;
+  @Autowired
+  Consumer<ConsumerRecord<K, V>> consumer;
+  @Autowired
+  ApplicationProperties properties;
+  @Autowired
+  ExecutorService executor;
+
+  KafkaProducer<Bytes, Bytes> testRecordProducer;
+  KafkaConsumer<Bytes, Bytes> offsetConsumer;
+  EndlessConsumer<K, V> endlessConsumer;
+  Map<TopicPartition, Long> oldOffsets;
+  Map<TopicPartition, Long> newOffsets;
+  Set<ConsumerRecord<K, V>> receivedRecords;
+
+
+  final RecordGenerator recordGenerator;
+  final Consumer<ProducerRecord<Bytes, Bytes>> messageSender;
+
+  public GenericApplicationTest(RecordGenerator recordGenerator)
+  {
+    this.recordGenerator = recordGenerator;
+    this.messageSender = (record) -> sendMessage(record);
+  }
+
+
+  /** Tests methods */
+
+  @Test
+  void commitsCurrentOffsetsOnSuccess()
+  {
+    recordGenerator.generate(100, Set.of(), Set.of(), messageSender);
+
+    await("100 records received")
+        .atMost(Duration.ofSeconds(30))
+        .pollInterval(Duration.ofSeconds(1))
+        .until(() -> receivedRecords.size() >= 100);
+
+    await("Offsets committed")
+        .atMost(Duration.ofSeconds(10))
+        .pollInterval(Duration.ofSeconds(1))
+        .untilAsserted(() ->
+        {
+          checkSeenOffsetsForProgress();
+          compareToCommitedOffsets(newOffsets);
+        });
+
+    assertThatExceptionOfType(IllegalStateException.class)
+        .isThrownBy(() -> endlessConsumer.exitStatus())
+        .describedAs("Consumer should still be running");
+  }
+
+  @Test
+  void commitsOffsetOfErrorForReprocessingOnDeserializationError()
+  {
+    recordGenerator.generate(100, Set.of(77), Set.of(), messageSender);
+
+    await("Consumer failed")
+        .atMost(Duration.ofSeconds(30))
+        .pollInterval(Duration.ofSeconds(1))
+        .until(() -> !endlessConsumer.running());
+
+    checkSeenOffsetsForProgress();
+    compareToCommitedOffsets(newOffsets);
+
+    endlessConsumer.start();
+    await("Consumer failed")
+        .atMost(Duration.ofSeconds(30))
+        .pollInterval(Duration.ofSeconds(1))
+        .until(() -> !endlessConsumer.running());
+
+    checkSeenOffsetsForProgress();
+    compareToCommitedOffsets(newOffsets);
+    assertThat(receivedRecords.size())
+        .describedAs("Received not all sent events")
+        .isLessThan(100);
+
+    assertThatNoException()
+        .describedAs("Consumer should not be running")
+        .isThrownBy(() -> endlessConsumer.exitStatus());
+    assertThat(endlessConsumer.exitStatus())
+        .describedAs("Consumer should have exited abnormally")
+        .containsInstanceOf(RecordDeserializationException.class);
+  }
+
+  @Test
+  void doesNotCommitOffsetsOnLogicError()
+  {
+    recordGenerator.generate(100, Set.of(), Set.of(77), messageSender);
+
+    await("Consumer failed")
+        .atMost(Duration.ofSeconds(30))
+        .pollInterval(Duration.ofSeconds(1))
+        .until(() -> !endlessConsumer.running());
+
+    checkSeenOffsetsForProgress();
+    compareToCommitedOffsets(oldOffsets);
+
+    endlessConsumer.start();
+    await("Consumer failed")
+        .atMost(Duration.ofSeconds(30))
+        .pollInterval(Duration.ofSeconds(1))
+        .until(() -> !endlessConsumer.running());
+
+    checkSeenOffsetsForProgress();
+    compareToCommitedOffsets(oldOffsets);
+    assertThat(receivedRecords.size())
+        .describedAs("Received not all sent events")
+        .isLessThan(100);
+
+    assertThatNoException()
+        .describedAs("Consumer should not be running")
+        .isThrownBy(() -> endlessConsumer.exitStatus());
+    assertThat(endlessConsumer.exitStatus())
+        .describedAs("Consumer should have exited abnormally")
+        .containsInstanceOf(RuntimeException.class);
+  }
+
+
+  /** Helper methods for the verification of expectations */
+
+  void compareToCommitedOffsets(Map<TopicPartition, Long> offsetsToCheck)
+  {
+    doForCurrentOffsets((tp, offset) ->
+    {
+      Long expected = offsetsToCheck.get(tp) + 1;
+      log.debug("Checking, if the offset for {} is {}", tp, expected);
+      assertThat(offset)
+          .describedAs("Committed offset corresponds to the offset of the consumer")
+          .isEqualTo(expected);
+    });
+  }
+
+  void checkSeenOffsetsForProgress()
+  {
+    // Be sure, that some messages were consumed...!
+    Set<TopicPartition> withProgress = new HashSet<>();
+    partitions().forEach(tp ->
+    {
+      Long oldOffset = oldOffsets.get(tp) + 1;
+      Long newOffset = newOffsets.get(tp) + 1;
+      if (!oldOffset.equals(newOffset))
+      {
+        log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
+        withProgress.add(tp);
+      }
+    });
+    assertThat(withProgress)
+        .describedAs("Some offsets must have changed, compared to the old offset-positions")
+        .isNotEmpty();
+  }
+
+
+  /** Helper methods for setting up and running the tests */
+
+  void seekToEnd()
+  {
+    offsetConsumer.assign(partitions());
+    offsetConsumer.seekToEnd(partitions());
+    partitions().forEach(tp ->
+    {
+      // seekToEnd() works lazily: it only takes effect on poll()/position()
+      Long offset = offsetConsumer.position(tp);
+      log.info("New position for {}: {}", tp, offset);
+    });
+    // The new positions must be commited!
+    offsetConsumer.commitSync();
+    offsetConsumer.unsubscribe();
+  }
+
+  void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
+  {
+    offsetConsumer.assign(partitions());
+    partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
+    offsetConsumer.unsubscribe();
+  }
+
+  List<TopicPartition> partitions()
+  {
+    return
+        IntStream
+            .range(0, PARTITIONS)
+            .mapToObj(partition -> new TopicPartition(TOPIC, partition))
+            .collect(Collectors.toList());
+  }
+
+
+  public interface RecordGenerator
+  {
+    void generate(
+        int numberOfMessagesToGenerate,
+        Set<Integer> poisonPills,
+        Set<Integer> logicErrors,
+        Consumer<ProducerRecord<Bytes, Bytes>> messageSender);
+  }
+
+  void sendMessage(ProducerRecord<Bytes, Bytes> record)
+  {
+    testRecordProducer.send(record, (metadata, e) ->
+    {
+      if (metadata != null)
+      {
+        log.debug(
+            "{}|{} - {}={}",
+            metadata.partition(),
+            metadata.offset(),
+            record.key(),
+            record.value());
+      }
+      else
+      {
+        log.warn(
+            "Exception for {}={}: {}",
+            record.key(),
+            record.value(),
+            e.toString());
+      }
+    });
+  }
+
+
+  @BeforeEach
+  public void init()
+  {
+    Properties props;
+    props = new Properties();
+    props.put("bootstrap.servers", properties.getBootstrapServer());
+    props.put("linger.ms", 100);
+    props.put("key.serializer", BytesSerializer.class.getName());
+    props.put("value.serializer", BytesSerializer.class.getName());
+    testRecordProducer = new KafkaProducer<>(props);
+
+    props = new Properties();
+    props.put("bootstrap.servers", properties.getBootstrapServer());
+    props.put("client.id", "OFFSET-CONSUMER");
+    props.put("group.id", properties.getGroupId());
+    props.put("key.deserializer", BytesDeserializer.class.getName());
+    props.put("value.deserializer", BytesDeserializer.class.getName());
+    offsetConsumer = new KafkaConsumer<>(props);
+
+    seekToEnd();
+
+    oldOffsets = new HashMap<>();
+    newOffsets = new HashMap<>();
+    receivedRecords = new HashSet<>();
+
+    doForCurrentOffsets((tp, offset) ->
+    {
+      oldOffsets.put(tp, offset - 1);
+      newOffsets.put(tp, offset - 1);
+    });
+
+    Consumer<ConsumerRecord<K, V>> captureOffsetAndExecuteTestHandler =
+        record ->
+        {
+          newOffsets.put(
+              new TopicPartition(record.topic(), record.partition()),
+              record.offset());
+          receivedRecords.add(record);
+          consumer.accept(record);
+        };
+
+    endlessConsumer =
+        new EndlessConsumer<>(
+            executor,
+            properties.getClientId(),
+            properties.getTopic(),
+            kafkaConsumer,
+            captureOffsetAndExecuteTestHandler);
+
+    endlessConsumer.start();
+  }
+
+  @AfterEach
+  public void deinit()
+  {
+    try
+    {
+      endlessConsumer.stop();
+      testRecordProducer.close();
+      offsetConsumer.close();
+    }
+    catch (Exception e)
+    {
+      log.info("Exception while stopping the consumer: {}", e.toString());
+    }
+  }
+
+
+  @TestConfiguration
+  @Import(ApplicationConfiguration.class)
+  public static class Configuration
+  {
+  }
+}
diff --git a/src/test/java/de/juplo/kafka/TestClientMessage.java b/src/test/java/de/juplo/kafka/TestClientMessage.java
deleted file mode 100644
index 0072121..0000000
--- a/src/test/java/de/juplo/kafka/TestClientMessage.java
+++ /dev/null
@@ -1,12 +0,0 @@
-package de.juplo.kafka;
-
-
-import lombok.Value;
-
-
-@Value
-public class TestClientMessage
-{
-  private final String client;
-  private final String message;
-}
diff --git a/src/test/java/de/juplo/kafka/TestFooMessage.java b/src/test/java/de/juplo/kafka/TestFooMessage.java
deleted file mode 100644
index d8f4c65..0000000
--- a/src/test/java/de/juplo/kafka/TestFooMessage.java
+++ /dev/null
@@ -1,12 +0,0 @@
-package de.juplo.kafka;
-
-
-import lombok.Value;
-
-
-@Value
-public class TestFooMessage
-{
-  private final String client;
-  private final Long timestamp;
-}
diff --git a/src/test/java/de/juplo/kafka/TestGreeting.java b/src/test/java/de/juplo/kafka/TestGreeting.java
deleted file mode 100644
index 446e877..0000000
--- a/src/test/java/de/juplo/kafka/TestGreeting.java
+++ /dev/null
@@ -1,13 +0,0 @@
-package de.juplo.kafka;
-
-import lombok.Value;
-
-import java.time.LocalDateTime;
-
-
-@Value
-public class TestGreeting
-{
-  private final String name;
-  private final LocalDateTime when;
-}
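
The behaviour that the new test `doesNotCommitOffsetsOnLogicError` relies on lives in
`EndlessConsumer`, which this commit does not touch. The class below is therefore only a
minimal, hedged sketch of the assumed polling loop -- the class and field names are
hypothetical, not the repository's actual implementation. It illustrates why the old
committed offset positions stay valid after a logic error: the handler's exception is
caught, `unsubscribe()` is called without any preceding commit, and `unsubscribe()` does
not trigger the auto-commit that a regular rebalance or `close()` would perform.

package de.juplo.kafka;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.WakeupException;

import java.time.Duration;
import java.util.Arrays;
import java.util.Optional;
import java.util.function.Consumer;


// Hedged sketch, NOT the actual EndlessConsumer of this repository: it only
// illustrates the error handling that the commit message above describes.
public class EndlessConsumerSketch<K, V> implements Runnable
{
  private final KafkaConsumer<K, V> consumer;
  private final String topic;
  private final Consumer<ConsumerRecord<K, V>> handler;

  private Optional<Exception> exitStatus = Optional.empty();

  public EndlessConsumerSketch(
      KafkaConsumer<K, V> consumer,
      String topic,
      Consumer<ConsumerRecord<K, V>> handler)
  {
    this.consumer = consumer;
    this.topic = topic;
    this.handler = handler;
  }

  @Override
  public void run()
  {
    try
    {
      consumer.subscribe(Arrays.asList(topic));
      for (;;)
      {
        ConsumerRecords<K, V> records = consumer.poll(Duration.ofSeconds(1));
        for (ConsumerRecord<K, V> record : records)
          handler.accept(record); // a logic error surfaces here as a RuntimeException
      }
    }
    catch (WakeupException e)
    {
      // Regular shutdown (stop() called consumer.wakeup()): every record that
      // was handed to the handler has been processed, so committing is safe.
      consumer.commitSync();
      consumer.unsubscribe();
    }
    catch (RuntimeException e)
    {
      // Logic error: leave the subscription WITHOUT committing. The offsets of
      // the partially processed poll are never committed, so the previously
      // committed offset positions remain valid for a restarted consumer.
      consumer.unsubscribe();
      exitStatus = Optional.of(e);
    }
  }

  public Optional<Exception> exitStatus()
  {
    return exitStatus;
  }
}

This matches what the tests assert: after a logic error, `compareToCommitedOffsets(oldOffsets)`
passes, whereas a deserialization error (raised inside poll(), before the handler runs) leaves
the consumer with the offset of the failing record committed for reprocessing.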