From: Kai Moritz
Date: Sun, 14 Aug 2022 18:52:49 +0000 (+0200)
Subject: Merged improved tests and fixes: sumup-adder -> stored-offsets
X-Git-Url: http://juplo.de/gitweb/?a=commitdiff_plain;h=refs%2Fheads%2Fstored-offsets;p=demos%2Fkafka%2Ftraining

Merged improved tests and fixes: sumup-adder -> stored-offsets
---

f1eec82fb197f9fc7906eb9a90d75468e9e4356f
diff --cc README.sh
index b7bce99,2845ab1..133af42
--- a/README.sh
+++ b/README.sh
@@@ -16,7 -16,8 +16,8 @@@ if [ "$1" = "build" ]]
 then
- mvn install || exit
- docker-compose rm -svf adder
++ docker-compose rm -svf peter beate
+ mvn clean install || exit
 else
  echo "Using image existing images:"
  docker image ls $IMAGE
diff --cc pom.xml
index 701704d,ecb559a..fa78c70
--- a/pom.xml
+++ b/pom.xml
@@@ -12,10 -12,15 +12,14 @@@
  de.juplo.kafka
- sumup-adder
+ endless-consumer
  1.0-SNAPSHOT
- SumUp Adder
- Calculates the sum for the send messages
+ Endless Consumer: a Simple Consumer-Group that reads and prints the topic and counts the received messages for each key by topic
+
+ 11
+
+
  org.springframework.boot
diff --cc src/main/java/de/juplo/kafka/ApplicationRebalanceListener.java
index 59b420a,542af2d..444b7b7
--- a/src/main/java/de/juplo/kafka/ApplicationRebalanceListener.java
+++ b/src/main/java/de/juplo/kafka/ApplicationRebalanceListener.java
@@@ -22,9 -22,10 +22,10 @@@ public class ApplicationRebalanceListen
  private final String topic;
  private final Clock clock;
  private final Duration commitInterval;
- private final Consumer consumer;
+ private final Consumer consumer;

  private Instant lastCommit = Instant.EPOCH;
+ private boolean commitsEnabled = true;

  @Override
  public void onPartitionsAssigned(Collection partitions)
diff --cc src/test/java/de/juplo/kafka/ApplicationTests.java
index 1f18e59,4ddf8a9..5166227
--- a/src/test/java/de/juplo/kafka/ApplicationTests.java
+++ b/src/test/java/de/juplo/kafka/ApplicationTests.java
@@@ -1,357 -1,84 +1,92 @@@
  package de.juplo.kafka;

- import lombok.extern.slf4j.Slf4j;
 +import org.apache.kafka.clients.consumer.ConsumerRecord;
- import org.apache.kafka.clients.consumer.KafkaConsumer;
- import org.apache.kafka.clients.producer.KafkaProducer;
  import org.apache.kafka.clients.producer.ProducerRecord;
- import org.apache.kafka.common.TopicPartition;
- import org.apache.kafka.common.errors.RecordDeserializationException;
- import org.apache.kafka.common.serialization.*;
++import org.apache.kafka.common.serialization.LongSerializer;
+ import org.apache.kafka.common.serialization.StringSerializer;
  import org.apache.kafka.common.utils.Bytes;
- import org.junit.jupiter.api.*;
- import org.springframework.beans.factory.annotation.Autowired;
- import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
- import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
- import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
 +import org.springframework.boot.test.context.TestConfiguration;
 +import org.springframework.context.annotation.Bean;
- import org.springframework.context.annotation.Import;
- import org.springframework.kafka.test.context.EmbeddedKafka;
- import org.springframework.test.context.TestPropertySource;
- import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
++import org.springframework.context.annotation.Primary;
++import org.springframework.test.context.ContextConfiguration;

- import java.time.Duration;
- import java.util.*;
- import java.util.concurrent.ExecutionException;
- import
java.util.concurrent.ExecutorService; - import java.util.function.BiConsumer; - import java.util.stream.Collectors; - import java.util.stream.IntStream; + import java.util.function.Consumer; -import java.util.stream.IntStream; - import static de.juplo.kafka.ApplicationTests.PARTITIONS; - import static de.juplo.kafka.ApplicationTests.TOPIC; - import static org.assertj.core.api.Assertions.*; - import static org.awaitility.Awaitility.*; - - @SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class) - @TestMethodOrder(MethodOrderer.OrderAnnotation.class) - @TestPropertySource( - properties = { - "consumer.bootstrap-server=${spring.embedded.kafka.brokers}", - "consumer.topic=" + TOPIC, - "consumer.commit-interval=1s", - "spring.mongodb.embedded.version=4.4.13" }) - @EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS) - @EnableAutoConfiguration - @AutoConfigureDataMongo - @Slf4j - class ApplicationTests -public class ApplicationTests extends GenericApplicationTests ++@ContextConfiguration(classes = ApplicationTests.Configuration.class) ++public class ApplicationTests extends GenericApplicationTests { - public static final String TOPIC = "FOO"; - public static final int PARTITIONS = 10; - - - StringSerializer stringSerializer = new StringSerializer(); - - @Autowired - Serializer valueSerializer; - @Autowired - KafkaProducer kafkaProducer; - @Autowired - KafkaConsumer kafkaConsumer; - @Autowired - KafkaConsumer offsetConsumer; - @Autowired - ApplicationProperties properties; - @Autowired - ExecutorService executor; - @Autowired - StateRepository stateRepository; - @Autowired - ApplicationRebalanceListener rebalanceListener; - @Autowired - ApplicationRecordHandler recordHandler; - - EndlessConsumer endlessConsumer; - Map oldOffsets; - Map newOffsets; - Set> receivedRecords; - - - /** Tests methods */ - - @Test - void commitsCurrentOffsetsOnSuccess() throws ExecutionException, InterruptedException - { - send100Messages((partition, key, counter) -> - { - Bytes value = new Bytes(valueSerializer.serialize(TOPIC, counter)); - return new ProducerRecord<>(TOPIC, partition, key, value); - }); - - await("100 records received") - .atMost(Duration.ofSeconds(30)) - .pollInterval(Duration.ofSeconds(1)) - .until(() -> receivedRecords.size() >= 100); - - await("Offsets committed") - .atMost(Duration.ofSeconds(10)) - .pollInterval(Duration.ofSeconds(1)) - .untilAsserted(() -> - { - checkSeenOffsetsForProgress(); - compareToCommitedOffsets(newOffsets); - }); - - assertThatExceptionOfType(IllegalStateException.class) - .isThrownBy(() -> endlessConsumer.exitStatus()) - .describedAs("Consumer should still be running"); - } - - @Test - void commitsOffsetOfErrorForReprocessingOnDeserializationError() - { - send100Messages((partition, key, counter) -> - { - Bytes value = counter == 77 - ? 
new Bytes(stringSerializer.serialize(TOPIC, "BOOM!")) - : new Bytes(valueSerializer.serialize(TOPIC, counter)); - return new ProducerRecord<>(TOPIC, partition, key, value); - }); - - await("Consumer failed") - .atMost(Duration.ofSeconds(30)) - .pollInterval(Duration.ofSeconds(1)) - .until(() -> !endlessConsumer.running()); - - checkSeenOffsetsForProgress(); - compareToCommitedOffsets(newOffsets); - - endlessConsumer.start(); - await("Consumer failed") - .atMost(Duration.ofSeconds(30)) - .pollInterval(Duration.ofSeconds(1)) - .until(() -> !endlessConsumer.running()); - - checkSeenOffsetsForProgress(); - compareToCommitedOffsets(newOffsets); - assertThat(receivedRecords.size()) - .describedAs("Received not all sent events") - .isLessThan(100); - - assertThatNoException() - .describedAs("Consumer should not be running") - .isThrownBy(() -> endlessConsumer.exitStatus()); - assertThat(endlessConsumer.exitStatus()) - .describedAs("Consumer should have exited abnormally") - .containsInstanceOf(RecordDeserializationException.class); - } - - - /** Helper methods for the verification of expectations */ - - void compareToCommitedOffsets(Map offsetsToCheck) - { - doForCurrentOffsets((tp, offset) -> - { - Long expected = offsetsToCheck.get(tp) + 1; - log.debug("Checking, if the offset for {} is {}", tp, expected); - assertThat(offset) - .describedAs("Committed offset corresponds to the offset of the consumer") - .isEqualTo(expected); - }); - } - - void checkSeenOffsetsForProgress() - { - // Be sure, that some messages were consumed...! - Set withProgress = new HashSet<>(); - partitions().forEach(tp -> - { - Long oldOffset = oldOffsets.get(tp) + 1; - Long newOffset = newOffsets.get(tp) + 1; - if (!oldOffset.equals(newOffset)) - { - log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset); - withProgress.add(tp); - } - }); - assertThat(withProgress) - .describedAs("Some offsets must have changed, compared to the old offset-positions") - .isNotEmpty(); - } - - - /** Helper methods for setting up and running the tests */ - - void seekToEnd() - { - offsetConsumer.assign(partitions()); - partitions().forEach(tp -> - { - Long offset = offsetConsumer.position(tp); - log.info("New position for {}: {}", tp, offset); - Integer partition = tp.partition(); - StateDocument document = - stateRepository - .findById(partition.toString()) - .orElse(new StateDocument(partition)); - document.offset = offset; - stateRepository.save(document); - }); - offsetConsumer.unsubscribe(); - } - - void doForCurrentOffsets(BiConsumer consumer) - { - partitions().forEach(tp -> - { - String partition = Integer.toString(tp.partition()); - Optional offset = stateRepository.findById(partition).map(document -> document.offset); - consumer.accept(tp, offset.orElse(0l)); - }); - } - - List partitions() - { - return - IntStream - .range(0, PARTITIONS) - .mapToObj(partition -> new TopicPartition(TOPIC, partition)) - .collect(Collectors.toList()); - } - - - public interface RecordGenerator - { - public ProducerRecord generate(int partition, String key, long counter); - } - - void send100Messages(RecordGenerator recordGenerator) - { - long i = 0; - - for (int partition = 0; partition < 10; partition++) - { - for (int key = 0; key < 10; key++) - { - ProducerRecord record = - recordGenerator.generate(partition, Integer.toString(partition*10+key%2), ++i); - - kafkaProducer.send(record, (metadata, e) -> - { - if (metadata != null) - { - log.debug( - "{}|{} - {}={}", - metadata.partition(), - metadata.offset(), - record.key(), - 
record.value()); - } - else - { - log.warn( - "Exception for {}={}: {}", - record.key(), - record.value(), - e.toString()); - } - }); - } - } - } - - - @BeforeEach - public void init() - { - seekToEnd(); - - oldOffsets = new HashMap<>(); - newOffsets = new HashMap<>(); - receivedRecords = new HashSet<>(); - - doForCurrentOffsets((tp, offset) -> - { - oldOffsets.put(tp, offset - 1); - newOffsets.put(tp, offset - 1); - }); - - TestRecordHandler captureOffsetAndExecuteTestHandler = - new TestRecordHandler(recordHandler) { - @Override - public void onNewRecord(ConsumerRecord record) - { - newOffsets.put( - new TopicPartition(record.topic(), record.partition()), - record.offset()); - receivedRecords.add(record); - } - }; - - endlessConsumer = - new EndlessConsumer<>( - executor, - properties.getClientId(), - properties.getTopic(), - kafkaConsumer, - rebalanceListener, - captureOffsetAndExecuteTestHandler); - - endlessConsumer.start(); - } - - @AfterEach - public void deinit() - { - try - { - endlessConsumer.stop(); - } - catch (Exception e) - { - log.info("Exception while stopping the consumer: {}", e.toString()); - } - } - - - @TestConfiguration - @Import(ApplicationConfiguration.class) - public static class Configuration - { - @Bean - Serializer serializer() - { - return new LongSerializer(); - } - - @Bean - KafkaProducer kafkaProducer(ApplicationProperties properties) - { - Properties props = new Properties(); - props.put("bootstrap.servers", properties.getBootstrapServer()); - props.put("linger.ms", 100); - props.put("key.serializer", StringSerializer.class.getName()); - props.put("value.serializer", BytesSerializer.class.getName()); - - return new KafkaProducer<>(props); - } - - @Bean - KafkaConsumer offsetConsumer(ApplicationProperties properties) - { - Properties props = new Properties(); - props.put("bootstrap.servers", properties.getBootstrapServer()); - props.put("client.id", "OFFSET-CONSUMER"); - props.put("enable.auto.commit", false); - props.put("auto.offset.reset", "latest"); - props.put("key.deserializer", BytesDeserializer.class.getName()); - props.put("value.deserializer", BytesDeserializer.class.getName()); - - return new KafkaConsumer<>(props); - } - } + public ApplicationTests() + { + super( + new RecordGenerator() + { - final int[] numbers = { 1, 7, 3, 2, 33, 6, 11 }; - final String[] dieWilden13 = - IntStream - .range(1,14) - .mapToObj(i -> "seeräuber-" + i) - .toArray(i -> new String[i]); + final StringSerializer stringSerializer = new StringSerializer(); - final Bytes startMessage = new Bytes(stringSerializer.serialize(TOPIC, "START")); - final Bytes endMessage = new Bytes(stringSerializer.serialize(TOPIC, "END")); - - int counter = 0; ++ final LongSerializer longSerializer = new LongSerializer(); + + + @Override + public int generate( + boolean poisonPills, + boolean logicErrors, + Consumer> messageSender) + { - counter = 0; ++ int i = 0; + - for (int i = 0; i < 33; i++) ++ for (int partition = 0; partition < 10; partition++) + { - String seeräuber = dieWilden13[i%13]; - int number = numbers[i%7]; - - Bytes key = new Bytes(stringSerializer.serialize(TOPIC, seeräuber)); - - send(key, startMessage, logicErrors, messageSender); - for (int message = 1; message <= number; message++) ++ for (int key = 0; key < 10; key++) + { - Bytes value = new Bytes(stringSerializer.serialize(TOPIC, Integer.toString(message))); - send(key, value, logicErrors, messageSender); - } - send(key, endMessage, logicErrors, messageSender); - } ++ i++; + - return counter; - } ++ Bytes value = new 
Bytes(longSerializer.serialize(TOPIC, (long)i)); ++ if (i == 77) ++ { ++ if (logicErrors) ++ { ++ value = new Bytes(longSerializer.serialize(TOPIC, Long.MIN_VALUE)); ++ } ++ if (poisonPills) ++ { ++ value = new Bytes(stringSerializer.serialize(TOPIC, "BOOM (Poison-Pill)!")); ++ } ++ } + - void send( - Bytes key, - Bytes value, - boolean logicErrors, - Consumer> messageSender) - { - counter++; ++ ProducerRecord record = ++ new ProducerRecord<>( ++ TOPIC, ++ partition, ++ new Bytes(stringSerializer.serialize(TOPIC,Integer.toString(partition*10+key%2))), ++ value); + - if (counter == 77) - { - if (logicErrors) - { - value = value.equals(startMessage) ? endMessage : startMessage; ++ messageSender.accept(record); + } + } + - messageSender.accept(new ProducerRecord<>(TOPIC, key, value)); - } - - @Override - public boolean canGeneratePoisonPill() - { - return false; ++ return i; + } + }); + } ++ ++ ++ @TestConfiguration ++ public static class Configuration ++ { ++ @Primary ++ @Bean ++ public ApplicationRecordHandler recordHandler() ++ { ++ ApplicationRecordHandler recordHandler = new ApplicationRecordHandler(); ++ return new ApplicationRecordHandler() ++ { ++ @Override ++ public void accept(ConsumerRecord record) ++ { ++ if (record.value() == Long.MIN_VALUE) ++ throw new RuntimeException("BOOM (Logic-Error)!"); ++ super.accept(record); ++ } ++ }; ++ } ++ } } diff --cc src/test/java/de/juplo/kafka/GenericApplicationTests.java index 0000000,9a6f812..fa3d911 mode 000000,100644..100644 --- a/src/test/java/de/juplo/kafka/GenericApplicationTests.java +++ b/src/test/java/de/juplo/kafka/GenericApplicationTests.java @@@ -1,0 -1,390 +1,390 @@@ + package de.juplo.kafka; + + import lombok.extern.slf4j.Slf4j; + import org.apache.kafka.clients.consumer.ConsumerRecord; + import org.apache.kafka.clients.consumer.KafkaConsumer; + import org.apache.kafka.clients.producer.KafkaProducer; + import org.apache.kafka.clients.producer.ProducerRecord; + import org.apache.kafka.common.TopicPartition; + import org.apache.kafka.common.errors.RecordDeserializationException; + import org.apache.kafka.common.serialization.*; + import org.apache.kafka.common.utils.Bytes; + import org.junit.jupiter.api.*; + import org.springframework.beans.factory.annotation.Autowired; + import org.springframework.boot.autoconfigure.EnableAutoConfiguration; + import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo; + import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer; + import org.springframework.boot.test.context.TestConfiguration; + import org.springframework.context.annotation.Import; + import org.springframework.kafka.test.context.EmbeddedKafka; + import org.springframework.test.context.TestPropertySource; + import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; + + import java.time.Duration; + import java.util.*; + import java.util.concurrent.ExecutorService; + import java.util.function.BiConsumer; + import java.util.function.Consumer; + import java.util.stream.Collectors; + import java.util.stream.IntStream; + + import static de.juplo.kafka.GenericApplicationTests.PARTITIONS; + import static de.juplo.kafka.GenericApplicationTests.TOPIC; + import static org.assertj.core.api.Assertions.*; + import static org.awaitility.Awaitility.*; + + + @SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class) + @TestPropertySource( + properties = { - "sumup.adder.bootstrap-server=${spring.embedded.kafka.brokers}", - "sumup.adder.topic=" + TOPIC, - 
"sumup.adder.commit-interval=1s", ++ "consumer.bootstrap-server=${spring.embedded.kafka.brokers}", ++ "consumer.topic=" + TOPIC, ++ "consumer.commit-interval=1s", + "spring.mongodb.embedded.version=4.4.13" }) + @EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS) + @EnableAutoConfiguration + @AutoConfigureDataMongo + @Slf4j + abstract class GenericApplicationTests + { + public static final String TOPIC = "FOO"; + public static final int PARTITIONS = 10; + + + @Autowired + KafkaConsumer kafkaConsumer; + @Autowired + Consumer> consumer; + @Autowired + ApplicationProperties properties; + @Autowired + ExecutorService executor; + @Autowired + StateRepository stateRepository; + @Autowired + PollIntervalAwareConsumerRebalanceListener rebalanceListener; + @Autowired + RecordHandler recordHandler; + + KafkaProducer testRecordProducer; + KafkaConsumer offsetConsumer; + EndlessConsumer endlessConsumer; + Map oldOffsets; + Map newOffsets; + Set> receivedRecords; + + + final RecordGenerator recordGenerator; + final Consumer> messageSender; + + public GenericApplicationTests(RecordGenerator recordGenerator) + { + this.recordGenerator = recordGenerator; + this.messageSender = (record) -> sendMessage(record); + } + + + /** Tests methods */ + + @Test + void commitsCurrentOffsetsOnSuccess() + { + int numberOfGeneratedMessages = + recordGenerator.generate(false, false, messageSender); + + await(numberOfGeneratedMessages + " records received") + .atMost(Duration.ofSeconds(30)) + .pollInterval(Duration.ofSeconds(1)) + .until(() -> receivedRecords.size() >= numberOfGeneratedMessages); + + await("Offsets committed") + .atMost(Duration.ofSeconds(10)) + .pollInterval(Duration.ofSeconds(1)) + .untilAsserted(() -> + { + checkSeenOffsetsForProgress(); + compareToCommitedOffsets(newOffsets); + }); + + assertThatExceptionOfType(IllegalStateException.class) + .isThrownBy(() -> endlessConsumer.exitStatus()) + .describedAs("Consumer should still be running"); + + recordGenerator.assertBusinessLogic(); + } + + @Test + @SkipWhenErrorCannotBeGenerated(poisonPill = true) + void commitsOffsetOfErrorForReprocessingOnDeserializationError() + { + int numberOfGeneratedMessages = + recordGenerator.generate(true, false, messageSender); + + await("Consumer failed") + .atMost(Duration.ofSeconds(30)) + .pollInterval(Duration.ofSeconds(1)) + .until(() -> !endlessConsumer.running()); + + checkSeenOffsetsForProgress(); + compareToCommitedOffsets(newOffsets); + + endlessConsumer.start(); + await("Consumer failed") + .atMost(Duration.ofSeconds(30)) + .pollInterval(Duration.ofSeconds(1)) + .until(() -> !endlessConsumer.running()); + + checkSeenOffsetsForProgress(); + compareToCommitedOffsets(newOffsets); + assertThat(receivedRecords.size()) + .describedAs("Received not all sent events") + .isLessThan(numberOfGeneratedMessages); + + assertThatNoException() + .describedAs("Consumer should not be running") + .isThrownBy(() -> endlessConsumer.exitStatus()); + assertThat(endlessConsumer.exitStatus()) + .describedAs("Consumer should have exited abnormally") + .containsInstanceOf(RecordDeserializationException.class); + + recordGenerator.assertBusinessLogic(); + } + + @Test + @SkipWhenErrorCannotBeGenerated(logicError = true) + void doesNotCommitOffsetsOnLogicError() + { + int numberOfGeneratedMessages = + recordGenerator.generate(false, true, messageSender); + + await("Consumer failed") + .atMost(Duration.ofSeconds(30)) + .pollInterval(Duration.ofSeconds(1)) + .until(() -> !endlessConsumer.running()); + + checkSeenOffsetsForProgress(); + 
compareToCommitedOffsets(oldOffsets); + + endlessConsumer.start(); + await("Consumer failed") + .atMost(Duration.ofSeconds(30)) + .pollInterval(Duration.ofSeconds(1)) + .until(() -> !endlessConsumer.running()); + + checkSeenOffsetsForProgress(); + compareToCommitedOffsets(oldOffsets); + assertThat(receivedRecords.size()) + .describedAs("Received not all sent events") + .isLessThan(numberOfGeneratedMessages); + + assertThatNoException() + .describedAs("Consumer should not be running") + .isThrownBy(() -> endlessConsumer.exitStatus()); + assertThat(endlessConsumer.exitStatus()) + .describedAs("Consumer should have exited abnormally") + .containsInstanceOf(RuntimeException.class); + + recordGenerator.assertBusinessLogic(); + } + + + /** Helper methods for the verification of expectations */ + + void compareToCommitedOffsets(Map offsetsToCheck) + { + doForCurrentOffsets((tp, offset) -> + { + Long expected = offsetsToCheck.get(tp) + 1; + log.debug("Checking, if the offset for {} is {}", tp, expected); + assertThat(offset) + .describedAs("Committed offset corresponds to the offset of the consumer") + .isEqualTo(expected); + }); + } + + void checkSeenOffsetsForProgress() + { + // Be sure, that some messages were consumed...! + Set withProgress = new HashSet<>(); + partitions().forEach(tp -> + { + Long oldOffset = oldOffsets.get(tp) + 1; + Long newOffset = newOffsets.get(tp) + 1; + if (!oldOffset.equals(newOffset)) + { + log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset); + withProgress.add(tp); + } + }); + assertThat(withProgress) + .describedAs("Some offsets must have changed, compared to the old offset-positions") + .isNotEmpty(); + } + + + /** Helper methods for setting up and running the tests */ + + void seekToEnd() + { + offsetConsumer.assign(partitions()); + partitions().forEach(tp -> + { + Long offset = offsetConsumer.position(tp); + log.info("New position for {}: {}", tp, offset); + Integer partition = tp.partition(); + StateDocument document = + stateRepository + .findById(partition.toString()) + .orElse(new StateDocument(partition)); + document.offset = offset; + stateRepository.save(document); + }); + offsetConsumer.unsubscribe(); + } + + void doForCurrentOffsets(BiConsumer consumer) + { + partitions().forEach(tp -> + { + String partition = Integer.toString(tp.partition()); + Optional offset = stateRepository.findById(partition).map(document -> document.offset); + consumer.accept(tp, offset.orElse(0l)); + }); + } + + List partitions() + { + return + IntStream + .range(0, PARTITIONS) + .mapToObj(partition -> new TopicPartition(TOPIC, partition)) + .collect(Collectors.toList()); + } + + + public interface RecordGenerator + { + int generate( + boolean poisonPills, + boolean logicErrors, + Consumer> messageSender); + + default boolean canGeneratePoisonPill() + { + return true; + } + + default boolean canGenerateLogicError() + { + return true; + } + + default void assertBusinessLogic() + { + log.debug("No business-logic to assert"); + } + } + + void sendMessage(ProducerRecord record) + { + testRecordProducer.send(record, (metadata, e) -> + { + if (metadata != null) + { + log.debug( + "{}|{} - {}={}", + metadata.partition(), + metadata.offset(), + record.key(), + record.value()); + } + else + { + log.warn( + "Exception for {}={}: {}", + record.key(), + record.value(), + e.toString()); + } + }); + } + + + @BeforeEach + public void init() + { + Properties props; + props = new Properties(); + props.put("bootstrap.servers", properties.getBootstrapServer()); + 
props.put("linger.ms", 100); + props.put("key.serializer", BytesSerializer.class.getName()); + props.put("value.serializer", BytesSerializer.class.getName()); + testRecordProducer = new KafkaProducer<>(props); + + props = new Properties(); + props.put("bootstrap.servers", properties.getBootstrapServer()); + props.put("client.id", "OFFSET-CONSUMER"); + props.put("group.id", properties.getGroupId()); + props.put("key.deserializer", BytesDeserializer.class.getName()); + props.put("value.deserializer", BytesDeserializer.class.getName()); + offsetConsumer = new KafkaConsumer<>(props); + + seekToEnd(); + + oldOffsets = new HashMap<>(); + newOffsets = new HashMap<>(); + receivedRecords = new HashSet<>(); + + doForCurrentOffsets((tp, offset) -> + { + oldOffsets.put(tp, offset - 1); + newOffsets.put(tp, offset - 1); + }); + + TestRecordHandler captureOffsetAndExecuteTestHandler = + new TestRecordHandler(recordHandler) + { + @Override + public void onNewRecord(ConsumerRecord record) + { + newOffsets.put( + new TopicPartition(record.topic(), record.partition()), + record.offset()); + receivedRecords.add(record); + } + }; + + endlessConsumer = + new EndlessConsumer<>( + executor, + properties.getClientId(), + properties.getTopic(), + kafkaConsumer, + rebalanceListener, + captureOffsetAndExecuteTestHandler); + + endlessConsumer.start(); + } + + @AfterEach + public void deinit() + { + try + { + endlessConsumer.stop(); + testRecordProducer.close(); + offsetConsumer.close(); + } + catch (Exception e) + { + log.info("Exception while stopping the consumer: {}", e.toString()); + } + } + + + @TestConfiguration + @Import(ApplicationConfiguration.class) + public static class Configuration + { + } + }
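
Note on the RecordGenerator contract introduced by GenericApplicationTests above: it can be hard to follow in diff form. The following stand-alone sketch is not part of the patch; it merely restates, under assumptions, what the generator added to ApplicationTests does. TOPIC, the 10x10 partition/key layout, the record number 77 and the two error markers are taken from the patch; the class name ExampleRecordGenerator and the Bytes/Bytes record types (matching the BytesSerializer configuration of the test producer) are assumptions for illustration only.

// Sketch only, assuming Bytes keys/values and the constants from the patch above.
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.LongSerializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.common.utils.Bytes;

import java.util.function.Consumer;

class ExampleRecordGenerator
{
  static final String TOPIC = "FOO";

  final StringSerializer stringSerializer = new StringSerializer();
  final LongSerializer longSerializer = new LongSerializer();

  /** Sends 100 records (10 partitions x 10 keys) and returns the number of records sent. */
  int generate(
      boolean poisonPills,
      boolean logicErrors,
      Consumer<ProducerRecord<Bytes, Bytes>> messageSender)
  {
    int i = 0;
    for (int partition = 0; partition < 10; partition++)
    {
      for (int key = 0; key < 10; key++)
      {
        i++;
        // Normal case: the value is the running counter, serialized as a Long
        Bytes value = new Bytes(longSerializer.serialize(TOPIC, (long) i));
        if (i == 77 && logicErrors)
        {
          // Long.MIN_VALUE is the marker that the overridden ApplicationRecordHandler
          // in ApplicationTests.Configuration turns into a RuntimeException
          value = new Bytes(longSerializer.serialize(TOPIC, Long.MIN_VALUE));
        }
        if (i == 77 && poisonPills)
        {
          // A String value that a consumer expecting Long-encoded values cannot deserialize
          value = new Bytes(stringSerializer.serialize(TOPIC, "BOOM (Poison-Pill)!"));
        }
        Bytes recordKey = new Bytes(stringSerializer.serialize(TOPIC, Integer.toString(key)));
        messageSender.accept(new ProducerRecord<>(TOPIC, partition, recordKey, value));
      }
    }
    return i;
  }
}

The return value matters for the test flow: GenericApplicationTests awaits exactly that many received records in commitsCurrentOffsetsOnSuccess(), while the poison-pill and logic-error runs assert that fewer records than were generated arrive before the consumer exits abnormally.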