package de.juplo.kafka;
- import lombok.extern.slf4j.Slf4j;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
- import org.apache.kafka.clients.consumer.KafkaConsumer;
- import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
- import org.apache.kafka.common.TopicPartition;
- import org.apache.kafka.common.errors.RecordDeserializationException;
- import org.apache.kafka.common.serialization.*;
++import org.apache.kafka.common.serialization.LongSerializer;
+ import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.common.utils.Bytes;
- import org.junit.jupiter.api.*;
- import org.springframework.beans.factory.annotation.Autowired;
- import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
- import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
- import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
+import org.springframework.boot.test.context.TestConfiguration;
+import org.springframework.context.annotation.Bean;
- import org.springframework.context.annotation.Import;
- import org.springframework.kafka.test.context.EmbeddedKafka;
- import org.springframework.test.context.TestPropertySource;
- import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
++import org.springframework.context.annotation.Primary;
++import org.springframework.test.context.ContextConfiguration;
- import java.time.Duration;
- import java.util.*;
- import java.util.concurrent.ExecutionException;
- import java.util.concurrent.ExecutorService;
- import java.util.function.BiConsumer;
- import java.util.stream.Collectors;
- import java.util.stream.IntStream;
+ import java.util.function.Consumer;
-import java.util.stream.IntStream;
- import static de.juplo.kafka.ApplicationTests.PARTITIONS;
- import static de.juplo.kafka.ApplicationTests.TOPIC;
- import static org.assertj.core.api.Assertions.*;
- import static org.awaitility.Awaitility.*;
-
- @SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
- @TestMethodOrder(MethodOrderer.OrderAnnotation.class)
- @TestPropertySource(
- properties = {
- "consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
- "consumer.topic=" + TOPIC,
- "consumer.commit-interval=1s",
- "spring.mongodb.embedded.version=4.4.13" })
- @EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
- @EnableAutoConfiguration
- @AutoConfigureDataMongo
- @Slf4j
- class ApplicationTests
-public class ApplicationTests extends GenericApplicationTests<String, String>
++@ContextConfiguration(classes = ApplicationTests.Configuration.class)
++public class ApplicationTests extends GenericApplicationTests<String, Long>
{
- public static final String TOPIC = "FOO";
- public static final int PARTITIONS = 10;
-
-
- StringSerializer stringSerializer = new StringSerializer();
-
- @Autowired
- Serializer valueSerializer;
- @Autowired
- KafkaProducer<String, Bytes> kafkaProducer;
- @Autowired
- KafkaConsumer<String, Long> kafkaConsumer;
- @Autowired
- KafkaConsumer<Bytes, Bytes> offsetConsumer;
- @Autowired
- ApplicationProperties properties;
- @Autowired
- ExecutorService executor;
- @Autowired
- StateRepository stateRepository;
- @Autowired
- ApplicationRebalanceListener rebalanceListener;
- @Autowired
- ApplicationRecordHandler recordHandler;
-
- EndlessConsumer<String, Long> endlessConsumer;
- Map<TopicPartition, Long> oldOffsets;
- Map<TopicPartition, Long> newOffsets;
- Set<ConsumerRecord<String, Long>> receivedRecords;
-
-
- /** Tests methods */
-
- @Test
- void commitsCurrentOffsetsOnSuccess() throws ExecutionException, InterruptedException
- {
- send100Messages((partition, key, counter) ->
- {
- Bytes value = new Bytes(valueSerializer.serialize(TOPIC, counter));
- return new ProducerRecord<>(TOPIC, partition, key, value);
- });
-
- await("100 records received")
- .atMost(Duration.ofSeconds(30))
- .pollInterval(Duration.ofSeconds(1))
- .until(() -> receivedRecords.size() >= 100);
-
- await("Offsets committed")
- .atMost(Duration.ofSeconds(10))
- .pollInterval(Duration.ofSeconds(1))
- .untilAsserted(() ->
- {
- checkSeenOffsetsForProgress();
- compareToCommitedOffsets(newOffsets);
- });
-
- assertThatExceptionOfType(IllegalStateException.class)
- .isThrownBy(() -> endlessConsumer.exitStatus())
- .describedAs("Consumer should still be running");
- }
-
- @Test
- void commitsOffsetOfErrorForReprocessingOnDeserializationError()
- {
- send100Messages((partition, key, counter) ->
- {
- Bytes value = counter == 77
- ? new Bytes(stringSerializer.serialize(TOPIC, "BOOM!"))
- : new Bytes(valueSerializer.serialize(TOPIC, counter));
- return new ProducerRecord<>(TOPIC, partition, key, value);
- });
-
- await("Consumer failed")
- .atMost(Duration.ofSeconds(30))
- .pollInterval(Duration.ofSeconds(1))
- .until(() -> !endlessConsumer.running());
-
- checkSeenOffsetsForProgress();
- compareToCommitedOffsets(newOffsets);
-
- endlessConsumer.start();
- await("Consumer failed")
- .atMost(Duration.ofSeconds(30))
- .pollInterval(Duration.ofSeconds(1))
- .until(() -> !endlessConsumer.running());
-
- checkSeenOffsetsForProgress();
- compareToCommitedOffsets(newOffsets);
- assertThat(receivedRecords.size())
- .describedAs("Received not all sent events")
- .isLessThan(100);
-
- assertThatNoException()
- .describedAs("Consumer should not be running")
- .isThrownBy(() -> endlessConsumer.exitStatus());
- assertThat(endlessConsumer.exitStatus())
- .describedAs("Consumer should have exited abnormally")
- .containsInstanceOf(RecordDeserializationException.class);
- }
-
-
- /** Helper methods for the verification of expectations */
-
- void compareToCommitedOffsets(Map<TopicPartition, Long> offsetsToCheck)
- {
- doForCurrentOffsets((tp, offset) ->
- {
- Long expected = offsetsToCheck.get(tp) + 1;
- log.debug("Checking, if the offset for {} is {}", tp, expected);
- assertThat(offset)
- .describedAs("Committed offset corresponds to the offset of the consumer")
- .isEqualTo(expected);
- });
- }
-
- void checkSeenOffsetsForProgress()
- {
- // Be sure, that some messages were consumed...!
- Set<TopicPartition> withProgress = new HashSet<>();
- partitions().forEach(tp ->
- {
- Long oldOffset = oldOffsets.get(tp) + 1;
- Long newOffset = newOffsets.get(tp) + 1;
- if (!oldOffset.equals(newOffset))
- {
- log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
- withProgress.add(tp);
- }
- });
- assertThat(withProgress)
- .describedAs("Some offsets must have changed, compared to the old offset-positions")
- .isNotEmpty();
- }
-
-
- /** Helper methods for setting up and running the tests */
-
- void seekToEnd()
- {
- offsetConsumer.assign(partitions());
- partitions().forEach(tp ->
- {
- Long offset = offsetConsumer.position(tp);
- log.info("New position for {}: {}", tp, offset);
- Integer partition = tp.partition();
- StateDocument document =
- stateRepository
- .findById(partition.toString())
- .orElse(new StateDocument(partition));
- document.offset = offset;
- stateRepository.save(document);
- });
- offsetConsumer.unsubscribe();
- }
-
- void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
- {
- partitions().forEach(tp ->
- {
- String partition = Integer.toString(tp.partition());
- Optional<Long> offset = stateRepository.findById(partition).map(document -> document.offset);
- consumer.accept(tp, offset.orElse(0l));
- });
- }
-
- List<TopicPartition> partitions()
- {
- return
- IntStream
- .range(0, PARTITIONS)
- .mapToObj(partition -> new TopicPartition(TOPIC, partition))
- .collect(Collectors.toList());
- }
-
-
- public interface RecordGenerator<K, V>
- {
- public ProducerRecord<String, Bytes> generate(int partition, String key, long counter);
- }
-
- void send100Messages(RecordGenerator recordGenerator)
- {
- long i = 0;
-
- for (int partition = 0; partition < 10; partition++)
- {
- for (int key = 0; key < 10; key++)
- {
- ProducerRecord<String, Bytes> record =
- recordGenerator.generate(partition, Integer.toString(partition*10+key%2), ++i);
-
- kafkaProducer.send(record, (metadata, e) ->
- {
- if (metadata != null)
- {
- log.debug(
- "{}|{} - {}={}",
- metadata.partition(),
- metadata.offset(),
- record.key(),
- record.value());
- }
- else
- {
- log.warn(
- "Exception for {}={}: {}",
- record.key(),
- record.value(),
- e.toString());
- }
- });
- }
- }
- }
-
-
- @BeforeEach
- public void init()
- {
- seekToEnd();
-
- oldOffsets = new HashMap<>();
- newOffsets = new HashMap<>();
- receivedRecords = new HashSet<>();
-
- doForCurrentOffsets((tp, offset) ->
- {
- oldOffsets.put(tp, offset - 1);
- newOffsets.put(tp, offset - 1);
- });
-
- TestRecordHandler<String, Long> captureOffsetAndExecuteTestHandler =
- new TestRecordHandler<String, Long>(recordHandler) {
- @Override
- public void onNewRecord(ConsumerRecord<String, Long> record)
- {
- newOffsets.put(
- new TopicPartition(record.topic(), record.partition()),
- record.offset());
- receivedRecords.add(record);
- }
- };
-
- endlessConsumer =
- new EndlessConsumer<>(
- executor,
- properties.getClientId(),
- properties.getTopic(),
- kafkaConsumer,
- rebalanceListener,
- captureOffsetAndExecuteTestHandler);
-
- endlessConsumer.start();
- }
-
- @AfterEach
- public void deinit()
- {
- try
- {
- endlessConsumer.stop();
- }
- catch (Exception e)
- {
- log.info("Exception while stopping the consumer: {}", e.toString());
- }
- }
-
-
- @TestConfiguration
- @Import(ApplicationConfiguration.class)
- public static class Configuration
- {
- @Bean
- Serializer<Long> serializer()
- {
- return new LongSerializer();
- }
-
- @Bean
- KafkaProducer<String, Bytes> kafkaProducer(ApplicationProperties properties)
- {
- Properties props = new Properties();
- props.put("bootstrap.servers", properties.getBootstrapServer());
- props.put("linger.ms", 100);
- props.put("key.serializer", StringSerializer.class.getName());
- props.put("value.serializer", BytesSerializer.class.getName());
-
- return new KafkaProducer<>(props);
- }
-
- @Bean
- KafkaConsumer<Bytes, Bytes> offsetConsumer(ApplicationProperties properties)
- {
- Properties props = new Properties();
- props.put("bootstrap.servers", properties.getBootstrapServer());
- props.put("client.id", "OFFSET-CONSUMER");
- props.put("enable.auto.commit", false);
- props.put("auto.offset.reset", "latest");
- props.put("key.deserializer", BytesDeserializer.class.getName());
- props.put("value.deserializer", BytesDeserializer.class.getName());
-
- return new KafkaConsumer<>(props);
- }
- }
+ public ApplicationTests()
+ {
+ super(
+ new RecordGenerator()
+ {
- final int[] numbers = { 1, 7, 3, 2, 33, 6, 11 };
- final String[] dieWilden13 =
- IntStream
- .range(1,14)
- .mapToObj(i -> "seeräuber-" + i)
- .toArray(i -> new String[i]);
+ final StringSerializer stringSerializer = new StringSerializer();
- final Bytes startMessage = new Bytes(stringSerializer.serialize(TOPIC, "START"));
- final Bytes endMessage = new Bytes(stringSerializer.serialize(TOPIC, "END"));
-
- int counter = 0;
++ final LongSerializer longSerializer = new LongSerializer();
+
+
+ @Override
+ public int generate(
+ boolean poisonPills,
+ boolean logicErrors,
+ Consumer<ProducerRecord<Bytes, Bytes>> messageSender)
+ {
- counter = 0;
++ int i = 0;
+
- for (int i = 0; i < 33; i++)
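++ // Write one message for each of the 10 keys into each of the
++ // 10 partitions, i.e. 100 messages in total.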
++ for (int partition = 0; partition < 10; partition++)
+ {
- String seeräuber = dieWilden13[i%13];
- int number = numbers[i%7];
-
- Bytes key = new Bytes(stringSerializer.serialize(TOPIC, seeräuber));
-
- send(key, startMessage, logicErrors, messageSender);
- for (int message = 1; message <= number; message++)
++ for (int key = 0; key < 10; key++)
+ {
- Bytes value = new Bytes(stringSerializer.serialize(TOPIC, Integer.toString(message)));
- send(key, value, logicErrors, messageSender);
- }
- send(key, endMessage, logicErrors, messageSender);
- }
++ i++;
+
- return counter;
- }
++ Bytes value = new Bytes(longSerializer.serialize(TOPIC, (long)i));
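++ // Message no. 77 is replaced with an error if requested: a logic
++ // error is the sentinel value Long.MIN_VALUE, which the overriding
++ // record-handler in the test-configuration below rejects; a poison
++ // pill is a String value that cannot be deserialized as a Long.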
++ if (i == 77)
++ {
++ if (logicErrors)
++ {
++ value = new Bytes(longSerializer.serialize(TOPIC, Long.MIN_VALUE));
++ }
++ if (poisonPills)
++ {
++ value = new Bytes(stringSerializer.serialize(TOPIC, "BOOM (Poison-Pill)!"));
++ }
++ }
+
- void send(
- Bytes key,
- Bytes value,
- boolean logicErrors,
- Consumer<ProducerRecord<Bytes, Bytes>> messageSender)
- {
- counter++;
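++ // Note: because % binds more tightly than +, partition*10+key%2
++ // yields only two distinct keys per partition.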
++ ProducerRecord<Bytes, Bytes> record =
++ new ProducerRecord<>(
++ TOPIC,
++ partition,
++ new Bytes(stringSerializer.serialize(TOPIC, Integer.toString(partition*10+key%2))),
++ value);
+
- if (counter == 77)
- {
- if (logicErrors)
- {
- value = value.equals(startMessage) ? endMessage : startMessage;
++ messageSender.accept(record);
+ }
+ }
+
- messageSender.accept(new ProducerRecord<>(TOPIC, key, value));
- }
-
- @Override
- public boolean canGeneratePoisonPill()
- {
- return false;
++ return i;
+ }
+ });
+ }
++
++
++ @TestConfiguration
++ public static class Configuration
++ {
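++ // Overrides the regular ApplicationRecordHandler with a variant
++ // that fails on the Long.MIN_VALUE sentinel, so that the generic
++ // tests can trigger a logic error.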
++ @Primary
++ @Bean
++ public ApplicationRecordHandler recordHandler()
++ {
++ return new ApplicationRecordHandler()
++ {
++ @Override
++ public void accept(ConsumerRecord<String, Long> record)
++ {
++ if (record.value() == Long.MIN_VALUE)
++ throw new RuntimeException("BOOM (Logic-Error)!");
++ super.accept(record);
++ }
++ };
++ }
++ }
}
--- /dev/null
- "sumup.adder.bootstrap-server=${spring.embedded.kafka.brokers}",
- "sumup.adder.topic=" + TOPIC,
- "sumup.adder.commit-interval=1s",
+ package de.juplo.kafka;
+
+ import lombok.extern.slf4j.Slf4j;
+ import org.apache.kafka.clients.consumer.ConsumerRecord;
+ import org.apache.kafka.clients.consumer.KafkaConsumer;
+ import org.apache.kafka.clients.producer.KafkaProducer;
+ import org.apache.kafka.clients.producer.ProducerRecord;
+ import org.apache.kafka.common.TopicPartition;
+ import org.apache.kafka.common.errors.RecordDeserializationException;
+ import org.apache.kafka.common.serialization.*;
+ import org.apache.kafka.common.utils.Bytes;
+ import org.junit.jupiter.api.*;
+ import org.springframework.beans.factory.annotation.Autowired;
+ import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
+ import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
+ import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
+ import org.springframework.boot.test.context.TestConfiguration;
+ import org.springframework.context.annotation.Import;
+ import org.springframework.kafka.test.context.EmbeddedKafka;
+ import org.springframework.test.context.TestPropertySource;
+ import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
+
+ import java.time.Duration;
+ import java.util.*;
+ import java.util.concurrent.ExecutorService;
+ import java.util.function.BiConsumer;
+ import java.util.function.Consumer;
+ import java.util.stream.Collectors;
+ import java.util.stream.IntStream;
+
+ import static de.juplo.kafka.GenericApplicationTests.PARTITIONS;
+ import static de.juplo.kafka.GenericApplicationTests.TOPIC;
+ import static org.assertj.core.api.Assertions.*;
+ import static org.awaitility.Awaitility.*;
+
+
+ @SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
+ @TestPropertySource(
+ properties = {
++ "consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
++ "consumer.topic=" + TOPIC,
++ "consumer.commit-interval=1s",
+ "spring.mongodb.embedded.version=4.4.13" })
+ @EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
+ @EnableAutoConfiguration
+ @AutoConfigureDataMongo
+ @Slf4j
+ abstract class GenericApplicationTests<K, V>
+ {
+ public static final String TOPIC = "FOO";
+ public static final int PARTITIONS = 10;
+
+
+ @Autowired
+ KafkaConsumer<K, V> kafkaConsumer;
+ @Autowired
+ Consumer<ConsumerRecord<K, V>> consumer;
+ @Autowired
+ ApplicationProperties properties;
+ @Autowired
+ ExecutorService executor;
+ @Autowired
+ StateRepository stateRepository;
+ @Autowired
+ PollIntervalAwareConsumerRebalanceListener rebalanceListener;
+ @Autowired
+ RecordHandler<K, V> recordHandler;
+
+ KafkaProducer<Bytes, Bytes> testRecordProducer;
+ KafkaConsumer<Bytes, Bytes> offsetConsumer;
+ EndlessConsumer<K, V> endlessConsumer;
+ Map<TopicPartition, Long> oldOffsets;
+ Map<TopicPartition, Long> newOffsets;
+ Set<ConsumerRecord<K, V>> receivedRecords;
+
+
+ final RecordGenerator recordGenerator;
+ final Consumer<ProducerRecord<Bytes, Bytes>> messageSender;
+
+ public GenericApplicationTests(RecordGenerator recordGenerator)
+ {
+ this.recordGenerator = recordGenerator;
+ this.messageSender = (record) -> sendMessage(record);
+ }
+
+
+ /** Test methods */
+
+ @Test
+ void commitsCurrentOffsetsOnSuccess()
+ {
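+ // Happy path: no errors are generated, so the consumer must receive
+ // every record and commit the corresponding offsets.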
+ int numberOfGeneratedMessages =
+ recordGenerator.generate(false, false, messageSender);
+
+ await(numberOfGeneratedMessages + " records received")
+ .atMost(Duration.ofSeconds(30))
+ .pollInterval(Duration.ofSeconds(1))
+ .until(() -> receivedRecords.size() >= numberOfGeneratedMessages);
+
+ await("Offsets committed")
+ .atMost(Duration.ofSeconds(10))
+ .pollInterval(Duration.ofSeconds(1))
+ .untilAsserted(() ->
+ {
+ checkSeenOffsetsForProgress();
+ compareToCommittedOffsets(newOffsets);
+ });
+
+ assertThatExceptionOfType(IllegalStateException.class)
+ .isThrownBy(() -> endlessConsumer.exitStatus())
+ .describedAs("Consumer should still be running");
+
+ recordGenerator.assertBusinessLogic();
+ }
+
+ @Test
+ @SkipWhenErrorCannotBeGenerated(poisonPill = true)
+ void commitsOffsetOfErrorForReprocessingOnDeserializationError()
+ {
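+ // A poison pill is generated: the consumer is expected to die on the
+ // undeserializable record; because the offset of the failed record is
+ // committed for reprocessing, a restarted consumer fails at the same
+ // spot again.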
+ int numberOfGeneratedMessages =
+ recordGenerator.generate(true, false, messageSender);
+
+ await("Consumer failed")
+ .atMost(Duration.ofSeconds(30))
+ .pollInterval(Duration.ofSeconds(1))
+ .until(() -> !endlessConsumer.running());
+
+ checkSeenOffsetsForProgress();
+ compareToCommittedOffsets(newOffsets);
+
+ endlessConsumer.start();
+ await("Consumer failed")
+ .atMost(Duration.ofSeconds(30))
+ .pollInterval(Duration.ofSeconds(1))
+ .until(() -> !endlessConsumer.running());
+
+ checkSeenOffsetsForProgress();
+ compareToCommittedOffsets(newOffsets);
+ assertThat(receivedRecords.size())
+ .describedAs("Received not all sent events")
+ .isLessThan(numberOfGeneratedMessages);
+
+ assertThatNoException()
+ .describedAs("Consumer should not be running")
+ .isThrownBy(() -> endlessConsumer.exitStatus());
+ assertThat(endlessConsumer.exitStatus())
+ .describedAs("Consumer should have exited abnormally")
+ .containsInstanceOf(RecordDeserializationException.class);
+
+ recordGenerator.assertBusinessLogic();
+ }
+
+ @Test
+ @SkipWhenErrorCannotBeGenerated(logicError = true)
+ void doesNotCommitOffsetsOnLogicError()
+ {
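+ // A logic error must not advance the committed offsets: after the
+ // failure, and again after a restart, the committed offsets still
+ // equal the old positions.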
+ int numberOfGeneratedMessages =
+ recordGenerator.generate(false, true, messageSender);
+
+ await("Consumer failed")
+ .atMost(Duration.ofSeconds(30))
+ .pollInterval(Duration.ofSeconds(1))
+ .until(() -> !endlessConsumer.running());
+
+ checkSeenOffsetsForProgress();
+ compareToCommittedOffsets(oldOffsets);
+
+ endlessConsumer.start();
+ await("Consumer failed")
+ .atMost(Duration.ofSeconds(30))
+ .pollInterval(Duration.ofSeconds(1))
+ .until(() -> !endlessConsumer.running());
+
+ checkSeenOffsetsForProgress();
+ compareToCommittedOffsets(oldOffsets);
+ assertThat(receivedRecords.size())
+ .describedAs("Received not all sent events")
+ .isLessThan(numberOfGeneratedMessages);
+
+ assertThatNoException()
+ .describedAs("Consumer should not be running")
+ .isThrownBy(() -> endlessConsumer.exitStatus());
+ assertThat(endlessConsumer.exitStatus())
+ .describedAs("Consumer should have exited abnormally")
+ .containsInstanceOf(RuntimeException.class);
+
+ recordGenerator.assertBusinessLogic();
+ }
+
+
+ /** Helper methods for the verification of expectations */
+
+ void compareToCommittedOffsets(Map<TopicPartition, Long> offsetsToCheck)
+ {
+ doForCurrentOffsets((tp, offset) ->
+ {
+ Long expected = offsetsToCheck.get(tp) + 1;
+ log.debug("Checking, if the offset for {} is {}", tp, expected);
+ assertThat(offset)
+ .describedAs("Committed offset corresponds to the offset of the consumer")
+ .isEqualTo(expected);
+ });
+ }
+
+ void checkSeenOffsetsForProgress()
+ {
+ // Make sure that some messages were consumed!
+ Set<TopicPartition> withProgress = new HashSet<>();
+ partitions().forEach(tp ->
+ {
+ Long oldOffset = oldOffsets.get(tp) + 1;
+ Long newOffset = newOffsets.get(tp) + 1;
+ if (!oldOffset.equals(newOffset))
+ {
+ log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
+ withProgress.add(tp);
+ }
+ });
+ assertThat(withProgress)
+ .describedAs("Some offsets must have changed, compared to the old offset-positions")
+ .isNotEmpty();
+ }
+
+
+ /** Helper methods for setting up and running the tests */
+
+ void seekToEnd()
+ {
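+ // Aligns the offsets stored in MongoDB with the current positions of
+ // the offset-consumer, so that a test run does not pick up state that
+ // was left behind by earlier runs.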
+ offsetConsumer.assign(partitions());
+ partitions().forEach(tp ->
+ {
+ Long offset = offsetConsumer.position(tp);
+ log.info("New position for {}: {}", tp, offset);
+ Integer partition = tp.partition();
+ StateDocument document =
+ stateRepository
+ .findById(partition.toString())
+ .orElse(new StateDocument(partition));
+ document.offset = offset;
+ stateRepository.save(document);
+ });
+ offsetConsumer.unsubscribe();
+ }
+
+ void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
+ {
+ partitions().forEach(tp ->
+ {
+ String partition = Integer.toString(tp.partition());
+ Optional<Long> offset = stateRepository.findById(partition).map(document -> document.offset);
+ consumer.accept(tp, offset.orElse(0L));
+ });
+ }
+
+ List<TopicPartition> partitions()
+ {
+ return
+ IntStream
+ .range(0, PARTITIONS)
+ .mapToObj(partition -> new TopicPartition(TOPIC, partition))
+ .collect(Collectors.toList());
+ }
+
+
+ public interface RecordGenerator
+ {
+ int generate(
+ boolean poisonPills,
+ boolean logicErrors,
+ Consumer<ProducerRecord<Bytes, Bytes>> messageSender);
+
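+ // The defaults below signal which error types the generator can
+ // produce; tests annotated with @SkipWhenErrorCannotBeGenerated are
+ // skipped if the required error type is not available.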
+ default boolean canGeneratePoisonPill()
+ {
+ return true;
+ }
+
+ default boolean canGenerateLogicError()
+ {
+ return true;
+ }
+
+ default void assertBusinessLogic()
+ {
+ log.debug("No business-logic to assert");
+ }
+ }
+
+ void sendMessage(ProducerRecord<Bytes, Bytes> record)
+ {
+ testRecordProducer.send(record, (metadata, e) ->
+ {
+ if (metadata != null)
+ {
+ log.debug(
+ "{}|{} - {}={}",
+ metadata.partition(),
+ metadata.offset(),
+ record.key(),
+ record.value());
+ }
+ else
+ {
+ log.warn(
+ "Exception for {}={}: {}",
+ record.key(),
+ record.value(),
+ e.toString());
+ }
+ });
+ }
+
+
+ @BeforeEach
+ public void init()
+ {
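+ // Set up a Bytes/Bytes producer for the generated test records and a
+ // separate consumer that is only used to look up partition offsets.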
+ Properties props = new Properties();
+ props.put("bootstrap.servers", properties.getBootstrapServer());
+ props.put("linger.ms", 100);
+ props.put("key.serializer", BytesSerializer.class.getName());
+ props.put("value.serializer", BytesSerializer.class.getName());
+ testRecordProducer = new KafkaProducer<>(props);
+
+ props = new Properties();
+ props.put("bootstrap.servers", properties.getBootstrapServer());
+ props.put("client.id", "OFFSET-CONSUMER");
+ props.put("group.id", properties.getGroupId());
+ props.put("key.deserializer", BytesDeserializer.class.getName());
+ props.put("value.deserializer", BytesDeserializer.class.getName());
+ offsetConsumer = new KafkaConsumer<>(props);
+
+ seekToEnd();
+
+ oldOffsets = new HashMap<>();
+ newOffsets = new HashMap<>();
+ receivedRecords = new HashSet<>();
+
+ doForCurrentOffsets((tp, offset) ->
+ {
+ oldOffsets.put(tp, offset - 1);
+ newOffsets.put(tp, offset - 1);
+ });
+
+ TestRecordHandler<K, V> captureOffsetAndExecuteTestHandler =
+ new TestRecordHandler<K, V>(recordHandler)
+ {
+ @Override
+ public void onNewRecord(ConsumerRecord<K, V> record)
+ {
+ newOffsets.put(
+ new TopicPartition(record.topic(), record.partition()),
+ record.offset());
+ receivedRecords.add(record);
+ }
+ };
+
+ endlessConsumer =
+ new EndlessConsumer<>(
+ executor,
+ properties.getClientId(),
+ properties.getTopic(),
+ kafkaConsumer,
+ rebalanceListener,
+ captureOffsetAndExecuteTestHandler);
+
+ endlessConsumer.start();
+ }
+
+ @AfterEach
+ public void deinit()
+ {
+ try
+ {
+ endlessConsumer.stop();
+ testRecordProducer.close();
+ offsetConsumer.close();
+ }
+ catch (Exception e)
+ {
+ log.info("Exception while stopping the consumer: {}", e.toString());
+ }
+ }
+
+
+ @TestConfiguration
+ @Import(ApplicationConfiguration.class)
+ public static class Configuration
+ {
+ }
+ }