+++ /dev/null
-package de.juplo.kafka;
-
-import lombok.extern.slf4j.Slf4j;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.clients.consumer.KafkaConsumer;
-import org.apache.kafka.clients.producer.KafkaProducer;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.errors.RecordDeserializationException;
-import org.apache.kafka.common.serialization.*;
-import org.apache.kafka.common.utils.Bytes;
-import org.junit.jupiter.api.*;
-import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
-import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
-import org.springframework.boot.test.context.TestConfiguration;
-import org.springframework.context.annotation.Bean;
-import org.springframework.context.annotation.Import;
-import org.springframework.kafka.test.context.EmbeddedKafka;
-import org.springframework.test.context.TestPropertySource;
-import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
-
-import java.time.Duration;
-import java.util.*;
-import java.util.concurrent.ExecutorService;
-import java.util.function.BiConsumer;
-import java.util.stream.Collectors;
-import java.util.stream.IntStream;
-
-import static de.juplo.kafka.ApplicationTests.*;
-import static org.assertj.core.api.Assertions.*;
-import static org.awaitility.Awaitility.*;
-
-
-@SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
-@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
-@TestPropertySource(
- properties = {
- "sumup.requests.bootstrap-server=${spring.embedded.kafka.brokers}",
- "sumup.requests.topic-in=" + INPUT_TOPIC,
- "sumup.requests.commit-interval=1s" })
-@EmbeddedKafka(topics = { INPUT_TOPIC, OUTPUT_TOPIC }, partitions = PARTITIONS)
-@EnableAutoConfiguration
-@Slf4j
-class ApplicationTests
-{
- public static final String INPUT_TOPIC = "FOO";
- public static final String OUTPUT_TOPIC = "BAR";
- public static final int PARTITIONS = 10;
-
-
- StringSerializer stringSerializer = new StringSerializer();
-
- @Autowired
-  Serializer<Integer> valueSerializer;
- @Autowired
- KafkaProducer<String, Bytes> testProducer;
- @Autowired
- KafkaConsumer<String, Integer> kafkaConsumer;
- @Autowired
- KafkaConsumer<Bytes, Bytes> offsetConsumer;
- @Autowired
- ApplicationProperties properties;
- @Autowired
- ExecutorService executor;
- @Autowired
- RecordHandler noopRecordHandler;
-
- EndlessConsumer<String, Integer> endlessConsumer;
- Map<TopicPartition, Long> oldOffsets;
- Map<TopicPartition, Long> newOffsets;
- Set<ConsumerRecord<String, Integer>> receivedRecords;
-
-
-  /** Test methods */
-
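-  /**
-   * Happy path: all 100 records can be deserialized. The test waits until
-   * every record has been received, verifies that the committed offsets
-   * match the positions seen by the consumer, and checks that the consumer
-   * is still running (exitStatus() throws while the consumer is active).
-   */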
- @Test
-  void commitsCurrentOffsetsOnSuccess()
- {
- send100Messages((partition, key, counter) ->
- {
- Bytes value = new Bytes(valueSerializer.serialize(INPUT_TOPIC, counter));
- return new ProducerRecord<>(INPUT_TOPIC, partition, key, value);
- });
-
- await("100 records received")
- .atMost(Duration.ofSeconds(30))
- .pollInterval(Duration.ofSeconds(1))
- .until(() -> receivedRecords.size() >= 100);
-
- await("Offsets committed")
- .atMost(Duration.ofSeconds(10))
- .pollInterval(Duration.ofSeconds(1))
- .untilAsserted(() ->
- {
- checkSeenOffsetsForProgress();
-        compareToCommittedOffsets(newOffsets);
- });
-
-    assertThatExceptionOfType(IllegalStateException.class)
-      .describedAs("Consumer should still be running")
-      .isThrownBy(() -> endlessConsumer.exitStatus());
- }
-
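-  /**
-   * Error path: message 77 is sent with a String payload that the Integer
-   * deserializer of the consumer cannot decode. The consumer must die on
-   * this poison pill, commit the offset of the broken record so that it is
-   * reprocessed, die again after a restart, and report the
-   * RecordDeserializationException as its exit status.
-   */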
- @Test
- void commitsOffsetOfErrorForReprocessingOnDeserializationError()
- {
- send100Messages((partition, key, counter) ->
- {
- Bytes value = counter == 77
- ? new Bytes(stringSerializer.serialize(INPUT_TOPIC, "BOOM!"))
- : new Bytes(valueSerializer.serialize(INPUT_TOPIC, counter));
- return new ProducerRecord<>(INPUT_TOPIC, partition, key, value);
- });
-
- await("Consumer failed")
- .atMost(Duration.ofSeconds(30))
- .pollInterval(Duration.ofSeconds(1))
- .until(() -> !endlessConsumer.running());
-
- checkSeenOffsetsForProgress();
-    compareToCommittedOffsets(newOffsets);
-
- endlessConsumer.start();
- await("Consumer failed")
- .atMost(Duration.ofSeconds(30))
- .pollInterval(Duration.ofSeconds(1))
- .until(() -> !endlessConsumer.running());
-
- checkSeenOffsetsForProgress();
-    compareToCommittedOffsets(newOffsets);
- assertThat(receivedRecords.size())
- .describedAs("Received not all sent events")
- .isLessThan(100);
-
- assertThatNoException()
- .describedAs("Consumer should not be running")
- .isThrownBy(() -> endlessConsumer.exitStatus());
- assertThat(endlessConsumer.exitStatus())
- .describedAs("Consumer should have exited abnormally")
- .containsInstanceOf(RecordDeserializationException.class);
- }
-
-
-  /** Helper methods for verifying expectations */
-
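-  /**
-   * Asserts that, for every partition, the committed offset is exactly one
-   * ahead of the offset of the last record seen on that partition.
-   */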
-  void compareToCommittedOffsets(Map<TopicPartition, Long> offsetsToCheck)
- {
- doForCurrentOffsets((tp, offset) ->
- {
- Long expected = offsetsToCheck.get(tp) + 1;
- log.debug("Checking, if the offset for {} is {}", tp, expected);
- assertThat(offset)
- .describedAs("Committed offset corresponds to the offset of the consumer")
- .isEqualTo(expected);
- });
- }
-
- void checkSeenOffsetsForProgress()
- {
-    // Make sure that some messages were actually consumed!
- Set<TopicPartition> withProgress = new HashSet<>();
- partitions().forEach(tp ->
- {
- Long oldOffset = oldOffsets.get(tp) + 1;
- Long newOffset = newOffsets.get(tp) + 1;
- if (!oldOffset.equals(newOffset))
- {
- log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
- withProgress.add(tp);
- }
- });
- assertThat(withProgress)
- .describedAs("Some offsets must have changed, compared to the old offset-positions")
- .isNotEmpty();
- }
-
-
- /** Helper methods for setting up and running the tests */
-
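-  /**
-   * Advances the committed offsets of the consumer group to the end of the
-   * log, so that every test starts out clean and never sees records that
-   * are left over from a previous test.
-   */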
- void seekToEnd()
- {
- offsetConsumer.assign(partitions());
- offsetConsumer.seekToEnd(partitions());
- partitions().forEach(tp ->
- {
- // seekToEnd() works lazily: it only takes effect on poll()/position()
- Long offset = offsetConsumer.position(tp);
- log.info("New position for {}: {}", tp, offset);
- });
-    // The new positions must be committed!
- offsetConsumer.commitSync();
- offsetConsumer.unsubscribe();
- }
-
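-  /**
-   * Calls the given callback with the current position of every partition.
-   * Because the offsetConsumer never polls, position() is initialized from
-   * the offsets that were last committed for the consumer group.
-   */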
- void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
- {
- offsetConsumer.assign(partitions());
- partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
- offsetConsumer.unsubscribe();
- }
-
- List<TopicPartition> partitions()
- {
- return
- IntStream
- .range(0, PARTITIONS)
- .mapToObj(partition -> new TopicPartition(INPUT_TOPIC, partition))
- .collect(Collectors.toList());
- }
-
-
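-  /**
-   * Functional interface that lets each test decide how the value of a
-   * record is serialized, e.g. to inject an undeserializable payload.
-   */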
-  public interface RecordGenerator
-  {
-    ProducerRecord<String, Bytes> generate(int partition, String key, int counter);
-  }
-
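-  /**
-   * Sends 10 records to each of the 10 partitions (100 messages in total),
-   * handing a counter from 1 to 100 to the RecordGenerator. Sending is
-   * asynchronous; the callback only logs the outcome.
-   */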
- void send100Messages(RecordGenerator recordGenerator)
- {
- int i = 0;
-
-    for (int partition = 0; partition < PARTITIONS; partition++)
- {
- for (int key = 0; key < 10; key++)
- {
- ProducerRecord<String, Bytes> record =
- recordGenerator.generate(partition, Integer.toString(partition*10+key%2), ++i);
-
- testProducer.send(record, (metadata, e) ->
- {
- if (metadata != null)
- {
- log.debug(
- "{}|{} - {}={}",
- metadata.partition(),
- metadata.offset(),
- record.key(),
- record.value());
- }
- else
- {
- log.warn(
- "Exception for {}={}: {}",
- record.key(),
- record.value(),
- e.toString());
- }
- });
- }
- }
- }
-
-
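-  /**
-   * Per-test setup: fast-forwards the committed offsets to the end of the
-   * log, records the starting positions, wraps the no-op handler in a
-   * TestRecordHandler that captures every received record and its offset,
-   * and starts a fresh EndlessConsumer.
-   */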
- @BeforeEach
- public void init()
- {
- seekToEnd();
-
- oldOffsets = new HashMap<>();
- newOffsets = new HashMap<>();
- receivedRecords = new HashSet<>();
-
- doForCurrentOffsets((tp, offset) ->
- {
- oldOffsets.put(tp, offset - 1);
- newOffsets.put(tp, offset - 1);
- });
-
- TestRecordHandler<String, Integer> captureOffsetAndExecuteTestHandler =
- new TestRecordHandler<String, Integer>(noopRecordHandler) {
- @Override
- public void onNewRecord(ConsumerRecord<String, Integer> record)
- {
- newOffsets.put(
- new TopicPartition(record.topic(), record.partition()),
- record.offset());
- receivedRecords.add(record);
- }
- };
-
- endlessConsumer =
- new EndlessConsumer<>(
- executor,
- properties.getClientId(),
- properties.getTopicIn(),
- kafkaConsumer,
- captureOffsetAndExecuteTestHandler);
-
- endlessConsumer.start();
- }
-
- @AfterEach
- public void deinit()
- {
- try
- {
- endlessConsumer.stop();
- }
- catch (Exception e)
- {
- log.info("Exception while stopping the consumer: {}", e.toString());
- }
- }
-
-
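-  /**
-   * Test configuration: imports the production configuration and adds the
-   * beans that the tests need, i.e. a producer for raw Bytes values and a
-   * consumer for inspecting the committed offsets of the group.
-   */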
- @TestConfiguration
- @Import(ApplicationConfiguration.class)
- public static class Configuration
- {
- @Bean
- Serializer<Integer> valueSerializer()
- {
- return new IntegerSerializer();
- }
-
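-    // Produces Bytes values, so that tests can send valid Integer payloads
-    // as well as deliberately broken ones through the same producer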
- @Bean
- KafkaProducer<String, Bytes> testProducer(ApplicationProperties properties)
- {
- Properties props = new Properties();
- props.put("bootstrap.servers", properties.getBootstrapServer());
- props.put("linger.ms", 100);
- props.put("key.serializer", StringSerializer.class.getName());
- props.put("value.serializer", BytesSerializer.class.getName());
-
- return new KafkaProducer<>(props);
- }
-
- @Bean
- KafkaConsumer<Bytes, Bytes> offsetConsumer(ApplicationProperties properties)
- {
- Properties props = new Properties();
- props.put("bootstrap.servers", properties.getBootstrapServer());
- props.put("client.id", "OFFSET-CONSUMER");
- props.put("group.id", properties.getGroupId());
- props.put("key.deserializer", BytesDeserializer.class.getName());
- props.put("value.deserializer", BytesDeserializer.class.getName());
-
- return new KafkaConsumer<>(props);
- }
-
- @Bean
- KafkaProducer<String, String> kafkaProducer(ApplicationProperties properties)
- {
- Properties props = new Properties();
- props.put("bootstrap.servers", properties.getBootstrapServer());
- props.put("key.serializer", StringSerializer.class.getName());
- props.put("value.serializer", StringSerializer.class.getName());
-
- return new KafkaProducer<>(props);
- }
- }
-}