import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.*;
import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.autoconfigure.kafka.KafkaAutoConfiguration;
import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Import;
+import org.springframework.context.annotation.Primary;
import org.springframework.kafka.test.context.EmbeddedKafka;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
import java.time.Duration;
import java.util.*;
import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Function;
import static org.awaitility.Awaitility.*;
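+// The test now assembles a dedicated application context: the consumer
+// under test, Spring Boot's Kafka auto-configuration, and a local test
+// configuration that swaps in an instrumented record handler.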
-@SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
+@SpringJUnitConfig(
+ initializers = ConfigDataApplicationContextInitializer.class,
+ classes = {
+ EndlessConsumer.class,
+ KafkaAutoConfiguration.class,
+ ApplicationTests.Configuration.class })
@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
@TestPropertySource(
properties = {
@Autowired
KafkaProducer<String, Bytes> kafkaProducer;
@Autowired
- KafkaConsumer<String, Long> kafkaConsumer;
- @Autowired
KafkaConsumer<Bytes, Bytes> offsetConsumer;
@Autowired
ApplicationProperties properties;
@Autowired
- ExecutorService executor;
+ RecordHandler recordHandler;
- Consumer<ConsumerRecord<String, Long>> testHandler;
- EndlessConsumer<String, Long> endlessConsumer;
Map<TopicPartition, Long> oldOffsets;
Map<TopicPartition, Long> newOffsets;
+ /** Test methods */
+
@Test
@Order(1) // << The poison pill is not skipped. Hence, this test must run first
void commitsCurrentOffsetsOnSuccess() throws ExecutionException, InterruptedException
send100Messages(i -> new Bytes(longSerializer.serialize(TOPIC, i)));
Set<ConsumerRecord<String, Long>> received = new HashSet<>();
- testHandler = record -> received.add(record);
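+ // Collect every record that reaches the handler for the assertions below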
+ recordHandler.testHandler = record -> received.add(record);
await("100 records received")
.atMost(Duration.ofSeconds(30))
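+ // With the changed error handling, the consumer commits the offset of the
+ // record that triggered the failure, so that the poison pill is consumed
+ // again after a restart instead of being skipped.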
@Test
@Order(2)
- void commitsNoOffsetsOnError()
+ void commitsOffsetOfErrorForReprocessingOnError()
{
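+ // The 77th message is sent as a poison pill that the consumer cannot
+ // deserialize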
send100Messages(counter ->
counter == 77
await("Consumer failed")
.atMost(Duration.ofSeconds(30))
- .until(() -> !endlessConsumer.running());
+ .untilAsserted(this::checkSeenOffsetsForProgress);
+
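+ // The committed offsets must match the offsets that were seen: the
+ // failed record will be fetched again after a restart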
+ compareToCommitedOffsets(newOffsets);
+ }
- checkSeenOffsetsForProgress();
- compareToCommitedOffsets(oldOffsets);
+
+ /** Helper methods for the verification of expectations */
+
+ void compareToCommitedOffsets(Map<TopicPartition, Long> offsetsToCheck)
+ {
+ doForCurrentOffsets((tp, offset) ->
+ {
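+ // Kafka commits the offset of the *next* record to consume, hence the
+ // expected value is the last seen offset plus one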
+ Long expected = offsetsToCheck.get(tp) + 1;
+ log.debug("Checking, if the offset for {} is {}", tp, expected);
+ assertThat(offset).isEqualTo(expected);
+ });
+ }
+
+ void checkSeenOffsetsForProgress()
+ {
+ // Make sure that some messages were consumed!
+ Set<TopicPartition> withProgress = new HashSet<>();
+ partitions().forEach(tp ->
+ {
+ Long oldOffset = oldOffsets.get(tp);
+ Long newOffset = newOffsets.get(tp);
+ if (!oldOffset.equals(newOffset))
+ {
+ log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
+ withProgress.add(tp);
+ }
+ });
+ assertThat(withProgress).describedAs("Found no partitions with any offset-progress").isNotEmpty();
+ }
+
+ /** Helper methods for setting up and running the tests */
+
+ void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
+ {
+ offsetConsumer.assign(partitions());
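+ // Without a prior poll, position() resolves to the committed offset of
+ // the consumer group (assuming offsetConsumer is configured with the
+ // group.id of the consumer under test)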
+ partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
+ offsetConsumer.unsubscribe();
+ }
+
+ List<TopicPartition> partitions()
+ {
+ return
+ IntStream
+ .range(0, PARTITIONS)
+ .mapToObj(partition -> new TopicPartition(TOPIC, partition))
+ .collect(Collectors.toList());
}
}
}
+
@BeforeEach
public void init()
{
- testHandler = record -> {} ;
+ recordHandler.testHandler = record -> {};
oldOffsets = new HashMap<>();
newOffsets = new HashMap<>();
newOffsets.put(tp, offset - 1);
});
- Consumer<ConsumerRecord<String, Long>> captureOffsetAndExecuteTestHandler =
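+ // Remember the offset of the last record seen for each partition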
+ recordHandler.captureOffsets =
record ->
- {
newOffsets.put(
new TopicPartition(record.topic(), record.partition()),
record.offset());
- testHandler.accept(record);
- };
-
- endlessConsumer =
- new EndlessConsumer<>(
- executor,
- properties.getClientId(),
- properties.getTopic(),
- kafkaConsumer,
- captureOffsetAndExecuteTestHandler);
-
- endlessConsumer.start();
}
- List<TopicPartition> partitions()
- {
- return
- IntStream
- .range(0, PARTITIONS)
- .mapToObj(partition -> new TopicPartition(TOPIC, partition))
- .collect(Collectors.toList());
- }
- void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
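+ /**
+ * Handler that is wired into the application instead of the regular
+ * record handler: it captures the offset of every record and then
+ * delegates to the handler installed by the current test.
+ */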
+ public static class RecordHandler implements Consumer<ConsumerRecord<String, Long>>
{
- offsetConsumer.assign(partitions());
- partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
- offsetConsumer.unsubscribe();
- }
-
- void checkSeenOffsetsForProgress()
- {
- // Be sure, that some messages were consumed...!
- Set<TopicPartition> withProgress = new HashSet<>();
- partitions().forEach(tp ->
- {
- Long oldOffset = oldOffsets.get(tp);
- Long newOffset = newOffsets.get(tp);
- if (!oldOffset.equals(newOffset))
- {
- log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
- withProgress.add(tp);
- }
- });
- assertThat(withProgress).isNotEmpty().describedAs("Found no partitions with any offset-progress");
- }
-
- void compareToCommitedOffsets(Map<TopicPartition, Long> offsetsToCheck)
- {
- doForCurrentOffsets((tp, offset) ->
- {
- Long expected = offsetsToCheck.get(tp) + 1;
- log.debug("Checking, if the offset for {} is {}", tp, expected);
- assertThat(offset).isEqualTo(expected);
- });
- }
+ Consumer<ConsumerRecord<String, Long>> captureOffsets;
+ Consumer<ConsumerRecord<String, Long>> testHandler;
- @AfterEach
- public void deinit()
- {
- try
+ @Override
+ public void accept(ConsumerRecord<String, Long> record)
{
- endlessConsumer.stop();
- }
- catch (Exception e)
- {
- log.info("Exception while stopping the consumer: {}", e.toString());
+ captureOffsets
+ .andThen(testHandler)
+ .accept(record);
}
}
@Import(ApplicationConfiguration.class)
public static class Configuration
{
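+ // @Primary lets this handler take precedence over the default record
+ // handler imported from ApplicationConfiguration; declaring the concrete
+ // type allows the test to autowire the RecordHandler directly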
+ @Primary
+ @Bean
+ public RecordHandler testHandler()
+ {
+ return new RecordHandler();
+ }
+
@Bean
KafkaProducer<String, Bytes> kafkaProducer(ApplicationProperties properties)
{