import com.mongodb.client.MongoClient;
import lombok.extern.slf4j.Slf4j;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.errors.RecordDeserializationException;
import org.apache.kafka.common.serialization.*;
import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
+import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.boot.autoconfigure.mongo.MongoProperties;
import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
import org.springframework.boot.test.context.TestConfiguration;
+import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Import;
+import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
+import org.springframework.kafka.config.KafkaListenerEndpointRegistry;
+import org.springframework.kafka.core.ConsumerFactory;
+import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.test.context.EmbeddedKafka;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
import java.time.Duration;
import java.util.*;
-import java.util.concurrent.ExecutorService;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.stream.Collectors;
@SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
@TestPropertySource(
properties = {
- "sumup.adder.bootstrap-server=${spring.embedded.kafka.brokers}",
+ "spring.kafka.bootstrap-servers=${spring.embedded.kafka.brokers}",
+ "spring.kafka.producer.bootstrap-servers=${spring.embedded.kafka.brokers}",
"sumup.adder.topic=" + TOPIC,
- "sumup.adder.commit-interval=500ms",
+ "spring.kafka.consumer.auto-commit-interval=500ms",
"spring.mongodb.embedded.version=4.4.13" })
@EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
@EnableAutoConfiguration
@Autowired
- KafkaConsumer<K, V> kafkaConsumer;
+ org.apache.kafka.clients.consumer.Consumer<K, V> kafkaConsumer;
@Autowired
- Consumer<ConsumerRecord<K, V>> consumer;
+ KafkaProperties kafkaProperties;
@Autowired
- ApplicationProperties properties;
- @Autowired
- ExecutorService executor;
- @Autowired
- StateRepository stateRepository;
+ ApplicationProperties applicationProperties;
@Autowired
MongoClient mongoClient;
@Autowired
MongoProperties mongoProperties;
@Autowired
- PollIntervalAwareConsumerRebalanceListener rebalanceListener;
+ KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry;
+ @Autowired
+ TestRecordHandler recordHandler;
+ @Autowired
+ DeadLetterTopicConsumer deadLetterTopicConsumer;
@Autowired
- RecordHandler<K, V> recordHandler;
+ EndlessConsumer endlessConsumer;
KafkaProducer<Bytes, Bytes> testRecordProducer;
KafkaConsumer<Bytes, Bytes> offsetConsumer;
- EndlessConsumer<K, V> endlessConsumer;
Map<TopicPartition, Long> oldOffsets;
- Map<TopicPartition, Long> seenOffsets;
- Set<ConsumerRecord<K, V>> receivedRecords;
final RecordGenerator recordGenerator;
/** Test methods */
@Test
- void commitsCurrentOffsetsOnSuccess()
+ void commitsCurrentOffsetsOnSuccess() throws Exception
{
- int numberOfGeneratedMessages =
- recordGenerator.generate(false, false, messageSender);
+ recordGenerator.generate(false, false, messageSender);
+
+ int numberOfGeneratedMessages = recordGenerator.getNumberOfMessages();
await(numberOfGeneratedMessages + " records received")
.atMost(Duration.ofSeconds(30))
.pollInterval(Duration.ofSeconds(1))
- .until(() -> receivedRecords.size() >= numberOfGeneratedMessages);
+ .until(() -> recordHandler.receivedMessages >= numberOfGeneratedMessages);
await("Offsets committed")
.atMost(Duration.ofSeconds(10))
.untilAsserted(() ->
{
checkSeenOffsetsForProgress();
- assertSeenOffsetsEqualCommittedOffsets(seenOffsets);
+ assertSeenOffsetsEqualCommittedOffsets(recordHandler.seenOffsets);
});
- assertThatExceptionOfType(IllegalStateException.class)
- .isThrownBy(() -> endlessConsumer.exitStatus())
- .describedAs("Consumer should still be running");
+ assertThat(endlessConsumer.running())
+ .describedAs("Consumer should still be running")
+ .isTrue();
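+ // Stop the consumer explicitly; tearDown() tolerates an already stopped consumer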
+ endlessConsumer.stop();
recordGenerator.assertBusinessLogic();
}
@SkipWhenErrorCannotBeGenerated(poisonPill = true)
void commitsOffsetOfErrorForReprocessingOnDeserializationError()
{
- int numberOfGeneratedMessages =
- recordGenerator.generate(true, false, messageSender);
+ recordGenerator.generate(true, false, messageSender);
+
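+ // Poison pills are routed to the dead-letter topic; only the valid records reach the record-handler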
+ int numberOfValidMessages =
+ recordGenerator.getNumberOfMessages() -
+ recordGenerator.getNumberOfPoisonPills();
- await("Consumer failed")
+ await(numberOfValidMessages + " records received")
.atMost(Duration.ofSeconds(30))
.pollInterval(Duration.ofSeconds(1))
- .until(() -> !endlessConsumer.running());
-
- checkSeenOffsetsForProgress();
- assertSeenOffsetsEqualCommittedOffsets(seenOffsets);
-
- endlessConsumer.start();
- await("Consumer failed")
+ .until(() -> recordHandler.receivedMessages >= numberOfValidMessages);
+ await(recordGenerator.getNumberOfPoisonPills() + " poison-pills received")
.atMost(Duration.ofSeconds(30))
.pollInterval(Duration.ofSeconds(1))
- .until(() -> !endlessConsumer.running());
+ .until(() -> deadLetterTopicConsumer.messages.size() == recordGenerator.getNumberOfPoisonPills());
- checkSeenOffsetsForProgress();
- assertSeenOffsetsEqualCommittedOffsets(seenOffsets);
- assertThat(receivedRecords.size())
- .describedAs("Received not all sent events")
- .isLessThan(numberOfGeneratedMessages);
+ await("Offsets committed")
+ .atMost(Duration.ofSeconds(10))
+ .pollInterval(Duration.ofSeconds(1))
+ .untilAsserted(() ->
+ {
+ checkSeenOffsetsForProgress();
+ assertSeenOffsetsEqualCommittedOffsets(recordHandler.seenOffsets);
+ });
- assertThatNoException()
- .describedAs("Consumer should not be running")
- .isThrownBy(() -> endlessConsumer.exitStatus());
- assertThat(endlessConsumer.exitStatus())
- .describedAs("Consumer should have exited abnormally")
- .containsInstanceOf(RecordDeserializationException.class);
+ assertThat(endlessConsumer.running())
+ .describedAs("Consumer should still be running")
+ .isTrue();
+ endlessConsumer.stop();
recordGenerator.assertBusinessLogic();
}
@Test
@SkipWhenErrorCannotBeGenerated(logicError = true)
- void doesNotCommitOffsetsOnLogicError()
+ void commitsOffsetsOfUnseenRecordsOnLogicError()
{
- int numberOfGeneratedMessages =
- recordGenerator.generate(false, true, messageSender);
+ recordGenerator.generate(false, true, messageSender);
- await("Consumer failed")
+ int numberOfValidMessages =
+ recordGenerator.getNumberOfMessages() -
+ recordGenerator.getNumberOfLogicErrors();
+
+ await(numberOfValidMessages + " records received")
.atMost(Duration.ofSeconds(30))
.pollInterval(Duration.ofSeconds(1))
- .until(() -> !endlessConsumer.running());
-
- checkSeenOffsetsForProgress();
- assertSeenOffsetsAreBehindCommittedOffsets(seenOffsets);
-
- endlessConsumer.start();
- await("Consumer failed")
+ .until(() -> recordHandler.receivedMessages >= numberOfValidMessages);
+ await(recordGenerator.getNumberOfLogicErrors() + " logic-errors received")
.atMost(Duration.ofSeconds(30))
.pollInterval(Duration.ofSeconds(1))
- .until(() -> !endlessConsumer.running());
+ .until(() -> deadLetterTopicConsumer.messages.size() == recordGenerator.getNumberOfLogicErrors());
- assertSeenOffsetsAreBehindCommittedOffsets(seenOffsets);
+ await("Offsets committed")
+ .atMost(Duration.ofSeconds(10))
+ .pollInterval(Duration.ofSeconds(1))
+ .untilAsserted(() ->
+ {
+ checkSeenOffsetsForProgress();
+ assertSeenOffsetsEqualCommittedOffsets(recordHandler.seenOffsets);
+ });
- assertThatNoException()
- .describedAs("Consumer should not be running")
- .isThrownBy(() -> endlessConsumer.exitStatus());
- assertThat(endlessConsumer.exitStatus())
- .describedAs("Consumer should have exited abnormally")
- .containsInstanceOf(RuntimeException.class);
+ assertThat(endlessConsumer.running())
+ .describedAs("Consumer should still be running")
+ .isTrue();
+ endlessConsumer.stop();
recordGenerator.assertBusinessLogic();
}
Long expected = offsetsToCheck.get(tp) + 1;
log.debug("Checking, if the offset {} for {} is at most {}", offset, tp, expected);
assertThat(offset)
- .describedAs("Committed offset corresponds to the offset of the consumer")
+ .describedAs("Committed offset must be at most equal to the offset of the consumer")
.isLessThanOrEqualTo(expected);
isOffsetBehindSeen.add(offset < expected);
});
partitions().forEach(tp ->
{
Long oldOffset = oldOffsets.get(tp) + 1;
- Long newOffset = seenOffsets.get(tp) + 1;
+ Long newOffset = recordHandler.seenOffsets.get(tp) + 1;
if (!oldOffset.equals(newOffset))
{
log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
void seekToEnd()
{
offsetConsumer.assign(partitions());
+ offsetConsumer.seekToEnd(partitions());
partitions().forEach(tp ->
{
+ // seekToEnd() is evaluated lazily: the seek only takes effect on the next poll() or position() call
Long offset = offsetConsumer.position(tp);
log.info("New position for {}: {}", tp, offset);
- Integer partition = tp.partition();
- StateDocument document =
- stateRepository
- .findById(partition.toString())
- .orElse(new StateDocument(partition));
- document.offset = offset;
- stateRepository.save(document);
});
+ // The new positions must be committed!
+ offsetConsumer.commitSync();
offsetConsumer.unsubscribe();
}
void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
{
- partitions().forEach(tp ->
- {
- String partition = Integer.toString(tp.partition());
- Optional<Long> offset = stateRepository.findById(partition).map(document -> document.offset);
- consumer.accept(tp, offset.orElse(0l));
- });
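+ // Without an explicit seek, position() resolves to the offset that is committed for the group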
+ offsetConsumer.assign(partitions());
+ partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
+ offsetConsumer.unsubscribe();
}
List<TopicPartition> partitions()
public interface RecordGenerator
{
- int generate(
+ void generate(
boolean poisonPills,
boolean logicErrors,
Consumer<ProducerRecord<Bytes, Bytes>> messageSender);
+ int getNumberOfMessages();
+ int getNumberOfPoisonPills();
+ int getNumberOfLogicErrors();
+
default boolean canGeneratePoisonPill()
{
return true;
{
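+ // A producer for writing the test records (including poison pills) as raw bytes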
Properties props;
props = new Properties();
- props.put("bootstrap.servers", properties.getBootstrapServer());
+ props.put("bootstrap.servers", kafkaProperties.getBootstrapServers());
props.put("linger.ms", 100);
props.put("key.serializer", BytesSerializer.class.getName());
props.put("value.serializer", BytesSerializer.class.getName());
testRecordProducer = new KafkaProducer<>(props);
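+ // A second consumer in the same group, used only to check and manipulate the committed offsets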
props = new Properties();
- props.put("bootstrap.servers", properties.getBootstrapServer());
+ props.put("bootstrap.servers", kafkaProperties.getBootstrapServers());
props.put("client.id", "OFFSET-CONSUMER");
- props.put("group.id", properties.getGroupId());
+ props.put("group.id", kafkaProperties.getConsumer().getGroupId());
props.put("key.deserializer", BytesDeserializer.class.getName());
props.put("value.deserializer", BytesDeserializer.class.getName());
offsetConsumer = new KafkaConsumer<>(props);
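+ // Skip all records left over from previous tests, by committing the log-end offsets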
seekToEnd();
oldOffsets = new HashMap<>();
- seenOffsets = new HashMap<>();
- receivedRecords = new HashSet<>();
+ recordHandler.seenOffsets = new HashMap<>();
+ recordHandler.receivedMessages = 0;
+
+ deadLetterTopicConsumer.messages.clear();
doForCurrentOffsets((tp, offset) ->
{
oldOffsets.put(tp, offset - 1);
- seenOffsets.put(tp, offset - 1);
+ recordHandler.seenOffsets.put(tp, offset - 1);
});
- TestRecordHandler<K, V> captureOffsetAndExecuteTestHandler =
- new TestRecordHandler<K, V>(recordHandler)
- {
- @Override
- public void onNewRecord(ConsumerRecord<K, V> record)
- {
- seenOffsets.put(
- new TopicPartition(record.topic(), record.partition()),
- record.offset());
- receivedRecords.add(record);
- }
- };
-
- endlessConsumer =
- new EndlessConsumer<>(
- executor,
- properties.getClientId(),
- properties.getTopic(),
- kafkaConsumer,
- rebalanceListener,
- captureOffsetAndExecuteTestHandler);
-
endlessConsumer.start();
}
try
{
endlessConsumer.stop();
+ }
+ catch (Exception e)
+ {
+ log.debug("{}", e.toString());
+ }
+
+ try
+ {
testRecordProducer.close();
offsetConsumer.close();
}
@Import(ApplicationConfiguration.class)
public static class Configuration
{
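+ // Wrap the application's RecordHandler, so that the test can track the seen offsets and count the received messages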
+ @Bean
+ public RecordHandler recordHandler(RecordHandler applicationRecordHandler)
+ {
+ return new TestRecordHandler(applicationRecordHandler);
+ }
+
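+ // Create the consumer through Spring's ConsumerFactory; the fully qualified name avoids a clash with java.util.function.Consumer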
+ @Bean(destroyMethod = "close")
+ public org.apache.kafka.clients.consumer.Consumer<String, Message> kafkaConsumer(ConsumerFactory<String, Message> factory)
+ {
+ return factory.createConsumer();
+ }
+
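+ // A separate container factory for the dead-letter topic, reading keys and values as plain strings from the beginning of the topic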
+ @Bean
+ public ConcurrentKafkaListenerContainerFactory<String, String> dltContainerFactory(
+ KafkaProperties properties)
+ {
+ Map<String, Object> consumerProperties = new HashMap<>();
+
+ consumerProperties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, properties.getBootstrapServers());
+ consumerProperties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
+ consumerProperties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
+ consumerProperties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
+
+ DefaultKafkaConsumerFactory<String, String> dltConsumerFactory =
+ new DefaultKafkaConsumerFactory<>(consumerProperties);
+ ConcurrentKafkaListenerContainerFactory<String, String> factory =
+ new ConcurrentKafkaListenerContainerFactory<>();
+ factory.setConsumerFactory(dltConsumerFactory);
+ return factory;
+ }
+
+ @Bean
+ public DeadLetterTopicConsumer deadLetterTopicConsumer()
+ {
+ return new DeadLetterTopicConsumer();
+ }
}
}