package de.juplo.kafka;

import com.mongodb.client.MongoClient;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.*;
import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.boot.autoconfigure.mongo.MongoProperties;
import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Import;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerEndpointRegistry;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.test.context.EmbeddedKafka;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;

import java.time.Duration;
import java.util.*;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

import static de.juplo.kafka.GenericApplicationTests.PARTITIONS;
import static de.juplo.kafka.GenericApplicationTests.TOPIC;
import static org.assertj.core.api.Assertions.*;
import static org.awaitility.Awaitility.*;

@SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
@TestPropertySource(
    properties = {
        "spring.kafka.bootstrap-servers=${spring.embedded.kafka.brokers}",
        "spring.kafka.producer.bootstrap-servers=${spring.embedded.kafka.brokers}",
        "sumup.adder.topic=" + TOPIC,
        "spring.kafka.consumer.auto-commit-interval=500ms",
        "spring.mongodb.embedded.version=4.4.13" })
@EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
@EnableAutoConfiguration
@AutoConfigureDataMongo
@Slf4j
abstract class GenericApplicationTests<K, V>
{
  public static final String TOPIC = "FOO";
  public static final int PARTITIONS = 10;


  @Autowired
  org.apache.kafka.clients.consumer.Consumer<K, V> kafkaConsumer;
  @Autowired
  KafkaProperties kafkaProperties;
  @Autowired
  ApplicationProperties applicationProperties;
  @Autowired
  MongoClient mongoClient;
  @Autowired
  MongoProperties mongoProperties;
  @Autowired
  KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry;
  @Autowired
  TestRecordHandler recordHandler;
  @Autowired
  DeadLetterTopicConsumer deadLetterTopicConsumer;
  @Autowired
  EndlessConsumer endlessConsumer;

  KafkaProducer<Bytes, Bytes> testRecordProducer;
  KafkaConsumer<Bytes, Bytes> offsetConsumer;
  Map<TopicPartition, Long> oldOffsets;


  final RecordGenerator recordGenerator;
  final Consumer<ProducerRecord<Bytes, Bytes>> messageSender;

  public GenericApplicationTests(RecordGenerator recordGenerator)
  {
    this.recordGenerator = recordGenerator;
    this.messageSender = (record) -> sendMessage(record);
  }

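  /** Test methods */

  // Happy path: every generated record is consumed, the seen offsets advance,
  // and the committed offsets match the offsets seen by the record handler.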
  @Test
  void commitsCurrentOffsetsOnSuccess() throws Exception
  {
    recordGenerator.generate(false, false, messageSender);

    int numberOfGeneratedMessages = recordGenerator.getNumberOfMessages();

    await(numberOfGeneratedMessages + " records received")
        .atMost(Duration.ofSeconds(30))
        .pollInterval(Duration.ofSeconds(1))
        .until(() -> recordHandler.receivedMessages >= numberOfGeneratedMessages);

    await("Offsets committed")
        .atMost(Duration.ofSeconds(10))
        .pollInterval(Duration.ofSeconds(1))
        .untilAsserted(() ->
        {
          checkSeenOffsetsForProgress();
          assertSeenOffsetsEqualCommittedOffsets(recordHandler.seenOffsets);
        });

    assertThat(endlessConsumer.running())
        .describedAs("Consumer should still be running")
        .isTrue();

    endlessConsumer.stop();
    recordGenerator.assertBusinessLogic();
  }

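  // Deserialization errors: the poison pills end up on the dead-letter topic,
  // the consumer keeps running, and the committed offsets still match the seen offsets.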
  @Test
  @SkipWhenErrorCannotBeGenerated(poisonPill = true)
  void commitsOffsetOfErrorForReprocessingOnDeserializationError()
  {
    recordGenerator.generate(true, false, messageSender);

    int numberOfValidMessages =
        recordGenerator.getNumberOfMessages() -
        recordGenerator.getNumberOfPoisonPills();

    await(numberOfValidMessages + " records received")
        .atMost(Duration.ofSeconds(30))
        .pollInterval(Duration.ofSeconds(1))
        .until(() -> recordHandler.receivedMessages >= numberOfValidMessages);
    await(recordGenerator.getNumberOfPoisonPills() + " poison-pills received")
        .atMost(Duration.ofSeconds(30))
        .pollInterval(Duration.ofSeconds(1))
        .until(() -> deadLetterTopicConsumer.messages.size() == recordGenerator.getNumberOfPoisonPills());

    await("Offsets committed")
        .atMost(Duration.ofSeconds(10))
        .pollInterval(Duration.ofSeconds(1))
        .untilAsserted(() ->
        {
          checkSeenOffsetsForProgress();
          assertSeenOffsetsEqualCommittedOffsets(recordHandler.seenOffsets);
        });

    assertThat(endlessConsumer.running())
        .describedAs("Consumer should still be running")
        .isTrue();

    endlessConsumer.stop();
    recordGenerator.assertBusinessLogic();
  }

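  // Logic errors: records that make the business logic fail are routed to the
  // dead-letter topic; the consumer keeps running and commits the offsets of the
  // records it has seen.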
  @Test
  @SkipWhenErrorCannotBeGenerated(logicError = true)
  void commitsOffsetsOfUnseenRecordsOnLogicError()
  {
    recordGenerator.generate(false, true, messageSender);

    int numberOfValidMessages =
        recordGenerator.getNumberOfMessages() -
        recordGenerator.getNumberOfLogicErrors();

    await(numberOfValidMessages + " records received")
        .atMost(Duration.ofSeconds(30))
        .pollInterval(Duration.ofSeconds(1))
        .until(() -> recordHandler.receivedMessages >= numberOfValidMessages);
    await(recordGenerator.getNumberOfLogicErrors() + " logic-errors received")
        .atMost(Duration.ofSeconds(30))
        .pollInterval(Duration.ofSeconds(1))
        .until(() -> deadLetterTopicConsumer.messages.size() == recordGenerator.getNumberOfLogicErrors());

    await("Offsets committed")
        .atMost(Duration.ofSeconds(10))
        .pollInterval(Duration.ofSeconds(1))
        .untilAsserted(() ->
        {
          checkSeenOffsetsForProgress();
          assertSeenOffsetsEqualCommittedOffsets(recordHandler.seenOffsets);
        });

    assertThat(endlessConsumer.running())
        .describedAs("Consumer should still be running")
        .isTrue();

    endlessConsumer.stop();
    recordGenerator.assertBusinessLogic();
  }

  /** Helper methods for the verification of expectations */

  void assertSeenOffsetsEqualCommittedOffsets(Map<TopicPartition, Long> offsetsToCheck)
  {
    doForCurrentOffsets((tp, offset) ->
    {
      Long expected = offsetsToCheck.get(tp) + 1;
      log.debug("Checking if the offset {} for {} is exactly {}", offset, tp, expected);
      assertThat(offset)
          .describedAs("Committed offset corresponds to the offset of the consumer")
          .isEqualTo(expected);
    });
  }

  void assertSeenOffsetsAreBehindCommittedOffsets(Map<TopicPartition, Long> offsetsToCheck)
  {
    List<Boolean> isOffsetBehindSeen = new LinkedList<>();

    doForCurrentOffsets((tp, offset) ->
    {
      Long expected = offsetsToCheck.get(tp) + 1;
      log.debug("Checking if the offset {} for {} is at most {}", offset, tp, expected);
      assertThat(offset)
          .describedAs("Committed offset must be at most equal to the offset of the consumer")
          .isLessThanOrEqualTo(expected);
      isOffsetBehindSeen.add(offset < expected);
    });

    assertThat(isOffsetBehindSeen.stream().reduce(false, (result, next) -> result | next))
        .describedAs("Committed offsets are behind seen offsets")
        .isTrue();
  }

  void checkSeenOffsetsForProgress()
  {
    // Be sure that some messages were consumed!
    Set<TopicPartition> withProgress = new HashSet<>();
    partitions().forEach(tp ->
    {
      Long oldOffset = oldOffsets.get(tp) + 1;
      Long newOffset = recordHandler.seenOffsets.get(tp) + 1;
      if (!oldOffset.equals(newOffset))
      {
        log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
        withProgress.add(tp);
      }
    });
    assertThat(withProgress)
        .describedAs("Some offsets must have changed, compared to the old offset-positions")
        .isNotEmpty();
  }

  /** Helper methods for setting up and running the tests */

  void seekToEnd()
  {
    offsetConsumer.assign(partitions());
    offsetConsumer.seekToEnd(partitions());
    partitions().forEach(tp ->
    {
      // seekToEnd() works lazily: it only takes effect on poll()/position()
      Long offset = offsetConsumer.position(tp);
      log.info("New position for {}: {}", tp, offset);
    });
    // The new positions must be committed!
    offsetConsumer.commitSync();
    offsetConsumer.unsubscribe();
  }

  void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
  {
    offsetConsumer.assign(partitions());
    partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
    offsetConsumer.unsubscribe();
  }

  List<TopicPartition> partitions()
  {
    return
        IntStream
            .range(0, PARTITIONS)
            .mapToObj(partition -> new TopicPartition(TOPIC, partition))
            .collect(Collectors.toList());
  }

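  /**
   * Contract for the concrete test subclasses: generates the test records
   * (optionally including poison pills and/or records that provoke logic errors),
   * reports how many records of each kind were produced, and can verify the
   * business logic after the test run.
   */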
  public interface RecordGenerator
  {
    void generate(
        boolean poisonPills,
        boolean logicErrors,
        Consumer<ProducerRecord<Bytes, Bytes>> messageSender);

    int getNumberOfMessages();
    int getNumberOfPoisonPills();
    int getNumberOfLogicErrors();

    // By default, a generator is assumed to be able to produce both error types;
    // generators that cannot should override these methods, so that the
    // corresponding tests are skipped via @SkipWhenErrorCannotBeGenerated.
    default boolean canGeneratePoisonPill() { return true; }

    default boolean canGenerateLogicError() { return true; }

    default void assertBusinessLogic()
    {
      log.debug("No business-logic to assert");
    }
  }

  void sendMessage(ProducerRecord<Bytes, Bytes> record)
  {
    testRecordProducer.send(record, (metadata, e) ->
    {
      if (metadata != null)
      {
        log.debug("{}|{} - {}={}", metadata.partition(), metadata.offset(), record.key(), record.value());
      }
      else
      {
        log.warn("Exception for {}={}: {}", record.key(), record.value(), e.toString());
      }
    });
  }

  @BeforeEach
  public void init()
  {
    Properties props;
    props = new Properties();
    props.put("bootstrap.servers", kafkaProperties.getBootstrapServers());
    props.put("linger.ms", 100);
    props.put("key.serializer", BytesSerializer.class.getName());
    props.put("value.serializer", BytesSerializer.class.getName());
    testRecordProducer = new KafkaProducer<>(props);

    props = new Properties();
    props.put("bootstrap.servers", kafkaProperties.getBootstrapServers());
    props.put("client.id", "OFFSET-CONSUMER");
    props.put("group.id", kafkaProperties.getConsumer().getGroupId());
    props.put("key.deserializer", BytesDeserializer.class.getName());
    props.put("value.deserializer", BytesDeserializer.class.getName());
    offsetConsumer = new KafkaConsumer<>(props);

    mongoClient.getDatabase(mongoProperties.getDatabase()).drop();
    seekToEnd();

    oldOffsets = new HashMap<>();
    recordHandler.seenOffsets = new HashMap<>();
    recordHandler.receivedMessages = 0;

    deadLetterTopicConsumer.messages.clear();

    doForCurrentOffsets((tp, offset) ->
    {
      oldOffsets.put(tp, offset - 1);
      recordHandler.seenOffsets.put(tp, offset - 1);
    });

    endlessConsumer.start();
  }

  @AfterEach
  public void deinit()
  {
    try
    {
      endlessConsumer.stop();
    }
    catch (Exception e)
    {
      log.debug("{}", e.toString());
    }

    try
    {
      testRecordProducer.close();
      offsetConsumer.close();
    }
    catch (Exception e)
    {
      log.info("Exception while stopping the consumer: {}", e.toString());
    }
  }

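  /**
   * Test-scoped Spring configuration: imports the application configuration and
   * adds the beans that the tests need (a TestRecordHandler wrapping the
   * application's RecordHandler, a manually created consumer, a listener-container
   * factory for the dead-letter topic and the DeadLetterTopicConsumer itself).
   */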
  @TestConfiguration
  @Import(ApplicationConfiguration.class)
  public static class Configuration
  {
    @Bean
    public RecordHandler recordHandler(RecordHandler applicationRecordHandler)
    {
      return new TestRecordHandler(applicationRecordHandler);
    }

    @Bean(destroyMethod = "close")
    public org.apache.kafka.clients.consumer.Consumer<String, Message> kafkaConsumer(ConsumerFactory<String, Message> factory)
    {
      return factory.createConsumer();
    }

    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, String> dltContainerFactory(
        KafkaProperties properties)
    {
      Map<String, Object> consumerProperties = new HashMap<>();

      consumerProperties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, properties.getBootstrapServers());
      consumerProperties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
      consumerProperties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
      consumerProperties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

      DefaultKafkaConsumerFactory<String, String> dltConsumerFactory =
          new DefaultKafkaConsumerFactory<>(consumerProperties);
      ConcurrentKafkaListenerContainerFactory<String, String> factory =
          new ConcurrentKafkaListenerContainerFactory<>();
      factory.setConsumerFactory(dltConsumerFactory);
      return factory;
    }

    @Bean
    public DeadLetterTopicConsumer deadLetterTopicConsumer()
    {
      return new DeadLetterTopicConsumer();
    }
  }
}