package de.juplo.kafka;

import com.mongodb.client.MongoClient;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.RecordDeserializationException;
import org.apache.kafka.common.serialization.*;
import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.boot.autoconfigure.mongo.MongoProperties;
import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Import;
import org.springframework.kafka.config.KafkaListenerEndpointRegistry;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.test.context.EmbeddedKafka;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
import java.time.Duration;
import java.util.*;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

import static de.juplo.kafka.GenericApplicationTests.PARTITIONS;
import static de.juplo.kafka.GenericApplicationTests.TOPIC;
import static org.assertj.core.api.Assertions.*;
import static org.awaitility.Awaitility.*;

@SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
@TestPropertySource(
    properties = {
        "spring.kafka.bootstrap-servers=${spring.embedded.kafka.brokers}",
        "sumup.adder.topic=" + TOPIC,
        "spring.kafka.consumer.auto-commit-interval=500ms",
        "spring.mongodb.embedded.version=4.4.13" })
@EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
@EnableAutoConfiguration
@AutoConfigureDataMongo
@Slf4j
abstract class GenericApplicationTests<K, V>
{
    public static final String TOPIC = "FOO";
    public static final int PARTITIONS = 10;

    @Autowired
    org.apache.kafka.clients.consumer.Consumer<K, V> kafkaConsumer;
    @Autowired
    KafkaProperties kafkaProperties;
    @Autowired
    ApplicationProperties applicationProperties;
    @Autowired
    MongoClient mongoClient;
    @Autowired
    MongoProperties mongoProperties;
    @Autowired
    KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry;
    @Autowired
    TestRecordHandler recordHandler;
    @Autowired
    EndlessConsumer endlessConsumer;

    KafkaProducer<Bytes, Bytes> testRecordProducer;
    KafkaConsumer<Bytes, Bytes> offsetConsumer;
    Map<TopicPartition, Long> oldOffsets;

    final RecordGenerator recordGenerator;
    final Consumer<ProducerRecord<Bytes, Bytes>> messageSender;

    public GenericApplicationTests(RecordGenerator recordGenerator)
    {
        this.recordGenerator = recordGenerator;
        this.messageSender = (record) -> sendMessage(record);
    }


    /** Tests methods */

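    /**
     * Happy path: all generated records are consumed, the committed offsets match
     * the offsets seen by the consumer, and the consumer keeps running afterwards.
     */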
    @Test
    void commitsCurrentOffsetsOnSuccess() throws Exception
    {
        int numberOfGeneratedMessages =
            recordGenerator.generate(false, false, messageSender);

        await(numberOfGeneratedMessages + " records received")
            .atMost(Duration.ofSeconds(30))
            .pollInterval(Duration.ofSeconds(1))
            .until(() -> recordHandler.receivedMessages >= numberOfGeneratedMessages);

        await("Offsets committed")
            .atMost(Duration.ofSeconds(10))
            .pollInterval(Duration.ofSeconds(1))
            .untilAsserted(() ->
            {
                checkSeenOffsetsForProgress();
                assertSeenOffsetsEqualCommittedOffsets(recordHandler.seenOffsets);
            });

        assertThatExceptionOfType(IllegalStateException.class)
            .isThrownBy(() -> endlessConsumer.exitStatus())
            .describedAs("Consumer should still be running");

        endlessConsumer.stop();
        recordGenerator.assertBusinessLogic();
    }

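    /**
     * A poison pill (non-deserializable record) stops the consumer; the offset of the
     * failed record is committed, so it is read (and fails) again after a restart.
     */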
    @Test
    @SkipWhenErrorCannotBeGenerated(poisonPill = true)
    void commitsOffsetOfErrorForReprocessingOnDeserializationError()
    {
        int numberOfGeneratedMessages =
            recordGenerator.generate(true, false, messageSender);

        await("Consumer failed")
            .atMost(Duration.ofSeconds(30))
            .pollInterval(Duration.ofSeconds(1))
            .until(() -> !endlessConsumer.running());

        checkSeenOffsetsForProgress();
        assertSeenOffsetsEqualCommittedOffsets(recordHandler.seenOffsets);

        endlessConsumer.start();
        await("Consumer failed")
            .atMost(Duration.ofSeconds(30))
            .pollInterval(Duration.ofSeconds(1))
            .until(() -> !endlessConsumer.running());

        checkSeenOffsetsForProgress();
        assertSeenOffsetsEqualCommittedOffsets(recordHandler.seenOffsets);
        assertThat(recordHandler.receivedMessages)
            .describedAs("Received not all sent events")
            .isLessThan(numberOfGeneratedMessages);

        assertThatNoException()
            .describedAs("Consumer should not be running")
            .isThrownBy(() -> endlessConsumer.exitStatus());
        assertThat(endlessConsumer.exitStatus())
            .describedAs("Consumer should have exited abnormally")
            .containsInstanceOf(RecordDeserializationException.class);

        recordGenerator.assertBusinessLogic();
    }

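    /**
     * A record that triggers an error in the business logic stops the consumer;
     * a restart fails again and the exit status carries the RuntimeException.
     */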
    @Test
    @SkipWhenErrorCannotBeGenerated(logicError = true)
    void commitsOffsetsOfUnseenRecordsOnLogicError()
    {
        int numberOfGeneratedMessages =
            recordGenerator.generate(false, true, messageSender);

        await("Consumer failed")
            .atMost(Duration.ofSeconds(30))
            .pollInterval(Duration.ofSeconds(1))
            .until(() -> !endlessConsumer.running());

        checkSeenOffsetsForProgress();
        assertSeenOffsetsEqualCommittedOffsets(recordHandler.seenOffsets);

        endlessConsumer.start();
        await("Consumer failed")
            .atMost(Duration.ofSeconds(30))
            .pollInterval(Duration.ofSeconds(1))
            .until(() -> !endlessConsumer.running());

        assertSeenOffsetsEqualCommittedOffsets(recordHandler.seenOffsets);

        assertThatNoException()
            .describedAs("Consumer should not be running")
            .isThrownBy(() -> endlessConsumer.exitStatus());
        assertThat(endlessConsumer.exitStatus())
            .describedAs("Consumer should have exited abnormally")
            .containsInstanceOf(RuntimeException.class);

        recordGenerator.assertBusinessLogic();
    }


    /** Helper methods for the verification of expectations */

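    /** Asserts that each committed offset is exactly one ahead of the last offset seen for that partition. */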
    void assertSeenOffsetsEqualCommittedOffsets(Map<TopicPartition, Long> offsetsToCheck)
    {
        doForCurrentOffsets((tp, offset) ->
        {
            Long expected = offsetsToCheck.get(tp) + 1;
            log.debug("Checking if the offset {} for {} is exactly {}", offset, tp, expected);
            assertThat(offset)
                .describedAs("Committed offset corresponds to the offset of the consumer")
                .isEqualTo(expected);
        });
    }

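    /**
     * Asserts that no committed offset is ahead of the corresponding seen offset
     * and that at least one partition's committed offset lags behind it.
     */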
    void assertSeenOffsetsAreBehindCommittedOffsets(Map<TopicPartition, Long> offsetsToCheck)
    {
        List<Boolean> isOffsetBehindSeen = new LinkedList<>();

        doForCurrentOffsets((tp, offset) ->
        {
            Long expected = offsetsToCheck.get(tp) + 1;
            log.debug("Checking if the offset {} for {} is at most {}", offset, tp, expected);
            assertThat(offset)
                .describedAs("Committed offset must be at most equal to the offset of the consumer")
                .isLessThanOrEqualTo(expected);
            isOffsetBehindSeen.add(offset < expected);
        });

        assertThat(isOffsetBehindSeen.stream().reduce(false, (result, next) -> result | next))
            .describedAs("Committed offsets are behind seen offsets")
            .isTrue();
    }

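    /** Verifies that at least one partition advanced beyond the offsets recorded during setup. */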
    void checkSeenOffsetsForProgress()
    {
        // Be sure that some messages were consumed...!
        Set<TopicPartition> withProgress = new HashSet<>();
        partitions().forEach(tp ->
        {
            Long oldOffset = oldOffsets.get(tp) + 1;
            Long newOffset = recordHandler.seenOffsets.get(tp) + 1;
            if (!oldOffset.equals(newOffset))
            {
                log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
                withProgress.add(tp);
            }
        });
        assertThat(withProgress)
            .describedAs("Some offsets must have changed, compared to the old offset-positions")
            .isNotEmpty();
    }


    /** Helper methods for setting up and running the tests */

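    /** Resets the committed offsets of the test group to the current end of every partition. */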
    void seekToEnd()
    {
        offsetConsumer.assign(partitions());
        offsetConsumer.seekToEnd(partitions());
        partitions().forEach(tp ->
        {
            // seekToEnd() works lazily: it only takes effect on poll()/position()
            Long offset = offsetConsumer.position(tp);
            log.info("New position for {}: {}", tp, offset);
        });
        // The new positions must be committed!
        offsetConsumer.commitSync();
        offsetConsumer.unsubscribe();
    }

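    /** Applies the given callback to the current consumer position of every partition. */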
    void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
    {
        offsetConsumer.assign(partitions());
        partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
        offsetConsumer.unsubscribe();
    }

    List<TopicPartition> partitions()
    {
        return
            IntStream
                .range(0, PARTITIONS)
                .mapToObj(partition -> new TopicPartition(TOPIC, partition))
                .collect(Collectors.toList());
    }

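    /**
     * Implemented by the concrete test class: generates the test records and declares
     * which error types (poison pill, logic error) it is able to produce.
     */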
    public interface RecordGenerator
    {
        int generate(
            boolean poisonPills,
            boolean logicErrors,
            Consumer<ProducerRecord<Bytes, Bytes>> messageSender);

        default boolean canGeneratePoisonPill()
        {
            return true;
        }

        default boolean canGenerateLogicError()
        {
            return true;
        }

        default void assertBusinessLogic()
        {
            log.debug("No business-logic to assert");
        }
    }

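    /** Sends a single test record asynchronously and logs the result of the send. */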
    void sendMessage(ProducerRecord<Bytes, Bytes> record)
    {
        testRecordProducer.send(record, (metadata, e) ->
        {
            if (metadata != null)
            {
                log.debug(
                    "{}|{} - {}={}",
                    metadata.partition(),
                    metadata.offset(),
                    record.key(),
                    record.value());
            }
            else
            {
                log.warn(
                    "Exception for {}={}: {}",
                    record.key(),
                    record.value(),
                    e.toString());
            }
        });
    }

    @BeforeEach
    public void init()
    {
        Properties props;
        props = new Properties();
        props.put("bootstrap.servers", kafkaProperties.getBootstrapServers());
        props.put("linger.ms", 100);
        props.put("key.serializer", BytesSerializer.class.getName());
        props.put("value.serializer", BytesSerializer.class.getName());
        testRecordProducer = new KafkaProducer<>(props);

        props = new Properties();
        props.put("bootstrap.servers", kafkaProperties.getBootstrapServers());
        props.put("client.id", "OFFSET-CONSUMER");
        props.put("group.id", kafkaProperties.getConsumer().getGroupId());
        props.put("key.deserializer", BytesDeserializer.class.getName());
        props.put("value.deserializer", BytesDeserializer.class.getName());
        offsetConsumer = new KafkaConsumer<>(props);

        mongoClient.getDatabase(mongoProperties.getDatabase()).drop();
        seekToEnd();

        oldOffsets = new HashMap<>();
        recordHandler.seenOffsets = new HashMap<>();
        recordHandler.receivedMessages = 0;

        doForCurrentOffsets((tp, offset) ->
        {
            oldOffsets.put(tp, offset - 1);
            recordHandler.seenOffsets.put(tp, offset - 1);
        });

        endlessConsumer.start();
    }

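    /** Per-test teardown: stops the consumer under test and closes the test clients. */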
    @AfterEach
    public void deinit()
    {
        try
        {
            endlessConsumer.stop();
        }
        catch (Exception e)
        {
            log.debug("{}", e.toString());
        }

        try
        {
            testRecordProducer.close();
            offsetConsumer.close();
        }
        catch (Exception e)
        {
            log.info("Exception while stopping the consumer: {}", e.toString());
        }
    }

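    /**
     * Test configuration: wraps the application's RecordHandler in a TestRecordHandler
     * and provides the Kafka consumer used by the application as a closeable bean.
     */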
    @TestConfiguration
    @Import(ApplicationConfiguration.class)
    public static class Configuration
    {
        @Bean
        public RecordHandler recordHandler(RecordHandler applicationRecordHandler)
        {
            return new TestRecordHandler(applicationRecordHandler);
        }

        @Bean(destroyMethod = "close")
        public org.apache.kafka.clients.consumer.Consumer<String, Message> kafkaConsumer(ConsumerFactory<String, Message> factory)
        {
            return factory.createConsumer();
        }
    }
}