package de.juplo.kafka;

import com.mongodb.client.MongoClient;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.RecordDeserializationException;
import org.apache.kafka.common.serialization.*;
import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.boot.autoconfigure.mongo.MongoProperties;
import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.context.annotation.Import;
import org.springframework.kafka.test.context.EmbeddedKafka;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;

import java.time.Duration;
import java.util.*;
import java.util.concurrent.ExecutorService;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

import static de.juplo.kafka.GenericApplicationTests.PARTITIONS;
import static de.juplo.kafka.GenericApplicationTests.TOPIC;
import static org.assertj.core.api.Assertions.*;
import static org.awaitility.Awaitility.*;


@SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
@TestPropertySource(
    properties = {
        "spring.kafka.bootstrap-servers=${spring.embedded.kafka.brokers}",
        "sumup.adder.topic=" + TOPIC,
        "spring.kafka.consumer.auto-commit-interval=500ms",
        "spring.mongodb.embedded.version=4.4.13" })
@EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
@EnableAutoConfiguration
@AutoConfigureDataMongo
@Slf4j
abstract class GenericApplicationTests<K, V>
{
    public static final String TOPIC = "FOO";
    public static final int PARTITIONS = 10;

    @Autowired
    KafkaConsumer<K, V> kafkaConsumer;
    @Autowired
    Consumer<ConsumerRecord<K, V>> consumer;
    @Autowired
    ApplicationProperties applicationProperties;
    @Autowired
    KafkaProperties kafkaProperties;
    @Autowired
    ExecutorService executor;
    @Autowired
    MongoClient mongoClient;
    @Autowired
    MongoProperties mongoProperties;
    @Autowired
    ConsumerRebalanceListener rebalanceListener;
    @Autowired
    RecordHandler<K, V> recordHandler;

    KafkaProducer<Bytes, Bytes> testRecordProducer;
    KafkaConsumer<Bytes, Bytes> offsetConsumer;
    EndlessConsumer<K, V> endlessConsumer;
    Map<TopicPartition, Long> oldOffsets;
    Map<TopicPartition, Long> seenOffsets;
    Set<ConsumerRecord<K, V>> receivedRecords;

    final RecordGenerator recordGenerator;
    final Consumer<ProducerRecord<Bytes, Bytes>> messageSender;

    public GenericApplicationTests(RecordGenerator recordGenerator)
    {
        this.recordGenerator = recordGenerator;
        this.messageSender = (record) -> sendMessage(record);
    }
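

    /** Test methods */

    /**
     * Happy path: every generated record is received and the offsets of the
     * processed records are committed while the consumer keeps running.
     */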
    @Test
    void commitsCurrentOffsetsOnSuccess() throws Exception
    {
        int numberOfGeneratedMessages =
            recordGenerator.generate(false, false, messageSender);

        await(numberOfGeneratedMessages + " records received")
            .atMost(Duration.ofSeconds(30))
            .pollInterval(Duration.ofSeconds(1))
            .until(() -> receivedRecords.size() >= numberOfGeneratedMessages);

        await("Offsets committed")
            .atMost(Duration.ofSeconds(10))
            .pollInterval(Duration.ofSeconds(1))
            .untilAsserted(() ->
            {
                checkSeenOffsetsForProgress();
                assertSeenOffsetsEqualCommittedOffsets(seenOffsets);
            });

        assertThatExceptionOfType(IllegalStateException.class)
            .isThrownBy(() -> endlessConsumer.exitStatus())
            .describedAs("Consumer should still be running");

        endlessConsumer.stop();
        recordGenerator.assertBusinessLogic();
    }
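
    /**
     * A poison pill (an undeserializable record) stops the consumer. Because the
     * offset of the failed record is committed, the record is read again after a
     * restart and the consumer fails a second time with a
     * RecordDeserializationException.
     */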
    @Test
    @SkipWhenErrorCannotBeGenerated(poisonPill = true)
    void commitsOffsetOfErrorForReprocessingOnDeserializationError()
    {
        int numberOfGeneratedMessages =
            recordGenerator.generate(true, false, messageSender);

        await("Consumer failed")
            .atMost(Duration.ofSeconds(30))
            .pollInterval(Duration.ofSeconds(1))
            .until(() -> !endlessConsumer.running());

        checkSeenOffsetsForProgress();
        assertSeenOffsetsEqualCommittedOffsets(seenOffsets);

        endlessConsumer.start();
        await("Consumer failed")
            .atMost(Duration.ofSeconds(30))
            .pollInterval(Duration.ofSeconds(1))
            .until(() -> !endlessConsumer.running());

        checkSeenOffsetsForProgress();
        assertSeenOffsetsEqualCommittedOffsets(seenOffsets);
        assertThat(receivedRecords.size())
            .describedAs("Received not all sent events")
            .isLessThan(numberOfGeneratedMessages);

        assertThatNoException()
            .describedAs("Consumer should not be running")
            .isThrownBy(() -> endlessConsumer.exitStatus());
        assertThat(endlessConsumer.exitStatus())
            .describedAs("Consumer should have exited abnormally")
            .containsInstanceOf(RecordDeserializationException.class);

        recordGenerator.assertBusinessLogic();
    }
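
    /**
     * A record that triggers an error in the business logic stops the consumer
     * before its offset is committed: the committed offsets stay behind the seen
     * offsets, so the record is reprocessed after a restart.
     */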
    @Test
    @SkipWhenErrorCannotBeGenerated(logicError = true)
    void doesNotCommitOffsetsOnLogicError()
    {
        int numberOfGeneratedMessages =
            recordGenerator.generate(false, true, messageSender);

        await("Consumer failed")
            .atMost(Duration.ofSeconds(30))
            .pollInterval(Duration.ofSeconds(1))
            .until(() -> !endlessConsumer.running());

        checkSeenOffsetsForProgress();
        assertSeenOffsetsAreBehindCommittedOffsets(seenOffsets);

        endlessConsumer.start();
        await("Consumer failed")
            .atMost(Duration.ofSeconds(30))
            .pollInterval(Duration.ofSeconds(1))
            .until(() -> !endlessConsumer.running());

        assertSeenOffsetsAreBehindCommittedOffsets(seenOffsets);

        assertThatNoException()
            .describedAs("Consumer should not be running")
            .isThrownBy(() -> endlessConsumer.exitStatus());
        assertThat(endlessConsumer.exitStatus())
            .describedAs("Consumer should have exited abnormally")
            .containsInstanceOf(RuntimeException.class);

        recordGenerator.assertBusinessLogic();
    }


    /** Helper methods for the verification of expectations */

    void assertSeenOffsetsEqualCommittedOffsets(Map<TopicPartition, Long> offsetsToCheck)
    {
        doForCurrentOffsets((tp, offset) ->
        {
            Long expected = offsetsToCheck.get(tp) + 1;
            log.debug("Checking, if the offset {} for {} is exactly {}", offset, tp, expected);
            assertThat(offset)
                .describedAs("Committed offset corresponds to the offset of the consumer")
                .isEqualTo(expected);
        });
    }

    void assertSeenOffsetsAreBehindCommittedOffsets(Map<TopicPartition, Long> offsetsToCheck)
    {
        List<Boolean> isOffsetBehindSeen = new LinkedList<>();

        doForCurrentOffsets((tp, offset) ->
        {
            Long expected = offsetsToCheck.get(tp) + 1;
            log.debug("Checking, if the offset {} for {} is at most {}", offset, tp, expected);
            assertThat(offset)
                .describedAs("Committed offset must be at most equal to the offset of the consumer")
                .isLessThanOrEqualTo(expected);
            isOffsetBehindSeen.add(offset < expected);
        });

        assertThat(isOffsetBehindSeen.stream().reduce(false, (result, next) -> result | next))
            .describedAs("Committed offsets are behind seen offsets")
            .isTrue();
    }

    void checkSeenOffsetsForProgress()
    {
        // Make sure that some messages were actually consumed!
        Set<TopicPartition> withProgress = new HashSet<>();
        partitions().forEach(tp ->
        {
            Long oldOffset = oldOffsets.get(tp) + 1;
            Long newOffset = seenOffsets.get(tp) + 1;
            if (!oldOffset.equals(newOffset))
            {
                log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
                withProgress.add(tp);
            }
        });
        assertThat(withProgress)
            .describedAs("Some offsets must have changed, compared to the old offset-positions")
            .isNotEmpty();
    }


    /** Helper methods for setting up and running the tests */

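    /**
     * Moves the committed offsets of the consumer group to the current end of
     * all partitions and commits these positions, so that each test run starts
     * without left-over records from earlier runs.
     */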
    void seekToEnd()
    {
        offsetConsumer.assign(partitions());
        offsetConsumer.seekToEnd(partitions());
        partitions().forEach(tp ->
        {
            // seekToEnd() works lazily: it only takes effect on poll()/position()
            Long offset = offsetConsumer.position(tp);
            log.info("New position for {}: {}", tp, offset);
        });
        // The new positions must be committed!
        offsetConsumer.commitSync();
        offsetConsumer.unsubscribe();
    }

    void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
    {
        offsetConsumer.assign(partitions());
        partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
        offsetConsumer.unsubscribe();
    }

    List<TopicPartition> partitions()
    {
        return IntStream
            .range(0, PARTITIONS)
            .mapToObj(partition -> new TopicPartition(TOPIC, partition))
            .collect(Collectors.toList());
    }
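

    /**
     * Supplied by the concrete test class via the constructor: generates the
     * test records for the key/value types under test, optionally including
     * poison pills or records that trigger a logic error, and returns the
     * number of records sent.
     */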
    public interface RecordGenerator
    {
        int generate(
            boolean poisonPills,
            boolean logicErrors,
            Consumer<ProducerRecord<Bytes, Bytes>> messageSender);

        default boolean canGeneratePoisonPill()
        {
            return true;
        }

        default boolean canGenerateLogicError()
        {
            return true;
        }

        default void assertBusinessLogic()
        {
            log.debug("No business-logic to assert");
        }
    }

    void sendMessage(ProducerRecord<Bytes, Bytes> record)
    {
        testRecordProducer.send(record, (metadata, e) ->
        {
            if (metadata != null)
            {
                log.debug(
                    "{}|{} - {}={}",
                    metadata.partition(),
                    metadata.offset(),
                    record.key(),
                    record.value());
            }
            else
            {
                log.warn(
                    "Exception for {}={}: {}",
                    record.key(),
                    record.value(),
                    e.toString());
            }
        });
    }
    @BeforeEach
    public void init()
    {
        Properties props;
        props = new Properties();
        props.put("bootstrap.servers", kafkaProperties.getBootstrapServers());
        props.put("linger.ms", 100);
        props.put("key.serializer", BytesSerializer.class.getName());
        props.put("value.serializer", BytesSerializer.class.getName());
        testRecordProducer = new KafkaProducer<>(props);

        props = new Properties();
        props.put("bootstrap.servers", kafkaProperties.getBootstrapServers());
        props.put("client.id", "OFFSET-CONSUMER");
        props.put("group.id", kafkaProperties.getConsumer().getGroupId());
        props.put("key.deserializer", BytesDeserializer.class.getName());
        props.put("value.deserializer", BytesDeserializer.class.getName());
        offsetConsumer = new KafkaConsumer<>(props);

        mongoClient.getDatabase(mongoProperties.getDatabase()).drop();
        seekToEnd();

        oldOffsets = new HashMap<>();
        seenOffsets = new HashMap<>();
        receivedRecords = new HashSet<>();

        doForCurrentOffsets((tp, offset) ->
        {
            oldOffsets.put(tp, offset - 1);
            seenOffsets.put(tp, offset - 1);
        });

        TestRecordHandler<K, V> captureOffsetAndExecuteTestHandler =
            new TestRecordHandler<K, V>(recordHandler)
            {
                @Override
                public void onNewRecord(ConsumerRecord<K, V> record)
                {
                    seenOffsets.put(
                        new TopicPartition(record.topic(), record.partition()),
                        record.offset());
                    receivedRecords.add(record);
                }
            };

        endlessConsumer =
            new EndlessConsumer<>(
                executor,
                kafkaProperties.getClientId(),
                applicationProperties.getTopic(),
                kafkaConsumer,
                rebalanceListener,
                captureOffsetAndExecuteTestHandler);

        endlessConsumer.start();
    }

    @AfterEach
    public void deinit()
    {
        try
        {
            testRecordProducer.close();
            offsetConsumer.close();
        }
        catch (Exception e)
        {
            log.info("Exception while stopping the consumer: {}", e.toString());
        }
    }
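

    /**
     * Test configuration: imports the application's bean definitions, so the
     * consumer under test is wired from the same configuration as the application.
     */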
    @TestConfiguration
    @Import(ApplicationConfiguration.class)
    public static class Configuration
    {
    }
}