package de.juplo.kafka;

import com.mongodb.client.MongoClient;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.RecordDeserializationException;
import org.apache.kafka.common.serialization.*;
import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.autoconfigure.mongo.MongoProperties;
import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.context.annotation.Import;
import org.springframework.kafka.test.context.EmbeddedKafka;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;

import java.time.Duration;
import java.util.*;
import java.util.concurrent.ExecutorService;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

import static de.juplo.kafka.GenericApplicationTests.PARTITIONS;
import static de.juplo.kafka.GenericApplicationTests.TOPIC;
import static org.assertj.core.api.Assertions.*;
import static org.awaitility.Awaitility.*;
@SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
@TestPropertySource(
    properties = {
        "sumup.adder.bootstrap-server=${spring.embedded.kafka.brokers}",
        "sumup.adder.topic=" + TOPIC,
        "sumup.adder.commit-interval=500ms",
        "spring.mongodb.embedded.version=4.4.13" })
@EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
@EnableAutoConfiguration
@AutoConfigureDataMongo
@Slf4j
abstract class GenericApplicationTests<K, V>
{
  public static final String TOPIC = "FOO";
  public static final int PARTITIONS = 10;
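
  // Collaborators injected from the Spring context: these beans come from
  // ApplicationConfiguration (imported via the nested @TestConfiguration below)
  // or from Spring Boot's auto-configuration.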
  @Autowired
  KafkaConsumer<K, V> kafkaConsumer;
  @Autowired
  Consumer<ConsumerRecord<K, V>> consumer;
  @Autowired
  ApplicationProperties properties;
  @Autowired
  ExecutorService executor;
  @Autowired
  StateRepository stateRepository;
  @Autowired
  MongoClient mongoClient;
  @Autowired
  MongoProperties mongoProperties;
  @Autowired
  PollIntervalAwareConsumerRebalanceListener rebalanceListener;
  @Autowired
  RecordHandler<K, V> recordHandler;

  KafkaProducer<Bytes, Bytes> testRecordProducer;
  KafkaConsumer<Bytes, Bytes> offsetConsumer;
  EndlessConsumer<K, V> endlessConsumer;
  Map<TopicPartition, Long> oldOffsets;
  Map<TopicPartition, Long> seenOffsets;
  Set<ConsumerRecord<K, V>> receivedRecords;

  final RecordGenerator recordGenerator;
  final Consumer<ProducerRecord<Bytes, Bytes>> messageSender;

  public GenericApplicationTests(RecordGenerator recordGenerator)
  {
    this.recordGenerator = recordGenerator;
    this.messageSender = (record) -> sendMessage(record);
  }
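

  /** Test methods */

  // The happy path: all generated records get consumed and the committed offsets
  // end up exactly one above the last seen offset of every partition.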
  @Test
  void commitsCurrentOffsetsOnSuccess()
  {
    int numberOfGeneratedMessages =
        recordGenerator.generate(false, false, messageSender);

    await(numberOfGeneratedMessages + " records received")
        .atMost(Duration.ofSeconds(30))
        .pollInterval(Duration.ofSeconds(1))
        .until(() -> receivedRecords.size() >= numberOfGeneratedMessages);

    await("Offsets committed")
        .atMost(Duration.ofSeconds(10))
        .pollInterval(Duration.ofSeconds(1))
        .untilAsserted(() ->
        {
          checkSeenOffsetsForProgress();
          assertSeenOffsetsEqualCommittedOffsets(seenOffsets);
        });

    assertThatExceptionOfType(IllegalStateException.class)
        .isThrownBy(() -> endlessConsumer.exitStatus())
        .describedAs("Consumer should still be running");

    recordGenerator.assertBusinessLogic();
  }
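
  // A poison pill is a record whose bytes cannot be deserialized into the expected
  // type. The consumer fails, but commits the offsets it has successfully processed,
  // so a restart re-reads the poison pill and fails at exactly the same position again.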
  @Test
  @SkipWhenErrorCannotBeGenerated(poisonPill = true)
  void commitsOffsetOfErrorForReprocessingOnDeserializationError()
  {
    int numberOfGeneratedMessages =
        recordGenerator.generate(true, false, messageSender);

    await("Consumer failed")
        .atMost(Duration.ofSeconds(30))
        .pollInterval(Duration.ofSeconds(1))
        .until(() -> !endlessConsumer.running());

    checkSeenOffsetsForProgress();
    assertSeenOffsetsEqualCommittedOffsets(seenOffsets);

    endlessConsumer.start();
    await("Consumer failed")
        .atMost(Duration.ofSeconds(30))
        .pollInterval(Duration.ofSeconds(1))
        .until(() -> !endlessConsumer.running());

    checkSeenOffsetsForProgress();
    assertSeenOffsetsEqualCommittedOffsets(seenOffsets);
    assertThat(receivedRecords.size())
        .describedAs("Received not all sent events")
        .isLessThan(numberOfGeneratedMessages);

    assertThatNoException()
        .describedAs("Consumer should not be running")
        .isThrownBy(() -> endlessConsumer.exitStatus());
    assertThat(endlessConsumer.exitStatus())
        .describedAs("Consumer should have exited abnormally")
        .containsInstanceOf(RecordDeserializationException.class);

    recordGenerator.assertBusinessLogic();
  }
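
  // A logic error occurs while processing an otherwise valid record. Since the
  // failed batch must be reprocessed after a restart, the committed offsets have
  // to lag behind the offsets that were already seen by the consumer.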
  @Test
  @SkipWhenErrorCannotBeGenerated(logicError = true)
  void doesNotCommitOffsetsOnLogicError()
  {
    int numberOfGeneratedMessages =
        recordGenerator.generate(false, true, messageSender);

    await("Consumer failed")
        .atMost(Duration.ofSeconds(30))
        .pollInterval(Duration.ofSeconds(1))
        .until(() -> !endlessConsumer.running());

    checkSeenOffsetsForProgress();
    assertSeenOffsetsAreBehindCommittedOffsets(seenOffsets);

    endlessConsumer.start();
    await("Consumer failed")
        .atMost(Duration.ofSeconds(30))
        .pollInterval(Duration.ofSeconds(1))
        .until(() -> !endlessConsumer.running());

    assertSeenOffsetsAreBehindCommittedOffsets(seenOffsets);

    assertThatNoException()
        .describedAs("Consumer should not be running")
        .isThrownBy(() -> endlessConsumer.exitStatus());
    assertThat(endlessConsumer.exitStatus())
        .describedAs("Consumer should have exited abnormally")
        .containsInstanceOf(RuntimeException.class);

    recordGenerator.assertBusinessLogic();
  }


  /** Helper methods for the verification of expectations */

  void assertSeenOffsetsEqualCommittedOffsets(Map<TopicPartition, Long> offsetsToCheck)
  {
    doForCurrentOffsets((tp, offset) ->
    {
      Long expected = offsetsToCheck.get(tp) + 1;
      log.debug("Checking, if the offset {} for {} is exactly {}", offset, tp, expected);
      assertThat(offset)
          .describedAs("Committed offset corresponds to the offset of the consumer")
          .isEqualTo(expected);
    });
  }

  void assertSeenOffsetsAreBehindCommittedOffsets(Map<TopicPartition, Long> offsetsToCheck)
  {
    List<Boolean> isOffsetBehindSeen = new LinkedList<>();

    doForCurrentOffsets((tp, offset) ->
    {
      Long expected = offsetsToCheck.get(tp) + 1;
      log.debug("Checking, if the offset {} for {} is at most {}", offset, tp, expected);
      assertThat(offset)
          .describedAs("Committed offset corresponds to the offset of the consumer")
          .isLessThanOrEqualTo(expected);
      isOffsetBehindSeen.add(offset < expected);
    });

    assertThat(isOffsetBehindSeen.stream().reduce(false, (result, next) -> result | next))
        .describedAs("Committed offsets are behind seen offsets")
        .isTrue();
  }

  void checkSeenOffsetsForProgress()
  {
    // Be sure, that some messages were consumed...!
    Set<TopicPartition> withProgress = new HashSet<>();
    partitions().forEach(tp ->
    {
      Long oldOffset = oldOffsets.get(tp) + 1;
      Long newOffset = seenOffsets.get(tp) + 1;
      if (!oldOffset.equals(newOffset))
      {
        log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
        withProgress.add(tp);
      }
    });
    assertThat(withProgress)
        .describedAs("Some offsets must have changed, compared to the old offset-positions")
        .isNotEmpty();
  }


  /** Helper methods for setting up and running the tests */
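
  // Reads the current position of every partition with the separate offsetConsumer
  // and stores it as the starting offset in the corresponding MongoDB StateDocument,
  // so that the application under test starts from a known position.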
  void seekToEnd()
  {
    offsetConsumer.assign(partitions());
    partitions().forEach(tp ->
    {
      Long offset = offsetConsumer.position(tp);
      log.info("New position for {}: {}", tp, offset);
      Integer partition = tp.partition();
      StateDocument document =
          stateRepository
              .findById(partition.toString())
              .orElse(new StateDocument(partition));
      document.offset = offset;
      stateRepository.save(document);
    });
    offsetConsumer.unsubscribe();
  }

  void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
  {
    partitions().forEach(tp ->
    {
      String partition = Integer.toString(tp.partition());
      Optional<Long> offset = stateRepository.findById(partition).map(document -> document.offset);
      consumer.accept(tp, offset.orElse(0L));
    });
  }

  List<TopicPartition> partitions()
  {
    return
        IntStream
            .range(0, PARTITIONS)
            .mapToObj(partition -> new TopicPartition(TOPIC, partition))
            .collect(Collectors.toList());
  }
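

  // Contract for the concrete test subclasses: generate() writes test records via
  // the given sender and returns how many records were produced; the can*() methods
  // report whether a poison pill or a logic error can be generated, which backs the
  // @SkipWhenErrorCannotBeGenerated annotation on the error tests.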
  public interface RecordGenerator
  {
    int generate(
        boolean poisonPills,
        boolean logicErrors,
        Consumer<ProducerRecord<Bytes, Bytes>> messageSender);

    default boolean canGeneratePoisonPill()
    {
      return true;
    }

    default boolean canGenerateLogicError()
    {
      return true;
    }

    default void assertBusinessLogic()
    {
      log.debug("No business-logic to assert");
    }
  }
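

  // Sends a test record asynchronously; the callback only logs success or failure,
  // since the tests wait for the consumed records, not for the delivery acknowledgement.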
  void sendMessage(ProducerRecord<Bytes, Bytes> record)
  {
    testRecordProducer.send(record, (metadata, e) ->
    {
      if (metadata != null)
      {
        log.debug(
            "{}|{} - {}={}",
            metadata.partition(),
            metadata.offset(),
            record.key(),
            record.value());
      }
      else
      {
        log.warn(
            "Exception for {}={}: {}",
            record.key(),
            record.value(),
            e.toString());
      }
    });
  }
  @BeforeEach
  public void init()
  {
    Properties props;
    props = new Properties();
    props.put("bootstrap.servers", properties.getBootstrapServer());
    props.put("linger.ms", 100);
    props.put("key.serializer", BytesSerializer.class.getName());
    props.put("value.serializer", BytesSerializer.class.getName());
    testRecordProducer = new KafkaProducer<>(props);

    props = new Properties();
    props.put("bootstrap.servers", properties.getBootstrapServer());
    props.put("client.id", "OFFSET-CONSUMER");
    props.put("group.id", properties.getGroupId());
    props.put("key.deserializer", BytesDeserializer.class.getName());
    props.put("value.deserializer", BytesDeserializer.class.getName());
    offsetConsumer = new KafkaConsumer<>(props);

    mongoClient.getDatabase(mongoProperties.getDatabase()).drop();
    seekToEnd();

    oldOffsets = new HashMap<>();
    seenOffsets = new HashMap<>();
    receivedRecords = new HashSet<>();

    doForCurrentOffsets((tp, offset) ->
    {
      oldOffsets.put(tp, offset - 1);
      seenOffsets.put(tp, offset - 1);
    });

    TestRecordHandler<K, V> captureOffsetAndExecuteTestHandler =
        new TestRecordHandler<K, V>(recordHandler)
        {
          @Override
          public void onNewRecord(ConsumerRecord<K, V> record)
          {
            seenOffsets.put(
                new TopicPartition(record.topic(), record.partition()),
                record.offset());
            receivedRecords.add(record);
          }
        };

    endlessConsumer =
        new EndlessConsumer<>(
            executor,
            properties.getClientId(),
            properties.getTopic(),
            kafkaConsumer,
            rebalanceListener,
            captureOffsetAndExecuteTestHandler);

    endlessConsumer.start();
  }
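
  // Per-test teardown: stop the consumer and close the Kafka clients; shutdown
  // problems are only logged, so they cannot mask the actual test result.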
  @AfterEach
  public void deinit()
  {
    try
    {
      endlessConsumer.stop();
      testRecordProducer.close();
      offsetConsumer.close();
    }
    catch (Exception e)
    {
      log.info("Exception while stopping the consumer: {}", e.toString());
    }
  }


  @TestConfiguration
  @Import(ApplicationConfiguration.class)
  public static class Configuration
  {
  }
}