package de.juplo.kafka;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.RecordDeserializationException;
import org.apache.kafka.common.serialization.*;
import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Import;
import org.springframework.kafka.test.context.EmbeddedKafka;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;

import java.time.Duration;
import java.util.*;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.function.BiConsumer;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

import static de.juplo.kafka.ApplicationTests.PARTITIONS;
import static de.juplo.kafka.ApplicationTests.TOPIC;
import static org.assertj.core.api.Assertions.*;
import static org.awaitility.Awaitility.*;


@SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
@TestPropertySource(
    properties = {
        "consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
        "consumer.topic=" + TOPIC,
        "consumer.commit-interval=1s",
        "spring.mongodb.embedded.version=4.4.13" })
@EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
@EnableAutoConfiguration
@AutoConfigureDataMongo
@Slf4j
class ApplicationTests
{
  public static final String TOPIC = "FOO";
  public static final int PARTITIONS = 10;
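

  /*
   * The fixtures below are either created directly (stringSerializer) or
   * injected by Spring from ApplicationConfiguration and the test-specific
   * Configuration class at the bottom of this file.
   */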
  StringSerializer stringSerializer = new StringSerializer();

  @Autowired
  Serializer<Long> valueSerializer;
  @Autowired
  KafkaProducer<String, Bytes> kafkaProducer;
  @Autowired
  KafkaConsumer<String, Long> kafkaConsumer;
  @Autowired
  KafkaConsumer<Bytes, Bytes> offsetConsumer;
  @Autowired
  ApplicationProperties properties;
  @Autowired
  ExecutorService executor;
  @Autowired
  StateRepository stateRepository;
  @Autowired
  ApplicationRebalanceListener rebalanceListener;
  @Autowired
  ApplicationRecordHandler recordHandler;

  EndlessConsumer<String, Long> endlessConsumer;
  Map<TopicPartition, Long> oldOffsets;
  Map<TopicPartition, Long> newOffsets;
  Set<ConsumerRecord<String, Long>> receivedRecords;
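

  /**
   * Test methods: the first case checks that offsets are committed correctly
   * for successfully processed records; the second case checks that, after a
   * deserialization error, the offset of the failed record is kept, so that
   * the record is reprocessed when the consumer is restarted.
   */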
  @Test
  void commitsCurrentOffsetsOnSuccess() throws ExecutionException, InterruptedException
  {
    send100Messages((partition, key, counter) ->
    {
      Bytes value = new Bytes(valueSerializer.serialize(TOPIC, counter));
      return new ProducerRecord<>(TOPIC, partition, key, value);
    });

    await("100 records received")
        .atMost(Duration.ofSeconds(30))
        .pollInterval(Duration.ofSeconds(1))
        .until(() -> receivedRecords.size() >= 100);

    await("Offsets committed")
        .atMost(Duration.ofSeconds(10))
        .pollInterval(Duration.ofSeconds(1))
        .untilAsserted(() ->
        {
          checkSeenOffsetsForProgress();
          compareToCommitedOffsets(newOffsets);
        });

    assertThatExceptionOfType(IllegalStateException.class)
        .describedAs("Consumer should still be running")
        .isThrownBy(() -> endlessConsumer.exitStatus());
  }

  @Test
  void commitsOffsetOfErrorForReprocessingOnDeserializationError()
  {
    send100Messages((partition, key, counter) ->
    {
      Bytes value = counter == 77
          ? new Bytes(stringSerializer.serialize(TOPIC, "BOOM!"))
          : new Bytes(valueSerializer.serialize(TOPIC, counter));
      return new ProducerRecord<>(TOPIC, partition, key, value);
    });

    await("Consumer failed")
        .atMost(Duration.ofSeconds(30))
        .pollInterval(Duration.ofSeconds(1))
        .until(() -> !endlessConsumer.running());

    checkSeenOffsetsForProgress();
    compareToCommitedOffsets(newOffsets);

    endlessConsumer.start();
    await("Consumer failed again")
        .atMost(Duration.ofSeconds(30))
        .pollInterval(Duration.ofSeconds(1))
        .until(() -> !endlessConsumer.running());

    checkSeenOffsetsForProgress();
    compareToCommitedOffsets(newOffsets);
    assertThat(receivedRecords.size())
        .describedAs("Did not receive all sent events")
        .isLessThan(100);

    assertThatNoException()
        .describedAs("Consumer should not be running")
        .isThrownBy(() -> endlessConsumer.exitStatus());
    assertThat(endlessConsumer.exitStatus())
        .describedAs("Consumer should have exited abnormally")
        .containsInstanceOf(RecordDeserializationException.class);
  }


  /** Helper methods for the verification of expectations */

  void compareToCommitedOffsets(Map<TopicPartition, Long> offsetsToCheck)
  {
    doForCurrentOffsets((tp, offset) ->
    {
      Long expected = offsetsToCheck.get(tp) + 1;
      log.debug("Checking if the offset for {} is {}", tp, expected);
      assertThat(offset)
          .describedAs("Committed offset corresponds to the offset of the consumer")
          .isEqualTo(expected);
    });
  }

  void checkSeenOffsetsForProgress()
  {
    // Be sure that some messages were consumed...!
    Set<TopicPartition> withProgress = new HashSet<>();
    partitions().forEach(tp ->
    {
      Long oldOffset = oldOffsets.get(tp) + 1;
      Long newOffset = newOffsets.get(tp) + 1;
      if (!oldOffset.equals(newOffset))
      {
        log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
        withProgress.add(tp);
      }
    });
    assertThat(withProgress)
        .describedAs("Some offsets must have changed, compared to the old offset-positions")
        .isNotEmpty();
  }


  /** Helper methods for setting up and running the tests */

  void seekToEnd()
  {
    offsetConsumer.assign(partitions());
    partitions().forEach(tp ->
    {
      Long offset = offsetConsumer.position(tp);
      log.info("New position for {}: {}", tp, offset);
      Integer partition = tp.partition();
      StateDocument document =
          stateRepository
              .findById(partition.toString())
              .orElse(new StateDocument(partition));
      document.offset = offset;
      stateRepository.save(document);
    });
    offsetConsumer.unsubscribe();
  }

  void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
  {
    partitions().forEach(tp ->
    {
      String partition = Integer.toString(tp.partition());
      Optional<Long> offset = stateRepository.findById(partition).map(document -> document.offset);
      consumer.accept(tp, offset.orElse(0L));
    });
  }

  List<TopicPartition> partitions()
  {
    return
        IntStream
            .range(0, PARTITIONS)
            .mapToObj(partition -> new TopicPartition(TOPIC, partition))
            .collect(Collectors.toList());
  }
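

  /**
   * Record generation for the tests: send100Messages() sends 10 messages to
   * each of the 10 partitions. The key scheme partition*10 + key%2 yields
   * only two distinct keys per partition, so duplicate keys are sent on
   * purpose. The RecordGenerator callback lets each test decide how the
   * record values are serialized.
   */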
  public interface RecordGenerator<K, V>
  {
    public ProducerRecord<String, Bytes> generate(int partition, String key, long counter);
  }

  void send100Messages(RecordGenerator recordGenerator)
  {
    long i = 0;

    for (int partition = 0; partition < PARTITIONS; partition++)
    {
      for (int key = 0; key < 10; key++)
      {
        ProducerRecord<String, Bytes> record =
            recordGenerator.generate(partition, Integer.toString(partition*10+key%2), ++i);

        kafkaProducer.send(record, (metadata, e) ->
        {
          if (metadata != null)
          {
            log.debug(
                "{}|{} - {}={}",
                metadata.partition(),
                metadata.offset(),
                record.key(),
                record.value());
          }
          else
          {
            log.warn(
                "Exception for {}={}: {}",
                record.key(),
                record.value(),
                e.toString());
          }
        });
      }
    }
  }
  @BeforeEach
  public void init()
  {
    seekToEnd();

    oldOffsets = new HashMap<>();
    newOffsets = new HashMap<>();
    receivedRecords = new HashSet<>();

    doForCurrentOffsets((tp, offset) ->
    {
      oldOffsets.put(tp, offset - 1);
      newOffsets.put(tp, offset - 1);
    });

    TestRecordHandler<String, Long> captureOffsetAndExecuteTestHandler =
        new TestRecordHandler<String, Long>(recordHandler) {
          @Override
          public void onNewRecord(ConsumerRecord<String, Long> record)
          {
            newOffsets.put(
                new TopicPartition(record.topic(), record.partition()),
                record.offset());
            receivedRecords.add(record);
          }
        };

    endlessConsumer =
        new EndlessConsumer<>(
            executor,
            properties.getClientId(),
            properties.getTopic(),
            kafkaConsumer,
            rebalanceListener,
            captureOffsetAndExecuteTestHandler);

    endlessConsumer.start();
  }

  @AfterEach
  public void deinit()
  {
    try
    {
      endlessConsumer.stop();
    }
    catch (Exception e)
    {
      log.info("Exception while stopping the consumer: {}", e.toString());
    }
  }
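

  /**
   * Test-specific Spring configuration: imports the application's beans and
   * adds a value serializer, a producer for the test messages, and a second
   * consumer that is only used to look up the current partition positions.
   */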
  @TestConfiguration
  @Import(ApplicationConfiguration.class)
  public static class Configuration
  {
    @Bean
    Serializer<Long> serializer()
    {
      return new LongSerializer();
    }

    @Bean
    KafkaProducer<String, Bytes> kafkaProducer(ApplicationProperties properties)
    {
      Properties props = new Properties();
      props.put("bootstrap.servers", properties.getBootstrapServer());
      props.put("linger.ms", 100);
      props.put("key.serializer", StringSerializer.class.getName());
      props.put("value.serializer", BytesSerializer.class.getName());

      return new KafkaProducer<>(props);
    }

    @Bean
    KafkaConsumer<Bytes, Bytes> offsetConsumer(ApplicationProperties properties)
    {
      Properties props = new Properties();
      props.put("bootstrap.servers", properties.getBootstrapServer());
      props.put("client.id", "OFFSET-CONSUMER");
      props.put("enable.auto.commit", false);
      props.put("auto.offset.reset", "latest");
      props.put("key.deserializer", BytesDeserializer.class.getName());
      props.put("value.deserializer", BytesDeserializer.class.getName());

      return new KafkaConsumer<>(props);
    }
  }
}