package de.juplo.kafka;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.RecordDeserializationException;
import org.apache.kafka.common.serialization.*;
import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.kafka.KafkaAutoConfiguration;
import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Import;
import org.springframework.context.annotation.Primary;
import org.springframework.kafka.test.context.EmbeddedKafka;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;

import java.time.Duration;
import java.util.*;
import java.util.concurrent.ExecutionException;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

import static de.juplo.kafka.ApplicationTests.PARTITIONS;
import static de.juplo.kafka.ApplicationTests.TOPIC;
import static org.assertj.core.api.Assertions.*;
import static org.awaitility.Awaitility.*;

@SpringJUnitConfig(
    initializers = ConfigDataApplicationContextInitializer.class,
    classes = {
        EndlessConsumer.class,
        KafkaAutoConfiguration.class,
        ApplicationTests.Configuration.class })
@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
@TestPropertySource(
    properties = {
        "consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
        "consumer.topic=" + TOPIC })
@EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
@Slf4j
class ApplicationTests
{
  public static final String TOPIC = "FOO";
  public static final int PARTITIONS = 10;

  StringSerializer stringSerializer = new StringSerializer();

  @Autowired
  Serializer<Long> valueSerializer;
  @Autowired
  KafkaProducer<String, Bytes> kafkaProducer;
  @Autowired
  KafkaConsumer<Bytes, Bytes> offsetConsumer;
  @Autowired
  ApplicationProperties properties;
  @Autowired
  EndlessConsumer endlessConsumer;
  @Autowired
  RecordHandler recordHandler;
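
  // Bookkeeping for the assertions: the offsets seen before and during the test,
  // and all records that could be deserialized successfully.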
  Map<TopicPartition, Long> oldOffsets;
  Map<TopicPartition, Long> newOffsets;
  Set<ConsumerRecord<String, Long>> receivedRecords;

  /** Test methods */

  @Test
  @Order(1) // << The poison pill is not skipped. Hence, this test must run first
  void commitsCurrentOffsetsOnSuccess() throws ExecutionException, InterruptedException
  {
    send100Messages(i -> new Bytes(valueSerializer.serialize(TOPIC, i)));

    await("100 records received")
        .atMost(Duration.ofSeconds(30))
        .until(() -> receivedRecords.size() >= 100);

    await("Offsets committed")
        .atMost(Duration.ofSeconds(10))
        .untilAsserted(() ->
        {
          checkSeenOffsetsForProgress();
          compareToCommittedOffsets(newOffsets);
        });

    assertThatExceptionOfType(IllegalStateException.class)
        .describedAs("Consumer should still be running")
        .isThrownBy(() -> endlessConsumer.exitStatus());
  }
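
  // The second test sends one message whose value was serialized as a String.
  // A String value cannot be deserialized as a Long, so it acts as the poison
  // pill that terminates the consumer.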
  @Test
  @Order(2)
  void commitsOffsetOfErrorForReprocessingOnError()
  {
    send100Messages(counter ->
        counter == 77 // assumed position of the poison pill; the exact counter value is not recoverable from the fragment
            ? new Bytes(stringSerializer.serialize(TOPIC, "BOOM!"))
            : new Bytes(valueSerializer.serialize(TOPIC, counter)));

    await("Consumer failed")
        .atMost(Duration.ofSeconds(30))
        .untilAsserted(() -> checkSeenOffsetsForProgress());

    compareToCommittedOffsets(newOffsets);
    assertThat(receivedRecords.size())
        .describedAs("Received not all sent events")
        .isLessThan(100);

    assertThatNoException()
        .describedAs("Consumer should not be running")
        .isThrownBy(() -> endlessConsumer.exitStatus());
    assertThat(endlessConsumer.exitStatus())
        .describedAs("Consumer should have exited abnormally")
        .containsInstanceOf(RecordDeserializationException.class);
  }

  /** Helper methods for the verification of expectations */
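
  // The committed offset is the offset of the next record to consume. Hence,
  // the expected value is the offset of the last record seen for a partition
  // plus one.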
  void compareToCommittedOffsets(Map<TopicPartition, Long> offsetsToCheck)
  {
    doForCurrentOffsets((tp, offset) ->
    {
      Long expected = offsetsToCheck.get(tp) + 1;
      log.debug("Checking, if the offset for {} is {}", tp, expected);
      assertThat(offset)
          .describedAs("Committed offset corresponds to the offset of the consumer")
          .isEqualTo(expected);
    });
  }

  void checkSeenOffsetsForProgress()
  {
    // Be sure that some messages were consumed...!
    Set<TopicPartition> withProgress = new HashSet<>();
    partitions().forEach(tp ->
    {
      Long oldOffset = oldOffsets.get(tp);
      Long newOffset = newOffsets.get(tp);
      if (!oldOffset.equals(newOffset))
      {
        log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
        withProgress.add(tp);
      }
    });
    assertThat(withProgress)
        .describedAs("Some offsets must have changed, compared to the old offset-positions")
        .isNotEmpty();
  }

  /** Helper methods for setting up and running the tests */
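
  // Reads the current position of every partition with the extra offsetConsumer.
  // Since that consumer shares the group.id of the application and never polls,
  // position() reports the committed offsets of the consumer group (or the
  // initial position, if nothing has been committed yet).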
  void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
  {
    offsetConsumer.assign(partitions());
    partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
    offsetConsumer.unsubscribe();
  }

  List<TopicPartition> partitions()
  {
    return
        IntStream
            .range(0, PARTITIONS)
            .mapToObj(partition -> new TopicPartition(TOPIC, partition))
            .collect(Collectors.toList());
  }
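
  // Sends one message to each of the 10 partitions per round, for 10 rounds:
  // exactly 100 messages in total, with the key alternating between "0" and "1".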
  void send100Messages(Function<Long, Bytes> messageGenerator)
  {
    long i = 0;

    for (int partition = 0; partition < 10; partition++)
    {
      for (int key = 0; key < 10; key++)
      {
        Bytes value = messageGenerator.apply(++i);

        ProducerRecord<String, Bytes> record =
            new ProducerRecord<>(
                TOPIC,
                partition,
                Integer.toString(key%2),
                value);

        kafkaProducer.send(record, (metadata, e) ->
        {
          if (metadata != null)
          {
            log.debug(
                "{}|{} - {}={}",
                metadata.partition(),
                metadata.offset(),
                record.key(),
                record.value());
          }
          else
          {
            log.warn(
                "Exception for {}={}: {}",
                record.key(),
                record.value(),
                e.toString());
          }
        });
      }
    }
  }

  @BeforeEach
  public void init()
  {
    recordHandler.testHandler = (record) -> {};

    oldOffsets = new HashMap<>();
    newOffsets = new HashMap<>();
    receivedRecords = new HashSet<>();

    doForCurrentOffsets((tp, offset) ->
    {
      oldOffsets.put(tp, offset - 1);
      newOffsets.put(tp, offset - 1);
    });

    recordHandler.captureOffsets =
        record ->
        {
          receivedRecords.add(record);
          newOffsets.put(
              new TopicPartition(record.topic(), record.partition()),
              record.offset());
        };

    endlessConsumer.start();
  }

  @AfterEach
  public void deinit()
  {
    try
    {
      endlessConsumer.stop();
    }
    catch (Exception e)
    {
      log.info("Exception while stopping the consumer: {}", e.toString());
    }
  }
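
  // Test double that the configuration below injects into the application as the
  // record handler: chains the offset-capturing callback with a per-test handler.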
  public static class RecordHandler implements Consumer<ConsumerRecord<String, Long>>
  {
    Consumer<ConsumerRecord<String, Long>> captureOffsets;
    Consumer<ConsumerRecord<String, Long>> testHandler;

    @Override
    public void accept(ConsumerRecord<String, Long> record)
    {
      captureOffsets
          .andThen(testHandler)
          .accept(record);
    }
  }

  @TestConfiguration
  @Import(ApplicationConfiguration.class)
  public static class Configuration
  {
    @Primary
    @Bean
    public Consumer<ConsumerRecord<String, Long>> testHandler()
    {
      return new RecordHandler();
    }

    @Bean
    Serializer<Long> serializer()
    {
      return new LongSerializer();
    }
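
    // The producer sends pre-serialized Bytes values, so the tests can hand in
    // valid Long payloads as well as broken ones.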
    @Bean
    KafkaProducer<String, Bytes> kafkaProducer(ApplicationProperties properties)
    {
      Properties props = new Properties();
      props.put("bootstrap.servers", properties.getBootstrapServer());
      props.put("linger.ms", 100);
      props.put("key.serializer", StringSerializer.class.getName());
      props.put("value.serializer", BytesSerializer.class.getName());

      return new KafkaProducer<>(props);
    }
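
    // Extra consumer that shares the group.id of the application, so that its
    // position() calls reveal the offsets committed by the consumer group.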
    @Bean
    KafkaConsumer<Bytes, Bytes> offsetConsumer(ApplicationProperties properties)
    {
      Properties props = new Properties();
      props.put("bootstrap.servers", properties.getBootstrapServer());
      props.put("client.id", "OFFSET-CONSUMER");
      props.put("group.id", properties.getGroupId());
      props.put("key.deserializer", BytesDeserializer.class.getName());
      props.put("value.deserializer", BytesDeserializer.class.getName());

      return new KafkaConsumer<>(props);
    }
  }
}