package de.juplo.kafka;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.RecordDeserializationException;
import org.apache.kafka.common.serialization.*;
import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Import;
import org.springframework.kafka.support.serializer.JsonSerializer;
import org.springframework.kafka.test.context.EmbeddedKafka;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;

import java.time.Duration;
import java.time.LocalDateTime;
import java.util.*;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

import static de.juplo.kafka.ApplicationTests.PARTITIONS;
import static de.juplo.kafka.ApplicationTests.TOPIC;
import static org.assertj.core.api.Assertions.*;
import static org.awaitility.Awaitility.*;
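
/*
 * Integration test for EndlessConsumer: records are produced into an embedded
 * Kafka cluster, and the offsets committed by the consumer group are verified
 * against the positions the test has observed itself.
 */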
@SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
@TestPropertySource(
        properties = {
                "consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
                "consumer.topic=" + TOPIC,
                "consumer.commit-interval=1s" })
@EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
@Slf4j
class ApplicationTests
{
    public static final String TOPIC = "FOO";
    public static final int PARTITIONS = 10;

    StringSerializer stringSerializer = new StringSerializer();

    @Autowired
    Serializer valueSerializer;
    @Autowired
    KafkaProducer<String, Bytes> kafkaProducer;
    @Autowired
    KafkaConsumer<String, ValidMessage> kafkaConsumer;
    @Autowired
    KafkaConsumer<Bytes, Bytes> offsetConsumer;
    @Autowired
    ApplicationProperties properties;
    @Autowired
    ExecutorService executor;

    Consumer<ConsumerRecord<String, ValidMessage>> testHandler;
    EndlessConsumer<String, ValidMessage> endlessConsumer;
    Map<TopicPartition, Long> oldOffsets;
    Map<TopicPartition, Long> newOffsets;
    Set<ConsumerRecord<String, ValidMessage>> receivedRecords;
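
    /** Test methods */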

    @Test
    @Order(1) // consumption of the sent messages is proven first
    void commitsCurrentOffsetsOnSuccess() throws ExecutionException, InterruptedException
    {
        send100Messages((partition, key, counter) -> {
            // type ids must match the consumer's JSON type mappings
            // (assumed here: "message" and "greeting")
            Bytes value;
            String type;
            if (counter % 3 != 0) {
                value = serializeClientMessage(key, counter);
                type = "message";
            } else {
                value = serializeGreeting(key, counter);
                type = "greeting";
            }
            return toRecord(partition, key, value, type);
        });

        await("100 records received")
                .atMost(Duration.ofSeconds(30))
                .pollInterval(Duration.ofSeconds(1))
                .until(() -> receivedRecords.size() >= 100);

        await("Offsets committed")
                .atMost(Duration.ofSeconds(10))
                .pollInterval(Duration.ofSeconds(1))
                .untilAsserted(() -> {
                    checkSeenOffsetsForProgress();
                    compareToCommittedOffsets(newOffsets);
                });

        assertThatExceptionOfType(IllegalStateException.class)
                .isThrownBy(() -> endlessConsumer.exitStatus())
                .describedAs("Consumer should still be running");
    }
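
    /*
     * This test injects a single poison pill among otherwise valid records: the
     * consumer must fail with a RecordDeserializationException, and the committed
     * offset must point at the broken record, so it is re-read after a restart.
     */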
    @Test
    @Order(2)
    void commitsOffsetOfErrorForReprocessingOnDeserializationError()
    {
        send100Messages((partition, key, counter) -> {
            Bytes value;
            String type;
            if (counter == 77) {
                // the one poison pill: a message type the consumer cannot handle
                // (the concrete counter value is an assumption; any single record works)
                value = serializeFooMessage(key, counter);
                type = "foo";
            } else if (counter % 3 != 0) {
                value = serializeClientMessage(key, counter);
                type = "message";
            } else {
                value = serializeGreeting(key, counter);
                type = "greeting";
            }
            return toRecord(partition, key, value, type);
        });

        await("Consumer failed")
                .atMost(Duration.ofSeconds(30))
                .pollInterval(Duration.ofSeconds(1))
                .until(() -> !endlessConsumer.running());

        checkSeenOffsetsForProgress();
        compareToCommittedOffsets(newOffsets);

        // Restarting the consumer must make it fail on the very same record again
        endlessConsumer.start();
        await("Consumer failed again")
                .atMost(Duration.ofSeconds(30))
                .pollInterval(Duration.ofSeconds(1))
                .until(() -> !endlessConsumer.running());

        checkSeenOffsetsForProgress();
        compareToCommittedOffsets(newOffsets);
        assertThat(receivedRecords.size())
                .describedAs("Did not receive all sent events")
                .isLessThan(100);

        assertThatNoException()
                .describedAs("Consumer should not be running")
                .isThrownBy(() -> endlessConsumer.exitStatus());
        assertThat(endlessConsumer.exitStatus())
                .describedAs("Consumer should have exited abnormally")
                .containsInstanceOf(RecordDeserializationException.class);
    }

    /** Helper methods for the verification of expectations */
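
    // Note: Kafka's committed offset denotes the offset of the *next* record a
    // consumer will read, which is why the checks below add 1 to the offsets of
    // the records that were actually seen.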

    void compareToCommittedOffsets(Map<TopicPartition, Long> offsetsToCheck)
    {
        doForCurrentOffsets((tp, offset) -> {
            Long expected = offsetsToCheck.get(tp) + 1;
            log.debug("Checking if the offset for {} is {}", tp, expected);
            assertThat(offset)
                    .describedAs("Committed offset corresponds to the offset of the consumer")
                    .isEqualTo(expected);
        });
    }

    void checkSeenOffsetsForProgress()
    {
        // Make sure that some messages were actually consumed!
        Set<TopicPartition> withProgress = new HashSet<>();
        partitions().forEach(tp -> {
            Long oldOffset = oldOffsets.get(tp) + 1;
            Long newOffset = newOffsets.get(tp) + 1;
            if (!oldOffset.equals(newOffset)) {
                log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
                withProgress.add(tp);
            }
        });
        assertThat(withProgress)
                .describedAs("Some offsets must have changed, compared to the old offset positions")
                .isNotEmpty();
    }

    /** Helper methods for setting up and running the tests */
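
    // Each test starts from a clean slate: seekToEnd() skips over whatever is
    // still stored in the topic and commits the end positions, so a test never
    // sees records produced by a previous test.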
    void seekToEnd()
    {
        offsetConsumer.assign(partitions());
        offsetConsumer.seekToEnd(partitions());
        partitions().forEach(tp -> {
            // seekToEnd() works lazily: it only takes effect on poll()/position()
            Long offset = offsetConsumer.position(tp);
            log.info("New position for {}: {}", tp, offset);
        });
        // The new positions must be committed!
        offsetConsumer.commitSync();
        offsetConsumer.unsubscribe();
    }

    void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
    {
        offsetConsumer.assign(partitions());
        partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
        offsetConsumer.unsubscribe();
    }

    List<TopicPartition> partitions()
    {
        return IntStream
                .range(0, PARTITIONS)
                .mapToObj(partition -> new TopicPartition(TOPIC, partition))
                .collect(Collectors.toList());
    }
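
    // Strategy interface through which each test decides what is sent: the tests
    // above plug in generators that produce valid or deliberately broken records.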
    public interface RecordGenerator
    {
        ProducerRecord<String, Bytes> generate(int partition, String key, long counter);
    }

    void send100Messages(RecordGenerator recordGenerator)
    {
        long i = 0;

        for (int partition = 0; partition < 10; partition++) {
            for (int key = 0; key < 10; key++) {
                ProducerRecord<String, Bytes> record =
                        recordGenerator.generate(partition, Integer.toString(partition * 10 + key % 2), ++i);

                kafkaProducer.send(record, (metadata, e) -> {
                    if (metadata != null) {
                        log.debug(
                                "{}|{} - {}={}",
                                metadata.partition(), metadata.offset(), record.key(), record.value());
                    } else {
                        log.warn(
                                "Exception for {}={}: {}",
                                record.key(), record.value(), e.toString());
                    }
                });
            }
        }
    }

    ProducerRecord<String, Bytes> toRecord(int partition, String key, Bytes value, String type)
    {
        ProducerRecord<String, Bytes> record =
                new ProducerRecord<>(TOPIC, partition, key, value);
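        // Spring Kafka's JsonDeserializer resolves the target class from the
        // __TypeId__ header, so this header decides whether deserialization succeeds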
        record.headers().add("__TypeId__", type.getBytes());
        return record;
    }

    Bytes serializeClientMessage(String key, Long value)
    {
        TestClientMessage message = new TestClientMessage(key, value.toString());
        return new Bytes(valueSerializer.serialize(TOPIC, message));
    }

    Bytes serializeGreeting(String key, Long value)
    {
        TestGreeting message = new TestGreeting(key, LocalDateTime.now());
        return new Bytes(valueSerializer.serialize(TOPIC, message));
    }
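
    // TestFooMessage is presumably not part of the consumer's type mappings:
    // records serialized this way are the poison pills used in the error test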
    Bytes serializeFooMessage(String key, Long value)
    {
        TestFooMessage message = new TestFooMessage(key, value);
        return new Bytes(valueSerializer.serialize(TOPIC, message));
    }

    @BeforeEach
    public void init()
    {
        testHandler = record -> {};

        seekToEnd();

        oldOffsets = new HashMap<>();
        newOffsets = new HashMap<>();
        receivedRecords = new HashSet<>();
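        // Remember the positions before the test as "last seen offset"; hence
        // the -1, because position() returns the offset of the next record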
        doForCurrentOffsets((tp, offset) -> {
            oldOffsets.put(tp, offset - 1);
            newOffsets.put(tp, offset - 1);
        });

        Consumer<ConsumerRecord<String, ValidMessage>> captureOffsetAndExecuteTestHandler =
                record -> {
                    newOffsets.put(
                            new TopicPartition(record.topic(), record.partition()),
                            record.offset());
                    receivedRecords.add(record);
                    testHandler.accept(record);
                };

        endlessConsumer =
                new EndlessConsumer<>(
                        executor,
                        properties.getClientId(),
                        properties.getTopic(),
                        kafkaConsumer,
                        captureOffsetAndExecuteTestHandler);

        endlessConsumer.start();
    }

    @AfterEach
    public void deinit()
    {
        try {
            endlessConsumer.stop();
        }
        catch (Exception e) {
            log.info("Exception while stopping the consumer: {}", e.toString());
        }
    }

    @TestConfiguration
    @Import(ApplicationConfiguration.class)
    public static class Configuration
    {
        @Bean
        Serializer<ValidMessage> serializer()
        {
            return new JsonSerializer<>();
        }
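
        // The test producer writes pre-serialized Bytes values, which gives the
        // tests full control over the payload, including invalid ones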
        @Bean
        KafkaProducer<String, Bytes> kafkaProducer(ApplicationProperties properties)
        {
            Properties props = new Properties();
            props.put("bootstrap.servers", properties.getBootstrapServer());
            props.put("linger.ms", 100);
            props.put("key.serializer", StringSerializer.class.getName());
            props.put("value.serializer", BytesSerializer.class.getName());

            return new KafkaProducer<>(props);
        }
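
        // The offset consumer joins the *same* consumer group as the application,
        // so position() reflects the offsets the application has committed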
        @Bean
        KafkaConsumer<Bytes, Bytes> offsetConsumer(ApplicationProperties properties)
        {
            Properties props = new Properties();
            props.put("bootstrap.servers", properties.getBootstrapServer());
            props.put("client.id", "OFFSET-CONSUMER");
            props.put("group.id", properties.getGroupId());
            props.put("key.deserializer", BytesDeserializer.class.getName());
            props.put("value.deserializer", BytesDeserializer.class.getName());

            return new KafkaConsumer<>(props);
        }
    }
}