package de.juplo.kafka;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.*;
import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.kafka.KafkaAutoConfiguration;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Import;
import org.springframework.context.annotation.Primary;
import org.springframework.kafka.support.serializer.JsonSerializer;
import org.springframework.kafka.test.context.EmbeddedKafka;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;

import java.time.Duration;
import java.util.*;
import java.util.concurrent.ExecutionException;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

import static de.juplo.kafka.ApplicationTests.PARTITIONS;
import static de.juplo.kafka.ApplicationTests.TOPIC;
import static org.assertj.core.api.Assertions.*;
import static org.awaitility.Awaitility.*;


@SpringJUnitConfig(
    initializers = ConfigDataApplicationContextInitializer.class,
    classes = {
        EndlessConsumer.class,
        KafkaAutoConfiguration.class,
        ApplicationTests.Configuration.class })
@TestPropertySource(
    properties = {
        "spring.kafka.consumer.bootstrap-servers=${spring.embedded.kafka.brokers}",
        "spring.kafka.producer.bootstrap-servers=${spring.embedded.kafka.brokers}",
        "consumer.topic=" + TOPIC })
@EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
@Slf4j
class ApplicationTests
{
  public static final String TOPIC = "FOO";
  public static final int PARTITIONS = 10;
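
  // Each test sends one message per key (0..9) to each of the PARTITIONS
  // partitions, i.e. 100 messages in total (see send100Messages() below).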

  StringSerializer stringSerializer = new StringSerializer();

  @Autowired
  Serializer<ClientMessage> valueSerializer;
  @Autowired
  KafkaProducer<String, Bytes> kafkaProducer;
  @Autowired
  KafkaConsumer<Bytes, Bytes> offsetConsumer;
  @Autowired
  ApplicationProperties applicationProperties;
  @Autowired
  KafkaProperties kafkaProperties;
  @Autowired
  EndlessConsumer endlessConsumer;
  @Autowired
  RecordHandler recordHandler;

  Map<TopicPartition, Long> oldOffsets;
  Map<TopicPartition, Long> newOffsets;
  Set<ConsumerRecord<String, ClientMessage>> receivedRecords;
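

  /** Happy path: all 100 messages are valid and should be consumed */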
  @Test
  void commitsCurrentOffsetsOnSuccess() throws ExecutionException, InterruptedException
  {
    send100Messages((key, counter) -> serialize(key, counter));

    await("100 records received")
        .atMost(Duration.ofSeconds(30))
        .until(() -> receivedRecords.size() == 100);

    await("Offsets committed")
        .atMost(Duration.ofSeconds(10))
        .untilAsserted(() ->
        {
          checkSeenOffsetsForProgress();
          compareToCommittedOffsets(newOffsets);
        });

    assertThat(endlessConsumer.isRunning())
        .describedAs("Consumer should still be running")
        .isTrue();
  }
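
  /**
   * One message is replaced by a plain string that cannot be deserialized
   * into a ClientMessage, so only 99 records ever reach the handler.
   */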
  @Test
  void commitsCurrentOffsetsOnDeserializationError()
  {
    send100Messages((key, counter) ->
        counter == 77 // poison exactly one of the 100 messages
            ? new Bytes(stringSerializer.serialize(TOPIC, "BOOM!"))
            : serialize(key, counter));

    await("99 records received")
        .atMost(Duration.ofSeconds(30))
        .until(() -> receivedRecords.size() == 99);

    await("Offsets committed")
        .atMost(Duration.ofSeconds(10))
        .untilAsserted(() ->
        {
          // This only works because valid messages are still read after
          // the message that triggers the deserialization error.
          // The MessageHandler never sees the offset of the faulty message!
          checkSeenOffsetsForProgress();
          compareToCommittedOffsets(newOffsets);
        });

    assertThat(endlessConsumer.isRunning())
        .describedAs("Consumer should still be running")
        .isTrue();
  }
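
  /**
   * The test-handler throws for every counter that is divisible by 10.
   * All 100 records are captured nevertheless, because the offsets are
   * recorded before the test-handler is invoked (see RecordHandler.accept()).
   */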
  @Test
  void commitsOffsetOnProgramLogicErrorFoo()
  {
    recordHandler.testHandler = (record) ->
    {
      if (Integer.parseInt(record.value().message) % 10 == 0)
        throw new RuntimeException("BOOM: " + record.value().message + "%10 == 0");
    };

    send100Messages((key, counter) -> serialize(key, counter));

    await("100 records received")
        .atMost(Duration.ofSeconds(30))
        .until(() -> receivedRecords.size() == 100);

    await("Offsets committed")
        .atMost(Duration.ofSeconds(10))
        .pollDelay(Duration.ofSeconds(1))
        .untilAsserted(() ->
        {
          checkSeenOffsetsForProgress();
          compareToCommittedOffsets(newOffsets);
        });

    assertThat(endlessConsumer.isRunning())
        .describedAs("Consumer should still be running")
        .isTrue();
  }


  /** Helper methods for the verification of expectations */

  void compareToCommittedOffsets(Map<TopicPartition, Long> offsetsToCheck)
  {
    doForCurrentOffsets((tp, offset) ->
    {
      Long expected = offsetsToCheck.get(tp) + 1;
      log.debug("TEST: Comparing the expected offset of {} for {} to {}", expected, tp, offset);
      assertThat(offset)
          .describedAs("Committed offset corresponds to the offset of the consumer")
          .isEqualTo(expected);
    });
  }
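
  // Note: the offset committed for a partition is the offset of the *next*
  // record to consume, hence the +1 above relative to the last seen offset.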
  void checkSeenOffsetsForProgress()
  {
    // Be sure that some messages were consumed...!
    Set<TopicPartition> withProgress = new HashSet<>();
    partitions().forEach(tp ->
    {
      Long oldOffset = oldOffsets.get(tp);
      Long newOffset = newOffsets.get(tp);
      if (!oldOffset.equals(newOffset))
      {
        log.debug("TEST: Progress for {}: {} -> {}", tp, oldOffset, newOffset);
        withProgress.add(tp);
      }
    });
    log.debug("TEST: Offsets with progress: {}", withProgress);
    assertThat(withProgress)
        .describedAs("Some offsets must have changed, compared to the old offset-positions")
        .isNotEmpty();
  }


  /** Helper methods for setting up and running the tests */
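
  /**
   * Hands the currently committed offset of every partition to the given
   * consumer. Works because the offsetConsumer uses the same group.id as
   * the application: position() on an assigned partition without a prior
   * poll() yields the offset committed for that group.
   */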
  void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
  {
    offsetConsumer.assign(partitions());
    partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
    offsetConsumer.unsubscribe();
  }

  List<TopicPartition> partitions()
  {
    return
        IntStream
            .range(0, PARTITIONS)
            .mapToObj(partition -> new TopicPartition(TOPIC, partition))
            .collect(Collectors.toList());
  }
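
  /**
   * Sends one message per key (0..9) to every partition. The __TypeId__
   * header has to be added manually, because the messages are produced as
   * raw Bytes: the consumer's JSON deserializer presumably resolves the
   * type id "message" to ClientMessage via its configured type mappings.
   */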
  void send100Messages(BiFunction<Integer, Long, Bytes> messageGenerator)
  {
    long i = 0;

    for (int partition = 0; partition < PARTITIONS; partition++)
    {
      for (int key = 0; key < 10; key++)
      {
        Bytes value = messageGenerator.apply(key, ++i);

        ProducerRecord<String, Bytes> record =
            new ProducerRecord<>(
                TOPIC,
                partition,
                Integer.toString(key%2),
                value);

        record.headers().add("__TypeId__", "message".getBytes());
        kafkaProducer.send(record, (metadata, e) ->
        {
          if (metadata != null)
          {
            log.debug(
                "TEST: Sending partition={}, offset={} - {}={}",
                metadata.partition(),
                metadata.offset(),
                record.key(),
                record.value());
          }
          else
          {
            log.error(
                "TEST: Exception for {}={}: {}",
                record.key(),
                record.value(),
                e.toString());
          }
        });
      }
    }
  }

  Bytes serialize(Integer key, Long value)
  {
    ClientMessage message = new ClientMessage();
    message.setClient(key.toString());
    message.setMessage(value.toString());
    return new Bytes(valueSerializer.serialize(TOPIC, message));
  }
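

  /**
   * Resets the test state before each test: a no-op test-handler, freshly
   * captured start offsets (position - 1, so that the first record read
   * counts as progress) and an empty record set; then starts the consumer.
   */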
  @BeforeEach
  public void init()
  {
    recordHandler.testHandler = (record) -> {};

    oldOffsets = new HashMap<>();
    newOffsets = new HashMap<>();
    receivedRecords = new HashSet<>();

    doForCurrentOffsets((tp, offset) ->
    {
      oldOffsets.put(tp, offset - 1);
      newOffsets.put(tp, offset - 1);
    });

    recordHandler.captureOffsets =
        record ->
        {
          receivedRecords.add(record);
          log.debug("TEST: Processing record #{}: {}", receivedRecords.size(), record.value());
          newOffsets.put(
              new TopicPartition(record.topic(), record.partition()),
              record.offset());
        };

    endlessConsumer.start();
  }

  @AfterEach
  public void deinit()
  {
    try
    {
      endlessConsumer.stop();
    }
    catch (Exception e)
    {
      log.info("TEST: Exception while stopping the consumer: {}", e.toString());
    }
  }
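

  /**
   * Composite handler: first captures offsets and received records for the
   * assertions, then delegates to the test-specific handler, which may throw.
   */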
  public static class RecordHandler implements Consumer<ConsumerRecord<String, ClientMessage>>
  {
    Consumer<ConsumerRecord<String, ClientMessage>> captureOffsets;
    Consumer<ConsumerRecord<String, ClientMessage>> testHandler;


    @Override
    public void accept(ConsumerRecord<String, ClientMessage> record)
    {
      captureOffsets
          .andThen(testHandler)
          .accept(record);
    }
  }
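

  // The @Primary test-handler overrides the record handler presumably
  // defined in ApplicationConfiguration, so that the EndlessConsumer
  // under test reports to the RecordHandler defined above.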
  @TestConfiguration
  @Import(ApplicationConfiguration.class)
  public static class Configuration
  {
    @Primary
    @Bean
    public Consumer<ConsumerRecord<String, ClientMessage>> testHandler()
    {
      return new RecordHandler();
    }

    @Bean
    Serializer<ClientMessage> serializer()
    {
      return new JsonSerializer<>();
    }
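
    // A plain KafkaProducer that writes pre-serialized Bytes values, so the
    // tests can also produce intentionally broken (non-JSON) messages.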
    @Bean
    KafkaProducer<String, Bytes> kafkaProducer(KafkaProperties properties)
    {
      Properties props = new Properties();
      props.put("bootstrap.servers", properties.getConsumer().getBootstrapServers());
      props.put("linger.ms", 100);
      props.put("key.serializer", StringSerializer.class.getName());
      props.put("value.serializer", BytesSerializer.class.getName());

      return new KafkaProducer<>(props);
    }
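
    // Uses the application's group.id, so that position() reveals the
    // offsets committed by the consumer-group under test.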
    @Bean
    KafkaConsumer<Bytes, Bytes> offsetConsumer(KafkaProperties properties)
    {
      Properties props = new Properties();
      props.put("bootstrap.servers", properties.getConsumer().getBootstrapServers());
      props.put("client.id", "OFFSET-CONSUMER");
      props.put("group.id", properties.getConsumer().getGroupId());
      props.put("key.deserializer", BytesDeserializer.class.getName());
      props.put("value.deserializer", BytesDeserializer.class.getName());

      return new KafkaConsumer<>(props);
    }
  }
}