package de.juplo.kafka;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.*;
import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.kafka.KafkaAutoConfiguration;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Import;
import org.springframework.context.annotation.Primary;
import org.springframework.kafka.support.serializer.JsonSerializer;
import org.springframework.kafka.test.context.EmbeddedKafka;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;

import java.time.Duration;
import java.util.*;
import java.util.concurrent.ExecutionException;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

import static de.juplo.kafka.ApplicationTests.PARTITIONS;
import static de.juplo.kafka.ApplicationTests.TOPIC;
import static org.assertj.core.api.Assertions.*;
import static org.awaitility.Awaitility.*;
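
// Integration test for the EndlessConsumer, running against an embedded Kafka
// broker. The topic FOO is created with 10 partitions, and producer as well as
// consumer are pointed at the embedded broker through the
// spring.embedded.kafka.brokers placeholder.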

@SpringJUnitConfig(
    initializers = ConfigDataApplicationContextInitializer.class,
    classes = {
        EndlessConsumer.class,
        KafkaAutoConfiguration.class,
        ApplicationTests.Configuration.class })
@TestPropertySource(
    properties = {
        "spring.kafka.consumer.bootstrap-servers=${spring.embedded.kafka.brokers}",
        "spring.kafka.producer.bootstrap-servers=${spring.embedded.kafka.brokers}",
        "consumer.topic=" + TOPIC })
@EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
@Slf4j
class ApplicationTests
{
    public static final String TOPIC = "FOO";
    public static final int PARTITIONS = 10;

    StringSerializer stringSerializer = new StringSerializer();

    @Autowired
    Serializer<ClientMessage> valueSerializer;
    @Autowired
    KafkaProducer<String, Bytes> kafkaProducer;
    @Autowired
    org.apache.kafka.clients.consumer.Consumer<String, ClientMessage> kafkaConsumer;
    @Autowired
    KafkaConsumer<Bytes, Bytes> offsetConsumer;
    @Autowired
    ApplicationProperties applicationProperties;
    @Autowired
    KafkaProperties kafkaProperties;
    @Autowired
    EndlessConsumer endlessConsumer;
    @Autowired
    RecordHandler recordHandler;

    Map<TopicPartition, Long> oldOffsets;
    Map<TopicPartition, Long> newOffsets;
    Set<ConsumerRecord<String, ClientMessage>> receivedRecords;
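
    // Bookkeeping for the tests: oldOffsets records the positions of all
    // partitions before the test, newOffsets is updated for every record the
    // consumer processes (see init() below). Comparing the two shows whether
    // the consumer made progress.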

    /** Test methods */

    @Test
    void commitsCurrentOffsetsOnSuccess() throws ExecutionException, InterruptedException
    {
        send100Messages((key, counter) -> serialize(key, counter));

        await("100 records received")
            .atMost(Duration.ofSeconds(30))
            .until(() -> receivedRecords.size() == 100);

        await("Offsets committed")
            .atMost(Duration.ofSeconds(10))
            .untilAsserted(() ->
            {
                checkSeenOffsetsForProgress();
                compareToCommitedOffsets(newOffsets);
            });

        assertThat(endlessConsumer.isRunning())
            .describedAs("Consumer should still be running")
            .isTrue();
    }
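
    // Kafka commits the offset of the *next* record to be consumed. This is
    // why compareToCommitedOffsets() expects the committed offset to be one
    // greater than the offset of the last record seen for a partition.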
    @Test
    void commitsCurrentOffsetsOnDeserializationError()
    {
        // Poison exactly one of the 100 messages so that it cannot be
        // deserialized as a ClientMessage
        send100Messages((key, counter) ->
            counter == 77
                ? new Bytes(stringSerializer.serialize(TOPIC, "BOOM!"))
                : serialize(key, counter));

        await("99 records received")
            .atMost(Duration.ofSeconds(30))
            .until(() -> receivedRecords.size() == 99);

        await("Offsets committed")
            .atMost(Duration.ofSeconds(10))
            .untilAsserted(() ->
            {
                // This only works because valid messages are still read after
                // the message that triggers the deserialization error.
                // The MessageHandler never sees the offset of the faulty
                // message!
                checkSeenOffsetsForProgress();
                compareToCommitedOffsets(newOffsets);
            });

        assertThat(endlessConsumer.isRunning())
            .describedAs("Consumer should still be running")
            .isTrue();
    }
    @Test
    void commitsOffsetOnProgramLogicErrorFoo()
    {
        recordHandler.testHandler = (record) ->
        {
            if (Integer.parseInt(record.value().message) % 10 == 0)
                throw new RuntimeException("BOOM: " + record.value().message + " % 10 == 0");
        };

        send100Messages((key, counter) -> serialize(key, counter));

        await("100 records received")
            .atMost(Duration.ofSeconds(30))
            .until(() -> receivedRecords.size() == 100);

        await("Offsets committed")
            .atMost(Duration.ofSeconds(10))
            .pollDelay(Duration.ofSeconds(1))
            .untilAsserted(() ->
            {
                checkSeenOffsetsForProgress();
                compareToCommitedOffsets(newOffsets);
            });

        assertThat(endlessConsumer.isRunning())
            .describedAs("Consumer should still be running")
            .isTrue();
    }
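
    // This test presumes that EndlessConsumer catches the RuntimeException
    // thrown by the testHandler and keeps polling: all 100 records are
    // received, offsets are committed, and the consumer stays running.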

    /** Helper methods for the verification of expectations */

    void compareToCommitedOffsets(Map<TopicPartition, Long> offsetsToCheck)
    {
        doForCurrentOffsets((tp, offset) ->
        {
            Long expected = offsetsToCheck.get(tp) + 1;
            log.debug("TEST: Comparing the expected offset of {} for {} to {}", expected, tp, offset);
            assertThat(offset)
                .describedAs("Committed offset corresponds to the offset of the consumer")
                .isEqualTo(expected);
        });
    }

    void checkSeenOffsetsForProgress()
    {
        // Be sure that some messages were consumed...!
        Set<TopicPartition> withProgress = new HashSet<>();
        partitions().forEach(tp ->
        {
            Long oldOffset = oldOffsets.get(tp);
            Long newOffset = newOffsets.get(tp);
            if (!oldOffset.equals(newOffset))
            {
                log.debug("TEST: Progress for {}: {} -> {}", tp, oldOffset, newOffset);
                withProgress.add(tp);
            }
        });
        log.debug("TEST: Offsets with progress: {}", withProgress);
        assertThat(withProgress)
            .describedAs("Some offsets must have changed, compared to the old offset-positions")
            .isNotEmpty();
    }

    /** Helper methods for setting up and running the tests */

    void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
    {
        offsetConsumer.assign(partitions());
        partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
        offsetConsumer.unsubscribe();
    }
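
    // position() returns the offset of the next record to be fetched. Because
    // the offsetConsumer never polls, this is the committed offset of the
    // shared consumer group (or the auto.offset.reset position, if nothing
    // has been committed yet).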

    List<TopicPartition> partitions()
    {
        return
            IntStream
                .range(0, PARTITIONS)
                .mapToObj(partition -> new TopicPartition(TOPIC, partition))
                .collect(Collectors.toList());
    }

    void send100Messages(BiFunction<Integer, Long, Bytes> messageGenerator)
    {
        long i = 0;

        for (int partition = 0; partition < PARTITIONS; partition++)
        {
            for (int key = 0; key < 10; key++)
            {
                Bytes value = messageGenerator.apply(key, ++i);

                ProducerRecord<String, Bytes> record =
                    new ProducerRecord<>(
                        TOPIC,
                        partition,
                        Integer.toString(key % 2),
                        value);

                record.headers().add("__TypeId__", "message".getBytes());
                kafkaProducer.send(record, (metadata, e) ->
                {
                    if (metadata != null)
                    {
                        log.debug(
                            "TEST: Sending partition={}, offset={} - {}={}",
                            metadata.partition(),
                            metadata.offset(),
                            record.key(),
                            record.value());
                    }
                    else
                    {
                        log.error(
                            "TEST: Exception for {}={}: {}",
                            record.key(),
                            record.value(),
                            e.toString());
                    }
                });
            }
        }
    }

    Bytes serialize(Integer key, Long value)
    {
        ClientMessage message = new ClientMessage();
        message.setClient(key.toString());
        message.setMessage(value.toString());
        return new Bytes(valueSerializer.serialize(TOPIC, message));
    }

    @BeforeEach
    public void init()
    {
        recordHandler.testHandler = (record) -> {};

        oldOffsets = new HashMap<>();
        newOffsets = new HashMap<>();
        receivedRecords = new HashSet<>();

        // position() points at the *next* offset, so offset - 1 is the last
        // record that was actually consumed before the test started
        doForCurrentOffsets((tp, offset) ->
        {
            oldOffsets.put(tp, offset - 1);
            newOffsets.put(tp, offset - 1);
        });

        recordHandler.captureOffsets =
            record ->
            {
                receivedRecords.add(record);
                log.debug("TEST: Processing record #{}: {}", receivedRecords.size(), record.value());
                newOffsets.put(
                    new TopicPartition(record.topic(), record.partition()),
                    record.offset());
            };

        endlessConsumer.start();
    }

    @AfterEach
    public void deinit()
    {
        try
        {
            endlessConsumer.stop();
        }
        catch (Exception e)
        {
            log.info("TEST: Exception while stopping the consumer: {}", e.toString());
        }
    }

    public static class RecordHandler implements Consumer<ConsumerRecord<String, ClientMessage>>
    {
        Consumer<ConsumerRecord<String, ClientMessage>> captureOffsets;
        Consumer<ConsumerRecord<String, ClientMessage>> testHandler;


        @Override
        public void accept(ConsumerRecord<String, ClientMessage> record)
        {
            captureOffsets
                .andThen(testHandler)
                .accept(record);
        }
    }
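
    // andThen() runs captureOffsets before testHandler, so the bookkeeping in
    // the test happens even if the testHandler subsequently throws (as in
    // commitsOffsetOnProgramLogicErrorFoo).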

    @TestConfiguration
    @Import(ApplicationConfiguration.class)
    public static class Configuration
    {
        @Primary
        @Bean
        public Consumer<ConsumerRecord<String, ClientMessage>> testHandler()
        {
            return new RecordHandler();
        }

        @Bean
        Serializer<ClientMessage> serializer()
        {
            return new JsonSerializer<>();
        }

        @Bean
        KafkaProducer<String, Bytes> kafkaProducer(KafkaProperties properties)
        {
            Properties props = new Properties();
            props.put("bootstrap.servers", properties.getConsumer().getBootstrapServers());
            props.put("linger.ms", 100);
            props.put("key.serializer", StringSerializer.class.getName());
            props.put("value.serializer", BytesSerializer.class.getName());

            return new KafkaProducer<>(props);
        }
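
        // The offsetConsumer below deliberately joins the same consumer group
        // as the tested consumer and reads keys and values as raw Bytes: it
        // is only used to inspect the group's committed offsets via
        // position(), never to deserialize payloads.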

        @Bean
        KafkaConsumer<Bytes, Bytes> offsetConsumer(KafkaProperties properties)
        {
            Properties props = new Properties();
            props.put("bootstrap.servers", properties.getConsumer().getBootstrapServers());
            props.put("client.id", "OFFSET-CONSUMER");
            props.put("group.id", properties.getConsumer().getGroupId());
            props.put("key.deserializer", BytesDeserializer.class.getName());
            props.put("value.deserializer", BytesDeserializer.class.getName());

            return new KafkaConsumer<>(props);
        }
    }
}