package de.juplo.kafka;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.*;
import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Import;
import org.springframework.kafka.test.context.EmbeddedKafka;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;

import java.time.Duration;
import java.util.*;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.function.BiConsumer;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

import static de.juplo.kafka.ApplicationTests.PARTITIONS;
import static de.juplo.kafka.ApplicationTests.TOPIC;
import static org.assertj.core.api.Assertions.*;
import static org.awaitility.Awaitility.*;

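/**
 * Integration test for the EndlessConsumer: spins up an embedded Kafka broker
 * with 10 partitions together with an embedded MongoDB instance, sends 100
 * messages, and verifies that the running consumer commits the offsets it has
 * seen to MongoDB via the PartitionStatisticsRepository.
 */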
@SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
@TestPropertySource(
    properties = {
        "consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
        "consumer.topic=" + TOPIC,
        "consumer.commit-interval=1s",
        "spring.mongodb.embedded.version=4.4.13" })
@EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
@EnableAutoConfiguration
@AutoConfigureDataMongo
@Slf4j
class ApplicationTests
{
  public static final String TOPIC = "FOO";
  public static final int PARTITIONS = 10;

  StringSerializer stringSerializer = new StringSerializer();

  @Autowired
  Serializer<Long> valueSerializer;
  @Autowired
  KafkaProducer<String, Bytes> kafkaProducer;
  @Autowired
  KafkaConsumer<String, String> kafkaConsumer;
  @Autowired
  KafkaConsumer<Bytes, Bytes> offsetConsumer;
  @Autowired
  PartitionStatisticsRepository partitionStatisticsRepository;
  @Autowired
  ApplicationProperties properties;
  @Autowired
  ExecutorService executor;
  @Autowired
  PartitionStatisticsRepository repository;
  @Autowired
  SumRebalanceListener sumRebalanceListener;
  @Autowired
  SumRecordHandler sumRecordHandler;

  EndlessConsumer<String, String> endlessConsumer;
  Map<TopicPartition, Long> oldOffsets;
  Map<TopicPartition, Long> newOffsets;
  Set<ConsumerRecord<String, String>> receivedRecords;

  /** Test methods */

  @Test
  void commitsCurrentOffsetsOnSuccess() throws ExecutionException, InterruptedException
  {
    send100Messages((partition, key, counter) ->
    {
      Bytes value = new Bytes(valueSerializer.serialize(TOPIC, counter));
      return new ProducerRecord<>(TOPIC, partition, key, value);
    });

    await("100 records received")
        .atMost(Duration.ofSeconds(30))
        .pollInterval(Duration.ofSeconds(1))
        .until(() -> receivedRecords.size() >= 100);

    await("Offsets committed")
        .atMost(Duration.ofSeconds(10))
        .pollInterval(Duration.ofSeconds(1))
        .untilAsserted(() ->
        {
          checkSeenOffsetsForProgress();
          compareToCommitedOffsets(newOffsets);
        });

    assertThatExceptionOfType(IllegalStateException.class)
        .describedAs("Consumer should still be running")
        .isThrownBy(() -> endlessConsumer.exitStatus());
  }

  /** Helper methods for the verification of expectations */

  void compareToCommitedOffsets(Map<TopicPartition, Long> offsetsToCheck)
  {
    doForCurrentOffsets((tp, offset) ->
    {
      Long expected = offsetsToCheck.get(tp) + 1;
      log.debug("Checking if the offset for {} is {}", tp, expected);
      assertThat(offset)
          .describedAs("Committed offset corresponds to the offset of the consumer")
          .isEqualTo(expected);
    });
  }

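  /**
   * Asserts that consumption has made progress: the seen offset of at least
   * one partition must have advanced since the snapshot taken in init().
   */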
  void checkSeenOffsetsForProgress()
  {
    // Make sure that some messages were actually consumed...!
    Set<TopicPartition> withProgress = new HashSet<>();
    partitions().forEach(tp ->
    {
      Long oldOffset = oldOffsets.get(tp) + 1;
      Long newOffset = newOffsets.get(tp) + 1;
      if (!oldOffset.equals(newOffset))
      {
        log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
        withProgress.add(tp);
      }
    });
    assertThat(withProgress)
        .describedAs("Some offsets must have changed, compared to the old offset-positions")
        .isNotEmpty();
  }

  /** Helper methods for setting up and running the tests */

  /**
   * Fast-forwards the offsets stored in MongoDB to the current end of each
   * partition, so that every test starts from a clean baseline.
   */
  void seekToEnd()
  {
    offsetConsumer.assign(partitions());
    partitions().forEach(tp ->
    {
      Long offset = offsetConsumer.position(tp);
      log.info("New position for {}: {}", tp, offset);
      Integer partition = tp.partition();
      StatisticsDocument document =
          partitionStatisticsRepository
              .findById(partition.toString())
              .orElse(new StatisticsDocument(partition));
      document.offset = offset;
      partitionStatisticsRepository.save(document);
    });
    offsetConsumer.unsubscribe();
  }

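  /**
   * Looks up the offset stored in MongoDB for every partition and hands it to
   * the given callback; partitions without a stored document are reported as 0.
   */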
  void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
  {
    partitions().forEach(tp ->
    {
      String partition = Integer.toString(tp.partition());
      Optional<Long> offset = partitionStatisticsRepository.findById(partition).map(document -> document.offset);
      consumer.accept(tp, offset.orElse(0L));
    });
  }

  List<TopicPartition> partitions()
  {
    return
        IntStream
            .range(0, PARTITIONS)
            .mapToObj(partition -> new TopicPartition(TOPIC, partition))
            .collect(Collectors.toList());
  }

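  /** Strategy interface used by send100Messages() to create each record. */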
  public interface RecordGenerator
  {
    ProducerRecord<String, Bytes> generate(int partition, String key, long counter);
  }

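  /**
   * Produces 100 messages: 10 per partition, numbered by a running counter and
   * keyed so that only two distinct keys (partition*10 and partition*10 + 1)
   * occur per partition.
   */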
  void send100Messages(RecordGenerator recordGenerator)
  {
    long i = 0;

    for (int partition = 0; partition < 10; partition++)
    {
      for (int key = 0; key < 10; key++)
      {
        ProducerRecord<String, Bytes> record =
            recordGenerator.generate(partition, Integer.toString(partition * 10 + key % 2), ++i);

        kafkaProducer.send(record, (metadata, e) ->
        {
          if (metadata != null)
          {
            // Sent successfully: log partition|offset - key=value
            log.debug("{}|{} - {}={}", metadata.partition(), metadata.offset(), record.key(), record.value());
          }
          else
          {
            log.warn("Exception for {}={}: {}", record.key(), record.value(), e.toString());
          }
        });
      }
    }
  }

  @BeforeEach
  public void init()
  {
    seekToEnd();

    oldOffsets = new HashMap<>();
    newOffsets = new HashMap<>();
    receivedRecords = new HashSet<>();

    doForCurrentOffsets((tp, offset) ->
    {
      oldOffsets.put(tp, offset - 1);
      newOffsets.put(tp, offset - 1);
    });

    TestRecordHandler<String, String> captureOffsetAndExecuteTestHandler =
        new TestRecordHandler<String, String>(sumRecordHandler)
        {
          @Override
          public void onNewRecord(ConsumerRecord<String, String> record)
          {
            newOffsets.put(new TopicPartition(record.topic(), record.partition()), record.offset());
            receivedRecords.add(record);
          }
        };

    endlessConsumer =
        new EndlessConsumer<>(
            executor,
            properties.getClientId(),
            properties.getTopic(),
            kafkaConsumer,
            sumRebalanceListener,
            captureOffsetAndExecuteTestHandler);

    endlessConsumer.start();
  }

  @AfterEach
  public void deinit()
  {
    try
    {
      endlessConsumer.stop();
    }
    catch (Exception e)
    {
      log.info("Exception while stopping the consumer: {}", e.toString());
    }
  }

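  /**
   * Test context: imports the production configuration and adds the beans the
   * test needs itself - the value serializer, a producer for the test
   * messages, and a separate consumer that is only used to look up the current
   * end offsets of the partitions.
   */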
  @TestConfiguration
  @Import(ApplicationConfiguration.class)
  public static class Configuration
  {
    @Bean
    Serializer<Long> serializer()
    {
      return new LongSerializer();
    }

    @Bean
    KafkaProducer<String, Bytes> kafkaProducer(ApplicationProperties properties)
    {
      Properties props = new Properties();
      props.put("bootstrap.servers", properties.getBootstrapServer());
      props.put("linger.ms", 100);
      props.put("key.serializer", StringSerializer.class.getName());
      props.put("value.serializer", BytesSerializer.class.getName());

      return new KafkaProducer<>(props);
    }

    @Bean
    KafkaConsumer<Bytes, Bytes> offsetConsumer(ApplicationProperties properties)
    {
      Properties props = new Properties();
      props.put("bootstrap.servers", properties.getBootstrapServer());
      props.put("client.id", "OFFSET-CONSUMER");
      props.put("enable.auto.commit", false);
      props.put("auto.offset.reset", "latest");
      props.put("key.deserializer", BytesDeserializer.class.getName());
      props.put("value.deserializer", BytesDeserializer.class.getName());

      return new KafkaConsumer<>(props);
    }
  }
}