package de.juplo.kafka;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.*;
import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Import;
import org.springframework.kafka.test.context.EmbeddedKafka;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;

import java.time.Clock;
import java.time.Duration;
import java.util.*;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.function.BiConsumer;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

import static de.juplo.kafka.ApplicationTests.PARTITIONS;
import static de.juplo.kafka.ApplicationTests.TOPIC;
import static org.assertj.core.api.Assertions.*;
import static org.awaitility.Awaitility.*;


@SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
@TestPropertySource(
    properties = {
        "consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
        "consumer.topic=" + TOPIC,
        "consumer.commit-interval=1s",
        "spring.mongodb.embedded.version=4.4.13" })
@EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
@EnableAutoConfiguration
@AutoConfigureDataMongo
@Slf4j
class ApplicationTests
{
  public static final String TOPIC = "FOO";
  public static final int PARTITIONS = 10;
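

  /*
   * The producer and serializer beans used below are provided by the
   * test-local Configuration class at the bottom of this file; the other
   * autowired collaborators are expected to come from the imported
   * ApplicationConfiguration.
   */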
  StringSerializer stringSerializer = new StringSerializer();

  @Autowired
  Serializer<Long> valueSerializer;
  @Autowired
  KafkaProducer<String, Bytes> kafkaProducer;
  @Autowired
  KafkaConsumer<String, String> kafkaConsumer;
  @Autowired
  PartitionStatisticsRepository partitionStatisticsRepository;
  @Autowired
  ApplicationProperties properties;
  @Autowired
  ExecutorService executor;
  @Autowired
  PartitionStatisticsRepository repository;
  @Autowired
  WordcountRebalanceListener wordcountRebalanceListener;
  @Autowired
  WordcountRecordHandler wordcountRecordHandler;

  EndlessConsumer<String, String> endlessConsumer;
  Map<TopicPartition, Long> oldOffsets;
  Map<TopicPartition, Long> newOffsets;
  Set<ConsumerRecord<String, String>> receivedRecords;
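

  /**
   * Happy path: sends 100 messages, waits until all of them have been
   * received, then waits until the seen offsets have been committed.
   * Finally it asserts that the consumer is still running: exitStatus()
   * throws an IllegalStateException as long as the consumer is alive.
   */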
  @Test
  void commitsCurrentOffsetsOnSuccess() throws ExecutionException, InterruptedException
  {
    send100Messages(i -> new Bytes(valueSerializer.serialize(TOPIC, i)));

    await("100 records received")
        .atMost(Duration.ofSeconds(30))
        .until(() -> receivedRecords.size() >= 100);

    await("Offsets committed")
        .atMost(Duration.ofSeconds(10))
        .untilAsserted(() ->
        {
          checkSeenOffsetsForProgress();
          compareToCommittedOffsets(newOffsets);
        });

    assertThatExceptionOfType(IllegalStateException.class)
        .describedAs("Consumer should still be running")
        .isThrownBy(() -> endlessConsumer.exitStatus());
  }


  /** Helper methods for the verification of expectations */

  void compareToCommittedOffsets(Map<TopicPartition, Long> offsetsToCheck)
  {
    doForCurrentOffsets((tp, offset) ->
    {
      Long expected = offsetsToCheck.get(tp) + 1;
      log.debug("Checking if the offset for {} is {}", tp, expected);
      assertThat(offset)
          .describedAs("Committed offset corresponds to the offset of the consumer")
          .isEqualTo(expected);
    });
  }

  void checkSeenOffsetsForProgress()
  {
    // Be sure that some messages were actually consumed...!
    Set<TopicPartition> withProgress = new HashSet<>();
    partitions().forEach(tp ->
    {
      Long oldOffset = oldOffsets.get(tp);
      Long newOffset = newOffsets.get(tp);
      if (!oldOffset.equals(newOffset))
      {
        log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
        withProgress.add(tp);
      }
    });
    assertThat(withProgress)
        .describedAs("Some offsets must have changed, compared to the old offset-positions")
        .isNotEmpty();
  }


  /** Helper methods for setting up and running the tests */

  void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
  {
    // The committed offsets are read from MongoDB, keyed by the partition number
    partitions().forEach(tp ->
    {
      String partition = Integer.toString(tp.partition());
      Optional<Long> offset = partitionStatisticsRepository.findById(partition).map(document -> document.offset);
      consumer.accept(tp, offset.orElse(0L));
    });
  }

  List<TopicPartition> partitions()
  {
    return
        IntStream
            .range(0, PARTITIONS)
            .mapToObj(partition -> new TopicPartition(TOPIC, partition))
            .collect(Collectors.toList());
  }
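

  /*
   * Writes 10 messages into each of the 10 partitions (100 in total).
   * The target partition is chosen explicitly, so the key (which
   * alternates between "0" and "1") does not influence partitioning.
   */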
  void send100Messages(Function<Long, Bytes> messageGenerator)
  {
    long i = 0;

    for (int partition = 0; partition < PARTITIONS; partition++)
    {
      for (int key = 0; key < 10; key++)
      {
        Bytes value = messageGenerator.apply(++i);

        ProducerRecord<String, Bytes> record =
            new ProducerRecord<>(
                TOPIC,
                partition,
                Integer.toString(key % 2),
                value);

        kafkaProducer.send(record, (metadata, e) ->
        {
          if (metadata != null)
          {
            // Success: log the partition/offset the record was written to
            log.debug(
                "{}|{} - {}={}",
                metadata.partition(),
                metadata.offset(),
                record.key(),
                record.value());
          }
          else
          {
            // Failure: log the cause
            log.warn(
                "Exception for {}={}: {}",
                record.key(),
                record.value(),
                e.toString());
          }
        });
      }
    }
  }
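

  /*
   * Seeds oldOffsets/newOffsets with the currently committed offsets
   * (minus one, so that a partition without new records does not count
   * as progress) and wraps the production record-handler in a
   * TestRecordHandler that additionally captures every received record
   * and its offset.
   */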
  @BeforeEach
  public void init()
  {
    oldOffsets = new HashMap<>();
    newOffsets = new HashMap<>();
    receivedRecords = new HashSet<>();

    doForCurrentOffsets((tp, offset) ->
    {
      oldOffsets.put(tp, offset - 1);
      newOffsets.put(tp, offset - 1);
    });

    TestRecordHandler<String, String> captureOffsetAndExecuteTestHandler =
        new TestRecordHandler<String, String>(wordcountRecordHandler) {
          @Override
          public void onNewRecord(ConsumerRecord<String, String> record)
          {
            newOffsets.put(
                new TopicPartition(record.topic(), record.partition()),
                record.offset());
            receivedRecords.add(record);
          }
        };

    endlessConsumer =
        new EndlessConsumer<>(
            executor,
            properties.getClientId(),
            properties.getTopic(),
            kafkaConsumer,
            wordcountRebalanceListener,
            captureOffsetAndExecuteTestHandler);

    endlessConsumer.start();
  }
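
  /* Stop the consumer after each test; exceptions during shutdown are only logged. */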
  @AfterEach
  public void deinit()
  {
    try
    {
      endlessConsumer.stop();
    }
    catch (Exception e)
    {
      log.info("Exception while stopping the consumer: {}", e.toString());
    }
  }
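

  /*
   * Test-local bean definitions: a Long-serializer for generating the test
   * messages and a producer that sends the pre-serialized values as Bytes.
   * All remaining beans are expected to come from the imported
   * ApplicationConfiguration.
   */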
  @TestConfiguration
  @Import(ApplicationConfiguration.class)
  public static class Configuration
  {
    @Bean
    Serializer<Long> serializer()
    {
      return new LongSerializer();
    }

    @Bean
    KafkaProducer<String, Bytes> kafkaProducer(ApplicationProperties properties)
    {
      Properties props = new Properties();
      props.put("bootstrap.servers", properties.getBootstrapServer());
      props.put("linger.ms", 100);
      props.put("key.serializer", StringSerializer.class.getName());
      props.put("value.serializer", BytesSerializer.class.getName());

      return new KafkaProducer<>(props);
    }
  }
}