package de.juplo.kafka;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.*;
import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Import;
import org.springframework.kafka.test.context.EmbeddedKafka;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;

import java.time.Duration;
import java.util.*;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.function.BiConsumer;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

import static de.juplo.kafka.ApplicationTests.PARTITIONS;
import static de.juplo.kafka.ApplicationTests.TOPIC;
import static org.assertj.core.api.Assertions.*;
import static org.awaitility.Awaitility.*;
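
/**
 * Integration test for the sumup-adder consumer: {@link EmbeddedKafka} provides a broker
 * with {@value #PARTITIONS} partitions of topic {@value #TOPIC}, and
 * {@link AutoConfigureDataMongo} starts the embedded MongoDB that backs the
 * {@link PartitionStatisticsRepository}, in which this variant stores its state and offsets.
 */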
@SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
@TestPropertySource(
    properties = {
        "sumup.adder.bootstrap-server=${spring.embedded.kafka.brokers}",
        "sumup.adder.topic=" + TOPIC,
        "sumup.adder.commit-interval=1s",
        "spring.mongodb.embedded.version=4.4.13" })
@EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
@EnableAutoConfiguration
@AutoConfigureDataMongo
@Slf4j
class ApplicationTests
{
  public static final String TOPIC = "FOO";
  public static final int PARTITIONS = 10;

  StringSerializer stringSerializer = new StringSerializer();

  @Autowired
  Serializer<Long> valueSerializer;
  @Autowired
  KafkaProducer<String, Bytes> kafkaProducer;
  @Autowired
  KafkaConsumer<String, String> kafkaConsumer;
  @Autowired
  KafkaConsumer<Bytes, Bytes> offsetConsumer;
  @Autowired
  PartitionStatisticsRepository partitionStatisticsRepository;
  @Autowired
  ApplicationProperties properties;
  @Autowired
  ExecutorService executor;
  @Autowired
  PartitionStatisticsRepository repository;
  @Autowired
  AdderRebalanceListener adderRebalanceListener;
  @Autowired
  AdderRecordHandler adderRecordHandler;

  EndlessConsumer<String, String> endlessConsumer;
  Map<TopicPartition, Long> oldOffsets;
  Map<TopicPartition, Long> newOffsets;
  Set<ConsumerRecord<String, String>> receivedRecords;
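
  // The four fields above hold the per-test state: init() (below) resets them before
  // each test, the wrapped record handler fills newOffsets/receivedRecords while
  // consuming, and the verification helpers compare the snapshots afterwards.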

  /** Test methods */

  @Test
  @Order(1) // order annotation reconstructed; the class uses MethodOrderer.OrderAnnotation
  @Disabled("Temporarily disabled until the test case has been adapted")
  void commitsCurrentOffsetsOnSuccess() throws ExecutionException, InterruptedException
  {
    send100Messages((partition, key, counter) ->
    {
      Bytes value = new Bytes(valueSerializer.serialize(TOPIC, counter));
      return new ProducerRecord<>(TOPIC, partition, key, value);
    });

    await("100 records received")
        .atMost(Duration.ofSeconds(30))
        .pollInterval(Duration.ofSeconds(1))
        .until(() -> receivedRecords.size() >= 100);

    await("Offsets committed")
        .atMost(Duration.ofSeconds(10))
        .pollInterval(Duration.ofSeconds(1))
        .untilAsserted(() ->
        {
          checkSeenOffsetsForProgress();
          compareToCommitedOffsets(newOffsets);
        });

    assertThatExceptionOfType(IllegalStateException.class)
        .describedAs("Consumer should still be running")
        .isThrownBy(() -> endlessConsumer.exitStatus());
  }

  /** Helper methods for the verification of expectations */

  void compareToCommitedOffsets(Map<TopicPartition, Long> offsetsToCheck)
  {
    doForCurrentOffsets((tp, offset) ->
    {
      Long expected = offsetsToCheck.get(tp) + 1;
      log.debug("Checking if the offset for {} is {}", tp, expected);
      assertThat(offset)
          .describedAs("Committed offset corresponds to the offset of the consumer")
          .isEqualTo(expected);
    });
  }
  void checkSeenOffsetsForProgress()
  {
    // Make sure that some messages were actually consumed
    Set<TopicPartition> withProgress = new HashSet<>();
    partitions().forEach(tp ->
    {
      Long oldOffset = oldOffsets.get(tp) + 1;
      Long newOffset = newOffsets.get(tp) + 1;
      if (!oldOffset.equals(newOffset))
      {
        log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
        withProgress.add(tp);
      }
    });
    assertThat(withProgress)
        .describedAs("Some offsets must have changed, compared to the old offset-positions")
        .isNotEmpty();
  }

  /** Helper methods for setting up and running the tests */

  // Reconstructed signature: the method name seekToEnd() is an assumption. The method
  // fast-forwards the persisted offsets to the current end of each partition: because
  // the offsetConsumer is configured with auto.offset.reset=latest and never commits,
  // position() yields the end offset, which is then stored in the partition's StateDocument.
  void seekToEnd()
  {
    offsetConsumer.assign(partitions());
    partitions().forEach(tp ->
    {
      Long offset = offsetConsumer.position(tp);
      log.info("New position for {}: {}", tp, offset);
      Integer partition = tp.partition();
      StateDocument document =
          partitionStatisticsRepository
              .findById(partition.toString())
              .orElse(new StateDocument(partition));
      document.offset = offset;
      partitionStatisticsRepository.save(document);
    });
    offsetConsumer.unsubscribe();
  }
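
  // Note: in this variant the "committed" offsets live in MongoDB (StateDocument.offset),
  // not in Kafka's __consumer_offsets topic, so the helper below reads them from the
  // repository instead of asking the broker.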
  void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
  {
    partitions().forEach(tp ->
    {
      String partition = Integer.toString(tp.partition());
      Optional<Long> offset = partitionStatisticsRepository.findById(partition).map(document -> document.offset);
      consumer.accept(tp, offset.orElse(0L));
    });
  }

  List<TopicPartition> partitions()
  {
    return
        IntStream
            .range(0, PARTITIONS)
            .mapToObj(partition -> new TopicPartition(TOPIC, partition))
            .collect(Collectors.toList());
  }
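
  // send100Messages() writes 10 records into each of the 10 partitions; the
  // RecordGenerator callback lets the test decide how key and (serialized) value
  // are built, so the same driver can feed different test scenarios.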
  public interface RecordGenerator<K, V>
  {
    ProducerRecord<String, Bytes> generate(int partition, String key, long counter);
  }

  void send100Messages(RecordGenerator recordGenerator)
  {
    long i = 0;

    for (int partition = 0; partition < PARTITIONS; partition++)
    {
      for (int key = 0; key < 10; key++)
      {
        // Note: % binds tighter than +, so the key is partition*10 or partition*10+1
        ProducerRecord<String, Bytes> record =
            recordGenerator.generate(partition, Integer.toString(partition * 10 + (key % 2)), ++i);

        kafkaProducer.send(record, (metadata, e) ->
        {
          // Logging arguments reconstructed from the elided lines of both branches
          if (metadata != null)
          {
            log.debug(
                "{}|{} - {}={}",
                metadata.partition(),
                metadata.offset(),
                record.key(),
                record.value());
          }
          else
          {
            log.warn(
                "Exception for {}={}: {}",
                record.key(),
                record.value(),
                e.toString());
          }
        });
      }
    }
  }

  @BeforeEach
  public void init()
  {
    seekToEnd(); // reconstructed call: resets the stored offsets before each test

    oldOffsets = new HashMap<>();
    newOffsets = new HashMap<>();
    receivedRecords = new HashSet<>();

    doForCurrentOffsets((tp, offset) ->
    {
      oldOffsets.put(tp, offset - 1);
      newOffsets.put(tp, offset - 1);
    });

    TestRecordHandler<String, String> captureOffsetAndExecuteTestHandler =
        new TestRecordHandler<String, String>(adderRecordHandler) {
          @Override
          public void onNewRecord(ConsumerRecord<String, String> record)
          {
            newOffsets.put(
                new TopicPartition(record.topic(), record.partition()),
                record.offset());
            receivedRecords.add(record);
          }
        };

    // The executor and kafkaConsumer arguments are restored from the elided lines
    endlessConsumer =
        new EndlessConsumer<>(
            executor,
            properties.getClientId(),
            properties.getTopic(),
            kafkaConsumer,
            adderRebalanceListener,
            captureOffsetAndExecuteTestHandler);

    endlessConsumer.start();
  }
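
  // Per-test teardown (method name deinit() is a reconstruction): stop the consumer
  // so the next test starts cleanly; an exception during stop is only logged rather
  // than failing the test.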

  @AfterEach
  public void deinit()
  {
    try
    {
      endlessConsumer.stop();
    }
    catch (Exception e)
    {
      log.info("Exception while stopping the consumer: {}", e.toString());
    }
  }
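
  // Test slice configuration: importing ApplicationConfiguration pulls in the beans of
  // the application under test; the additional beans below provide the Long serializer,
  // the test producer, and the Bytes consumer used to read back the partition positions.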

  @TestConfiguration
  @Import(ApplicationConfiguration.class)
  public static class Configuration
  {
    @Bean
    Serializer<Long> serializer()
    {
      return new LongSerializer();
    }

    @Bean
    KafkaProducer<String, Bytes> kafkaProducer(ApplicationProperties properties)
    {
      Properties props = new Properties();
      props.put("bootstrap.servers", properties.getBootstrapServer());
      props.put("linger.ms", 100);
      props.put("key.serializer", StringSerializer.class.getName());
      props.put("value.serializer", BytesSerializer.class.getName());

      return new KafkaProducer<>(props);
    }

    @Bean
    KafkaConsumer<Bytes, Bytes> offsetConsumer(ApplicationProperties properties)
    {
      Properties props = new Properties();
      props.put("bootstrap.servers", properties.getBootstrapServer());
      props.put("client.id", "OFFSET-CONSUMER");
      props.put("enable.auto.commit", false);
      props.put("auto.offset.reset", "latest");
      props.put("key.deserializer", BytesDeserializer.class.getName());
      props.put("value.deserializer", BytesDeserializer.class.getName());

      return new KafkaConsumer<>(props);
    }
  }
}