The test checks the number of entries in the dead-letter topic (DLT)
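The DLT entries are captured by the autowired DeadLetterTopicConsumer, whose source is not part of this listing. A minimal sketch of what the test relies on — the listener details are assumptions; in particular, the `.DLT` suffix is only Spring Kafka's default naming convention for dead-letter topics:

package de.juplo.kafka;

import lombok.extern.slf4j.Slf4j;
import org.springframework.kafka.annotation.KafkaListener;

import java.util.Collections;
import java.util.LinkedList;
import java.util.List;

@Slf4j
public class DeadLetterTopicConsumer
{
	// Collected DLT records; the test clears this list in init() and asserts
	// its size against the number of generated poison pills / logic errors
	final List<String> messages = Collections.synchronizedList(new LinkedList<>());

	// Hypothetical listener: assumes Spring Kafka's default ".DLT" suffix
	// for the dead-letter topic of the configured input topic
	@KafkaListener(topics = "${sumup.adder.topic}.DLT")
	public void receive(String message)
	{
		log.info("Received message from the DLT: {}", message);
		messages.add(message);
	}
}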
[demos/kafka/training] src/test/java/de/juplo/kafka/GenericApplicationTests.java
package de.juplo.kafka;

import com.mongodb.client.MongoClient;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.*;
import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.boot.autoconfigure.mongo.MongoProperties;
import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Import;
import org.springframework.kafka.config.KafkaListenerEndpointRegistry;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.test.context.EmbeddedKafka;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;

import java.time.Duration;
import java.util.*;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

import static de.juplo.kafka.GenericApplicationTests.PARTITIONS;
import static de.juplo.kafka.GenericApplicationTests.TOPIC;
import static org.assertj.core.api.Assertions.*;
import static org.awaitility.Awaitility.*;


@SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
@TestPropertySource(
		properties = {
				"spring.kafka.bootstrap-servers=${spring.embedded.kafka.brokers}",
				"spring.kafka.producer.bootstrap-servers=${spring.embedded.kafka.brokers}",
				"sumup.adder.topic=" + TOPIC,
				"spring.kafka.consumer.auto-commit-interval=500ms",
				"spring.mongodb.embedded.version=4.4.13" })
@EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
@EnableAutoConfiguration
@AutoConfigureDataMongo
@Slf4j
abstract class GenericApplicationTests<K, V>
{
	public static final String TOPIC = "FOO";
	public static final int PARTITIONS = 10;


	@Autowired
	org.apache.kafka.clients.consumer.Consumer<K, V> kafkaConsumer;
	@Autowired
	KafkaProperties kafkaProperties;
	@Autowired
	ApplicationProperties applicationProperties;
	@Autowired
	MongoClient mongoClient;
	@Autowired
	MongoProperties mongoProperties;
	@Autowired
	KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry;
	@Autowired
	TestRecordHandler recordHandler;
	@Autowired
	DeadLetterTopicConsumer deadLetterTopicConsumer;
	@Autowired
	EndlessConsumer endlessConsumer;

	KafkaProducer<Bytes, Bytes> testRecordProducer;
	KafkaConsumer<Bytes, Bytes> offsetConsumer;
	Map<TopicPartition, Long> oldOffsets;


	final RecordGenerator recordGenerator;
	final Consumer<ProducerRecord<Bytes, Bytes>> messageSender;

	public GenericApplicationTests(RecordGenerator recordGenerator)
	{
		this.recordGenerator = recordGenerator;
		this.messageSender = this::sendMessage;
	}


	/** Test methods */

	@Test
	void commitsCurrentOffsetsOnSuccess() throws Exception
	{
		// Send only valid messages: no poison pills, no logic errors
		recordGenerator.generate(false, false, messageSender);

		int numberOfGeneratedMessages = recordGenerator.getNumberOfMessages();

		await(numberOfGeneratedMessages + " records received")
				.atMost(Duration.ofSeconds(30))
				.pollInterval(Duration.ofSeconds(1))
				.until(() -> recordHandler.receivedMessages >= numberOfGeneratedMessages);

		await("Offsets committed")
				.atMost(Duration.ofSeconds(10))
				.pollInterval(Duration.ofSeconds(1))
				.untilAsserted(() ->
				{
					checkSeenOffsetsForProgress();
					assertSeenOffsetsEqualCommittedOffsets(recordHandler.seenOffsets);
				});

		assertThat(endlessConsumer.running())
				.describedAs("Consumer should still be running")
				.isTrue();

		endlessConsumer.stop();
		recordGenerator.assertBusinessLogic();
	}

	@Test
	@SkipWhenErrorCannotBeGenerated(poisonPill = true)
	void commitsOffsetOfErrorForReprocessingOnDeserializationError()
	{
		// Send messages that include poison pills: records that cannot be deserialized
		recordGenerator.generate(true, false, messageSender);

		int numberOfValidMessages =
				recordGenerator.getNumberOfMessages() -
				recordGenerator.getNumberOfPoisonPills();

		await(numberOfValidMessages + " records received")
				.atMost(Duration.ofSeconds(30))
				.pollInterval(Duration.ofSeconds(1))
				.until(() -> recordHandler.receivedMessages >= numberOfValidMessages);
		await(recordGenerator.getNumberOfPoisonPills() + " poison-pills received")
				.atMost(Duration.ofSeconds(30))
				.pollInterval(Duration.ofSeconds(1))
				.until(() -> deadLetterTopicConsumer.messages.size() == recordGenerator.getNumberOfPoisonPills());

		await("Offsets committed")
				.atMost(Duration.ofSeconds(10))
				.pollInterval(Duration.ofSeconds(1))
				.untilAsserted(() ->
				{
					checkSeenOffsetsForProgress();
					assertSeenOffsetsEqualCommittedOffsets(recordHandler.seenOffsets);
				});

		assertThat(endlessConsumer.running())
				.describedAs("Consumer should still be running")
				.isTrue();

		endlessConsumer.stop();
		recordGenerator.assertBusinessLogic();
	}

	@Test
	@SkipWhenErrorCannotBeGenerated(logicError = true)
	void commitsOffsetsOfUnseenRecordsOnLogicError()
	{
		// Send messages that include logic errors: valid records that fail in the business logic
		recordGenerator.generate(false, true, messageSender);

		int numberOfValidMessages =
				recordGenerator.getNumberOfMessages() -
				recordGenerator.getNumberOfLogicErrors();

		await(numberOfValidMessages + " records received")
				.atMost(Duration.ofSeconds(30))
				.pollInterval(Duration.ofSeconds(1))
				.until(() -> recordHandler.receivedMessages >= numberOfValidMessages);
		await(recordGenerator.getNumberOfLogicErrors() + " logic-errors received")
				.atMost(Duration.ofSeconds(30))
				.pollInterval(Duration.ofSeconds(1))
				.until(() -> deadLetterTopicConsumer.messages.size() == recordGenerator.getNumberOfLogicErrors());

		await("Offsets committed")
				.atMost(Duration.ofSeconds(10))
				.pollInterval(Duration.ofSeconds(1))
				.untilAsserted(() ->
				{
					checkSeenOffsetsForProgress();
					assertSeenOffsetsEqualCommittedOffsets(recordHandler.seenOffsets);
				});

		assertThat(endlessConsumer.running())
				.describedAs("Consumer should still be running")
				.isTrue();

		endlessConsumer.stop();
		recordGenerator.assertBusinessLogic();
	}


	/** Helper methods for verifying expectations */

	void assertSeenOffsetsEqualCommittedOffsets(Map<TopicPartition, Long> offsetsToCheck)
	{
		doForCurrentOffsets((tp, offset) ->
		{
			Long expected = offsetsToCheck.get(tp) + 1;
			log.debug("Checking if the committed offset {} for {} is exactly {}", offset, tp, expected);
			assertThat(offset)
					.describedAs("Committed offset corresponds to the offset of the consumer")
					.isEqualTo(expected);
		});
	}

	void assertSeenOffsetsAreBehindCommittedOffsets(Map<TopicPartition, Long> offsetsToCheck)
	{
		List<Boolean> isOffsetBehindSeen = new LinkedList<>();

		doForCurrentOffsets((tp, offset) ->
		{
			Long expected = offsetsToCheck.get(tp) + 1;
			log.debug("Checking if the committed offset {} for {} is at most {}", offset, tp, expected);
			assertThat(offset)
					.describedAs("Committed offset must be at most equal to the offset of the consumer")
					.isLessThanOrEqualTo(expected);
			isOffsetBehindSeen.add(offset < expected);
		});

		assertThat(isOffsetBehindSeen.stream().reduce(false, (result, next) -> result | next))
				.describedAs("Committed offsets are behind seen offsets")
				.isTrue();
	}

	void checkSeenOffsetsForProgress()
	{
		// Make sure that some messages were actually consumed!
		Set<TopicPartition> withProgress = new HashSet<>();
		partitions().forEach(tp ->
		{
			Long oldOffset = oldOffsets.get(tp) + 1;
			Long newOffset = recordHandler.seenOffsets.get(tp) + 1;
			if (!oldOffset.equals(newOffset))
			{
				log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
				withProgress.add(tp);
			}
		});
		assertThat(withProgress)
				.describedAs("Some offsets must have changed, compared to the old offset positions")
				.isNotEmpty();
	}


	/** Helper methods for setting up and running the tests */

	void seekToEnd()
	{
		offsetConsumer.assign(partitions());
		offsetConsumer.seekToEnd(partitions());
		partitions().forEach(tp ->
		{
			// seekToEnd() works lazily: it only takes effect on poll()/position()
			Long offset = offsetConsumer.position(tp);
			log.info("New position for {}: {}", tp, offset);
		});
		// The new positions must be committed!
		offsetConsumer.commitSync();
		offsetConsumer.unsubscribe();
	}

	void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
	{
		offsetConsumer.assign(partitions());
		partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
		offsetConsumer.unsubscribe();
	}

	List<TopicPartition> partitions()
	{
		return
				IntStream
						.range(0, PARTITIONS)
						.mapToObj(partition -> new TopicPartition(TOPIC, partition))
						.collect(Collectors.toList());
	}


	public interface RecordGenerator
	{
		// Generates the test messages and hands them to the given sender;
		// poison pills and/or logic errors are only generated if requested
		void generate(
				boolean poisonPills,
				boolean logicErrors,
				Consumer<ProducerRecord<Bytes, Bytes>> messageSender);

		int getNumberOfMessages();
		int getNumberOfPoisonPills();
		int getNumberOfLogicErrors();

		default boolean canGeneratePoisonPill()
		{
			return true;
		}

		default boolean canGenerateLogicError()
		{
			return true;
		}

		default void assertBusinessLogic()
		{
			log.debug("No business logic to assert");
		}
	}

	void sendMessage(ProducerRecord<Bytes, Bytes> record)
	{
		testRecordProducer.send(record, (metadata, e) ->
		{
			if (metadata != null)
			{
				log.debug(
						"{}|{} - {}={}",
						metadata.partition(),
						metadata.offset(),
						record.key(),
						record.value());
			}
			else
			{
				log.warn(
						"Exception for {}={}: {}",
						record.key(),
						record.value(),
						e.toString());
			}
		});
	}


	@BeforeEach
	public void init()
	{
		Properties props;
		props = new Properties();
		props.put("bootstrap.servers", kafkaProperties.getBootstrapServers());
		props.put("linger.ms", 100);
		props.put("key.serializer", BytesSerializer.class.getName());
		props.put("value.serializer", BytesSerializer.class.getName());
		testRecordProducer = new KafkaProducer<>(props);

		props = new Properties();
		props.put("bootstrap.servers", kafkaProperties.getBootstrapServers());
		props.put("client.id", "OFFSET-CONSUMER");
		props.put("group.id", kafkaProperties.getConsumer().getGroupId());
		props.put("key.deserializer", BytesDeserializer.class.getName());
		props.put("value.deserializer", BytesDeserializer.class.getName());
		offsetConsumer = new KafkaConsumer<>(props);

		// Start with a clean state: drop all stored data and skip all pending messages
		mongoClient.getDatabase(mongoProperties.getDatabase()).drop();
		seekToEnd();

		oldOffsets = new HashMap<>();
		recordHandler.seenOffsets = new HashMap<>();
		recordHandler.receivedMessages = 0;

		deadLetterTopicConsumer.messages.clear();

		// Remember the current offsets as the starting positions for the assertions
		doForCurrentOffsets((tp, offset) ->
		{
			oldOffsets.put(tp, offset - 1);
			recordHandler.seenOffsets.put(tp, offset - 1);
		});

		endlessConsumer.start();
	}

	@AfterEach
	public void deinit()
	{
		try
		{
			endlessConsumer.stop();
		}
		catch (Exception e)
		{
			log.debug("{}", e.toString());
		}

		try
		{
			testRecordProducer.close();
			offsetConsumer.close();
		}
		catch (Exception e)
		{
			log.info("Exception while closing the test clients: {}", e.toString());
		}
	}


	@TestConfiguration
	@Import(ApplicationConfiguration.class)
	public static class Configuration
	{
		@Bean
		public RecordHandler recordHandler(RecordHandler applicationRecordHandler)
		{
			return new TestRecordHandler(applicationRecordHandler);
		}

		@Bean(destroyMethod = "close")
		public org.apache.kafka.clients.consumer.Consumer<String, Message> kafkaConsumer(ConsumerFactory<String, Message> factory)
		{
			return factory.createConsumer();
		}

		@Bean
		public DeadLetterTopicConsumer deadLetterTopicConsumer()
		{
			return new DeadLetterTopicConsumer();
		}
	}
}
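
GenericApplicationTests is abstract: a concrete test class binds the type parameters and supplies a RecordGenerator. A hypothetical minimal subclass, only to illustrate the contract — the class and field names, the message format, and the 1-in-10 poison-pill ratio are assumptions, not code from the repository:

package de.juplo.kafka;

import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.utils.Bytes;

import java.util.function.Consumer;

class ExampleApplicationTests extends GenericApplicationTests<String, String>
{
	ExampleApplicationTests()
	{
		super(new SimpleRecordGenerator());
	}

	// Hypothetical generator: sends 100 records and, if requested, turns
	// every 10th record into a poison pill that cannot be deserialized
	static class SimpleRecordGenerator implements RecordGenerator
	{
		final int numberOfMessages = 100;
		int numberOfPoisonPills = 0;

		@Override
		public void generate(
				boolean poisonPills,
				boolean logicErrors,
				Consumer<ProducerRecord<Bytes, Bytes>> messageSender)
		{
			numberOfPoisonPills = poisonPills ? numberOfMessages / 10 : 0;

			for (int i = 0; i < numberOfMessages; i++)
			{
				String value = poisonPills && i % 10 == 0
						? "BOOM!" // Not a valid message for the application's deserializer
						: Integer.toString(i);

				messageSender.accept(new ProducerRecord<>(
						TOPIC,
						i % PARTITIONS,
						new Bytes(Integer.toString(i % 10).getBytes()),
						new Bytes(value.getBytes())));
			}
		}

		@Override
		public int getNumberOfMessages() { return numberOfMessages; }
		@Override
		public int getNumberOfPoisonPills() { return numberOfPoisonPills; }
		@Override
		public int getNumberOfLogicErrors() { return 0; }

		// This generator cannot produce logic errors, so the test annotated with
		// @SkipWhenErrorCannotBeGenerated(logicError = true) is skipped for it
		@Override
		public boolean canGenerateLogicError() { return false; }
	}
}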