X-Git-Url: https://juplo.de/gitweb/?a=blobdiff_plain;f=src%2Ftest%2Fjava%2Fde%2Fjuplo%2Fkafka%2FGenericApplicationTests.java;h=b019373250d2c5c15062d43c4b262d5d7486f5e8;hb=d2eb370acf1a2195c36421ffc471f67cb4a8e86e;hp=1aacb945c5d62495ef45d01aad5669e8bfa3cc0b;hpb=27768041f2c2f4b1cbb8c45c9a5d665490050f76;p=demos%2Fkafka%2Ftraining

diff --git a/src/test/java/de/juplo/kafka/GenericApplicationTests.java b/src/test/java/de/juplo/kafka/GenericApplicationTests.java
index 1aacb94..b019373 100644
--- a/src/test/java/de/juplo/kafka/GenericApplicationTests.java
+++ b/src/test/java/de/juplo/kafka/GenericApplicationTests.java
@@ -1,5 +1,6 @@
 package de.juplo.kafka;
 
+import com.mongodb.client.MongoClient;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
@@ -11,6 +12,9 @@ import org.apache.kafka.common.serialization.*;
 import org.apache.kafka.common.utils.Bytes;
 import org.junit.jupiter.api.*;
 import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
+import org.springframework.boot.autoconfigure.mongo.MongoProperties;
+import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
 import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
 import org.springframework.boot.test.context.TestConfiguration;
 import org.springframework.context.annotation.Import;
@@ -35,10 +39,13 @@ import static org.awaitility.Awaitility.*;
 @SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
 @TestPropertySource(
 		properties = {
-				"consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
-				"consumer.topic=" + TOPIC,
-				"consumer.commit-interval=1s" })
+				"sumup.adder.bootstrap-server=${spring.embedded.kafka.brokers}",
+				"sumup.adder.topic=" + TOPIC,
+				"sumup.adder.commit-interval=500ms",
+				"spring.mongodb.embedded.version=4.4.13" })
 @EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
+@EnableAutoConfiguration
+@AutoConfigureDataMongo
 @Slf4j
 abstract class GenericApplicationTests<K, V>
 {
@@ -54,6 +61,16 @@ abstract class GenericApplicationTests<K, V>
 	ApplicationProperties properties;
 	@Autowired
 	ExecutorService executor;
+	@Autowired
+	StateRepository stateRepository;
+	@Autowired
+	MongoClient mongoClient;
+	@Autowired
+	MongoProperties mongoProperties;
+	@Autowired
+	PollIntervalAwareConsumerRebalanceListener rebalanceListener;
+	@Autowired
+	RecordHandler<K, V> recordHandler;
 
 	KafkaProducer<Bytes, Bytes> testRecordProducer;
 	KafkaConsumer<Bytes, Bytes> offsetConsumer;
@@ -78,12 +95,13 @@ abstract class GenericApplicationTests<K, V>
 	@Test
 	void commitsCurrentOffsetsOnSuccess()
 	{
-		recordGenerator.generate(100, Set.of(), Set.of(), messageSender);
+		int numberOfGeneratedMessages =
+				recordGenerator.generate(false, false, messageSender);
 
-		await("100 records received")
+		await(numberOfGeneratedMessages + " records received")
 				.atMost(Duration.ofSeconds(30))
 				.pollInterval(Duration.ofSeconds(1))
-				.until(() -> receivedRecords.size() >= 100);
+				.until(() -> receivedRecords.size() >= numberOfGeneratedMessages);
 
 		await("Offsets committed")
 				.atMost(Duration.ofSeconds(10))
@@ -97,13 +115,16 @@ abstract class GenericApplicationTests<K, V>
 		assertThatExceptionOfType(IllegalStateException.class)
 				.isThrownBy(() -> endlessConsumer.exitStatus())
 				.describedAs("Consumer should still be running");
+
+		recordGenerator.assertBusinessLogic();
 	}
 
 	@Test
 	@SkipWhenErrorCannotBeGenerated(poisonPill = true)
 	void commitsOffsetOfErrorForReprocessingOnDeserializationError()
 	{
-		recordGenerator.generate(100, Set.of(77), Set.of(), messageSender);
+		int numberOfGeneratedMessages =
+				recordGenerator.generate(true, false, messageSender);
 
 		await("Consumer failed")
 				.atMost(Duration.ofSeconds(30))
@@ -123,7 +144,7 @@ abstract class GenericApplicationTests<K, V>
 		compareToCommitedOffsets(newOffsets);
 		assertThat(receivedRecords.size())
 				.describedAs("Received not all sent events")
-				.isLessThan(100);
+				.isLessThan(numberOfGeneratedMessages);
 
 		assertThatNoException()
 				.describedAs("Consumer should not be running")
@@ -131,13 +152,16 @@ abstract class GenericApplicationTests<K, V>
 		assertThat(endlessConsumer.exitStatus())
 				.describedAs("Consumer should have exited abnormally")
 				.containsInstanceOf(RecordDeserializationException.class);
+
+		recordGenerator.assertBusinessLogic();
 	}
 
 	@Test
 	@SkipWhenErrorCannotBeGenerated(logicError = true)
 	void doesNotCommitOffsetsOnLogicError()
 	{
-		recordGenerator.generate(100, Set.of(), Set.of(77), messageSender);
+		int numberOfGeneratedMessages =
+				recordGenerator.generate(false, true, messageSender);
 
 		await("Consumer failed")
 				.atMost(Duration.ofSeconds(30))
@@ -155,9 +179,6 @@ abstract class GenericApplicationTests<K, V>
 
 		checkSeenOffsetsForProgress();
 		compareToCommitedOffsets(oldOffsets);
-		assertThat(receivedRecords.size())
-				.describedAs("Received not all sent events")
-				.isLessThan(100);
 
 		assertThatNoException()
 				.describedAs("Consumer should not be running")
@@ -165,6 +186,8 @@ abstract class GenericApplicationTests<K, V>
 		assertThat(endlessConsumer.exitStatus())
 				.describedAs("Consumer should have exited abnormally")
 				.containsInstanceOf(RuntimeException.class);
+
+		recordGenerator.assertBusinessLogic();
 	}
 
 
@@ -207,23 +230,29 @@ abstract class GenericApplicationTests<K, V>
 	void seekToEnd()
 	{
 		offsetConsumer.assign(partitions());
-		offsetConsumer.seekToEnd(partitions());
 		partitions().forEach(tp ->
 		{
-			// seekToEnd() works lazily: it only takes effect on poll()/position()
 			Long offset = offsetConsumer.position(tp);
 			log.info("New position for {}: {}", tp, offset);
+			Integer partition = tp.partition();
+			StateDocument document =
+					stateRepository
+							.findById(partition.toString())
+							.orElse(new StateDocument(partition));
+			document.offset = offset;
+			stateRepository.save(document);
 		});
-		// The new positions must be commited!
-		offsetConsumer.commitSync();
 		offsetConsumer.unsubscribe();
 	}
 
 	void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
 	{
-		offsetConsumer.assign(partitions());
-		partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
-		offsetConsumer.unsubscribe();
+		partitions().forEach(tp ->
+		{
+			String partition = Integer.toString(tp.partition());
+			Optional<Long> offset = stateRepository.findById(partition).map(document -> document.offset);
+			consumer.accept(tp, offset.orElse(0l));
+		});
 	}
 
 	List<TopicPartition> partitions()
@@ -238,12 +267,12 @@ abstract class GenericApplicationTests<K, V>
 
 	public interface RecordGenerator
 	{
-		void generate(
-				Set<Integer> poisonPills,
-				Set<Integer> logicErrors,
+		int generate(
+				boolean poisonPills,
+				boolean logicErrors,
 				Consumer<ProducerRecord<Bytes, Bytes>> messageSender);
 
-		default boolean canGeneratePoisionPill()
+		default boolean canGeneratePoisonPill()
 		{
 			return true;
 		}
@@ -252,6 +281,11 @@ abstract class GenericApplicationTests<K, V>
 		{
 			return true;
 		}
+
+		default void assertBusinessLogic()
+		{
+			log.debug("No business-logic to assert");
+		}
 	}
 
 	void sendMessage(ProducerRecord<Bytes, Bytes> record)
@@ -298,6 +332,7 @@ abstract class GenericApplicationTests<K, V>
 		props.put("value.deserializer", BytesDeserializer.class.getName());
 		offsetConsumer = new KafkaConsumer<>(props);
 
+		mongoClient.getDatabase(mongoProperties.getDatabase()).drop();
 		seekToEnd();
 
 		oldOffsets = new HashMap<>();
@@ -310,14 +345,17 @@ abstract class GenericApplicationTests<K, V>
 			newOffsets.put(tp, offset - 1);
 		});
 
-		Consumer<ConsumerRecord<K, V>> captureOffsetAndExecuteTestHandler =
-				record ->
+		TestRecordHandler<K, V> captureOffsetAndExecuteTestHandler =
+				new TestRecordHandler<K, V>(recordHandler)
 				{
-					newOffsets.put(
-							new TopicPartition(record.topic(), record.partition()),
-							record.offset());
-					receivedRecords.add(record);
-					consumer.accept(record);
+					@Override
+					public void onNewRecord(ConsumerRecord<K, V> record)
+					{
+						newOffsets.put(
+								new TopicPartition(record.topic(), record.partition()),
+								record.offset());
+						receivedRecords.add(record);
+					}
 				};
 
 		endlessConsumer =
@@ -324,8 +362,9 @@ abstract class GenericApplicationTests<K, V>
 				new EndlessConsumer<>(
 						executor,
 						properties.getClientId(),
 						properties.getTopic(),
 						kafkaConsumer,
+						rebalanceListener,
 						captureOffsetAndExecuteTestHandler);
 
 		endlessConsumer.start();
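
Note: the hunks above change the RecordGenerator contract from "void generate(Set<Integer> poisonPills, Set<Integer> logicErrors, ...)" to "int generate(boolean poisonPills, boolean logicErrors, ...)". The test no longer dictates which record positions fail; the generator decides that itself and reports back how many messages it produced, which the tests then use in place of the hard-coded 100. A minimal sketch of an implementation against the new signature could look as follows; the topic name, the payload format, and the choice of position 77 (mirroring the Set.of(77) the old signature used to pass in) are assumptions for illustration, not part of the diff:

import java.util.function.Consumer;

import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.utils.Bytes;

// Hypothetical implementation of the revised RecordGenerator contract.
// Only the method signature comes from the diff above; everything else
// is an assumption for illustration.
class ExampleRecordGenerator implements GenericApplicationTests.RecordGenerator
{
	@Override
	public int generate(
			boolean poisonPills,
			boolean logicErrors,
			Consumer<ProducerRecord<Bytes, Bytes>> messageSender)
	{
		int numberOfMessages = 100;

		for (int i = 0; i < numberOfMessages; i++)
		{
			// Assumption: the consumer parses values as numbers, so a
			// non-numeric value acts as the poison pill and a negative
			// number as the business-logic error; position 77 mirrors
			// the Set.of(77) the old test signature used to pass in.
			String value = Integer.toString(i);
			if (poisonPills && i == 77)
				value = "BOOM";
			if (logicErrors && i == 77)
				value = "-1";

			messageSender.accept(new ProducerRecord<>(
					"test-topic", // assumed name; the tests send to the shared TOPIC
					Bytes.wrap(Integer.toString(i % 10).getBytes()),
					Bytes.wrap(value.getBytes())));
		}

		// The new contract reports how many messages were generated, so
		// the tests can await exactly that number of received records.
		return numberOfMessages;
	}
}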