diff --git a/src/test/java/de/juplo/kafka/ApplicationTests.java b/src/test/java/de/juplo/kafka/ApplicationTests.java
index f541093..431431b 100644
--- a/src/test/java/de/juplo/kafka/ApplicationTests.java
+++ b/src/test/java/de/juplo/kafka/ApplicationTests.java
@@ -6,13 +6,13 @@ import org.apache.kafka.clients.consumer.KafkaConsumer;
 import org.apache.kafka.clients.producer.KafkaProducer;
 import org.apache.kafka.clients.producer.ProducerRecord;
 import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.serialization.BytesDeserializer;
-import org.apache.kafka.common.serialization.BytesSerializer;
-import org.apache.kafka.common.serialization.LongSerializer;
-import org.apache.kafka.common.serialization.StringSerializer;
+import org.apache.kafka.common.errors.RecordDeserializationException;
+import org.apache.kafka.common.serialization.*;
 import org.apache.kafka.common.utils.Bytes;
 import org.junit.jupiter.api.*;
 import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
+import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
 import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
 import org.springframework.boot.test.context.TestConfiguration;
 import org.springframework.context.annotation.Bean;
@@ -21,6 +21,7 @@ import org.springframework.kafka.test.context.EmbeddedKafka;
 import org.springframework.test.context.TestPropertySource;
 import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
 
+import java.time.Clock;
 import java.time.Duration;
 import java.util.*;
 import java.util.concurrent.ExecutionException;
@@ -33,7 +34,7 @@ import java.util.stream.IntStream;
 
 import static de.juplo.kafka.ApplicationTests.PARTITIONS;
 import static de.juplo.kafka.ApplicationTests.TOPIC;
-import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.*;
 import static org.awaitility.Awaitility.*;
 
 
@@ -42,8 +43,12 @@
 @TestPropertySource(
     properties = {
         "consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
-        "consumer.topic=" + TOPIC })
+        "consumer.topic=" + TOPIC,
+        "consumer.commit-interval=1s",
+        "spring.mongodb.embedded.version=4.4.13" })
 @EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
+@EnableAutoConfiguration
+@AutoConfigureDataMongo
 @Slf4j
 class ApplicationTests
 {
@@ -52,23 +57,27 @@ class ApplicationTests
 
   StringSerializer stringSerializer = new StringSerializer();
 
-  LongSerializer longSerializer = new LongSerializer();
+  @Autowired
+  Serializer<Long> valueSerializer;
   @Autowired
   KafkaProducer<String, Bytes> kafkaProducer;
   @Autowired
   KafkaConsumer<String, Long> kafkaConsumer;
   @Autowired
-  KafkaConsumer<Bytes, Bytes> offsetConsumer;
+  PartitionStatisticsRepository partitionStatisticsRepository;
   @Autowired
   ApplicationProperties properties;
   @Autowired
   ExecutorService executor;
+  @Autowired
+  PartitionStatisticsRepository repository;
 
   Consumer<ConsumerRecord<String, Long>> testHandler;
   EndlessConsumer<String, Long> endlessConsumer;
   Map<TopicPartition, Long> oldOffsets;
   Map<TopicPartition, Long> newOffsets;
+  Set<ConsumerRecord<String, Long>> receivedRecords;
 
 
   /** Tests methods */
 
@@ -77,14 +86,11 @@ class ApplicationTests
   @Test
   @Order(1) // << The poison pill is not skipped. Hence, this test must run first
   void commitsCurrentOffsetsOnSuccess() throws ExecutionException, InterruptedException
   {
-    send100Messages(i -> new Bytes(longSerializer.serialize(TOPIC, i)));
-
-    Set<ConsumerRecord<String, Long>> received = new HashSet<>();
-    testHandler = record -> received.add(record);
+    send100Messages(i -> new Bytes(valueSerializer.serialize(TOPIC, i)));
 
     await("100 records received")
         .atMost(Duration.ofSeconds(30))
-        .until(() -> received.size() >= 100);
+        .until(() -> receivedRecords.size() >= 100);
 
     await("Offsets committed")
         .atMost(Duration.ofSeconds(10))
@@ -93,6 +99,10 @@ class ApplicationTests
         {
           checkSeenOffsetsForProgress();
           compareToCommitedOffsets(newOffsets);
         });
+
+    assertThatExceptionOfType(IllegalStateException.class)
+        .describedAs("Consumer should still be running")
+        .isThrownBy(() -> endlessConsumer.exitStatus());
   }
 
   @Test
@@ -102,7 +112,7 @@ class ApplicationTests
     send100Messages(counter ->
         counter == 77
             ? new Bytes(stringSerializer.serialize(TOPIC, "BOOM!"))
-            : new Bytes(longSerializer.serialize(TOPIC, counter)));
+            : new Bytes(valueSerializer.serialize(TOPIC, counter)));
 
     await("Consumer failed")
         .atMost(Duration.ofSeconds(30))
@@ -118,6 +128,16 @@ class ApplicationTests
 
     checkSeenOffsetsForProgress();
     compareToCommitedOffsets(newOffsets);
+
+    assertThat(receivedRecords.size())
+        .describedAs("Did not receive all sent events")
+        .isLessThan(100);
+
+    assertThatNoException()
+        .describedAs("Consumer should not be running")
+        .isThrownBy(() -> endlessConsumer.exitStatus());
+    assertThat(endlessConsumer.exitStatus())
+        .describedAs("Consumer should have exited abnormally")
+        .containsInstanceOf(RecordDeserializationException.class);
   }
 
 
@@ -159,9 +179,12 @@ class ApplicationTests
   void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
   {
-    offsetConsumer.assign(partitions());
-    partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
-    offsetConsumer.unsubscribe();
+    partitions().forEach(tp ->
+    {
+      String partition = Integer.toString(tp.partition());
+      Optional<Long> offset = partitionStatisticsRepository.findById(partition).map(document -> document.offset);
+      consumer.accept(tp, offset.orElse(0L));
+    });
   }
 
   List<TopicPartition> partitions()
@@ -223,6 +246,7 @@ class ApplicationTests
 
     oldOffsets = new HashMap<>();
     newOffsets = new HashMap<>();
+    receivedRecords = new HashSet<>();
 
     doForCurrentOffsets((tp, offset) ->
     {
@@ -236,14 +260,18 @@ class ApplicationTests
       newOffsets.put(
           new TopicPartition(record.topic(), record.partition()),
          record.offset());
+      receivedRecords.add(record);
       testHandler.accept(record);
     };
 
     endlessConsumer =
         new EndlessConsumer<>(
             executor,
+            repository,
             properties.getClientId(),
             properties.getTopic(),
+            Clock.systemDefaultZone(),
+            properties.getCommitInterval(),
             kafkaConsumer,
             captureOffsetAndExecuteTestHandler);
 
@@ -268,6 +296,12 @@ class ApplicationTests
   @Import(ApplicationConfiguration.class)
   public static class Configuration
   {
+    @Bean
+    Serializer<Long> serializer()
+    {
+      return new LongSerializer();
+    }
+
     @Bean
     KafkaProducer<String, Bytes> kafkaProducer(ApplicationProperties properties)
     {
@@ -279,18 +313,5 @@ class ApplicationTests
 
       return new KafkaProducer<>(props);
     }
-
-    @Bean
-    KafkaConsumer<Bytes, Bytes> offsetConsumer(ApplicationProperties properties)
-    {
-      Properties props = new Properties();
-      props.put("bootstrap.servers", properties.getBootstrapServer());
-      props.put("client.id", "OFFSET-CONSUMER");
-      props.put("group.id", properties.getGroupId());
-      props.put("key.deserializer", BytesDeserializer.class.getName());
-      props.put("value.deserializer", BytesDeserializer.class.getName());
-
-      return new KafkaConsumer<>(props);
-    }
   }
 }
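
Note on the offset lookup introduced in doForCurrentOffsets(): committed offsets are now read back from MongoDB via partitionStatisticsRepository.findById(...) instead of a dedicated offset consumer, and offset.orElse(0L) means a partition without a stored document is simply read from the beginning. The repository and document classes are not part of this diff; the following is a minimal sketch of what they might look like, where the String id and the public offset field are implied by findById(partition).map(document -> document.offset), and the collection name and everything else are assumptions:

import org.springframework.data.annotation.Id;
import org.springframework.data.mongodb.core.mapping.Document;
import org.springframework.data.mongodb.repository.MongoRepository;

// Sketch only: the collection name and class shapes are assumptions; the
// diff itself only requires a String id and a public 'offset' field.
@Document(collection = "statistics")
class StatisticsDocument
{
  @Id
  public String id;    // the partition number, stored as a string ("0", "1", ...)
  public long offset;  // last committed offset for that partition

  public StatisticsDocument() {}
}

// findById(String) is inherited from MongoRepository and returns an
// Optional<StatisticsDocument>, matching the call in doForCurrentOffsets().
interface PartitionStatisticsRepository
    extends MongoRepository<StatisticsDocument, String>
{
}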
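
Note on the @Order(1) comment ("the poison pill is not skipped"): the failing test writes record number 77 with a StringSerializer while the consumer deserializes values as Long, and a LongDeserializer requires exactly 8 bytes, so poll() fails on that record with the RecordDeserializationException asserted above. Because offsets are committed only up to the record before the failure, a restarted consumer hits the same record again, which is why the happy-path test has to run first. A free-standing sketch of this failure mode, assuming a hypothetical consumer already subscribed to the test topic:

import java.time.Duration;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.RecordDeserializationException;

class PoisonPillSketch
{
  // 'consumer' is a hypothetical KafkaConsumer<String, Long> that is
  // already subscribed to the test topic.
  static void pollOnce(KafkaConsumer<String, Long> consumer)
  {
    try
    {
      // "BOOM!" is 5 bytes on the wire; LongDeserializer insists on 8,
      // so deserializing record 77 fails and poll() throws.
      consumer.poll(Duration.ofSeconds(1));
    }
    catch (RecordDeserializationException e)
    {
      // Offsets stop right before the failed record, so a restarted
      // consumer re-reads it: the poison pill is never skipped unless
      // it is explicitly seeked over.
      System.err.printf("Poison pill in %s at offset %d%n",
          e.topicPartition(), e.offset());
    }
  }
}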