Back-ported refactorings from 'wordcount' to 'stored-offsets'
diff --git a/src/test/java/de/juplo/kafka/ApplicationTests.java b/src/test/java/de/juplo/kafka/ApplicationTests.java
index 92074ff..a632a89 100644
--- a/src/test/java/de/juplo/kafka/ApplicationTests.java
+++ b/src/test/java/de/juplo/kafka/ApplicationTests.java
@@ -11,6 +11,8 @@ import org.apache.kafka.common.serialization.*;
 import org.apache.kafka.common.utils.Bytes;
 import org.junit.jupiter.api.*;
 import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
+import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
 import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
 import org.springframework.boot.test.context.TestConfiguration;
 import org.springframework.context.annotation.Bean;
@@ -24,17 +26,14 @@ import java.util.*;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.function.BiConsumer;
-import java.util.function.Consumer;
 import java.util.function.Function;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 
 import static de.juplo.kafka.ApplicationTests.PARTITIONS;
 import static de.juplo.kafka.ApplicationTests.TOPIC;
-import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.*;
 import static org.awaitility.Awaitility.*;
-import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
-import static org.junit.jupiter.api.Assertions.assertThrows;
 
 
 @SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
@@ -42,8 +41,12 @@ import static org.junit.jupiter.api.Assertions.assertThrows;
 @TestPropertySource(
                properties = {
                                "consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
-                               "consumer.topic=" + TOPIC })
+                               "consumer.topic=" + TOPIC,
+                               "consumer.commit-interval=1s",
+                               "spring.mongodb.embedded.version=4.4.13" })
 @EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
+@EnableAutoConfiguration
+@AutoConfigureDataMongo
 @Slf4j
 class ApplicationTests
 {
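
The three additions above wire an embedded MongoDB into the test: @EnableAutoConfiguration activates Spring Boot's auto-configuration inside the @SpringJUnitConfig context, @AutoConfigureDataMongo points the MongoDB auto-configuration at the in-memory flapdoodle server, and spring.mongodb.embedded.version pins the server version. A stripped-down sketch of the same setup, assuming de.flapdoodle.embed:de.flapdoodle.embed.mongo is on the test classpath (the class name is hypothetical):

    import org.junit.jupiter.api.Test;
    import org.springframework.beans.factory.annotation.Autowired;
    import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
    import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
    import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
    import org.springframework.data.mongodb.core.MongoTemplate;
    import org.springframework.test.context.TestPropertySource;
    import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;

    import static org.assertj.core.api.Assertions.assertThat;

    @SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
    @TestPropertySource(properties = "spring.mongodb.embedded.version=4.4.13")
    @EnableAutoConfiguration
    @AutoConfigureDataMongo
    class EmbeddedMongoSetupSketch
    {
        @Autowired
        MongoTemplate mongoTemplate; // provided by the auto-configuration

        @Test
        void contextBootsWithEmbeddedMongo()
        {
            assertThat(mongoTemplate.getDb().getName()).isNotEmpty();
        }
    }
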
@@ -60,13 +63,18 @@ class ApplicationTests
        @Autowired
        KafkaConsumer<String, Long> kafkaConsumer;
        @Autowired
-       KafkaConsumer<Bytes, Bytes> offsetConsumer;
+       PartitionStatisticsRepository partitionStatisticsRepository;
        @Autowired
        ApplicationProperties properties;
        @Autowired
        ExecutorService executor;
+       @Autowired
+       PartitionStatisticsRepository repository;
+       @Autowired
+       KeyCountingRebalanceListener keyCountingRebalanceListener;
+       @Autowired
+       KeyCountingRecordHandler keyCountingRecordHandler;
 
-       Consumer<ConsumerRecord<String, Long>> testHandler;
        EndlessConsumer<String, Long> endlessConsumer;
        Map<TopicPartition, Long> oldOffsets;
        Map<TopicPartition, Long> newOffsets;
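
With the offsetConsumer gone, the test now injects the production beans it used to fake. (Note that PartitionStatisticsRepository is injected twice, once as partitionStatisticsRepository and once as repository; one of the two fields is redundant.) A sketch of what the injected collaborators are assumed to look like; the names follow the diff, the bodies are illustrative:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.function.Consumer;

    import org.apache.kafka.clients.consumer.ConsumerRecord;

    // RecordHandler.java -- assumed call-back abstraction that
    // EndlessConsumer invokes for every consumed record.
    public interface RecordHandler<K, V> extends Consumer<ConsumerRecord<K, V>>
    {
    }

    // KeyCountingRecordHandler.java -- sketch of the production handler:
    // counts messages per partition and key.
    public class KeyCountingRecordHandler implements RecordHandler<String, Long>
    {
        private final Map<Integer, Map<String, Long>> seen = new HashMap<>();

        @Override
        public void accept(ConsumerRecord<String, Long> record)
        {
            seen
                    .computeIfAbsent(record.partition(), partition -> new HashMap<>())
                    .merge(record.key(), 1L, Long::sum);
        }
    }
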
@@ -93,10 +101,9 @@ class ApplicationTests
                                        compareToCommitedOffsets(newOffsets);
                                });
 
-               assertThrows(
-                               IllegalStateException.class,
-                               () -> endlessConsumer.exitStatus(),
-                               "Consumer should still be running");
+		assertThatExceptionOfType(IllegalStateException.class)
+				.describedAs("Consumer should still be running")
+				.isThrownBy(() -> endlessConsumer.exitStatus());
        }
 
        @Test
@@ -126,9 +133,9 @@ class ApplicationTests
                                .describedAs("Received not all sent events")
                                .isLessThan(100);
 
-               assertDoesNotThrow(
-                               () -> endlessConsumer.exitStatus(),
-                               "Consumer should not be running");
+               assertThatNoException()
+                               .describedAs("Consumer should not be running")
+                               .isThrownBy(() -> endlessConsumer.exitStatus());
                assertThat(endlessConsumer.exitStatus())
                                .describedAs("Consumer should have exited abnormally")
                                .containsInstanceOf(RecordDeserializationException.class);
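
Both checks now use AssertJ instead of the JUnit assertions, matching the assertThat() style used in the rest of the test. One subtlety: describedAs() only labels assertions chained after it, so it has to precede isThrownBy() for the description to show up in a failure message; the chain can then continue to inspect the exception itself, e.g. (the message fragment is an assumption):

    assertThatExceptionOfType(IllegalStateException.class)
            .describedAs("Consumer should still be running")
            .isThrownBy(() -> endlessConsumer.exitStatus())
            .withMessageContaining("running"); // assumed message fragment
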
@@ -173,10 +180,13 @@ class ApplicationTests
 
        void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
        {
-               offsetConsumer.assign(partitions());
-               partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
-               offsetConsumer.unsubscribe();
-       }
+		partitions().forEach(tp ->
+		{
+			String partition = Integer.toString(tp.partition());
+			Optional<Long> offset = partitionStatisticsRepository.findById(partition).map(document -> document.offset);
+			consumer.accept(tp, offset.orElse(0L));
+		});
+	}
 
        List<TopicPartition> partitions()
        {
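
doForCurrentOffsets() no longer asks a second KafkaConsumer for the positions; it reads the offsets that the application stored in MongoDB. A sketch of the assumed persistence types (the field access document.offset follows the usage above, everything else is illustrative):

    import java.util.HashMap;
    import java.util.Map;

    import org.springframework.data.annotation.Id;
    import org.springframework.data.mongodb.core.mapping.Document;
    import org.springframework.data.mongodb.repository.MongoRepository;

    // StatisticsDocument.java -- one document per partition.
    @Document(collection = "statistics")
    public class StatisticsDocument
    {
        @Id
        public String id; // the partition number, stored as a string
        public long offset; // the next offset to be consumed
        public Map<String, Long> statistics = new HashMap<>();
    }

    // PartitionStatisticsRepository.java -- findById() is inherited
    // from MongoRepository.
    public interface PartitionStatisticsRepository
            extends MongoRepository<StatisticsDocument, String>
    {
    }
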
@@ -233,8 +243,6 @@ class ApplicationTests
        @BeforeEach
        public void init()
        {
-               testHandler = record -> {} ;
-
                oldOffsets = new HashMap<>();
                newOffsets = new HashMap<>();
                receivedRecords = new HashSet<>();
@@ -245,14 +253,16 @@ class ApplicationTests
                        newOffsets.put(tp, offset - 1);
                });
 
-               Consumer<ConsumerRecord<String, Long>> captureOffsetAndExecuteTestHandler =
-                               record ->
-                               {
-                                       newOffsets.put(
-                                                       new TopicPartition(record.topic(), record.partition()),
-                                                       record.offset());
-                                       receivedRecords.add(record);
-                                       testHandler.accept(record);
+               TestRecordHandler<String, Long> captureOffsetAndExecuteTestHandler =
+                               new TestRecordHandler<String, Long>(keyCountingRecordHandler) {
+                                       @Override
+                                       public void onNewRecord(ConsumerRecord<String, Long> record)
+                                       {
+                                               newOffsets.put(
+                                                               new TopicPartition(record.topic(), record.partition()),
+                                                               record.offset());
+                                               receivedRecords.add(record);
+                                       }
                                };
 
                endlessConsumer =
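
The anonymous Consumer lambda is replaced by a TestRecordHandler that decorates the production KeyCountingRecordHandler, so the real counting logic keeps running while the test records offsets on the side. A sketch of the assumed decorator, consistent with its use above:

    import org.apache.kafka.clients.consumer.ConsumerRecord;

    // TestRecordHandler.java -- lets the test observe each record via
    // onNewRecord() before delegating to the wrapped production handler.
    public abstract class TestRecordHandler<K, V> implements RecordHandler<K, V>
    {
        private final RecordHandler<K, V> handler;

        public TestRecordHandler(RecordHandler<K, V> handler)
        {
            this.handler = handler;
        }

        public abstract void onNewRecord(ConsumerRecord<K, V> record);

        @Override
        public void accept(ConsumerRecord<K, V> record)
        {
            this.onNewRecord(record);
            this.handler.accept(record);
        }
    }
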
@@ -261,6 +271,7 @@ class ApplicationTests
                                                properties.getClientId(),
                                                properties.getTopic(),
                                                kafkaConsumer,
+                                               keyCountingRebalanceListener,
                                                captureOffsetAndExecuteTestHandler);
 
                endlessConsumer.start();
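
EndlessConsumer now takes the rebalance listener as an explicit constructor argument. Presumably it passes the listener on to KafkaConsumer.subscribe(), so that stored offsets can be restored when partitions are assigned; a sketch of that idea under assumed names (the real KeyCountingRebalanceListener will also have to persist state when partitions are revoked):

    import java.util.Collection;

    import org.apache.kafka.clients.consumer.Consumer;
    import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
    import org.apache.kafka.common.TopicPartition;

    // StoredOffsetsRebalanceListener.java -- hypothetical sketch: seeks each
    // newly assigned partition to the offset stored in MongoDB.
    public class StoredOffsetsRebalanceListener implements ConsumerRebalanceListener
    {
        private final Consumer<?, ?> consumer;
        private final PartitionStatisticsRepository repository;

        public StoredOffsetsRebalanceListener(
                Consumer<?, ?> consumer,
                PartitionStatisticsRepository repository)
        {
            this.consumer = consumer;
            this.repository = repository;
        }

        @Override
        public void onPartitionsAssigned(Collection<TopicPartition> partitions)
        {
            partitions.forEach(tp -> repository
                    .findById(Integer.toString(tp.partition()))
                    .ifPresent(document -> consumer.seek(tp, document.offset)));
        }

        @Override
        public void onPartitionsRevoked(Collection<TopicPartition> partitions)
        {
            // persisting the current positions is omitted in this sketch
        }
    }
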
@@ -301,18 +312,5 @@ class ApplicationTests
 
                        return new KafkaProducer<>(props);
                }
-
-               @Bean
-               KafkaConsumer<Bytes, Bytes> offsetConsumer(ApplicationProperties properties)
-               {
-                       Properties props = new Properties();
-                       props.put("bootstrap.servers", properties.getBootstrapServer());
-                       props.put("client.id", "OFFSET-CONSUMER");
-                       props.put("group.id", properties.getGroupId());
-                       props.put("key.deserializer", BytesDeserializer.class.getName());
-                       props.put("value.deserializer", BytesDeserializer.class.getName());
-
-                       return new KafkaConsumer<>(props);
-               }
        }
 }