X-Git-Url: https://juplo.de/gitweb/?a=blobdiff_plain;f=src%2Ftest%2Fjava%2Fde%2Fjuplo%2Fkafka%2FApplicationTests.java;fp=src%2Ftest%2Fjava%2Fde%2Fjuplo%2Fkafka%2FApplicationTests.java;h=4ddf8a909c509a336180aaa670f4e54dda06b084;hb=c9d7601fc551069cd3a77da06a6333f22101a8a0;hp=5285145ba2dba903dc79bac2df10b7669217d0ef;hpb=3e6248a8f09479abc38cf9396602ec2e26e473c5;p=demos%2Fkafka%2Ftraining

diff --git a/src/test/java/de/juplo/kafka/ApplicationTests.java b/src/test/java/de/juplo/kafka/ApplicationTests.java
index 5285145..4ddf8a9 100644
--- a/src/test/java/de/juplo/kafka/ApplicationTests.java
+++ b/src/test/java/de/juplo/kafka/ApplicationTests.java
@@ -1,320 +1,84 @@
 package de.juplo.kafka;
 
-import lombok.extern.slf4j.Slf4j;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.clients.consumer.KafkaConsumer;
-import org.apache.kafka.clients.producer.KafkaProducer;
 import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.serialization.*;
+import org.apache.kafka.common.serialization.StringSerializer;
 import org.apache.kafka.common.utils.Bytes;
-import org.junit.jupiter.api.*;
-import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
-import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
-import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
-import org.springframework.boot.test.context.TestConfiguration;
-import org.springframework.context.annotation.Bean;
-import org.springframework.context.annotation.Import;
-import org.springframework.kafka.test.context.EmbeddedKafka;
-import org.springframework.test.context.TestPropertySource;
-import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
 
-import java.time.Duration;
-import java.util.*;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.function.BiConsumer;
-import java.util.stream.Collectors;
+import java.util.function.Consumer;
 import java.util.stream.IntStream;
 
-import static de.juplo.kafka.ApplicationTests.PARTITIONS;
-import static de.juplo.kafka.ApplicationTests.TOPIC;
-import static org.assertj.core.api.Assertions.*;
-import static org.awaitility.Awaitility.*;
-
-
-@SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
-@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
-@TestPropertySource(
-    properties = {
-        "sumup.adder.bootstrap-server=${spring.embedded.kafka.brokers}",
-        "sumup.adder.topic=" + TOPIC,
-        "sumup.adder.commit-interval=1s",
-        "spring.mongodb.embedded.version=4.4.13" })
-@EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
-@EnableAutoConfiguration
-@AutoConfigureDataMongo
-@Slf4j
-class ApplicationTests
+public class ApplicationTests extends GenericApplicationTests<String, String>
 {
-  public static final String TOPIC = "FOO";
-  public static final int PARTITIONS = 10;
-
-
-  StringSerializer stringSerializer = new StringSerializer();
-
-  @Autowired
-  Serializer<Long> valueSerializer;
-  @Autowired
-  KafkaProducer<String, Bytes> kafkaProducer;
-  @Autowired
-  KafkaConsumer<String, String> kafkaConsumer;
-  @Autowired
-  KafkaConsumer<Bytes, Bytes> offsetConsumer;
-  @Autowired
-  PartitionStatisticsRepository partitionStatisticsRepository;
-  @Autowired
-  ApplicationProperties properties;
-  @Autowired
-  ExecutorService executor;
-  @Autowired
-  PartitionStatisticsRepository repository;
-  @Autowired
-  AdderRebalanceListener adderRebalanceListener;
-  @Autowired
-  AdderRecordHandler adderRecordHandler;
-
-  EndlessConsumer<String, String> endlessConsumer;
-  Map<TopicPartition, Long> oldOffsets;
-  Map<TopicPartition, Long> newOffsets;
-  Set<ConsumerRecord<String, String>> receivedRecords;
-
-
-  /** Tests methods */
-
-  @Test
-  @Disabled("Vorübergehend deaktivert, bis der Testfall angepasst ist")
-  void commitsCurrentOffsetsOnSuccess() throws ExecutionException, InterruptedException
-  {
-    send100Messages((partition, key, counter) ->
-    {
-      Bytes value = new Bytes(valueSerializer.serialize(TOPIC, counter));
-      return new ProducerRecord<>(TOPIC, partition, key, value);
-    });
-
-    await("100 records received")
-        .atMost(Duration.ofSeconds(30))
-        .pollInterval(Duration.ofSeconds(1))
-        .until(() -> receivedRecords.size() >= 100);
-
-    await("Offsets committed")
-        .atMost(Duration.ofSeconds(10))
-        .pollInterval(Duration.ofSeconds(1))
-        .untilAsserted(() ->
-        {
-          checkSeenOffsetsForProgress();
-          compareToCommitedOffsets(newOffsets);
-        });
-
-    assertThatExceptionOfType(IllegalStateException.class)
-        .isThrownBy(() -> endlessConsumer.exitStatus())
-        .describedAs("Consumer should still be running");
-  }
-
-
-  /** Helper methods for the verification of expectations */
-
-  void compareToCommitedOffsets(Map<TopicPartition, Long> offsetsToCheck)
-  {
-    doForCurrentOffsets((tp, offset) ->
-    {
-      Long expected = offsetsToCheck.get(tp) + 1;
-      log.debug("Checking, if the offset for {} is {}", tp, expected);
-      assertThat(offset)
-          .describedAs("Committed offset corresponds to the offset of the consumer")
-          .isEqualTo(expected);
-    });
-  }
-
-  void checkSeenOffsetsForProgress()
-  {
-    // Be sure, that some messages were consumed...!
-    Set<TopicPartition> withProgress = new HashSet<>();
-    partitions().forEach(tp ->
-    {
-      Long oldOffset = oldOffsets.get(tp) + 1;
-      Long newOffset = newOffsets.get(tp) + 1;
-      if (!oldOffset.equals(newOffset))
-      {
-        log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
-        withProgress.add(tp);
-      }
-    });
-    assertThat(withProgress)
-        .describedAs("Some offsets must have changed, compared to the old offset-positions")
-        .isNotEmpty();
-  }
-
-
-  /** Helper methods for setting up and running the tests */
-
-  void seekToEnd()
-  {
-    offsetConsumer.assign(partitions());
-    partitions().forEach(tp ->
-    {
-      Long offset = offsetConsumer.position(tp);
-      log.info("New position for {}: {}", tp, offset);
-      Integer partition = tp.partition();
-      StateDocument document =
-          partitionStatisticsRepository
-              .findById(partition.toString())
-              .orElse(new StateDocument(partition));
-      document.offset = offset;
-      partitionStatisticsRepository.save(document);
-    });
-    offsetConsumer.unsubscribe();
-  }
-
-  void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
-  {
-    partitions().forEach(tp ->
-    {
-      String partition = Integer.toString(tp.partition());
-      Optional<Long> offset = partitionStatisticsRepository.findById(partition).map(document -> document.offset);
-      consumer.accept(tp, offset.orElse(0l));
-    });
-  }
-
-  List<TopicPartition> partitions()
-  {
-    return
-        IntStream
-            .range(0, PARTITIONS)
-            .mapToObj(partition -> new TopicPartition(TOPIC, partition))
-            .collect(Collectors.toList());
-  }
-
-
-  public interface RecordGenerator
-  {
-    public ProducerRecord<String, Bytes> generate(int partition, String key, long counter);
-  }
-
-  void send100Messages(RecordGenerator recordGenerator)
-  {
-    long i = 0;
-
-    for (int partition = 0; partition < 10; partition++)
-    {
-      for (int key = 0; key < 10; key++)
-      {
-        ProducerRecord<String, Bytes> record =
-            recordGenerator.generate(partition, Integer.toString(partition*10+key%2), ++i);
-
-        kafkaProducer.send(record, (metadata, e) ->
-        {
-          if (metadata != null)
-          {
-            log.debug(
-                "{}|{} - {}={}",
-                metadata.partition(),
-                metadata.offset(),
-                record.key(),
-                record.value());
-          }
-          else
-          {
-            log.warn(
-                "Exception for {}={}: {}",
-                record.key(),
-                record.value(),
-                e.toString());
-          }
-        });
-      }
-    }
-  }
-
-
-  @BeforeEach
-  public void init()
-  {
-    seekToEnd();
-
-    oldOffsets = new HashMap<>();
-    newOffsets = new HashMap<>();
-    receivedRecords = new HashSet<>();
-
-    doForCurrentOffsets((tp, offset) ->
-    {
-      oldOffsets.put(tp, offset - 1);
-      newOffsets.put(tp, offset - 1);
-    });
-
-    TestRecordHandler<String, String> captureOffsetAndExecuteTestHandler =
-        new TestRecordHandler<String, String>(adderRecordHandler) {
-          @Override
-          public void onNewRecord(ConsumerRecord<String, String> record)
-          {
-            newOffsets.put(
-                new TopicPartition(record.topic(), record.partition()),
-                record.offset());
-            receivedRecords.add(record);
-          }
-        };
-
-    endlessConsumer =
-        new EndlessConsumer<>(
-            executor,
-            properties.getClientId(),
-            properties.getTopic(),
-            kafkaConsumer,
-            adderRebalanceListener,
-            captureOffsetAndExecuteTestHandler);
-
-    endlessConsumer.start();
-  }
-
-  @AfterEach
-  public void deinit()
-  {
-    try
-    {
-      endlessConsumer.stop();
-    }
-    catch (Exception e)
-    {
-      log.info("Exception while stopping the consumer: {}", e.toString());
-    }
-  }
-
-
-  @TestConfiguration
-  @Import(ApplicationConfiguration.class)
-  public static class Configuration
-  {
-    @Bean
-    Serializer<Long> serializer()
-    {
-      return new LongSerializer();
-    }
-
-    @Bean
-    KafkaProducer<String, Bytes> kafkaProducer(ApplicationProperties properties)
-    {
-      Properties props = new Properties();
-      props.put("bootstrap.servers", properties.getBootstrapServer());
-      props.put("linger.ms", 100);
-      props.put("key.serializer", StringSerializer.class.getName());
-      props.put("value.serializer", BytesSerializer.class.getName());
-
-      return new KafkaProducer<>(props);
-    }
-
-    @Bean
-    KafkaConsumer<Bytes, Bytes> offsetConsumer(ApplicationProperties properties)
-    {
-      Properties props = new Properties();
-      props.put("bootstrap.servers", properties.getBootstrapServer());
-      props.put("client.id", "OFFSET-CONSUMER");
-      props.put("enable.auto.commit", false);
-      props.put("auto.offset.reset", "latest");
-      props.put("key.deserializer", BytesDeserializer.class.getName());
-      props.put("value.deserializer", BytesDeserializer.class.getName());
-
-      return new KafkaConsumer<>(props);
-    }
-  }
+  public ApplicationTests()
+  {
+    super(
+        new RecordGenerator()
+        {
+          final int[] numbers = { 1, 7, 3, 2, 33, 6, 11 };
+          final String[] dieWilden13 =
+              IntStream
+                  .range(1,14)
+                  .mapToObj(i -> "seeräuber-" + i)
+                  .toArray(i -> new String[i]);
+          final StringSerializer stringSerializer = new StringSerializer();
+          final Bytes startMessage = new Bytes(stringSerializer.serialize(TOPIC, "START"));
+          final Bytes endMessage = new Bytes(stringSerializer.serialize(TOPIC, "END"));
+
+          int counter = 0;
+
+
+          @Override
+          public int generate(
+              boolean poisonPills,
+              boolean logicErrors,
+              Consumer<ProducerRecord<Bytes, Bytes>> messageSender)
+          {
+            counter = 0;
+
+            for (int i = 0; i < 33; i++)
+            {
+              String seeräuber = dieWilden13[i%13];
+              int number = numbers[i%7];
+
+              Bytes key = new Bytes(stringSerializer.serialize(TOPIC, seeräuber));
+
+              send(key, startMessage, logicErrors, messageSender);
+              for (int message = 1; message <= number; message++)
+              {
+                Bytes value = new Bytes(stringSerializer.serialize(TOPIC, Integer.toString(message)));
+                send(key, value, logicErrors, messageSender);
+              }
+              send(key, endMessage, logicErrors, messageSender);
+            }
+
+            return counter;
+          }
+
+          void send(
+              Bytes key,
+              Bytes value,
+              boolean logicErrors,
+              Consumer<ProducerRecord<Bytes, Bytes>> messageSender)
+          {
+            counter++;
+
+            if (counter == 77)
+            {
+              if (logicErrors)
+              {
+                value = value.equals(startMessage) ? endMessage : startMessage;
+              }
+            }
+
+            messageSender.accept(new ProducerRecord<>(TOPIC, key, value));
+          }
+
+          @Override
+          public boolean canGeneratePoisonPill()
+          {
+            return false;
+          }
+        });
+  }
 }
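
A quick sanity check on the new RecordGenerator (not part of the commit): each seeräuber key receives a START marker, the values 1..number, and an END marker. Assuming the adder application sums the values between the two markers, the expected result of every sequence follows from the numbers array alone. The standalone sketch below replays the same message plan and prints those sums; the class name and the summing assumption are mine, not taken from the repository.

import java.util.stream.IntStream;

// Hypothetical helper, not part of the repository: replays the message plan of
// the RecordGenerator above and prints the sum the adder should compute for
// each START..END sequence (assuming it adds up the values 1..n in between).
public class RecordGeneratorSketch
{
  public static void main(String[] args)
  {
    int[] numbers = { 1, 7, 3, 2, 33, 6, 11 };
    String[] dieWilden13 =
        IntStream
            .range(1, 14)
            .mapToObj(i -> "seeräuber-" + i)
            .toArray(String[]::new);

    for (int i = 0; i < 33; i++)
    {
      String seeräuber = dieWilden13[i % 13];
      int number = numbers[i % 7];
      // Gauss sum: 1 + 2 + ... + n == n * (n + 1) / 2
      int expected = number * (number + 1) / 2;
      System.out.println(seeräuber + ": 1.." + number + " -> " + expected);
    }
  }
}

Counting along the same loop also identifies record no. 77, the single message that send() swaps between START and END when logicErrors is set.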