X-Git-Url: http://juplo.de/gitweb/?a=blobdiff_plain;f=src%2Ftest%2Fjava%2Fde%2Fjuplo%2Fkafka%2FApplicationTests.java;h=6a037eb7136ec1353e9fa11bb5dbee1c5ce63688;hb=5c4b0d9c8e554fc497a6e7c1828081d64a46aa52;hp=a632a89c41f936b88f1f23742e78183279138ef3;hpb=b3777fba0ae679d9e2c9d36626fa208a952f83e8;p=demos%2Fkafka%2Ftraining

diff --git a/src/test/java/de/juplo/kafka/ApplicationTests.java b/src/test/java/de/juplo/kafka/ApplicationTests.java
index a632a89..6a037eb 100644
--- a/src/test/java/de/juplo/kafka/ApplicationTests.java
+++ b/src/test/java/de/juplo/kafka/ApplicationTests.java
@@ -1,316 +1,154 @@
 package de.juplo.kafka;
 
 import lombok.extern.slf4j.Slf4j;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.clients.consumer.KafkaConsumer;
-import org.apache.kafka.clients.producer.KafkaProducer;
 import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.errors.RecordDeserializationException;
-import org.apache.kafka.common.serialization.*;
+import org.apache.kafka.common.serialization.StringSerializer;
 import org.apache.kafka.common.utils.Bytes;
-import org.junit.jupiter.api.*;
 import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
-import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
-import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
-import org.springframework.boot.test.context.TestConfiguration;
-import org.springframework.context.annotation.Bean;
-import org.springframework.context.annotation.Import;
-import org.springframework.kafka.test.context.EmbeddedKafka;
-import org.springframework.test.context.TestPropertySource;
-import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
 
-import java.time.Duration;
 import java.util.*;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.function.BiConsumer;
-import java.util.function.Function;
+import java.util.function.Consumer;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 
-import static de.juplo.kafka.ApplicationTests.PARTITIONS;
-import static de.juplo.kafka.ApplicationTests.TOPIC;
-import static org.assertj.core.api.Assertions.*;
-import static org.awaitility.Awaitility.*;
+import static org.assertj.core.api.Assertions.assertThat;
 
 
-@SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
-@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
-@TestPropertySource(
-    properties = {
-        "consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
-        "consumer.topic=" + TOPIC,
-        "consumer.commit-interval=1s",
-        "spring.mongodb.embedded.version=4.4.13" })
-@EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
-@EnableAutoConfiguration
-@AutoConfigureDataMongo
 @Slf4j
-class ApplicationTests
+public class ApplicationTests extends GenericApplicationTests
 {
-  public static final String TOPIC = "FOO";
-  public static final int PARTITIONS = 10;
-
-
-  StringSerializer stringSerializer = new StringSerializer();
-
-  @Autowired
-  Serializer<Long> valueSerializer;
-  @Autowired
-  KafkaProducer<String, Bytes> kafkaProducer;
-  @Autowired
-  KafkaConsumer<String, Long> kafkaConsumer;
-  @Autowired
-  PartitionStatisticsRepository partitionStatisticsRepository;
-  @Autowired
-  ApplicationProperties properties;
-  @Autowired
-  ExecutorService executor;
-  @Autowired
-  PartitionStatisticsRepository repository;
-  @Autowired
-  KeyCountingRebalanceListener keyCountingRebalanceListener;
-  @Autowired
-  KeyCountingRecordHandler keyCountingRecordHandler;
-
-  EndlessConsumer<String, Long> endlessConsumer;
-  Map<TopicPartition, Long> oldOffsets;
-  Map<TopicPartition, Long> newOffsets;
-  Set<ConsumerRecord<String, Long>> receivedRecords;
-
-
-  /** Test methods */
-
-  @Test
-  @Order(1) // << The poison pill is not skipped. Hence, this test must run first
-  void commitsCurrentOffsetsOnSuccess() throws ExecutionException, InterruptedException
-  {
-    send100Messages(i -> new Bytes(valueSerializer.serialize(TOPIC, i)));
-
-    await("100 records received")
-        .atMost(Duration.ofSeconds(30))
-        .until(() -> receivedRecords.size() >= 100);
-
-    await("Offsets committed")
-        .atMost(Duration.ofSeconds(10))
-        .untilAsserted(() ->
-        {
-          checkSeenOffsetsForProgress();
-          compareToCommittedOffsets(newOffsets);
-        });
-
-    assertThatExceptionOfType(IllegalStateException.class)
-        .isThrownBy(() -> endlessConsumer.exitStatus())
-        .describedAs("Consumer should still be running");
-  }
-
-  @Test
-  @Order(2)
-  void commitsOffsetOfErrorForReprocessingOnError()
-  {
-    send100Messages(counter ->
-        counter == 77
-            ? new Bytes(stringSerializer.serialize(TOPIC, "BOOM!"))
-            : new Bytes(valueSerializer.serialize(TOPIC, counter)));
-
-    await("Consumer failed")
-        .atMost(Duration.ofSeconds(30))
-        .until(() -> !endlessConsumer.running());
-
-    checkSeenOffsetsForProgress();
-    compareToCommittedOffsets(newOffsets);
-
-    endlessConsumer.start();
-    await("Consumer failed again")
-        .atMost(Duration.ofSeconds(30))
-        .until(() -> !endlessConsumer.running());
-
-    checkSeenOffsetsForProgress();
-    compareToCommittedOffsets(newOffsets);
-    assertThat(receivedRecords.size())
-        .describedAs("Not all sent events were received")
-        .isLessThan(100);
-
-    assertThatNoException()
-        .describedAs("Consumer should not be running")
-        .isThrownBy(() -> endlessConsumer.exitStatus());
-    assertThat(endlessConsumer.exitStatus())
-        .describedAs("Consumer should have exited abnormally")
-        .containsInstanceOf(RecordDeserializationException.class);
-  }
-
-
-  /** Helper methods for the verification of expectations */
-
-  void compareToCommittedOffsets(Map<TopicPartition, Long> offsetsToCheck)
-  {
-    doForCurrentOffsets((tp, offset) ->
-    {
-      Long expected = offsetsToCheck.get(tp) + 1;
-      log.debug("Checking if the offset for {} is {}", tp, expected);
-      assertThat(offset)
-          .describedAs("Committed offset corresponds to the offset of the consumer")
-          .isEqualTo(expected);
-    });
-  }
-
-  void checkSeenOffsetsForProgress()
-  {
-    // Be sure that some messages were consumed...!
-    Set<TopicPartition> withProgress = new HashSet<>();
-    partitions().forEach(tp ->
-    {
-      Long oldOffset = oldOffsets.get(tp);
-      Long newOffset = newOffsets.get(tp);
-      if (!oldOffset.equals(newOffset))
-      {
-        log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
-        withProgress.add(tp);
-      }
-    });
-    assertThat(withProgress)
-        .describedAs("Some offsets must have changed, compared to the old offset positions")
-        .isNotEmpty();
-  }
-
-
-  /** Helper methods for setting up and running the tests */
-
-  void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
-  {
-    partitions().forEach(tp ->
-    {
-      String partition = Integer.toString(tp.partition());
-      Optional<Long> offset = partitionStatisticsRepository.findById(partition).map(document -> document.offset);
-      consumer.accept(tp, offset.orElse(0L));
-    });
-  }
-
-  List<TopicPartition> partitions()
-  {
-    return
-        IntStream
-            .range(0, PARTITIONS)
-            .mapToObj(partition -> new TopicPartition(TOPIC, partition))
-            .collect(Collectors.toList());
-  }
-
-
-  void send100Messages(Function<Long, Bytes> messageGenerator)
-  {
-    long i = 0;
-
-    for (int partition = 0; partition < 10; partition++)
-    {
-      for (int key = 0; key < 10; key++)
-      {
-        Bytes value = messageGenerator.apply(++i);
-
-        ProducerRecord<String, Bytes> record =
-            new ProducerRecord<>(
-                TOPIC,
-                partition,
-                Integer.toString(key%2),
-                value);
-
-        kafkaProducer.send(record, (metadata, e) ->
-        {
-          if (metadata != null)
-          {
-            log.debug(
-                "{}|{} - {}={}",
-                metadata.partition(),
-                metadata.offset(),
-                record.key(),
-                record.value());
-          }
-          else
-          {
-            log.warn(
-                "Exception for {}={}: {}",
-                record.key(),
-                record.value(),
-                e.toString());
-          }
-        });
-      }
-    }
-  }
-
-
-  @BeforeEach
-  public void init()
-  {
-    oldOffsets = new HashMap<>();
-    newOffsets = new HashMap<>();
-    receivedRecords = new HashSet<>();
-
-    doForCurrentOffsets((tp, offset) ->
-    {
-      oldOffsets.put(tp, offset - 1);
-      newOffsets.put(tp, offset - 1);
-    });
-
-    TestRecordHandler<String, Long> captureOffsetAndExecuteTestHandler =
-        new TestRecordHandler<String, Long>(keyCountingRecordHandler) {
-          @Override
-          public void onNewRecord(ConsumerRecord<String, Long> record)
-          {
-            newOffsets.put(
-                new TopicPartition(record.topic(), record.partition()),
-                record.offset());
-            receivedRecords.add(record);
-          }
-        };
-
-    endlessConsumer =
-        new EndlessConsumer<>(
-            executor,
-            properties.getClientId(),
-            properties.getTopic(),
-            kafkaConsumer,
-            keyCountingRebalanceListener,
-            captureOffsetAndExecuteTestHandler);
-
-    endlessConsumer.start();
-  }
-
-  @AfterEach
-  public void deinit()
-  {
-    try
-    {
-      endlessConsumer.stop();
-    }
-    catch (Exception e)
-    {
-      log.info("Exception while stopping the consumer: {}", e.toString());
-    }
-  }
-
-
-  @TestConfiguration
-  @Import(ApplicationConfiguration.class)
-  public static class Configuration
-  {
-    @Bean
-    Serializer<Long> serializer()
-    {
-      return new LongSerializer();
-    }
-
-    @Bean
-    KafkaProducer<String, Bytes> kafkaProducer(ApplicationProperties properties)
-    {
-      Properties props = new Properties();
-      props.put("bootstrap.servers", properties.getBootstrapServer());
-      props.put("linger.ms", 100);
-      props.put("key.serializer", StringSerializer.class.getName());
-      props.put("value.serializer", BytesSerializer.class.getName());
-
-      return new KafkaProducer<>(props);
-    }
-  }
+  @Autowired
+  StateRepository stateRepository;
+
+
+  public ApplicationTests()
+  {
+    super(new ApplicationTestRecordGenerator());
+    ((ApplicationTestRecordGenerator) recordGenerator).tests = this;
+  }
+
+
+  static class ApplicationTestRecordGenerator implements RecordGenerator
+  {
+    ApplicationTests tests;
+
+    final int[] numbers = {1, 77, 33, 2, 66, 666, 11};
+    final String[] dieWilden13 =
+        IntStream
+            .range(1, 14)
+            .mapToObj(i -> "seeräuber-" + i)
+            .toArray(i -> new String[i]);
+    final StringSerializer stringSerializer = new StringSerializer();
+    final Bytes calculateMessage = new Bytes(stringSerializer.serialize(TOPIC, "CALCULATE"));
+
+    int counter = 0;
+
+    Map<String, List<AdderResult>> state;
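+
+    /*
+     * Message-generation scheme: each of the 13 "Seeräuber" clients sends
+     * the numbers 1..n as individual messages, followed by a CALCULATE
+     * message that asks the application to sum them up. The expected result
+     * is recorded locally via the Gauss formula n * (n + 1) / 2, so that
+     * assertBusinessLogic() can compare it with the state the application
+     * has persisted. When logicErrors is requested, fail() swaps selected
+     * message values for -1 in late passes to provoke an application-level
+     * error.
+     */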
+
+    @Override
+    public int generate(
+        boolean poisonPills,
+        boolean logicErrors,
+        Consumer<ProducerRecord<Bytes, Bytes>> messageSender)
+    {
+      counter = 0;
+      state =
+          Arrays
+              .stream(dieWilden13)
+              .collect(Collectors.toMap(
+                  seeräuber -> seeräuber,
+                  seeräuber -> new LinkedList<>()));
+
+      // Per client: the number currently being summed up and the next message to send
+      int[] number = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
+      int[] message = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
+      int next = 0;
+
+      for (int pass = 0; pass < 333; pass++)
+      {
+        for (int i = 0; i < 13; i++)
+        {
+          String seeräuber = dieWilden13[i];
+          Bytes key = new Bytes(stringSerializer.serialize(TOPIC, seeräuber));
+
+          if (message[i] > number[i])
+          {
+            send(key, calculateMessage, fail(logicErrors, pass, counter), messageSender);
+            state.get(seeräuber).add(new AdderResult(number[i], (number[i] + 1) * number[i] / 2));
+            // Pick the next number to calculate
+            number[i] = numbers[next++ % numbers.length];
+            message[i] = 1;
+            log.debug("Seeräuber {} will calculate the sum for {}", seeräuber, number[i]);
+          }
+
+          Bytes value = new Bytes(stringSerializer.serialize(TOPIC, Integer.toString(message[i]++)));
+          send(key, value, fail(logicErrors, pass, counter), messageSender);
+        }
+      }
+
+      return counter;
+    }
+
+    boolean fail(boolean logicErrors, int pass, int counter)
+    {
+      return logicErrors && pass > 300 && counter % 77 == 0;
+    }
+
+    void send(
+        Bytes key,
+        Bytes value,
+        boolean fail,
+        Consumer<ProducerRecord<Bytes, Bytes>> messageSender)
+    {
+      counter++;
+
+      if (fail)
+      {
+        value = new Bytes(stringSerializer.serialize(TOPIC, Integer.toString(-1)));
+      }
+
+      messageSender.accept(new ProducerRecord<>(TOPIC, key, value));
+    }
+
+    @Override
+    public boolean canGeneratePoisonPill()
+    {
+      return false;
+    }
+
+    @Override
+    public void assertBusinessLogic()
+    {
+      for (int i = 0; i < PARTITIONS; i++)
+      {
+        StateDocument stateDocument =
+            tests.stateRepository.findById(Integer.toString(i)).get();
+
+        stateDocument
+            .results
+            .entrySet()
+            .stream()
+            .forEach(entry ->
+            {
+              String user = entry.getKey();
+              List<AdderResult> resultsForUser = entry.getValue();
+
+              for (int j = 0; j < resultsForUser.size(); j++)
+              {
+                if (!(j < state.get(user).size()))
+                {
+                  break;
+                }
+
+                assertThat(resultsForUser.get(j))
+                    .as("Unexpected result %d of user %s", j, user)
+                    .isEqualTo(state.get(user).get(j));
+              }
+
+              assertThat(state.get(user))
+                  .as("More results calculated for user %s than expected", user)
+                  .containsAll(resultsForUser);
+            });
+      }
+    }
+  }
 }
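
Note: the GenericApplicationTests base class that this commit delegates to is not part of the diff. Judging only from the way it is used above, its RecordGenerator contract presumably looks roughly like the following sketch; the default implementations and comments are assumptions for illustration, not the actual source:

// Hypothetical sketch of the RecordGenerator contract implied above --
// not the actual source of GenericApplicationTests.
public interface RecordGenerator
{
  // Produces all test messages via messageSender and returns their count.
  int generate(
      boolean poisonPills,
      boolean logicErrors,
      Consumer<ProducerRecord<Bytes, Bytes>> messageSender);

  // Overridden above to return false, presumably because the generator only
  // emits String payloads, which any byte sequence deserializes to.
  default boolean canGeneratePoisonPill()
  {
    return true;
  }

  // Hook for checking the application's persisted state after the test;
  // overridden above to compare the stored per-user results.
  default void assertBusinessLogic()
  {
  }
}

Factoring the harness into such a base class leaves each example's test class with only the record-generation and verification logic that actually differs, which is exactly what the roughly 160 removed lines above amount to.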