From: Kai Moritz Date: Wed, 17 Aug 2022 20:51:10 +0000 (+0200) Subject: ROT: Korrigierten/Verbesserten Test und Überarbeitetes Setup gemerged X-Git-Tag: sumup-adder---lvm-2-tage~10 X-Git-Url: https://juplo.de/gitweb/?a=commitdiff_plain;h=d2eb370acf1a2195c36421ffc471f67cb4a8e86e;hp=ecadcd52ab7fe117fef3450a27b9d6ecdf621716;p=demos%2Fkafka%2Ftraining ROT: Korrigierten/Verbesserten Test und Überarbeitetes Setup gemerged * Merge branch 'sumup-adder--ohne--stored-offsets' into sumup-adder. * In dem gemergten Branch ist es nicht wichtig, wann genau die Mongo-DB zwischen den Tests zurückgesetzt wird, da sie nur den Zustand des Consumers enthält. * Wenn die Offsets mit in der Mongo-DB gespeichert werden, ist es wesentlich, zu welchem Zeitpunkt während der Test-Vorbereitung diese zurückgesetzt wird! * ROT: Der verbesserte/verschärfte Test deckt Fehler in der Test-Logik auf. --- diff --git a/README.sh b/README.sh index 2845ab1..05178a3 100755 --- a/README.sh +++ b/README.sh @@ -32,13 +32,15 @@ while ! [[ $(http 0:8080/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "Wait while ! [[ $(http 0:8081/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "Waiting for requests..."; sleep 1; done while ! 
[[ $(http 0:8082/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "Waiting for adder..."; sleep 1; done -echo 66 | http -v :8080/foo -echo 666 | http -v :8080/bar +docker-compose up -d peter ute franz beate klaus uschi -sleep 5 +http -v :8082/results +sleep 3 +http -v :8082/results -http -v :8082/state -http -v :8082/state/foo -http -v :8082/state/bar +docker-compose kill -s 9 adder -docker-compose logs adder +docker-compose up -d adder +sleep 3 +docker-compose kill -s 9 peter ute franz beate klaus uschi +http -v :8082/results diff --git a/docker-compose.yml b/docker-compose.yml index fec5bca..ba2566e 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -86,7 +86,7 @@ services: kafka-topics --bootstrap-server kafka:9092 --delete --if-exists --topic in kafka-topics --bootstrap-server kafka:9092 --delete --if-exists --topic out kafka-topics --bootstrap-server kafka:9092 --create --topic in --partitions 2 --replication-factor 3 --config min.insync.replicas=2 - kafka-topics --bootstrap-server kafka:9092 --create --topic out --partitions 1 --replication-factor 1 + kafka-topics --bootstrap-server kafka:9092 --create --topic out --partitions 2 --replication-factor 3 --config min.insync.replicas=2 kafka-topics --bootstrap-server kafka:9092 --describe --topic in kafka-topics --bootstrap-server kafka:9092 --describe --topic out " @@ -124,3 +124,64 @@ services: sumup.adder.client-id: adder spring.data.mongodb.uri: mongodb://juplo:training@mongo:27017 spring.data.mongodb.database: juplo + + peter: + image: juplo/toolbox + command: > + bash -c " + while [[ true ]]; + do + echo 666 | http -v gateway:8080/peter; + sleep 1; + done + " + ute: + image: juplo/toolbox + command: > + bash -c " + while [[ true ]]; + do + echo 666 | http -v gateway:8080/ute; + sleep 2; + done + " + franz: + image: juplo/toolbox + command: > + bash -c " + while [[ true ]]; + do + echo 666 | http -v gateway:8080/franz; + sleep 3; + done + " + beate: + image: juplo/toolbox + command: > + bash -c " + 
while [[ true ]]; + do + echo 666 | http -v gateway:8080/beate; + sleep 4; + done + " + klaus: + image: juplo/toolbox + command: > + bash -c " + while [[ true ]]; + do + echo 666 | http -v gateway:8080/klaus; + sleep 5; + done + " + uschi: + image: juplo/toolbox + command: > + bash -c " + while [[ true ]]; + do + echo 666 | http -v gateway:8080/uschi; + sleep 6; + done + " diff --git a/src/test/java/de/juplo/kafka/ApplicationTests.java b/src/test/java/de/juplo/kafka/ApplicationTests.java index 1336050..740c09c 100644 --- a/src/test/java/de/juplo/kafka/ApplicationTests.java +++ b/src/test/java/de/juplo/kafka/ApplicationTests.java @@ -1,5 +1,6 @@ package de.juplo.kafka; +import lombok.extern.slf4j.Slf4j; import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.common.serialization.StringSerializer; import org.apache.kafka.common.utils.Bytes; @@ -13,10 +14,11 @@ import java.util.stream.IntStream; import static org.assertj.core.api.Assertions.assertThat; +@Slf4j public class ApplicationTests extends GenericApplicationTests { @Autowired - AdderResults results; + StateRepository stateRepository; public ApplicationTests() @@ -30,7 +32,7 @@ public class ApplicationTests extends GenericApplicationTests { ApplicationTests tests; - final int[] numbers = {1, 7, 3, 2, 33, 6, 11}; + final int[] numbers = {1, 77, 33, 2, 66, 666, 11}; final String[] dieWilden13 = IntStream .range(1, 14) @@ -57,40 +59,51 @@ public class ApplicationTests extends GenericApplicationTests seeräuber -> seeräuber, seeräuber -> new LinkedList())); - for (int i = 0; i < 33; i++) - { - String seeräuber = dieWilden13[i % 13]; - int number = numbers[i % 7]; - - Bytes key = new Bytes(stringSerializer.serialize(TOPIC, seeräuber)); + int number[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; + int message[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; + int next = 0; - for (int message = 1; message <= number; message++) + for (int pass = 0; pass < 333; pass++) + { + for (int i = 0; i<13; 
i++) { - Bytes value = new Bytes(stringSerializer.serialize(TOPIC, Integer.toString(message))); - send(key, value, logicErrors, messageSender); - } - send(key, calculateMessage, logicErrors, messageSender); + String seeräuber = dieWilden13[i]; + Bytes key = new Bytes(stringSerializer.serialize(TOPIC, seeräuber)); - state.get(seeräuber).add(new AdderResult(number, (number + 1) * number / 2)); + if (message[i] > number[i]) + { + send(key, calculateMessage, fail(logicErrors, pass, counter), messageSender); + state.get(seeräuber).add(new AdderResult(number[i], (number[i] + 1) * number[i] / 2)); + // Pick next number to calculate + number[i] = numbers[next++%numbers.length]; + message[i] = 1; + log.debug("Seeräuber {} will die Summe für {} berechnen", seeräuber, number[i]); + } + + Bytes value = new Bytes(stringSerializer.serialize(TOPIC, Integer.toString(message[i]++))); + send(key, value, fail(logicErrors, pass, counter), messageSender); + } } return counter; } + boolean fail (boolean logicErrors, int pass, int counter) + { + return logicErrors && pass > 300 && counter%77 == 0; + } + void send( Bytes key, Bytes value, - boolean logicErrors, + boolean fail, Consumer> messageSender) { counter++; - if (counter == 77) + if (fail) { - if (logicErrors) - { - value = new Bytes(stringSerializer.serialize(TOPIC, Integer.toString(-1))); - } + value = new Bytes(stringSerializer.serialize(TOPIC, Integer.toString(-1))); } messageSender.accept(new ProducerRecord<>(TOPIC, key, value)); @@ -105,20 +118,23 @@ public class ApplicationTests extends GenericApplicationTests @Override public void assertBusinessLogic() { - tests.results - .getState() - .values() - .stream() - .flatMap(map -> map.entrySet().stream()) - .forEach(entry -> - { - String user = entry.getKey(); - List resultsForUser = entry.getValue(); - - assertThat(state.get(user)) - .describedAs("Unexpected results for user {}", user) - .containsExactlyElementsOf(resultsForUser); - }); + for (int i=0; i + { + String user = 
entry.getKey(); + List resultsForUser = entry.getValue(); + + assertThat(state.get(user)) + .as("Unexpected results for user %s", user) + .containsExactlyElementsOf(resultsForUser); + }); + } } } } diff --git a/src/test/java/de/juplo/kafka/GenericApplicationTests.java b/src/test/java/de/juplo/kafka/GenericApplicationTests.java index 9a6f812..b019373 100644 --- a/src/test/java/de/juplo/kafka/GenericApplicationTests.java +++ b/src/test/java/de/juplo/kafka/GenericApplicationTests.java @@ -1,5 +1,6 @@ package de.juplo.kafka; +import com.mongodb.client.MongoClient; import lombok.extern.slf4j.Slf4j; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.KafkaConsumer; @@ -12,6 +13,7 @@ import org.apache.kafka.common.utils.Bytes; import org.junit.jupiter.api.*; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.autoconfigure.EnableAutoConfiguration; +import org.springframework.boot.autoconfigure.mongo.MongoProperties; import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo; import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer; import org.springframework.boot.test.context.TestConfiguration; @@ -39,7 +41,7 @@ import static org.awaitility.Awaitility.*; properties = { "sumup.adder.bootstrap-server=${spring.embedded.kafka.brokers}", "sumup.adder.topic=" + TOPIC, - "sumup.adder.commit-interval=1s", + "sumup.adder.commit-interval=500ms", "spring.mongodb.embedded.version=4.4.13" }) @EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS) @EnableAutoConfiguration @@ -62,6 +64,10 @@ abstract class GenericApplicationTests @Autowired StateRepository stateRepository; @Autowired + MongoClient mongoClient; + @Autowired + MongoProperties mongoProperties; + @Autowired PollIntervalAwareConsumerRebalanceListener rebalanceListener; @Autowired RecordHandler recordHandler; @@ -173,9 +179,6 @@ abstract class GenericApplicationTests 
checkSeenOffsetsForProgress(); compareToCommitedOffsets(oldOffsets); - assertThat(receivedRecords.size()) - .describedAs("Received not all sent events") - .isLessThan(numberOfGeneratedMessages); assertThatNoException() .describedAs("Consumer should not be running") @@ -329,6 +332,7 @@ abstract class GenericApplicationTests props.put("value.deserializer", BytesDeserializer.class.getName()); offsetConsumer = new KafkaConsumer<>(props); + mongoClient.getDatabase(mongoProperties.getDatabase()).drop(); seekToEnd(); oldOffsets = new HashMap<>();