kafka-topics --bootstrap-server kafka:9092 --delete --if-exists --topic in
kafka-topics --bootstrap-server kafka:9092 --delete --if-exists --topic out
kafka-topics --bootstrap-server kafka:9092 --create --topic in --partitions 2 --replication-factor 3 --config min.insync.replicas=2
- kafka-topics --bootstrap-server kafka:9092 --create --topic out --partitions 1 --replication-factor 1
+ kafka-topics --bootstrap-server kafka:9092 --create --topic out --partitions 2 --replication-factor 3 --config min.insync.replicas=2
kafka-topics --bootstrap-server kafka:9092 --describe --topic in
kafka-topics --bootstrap-server kafka:9092 --describe --topic out
"
sumup.adder.client-id: adder
spring.data.mongodb.uri: mongodb://juplo:training@mongo:27017
spring.data.mongodb.database: juplo
+
+ # Load generators: six simulated users, each looping forever and POSTing
+ # the number 666 to its own gateway endpoint at a staggered interval
+ # (1s..6s), so the adder receives a continuous, interleaved message stream.
+ peter:
+ image: juplo/toolbox
+ command: >
+ bash -c "
+ while [[ true ]];
+ do
+ echo 666 | http -v gateway:8080/peter;
+ sleep 1;
+ done
+ "
+ ute:
+ image: juplo/toolbox
+ command: >
+ bash -c "
+ while [[ true ]];
+ do
+ echo 666 | http -v gateway:8080/ute;
+ sleep 2;
+ done
+ "
+ franz:
+ image: juplo/toolbox
+ command: >
+ bash -c "
+ while [[ true ]];
+ do
+ echo 666 | http -v gateway:8080/franz;
+ sleep 3;
+ done
+ "
+ beate:
+ image: juplo/toolbox
+ command: >
+ bash -c "
+ while [[ true ]];
+ do
+ echo 666 | http -v gateway:8080/beate;
+ sleep 4;
+ done
+ "
+ klaus:
+ image: juplo/toolbox
+ command: >
+ bash -c "
+ while [[ true ]];
+ do
+ echo 666 | http -v gateway:8080/klaus;
+ sleep 5;
+ done
+ "
+ uschi:
+ image: juplo/toolbox
+ command: >
+ bash -c "
+ while [[ true ]];
+ do
+ echo 666 | http -v gateway:8080/uschi;
+ sleep 6;
+ done
+ "
package de.juplo.kafka;
+import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.common.utils.Bytes;
import static org.assertj.core.api.Assertions.assertThat;
+@Slf4j
public class ApplicationTests extends GenericApplicationTests<String, String>
{
@Autowired
- AdderResults results;
+ StateRepository stateRepository;
public ApplicationTests()
{
ApplicationTests tests;
- final int[] numbers = {1, 7, 3, 2, 33, 6, 11};
+ final int[] numbers = {1, 77, 33, 2, 66, 666, 11};
final String[] dieWilden13 =
IntStream
.range(1, 14)
seeräuber -> seeräuber,
seeräuber -> new LinkedList()));
- for (int i = 0; i < 33; i++)
- {
- String seeräuber = dieWilden13[i % 13];
- int number = numbers[i % 7];
-
- Bytes key = new Bytes(stringSerializer.serialize(TOPIC, seeräuber));
+ int number[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
+ int message[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
+ int next = 0;
- for (int message = 1; message <= number; message++)
+ for (int pass = 0; pass < 333; pass++)
+ {
+ for (int i = 0; i<13; i++)
{
- Bytes value = new Bytes(stringSerializer.serialize(TOPIC, Integer.toString(message)));
- send(key, value, logicErrors, messageSender);
- }
- send(key, calculateMessage, logicErrors, messageSender);
+ String seeräuber = dieWilden13[i];
+ Bytes key = new Bytes(stringSerializer.serialize(TOPIC, seeräuber));
- state.get(seeräuber).add(new AdderResult(number, (number + 1) * number / 2));
+ if (message[i] > number[i])
+ {
+ send(key, calculateMessage, fail(logicErrors, pass, counter), messageSender);
+ state.get(seeräuber).add(new AdderResult(number[i], (number[i] + 1) * number[i] / 2));
+ // Pick next number to calculate
+ number[i] = numbers[next++%numbers.length];
+ message[i] = 1;
+ log.debug("Seeräuber {} will die Summe für {} berechnen", seeräuber, number[i]);
+ }
+
+ Bytes value = new Bytes(stringSerializer.serialize(TOPIC, Integer.toString(message[i]++)));
+ send(key, value, fail(logicErrors, pass, counter), messageSender);
+ }
}
return counter;
}
+ // Decides whether the record about to be sent should be turned into a
+ // poison pill (value -1, see send()): only when logic errors were
+ // requested, only after a warm-up of 300 passes, and only for records
+ // whose running counter is a multiple of 77.
+ // NOTE(review): counter is evaluated BEFORE send() increments it — so the
+ // match is on counter values 0, 77, 154, … — confirm this is intended.
+ boolean fail (boolean logicErrors, int pass, int counter)
+ {
+ return logicErrors && pass > 300 && counter%77 == 0;
+ }
+
void send(
Bytes key,
Bytes value,
- boolean logicErrors,
+ boolean fail,
Consumer<ProducerRecord<Bytes, Bytes>> messageSender)
{
counter++;
- if (counter == 77)
+ if (fail)
{
- if (logicErrors)
- {
- value = new Bytes(stringSerializer.serialize(TOPIC, Integer.toString(-1)));
- }
+ value = new Bytes(stringSerializer.serialize(TOPIC, Integer.toString(-1)));
}
messageSender.accept(new ProducerRecord<>(TOPIC, key, value));
@Override
public void assertBusinessLogic()
{
- tests.results
- .getState()
- .values()
- .stream()
- .flatMap(map -> map.entrySet().stream())
- .forEach(entry ->
- {
- String user = entry.getKey();
- List<AdderResult> resultsForUser = entry.getValue();
-
- assertThat(state.get(user))
- .describedAs("Unexpected results for user {}", user)
- .containsExactlyElementsOf(resultsForUser);
- });
+ for (int i=0; i<PARTITIONS; i++)
+ {
+ StateDocument stateDocument =
+ tests.stateRepository.findById(Integer.toString(i)).get();
+
+ stateDocument
+ .results.entrySet().stream()
+ .forEach(entry ->
+ {
+ String user = entry.getKey();
+ List<AdderResult> resultsForUser = entry.getValue();
+
+ assertThat(state.get(user))
+ .as("Unexpected results for user %s", user)
+ .containsExactlyElementsOf(resultsForUser);
+ });
+ }
}
}
}
package de.juplo.kafka;
+import com.mongodb.client.MongoClient;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.junit.jupiter.api.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
+import org.springframework.boot.autoconfigure.mongo.MongoProperties;
import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
import org.springframework.boot.test.context.TestConfiguration;
properties = {
"sumup.adder.bootstrap-server=${spring.embedded.kafka.brokers}",
"sumup.adder.topic=" + TOPIC,
- "sumup.adder.commit-interval=1s",
+ "sumup.adder.commit-interval=500ms",
"spring.mongodb.embedded.version=4.4.13" })
@EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
@EnableAutoConfiguration
@Autowired
StateRepository stateRepository;
@Autowired
+ MongoClient mongoClient;
+ @Autowired
+ MongoProperties mongoProperties;
+ @Autowired
PollIntervalAwareConsumerRebalanceListener rebalanceListener;
@Autowired
RecordHandler<K, V> recordHandler;
checkSeenOffsetsForProgress();
compareToCommitedOffsets(oldOffsets);
- assertThat(receivedRecords.size())
- .describedAs("Received not all sent events")
- .isLessThan(numberOfGeneratedMessages);
assertThatNoException()
.describedAs("Consumer should not be running")
props.put("value.deserializer", BytesDeserializer.class.getName());
offsetConsumer = new KafkaConsumer<>(props);
+ mongoClient.getDatabase(mongoProperties.getDatabase()).drop();
seekToEnd();
oldOffsets = new HashMap<>();