package de.juplo.kafka;
import lombok.extern.slf4j.Slf4j;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.clients.consumer.KafkaConsumer;
-import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.serialization.*;
+import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.common.utils.Bytes;
-import org.junit.jupiter.api.*;
import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
-import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
-import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
-import org.springframework.boot.test.context.TestConfiguration;
-import org.springframework.context.annotation.Bean;
-import org.springframework.context.annotation.Import;
-import org.springframework.kafka.test.context.EmbeddedKafka;
-import org.springframework.test.context.TestPropertySource;
-import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
-import java.time.Clock;
-import java.time.Duration;
import java.util.*;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.function.BiConsumer;
import java.util.function.Consumer;
-import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
-import static de.juplo.kafka.ApplicationTests.PARTITIONS;
-import static de.juplo.kafka.ApplicationTests.TOPIC;
-import static org.assertj.core.api.Assertions.*;
-import static org.awaitility.Awaitility.*;
+import static org.assertj.core.api.Assertions.assertThat;
-@SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
-@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
-@TestPropertySource(
-    properties = {
-        "consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
-        "consumer.topic=" + TOPIC,
-        "consumer.commit-interval=1s",
-        "spring.mongodb.embedded.version=4.4.13" })
-@EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
-@EnableAutoConfiguration
-@AutoConfigureDataMongo
@Slf4j
-class ApplicationTests
+public class ApplicationTests extends GenericApplicationTests<String, Message>
{
-  public static final String TOPIC = "FOO";
-  public static final int PARTITIONS = 10;
-
-
-  StringSerializer stringSerializer = new StringSerializer();
-
-  @Autowired
-  Serializer valueSerializer;
-  @Autowired
-  KafkaProducer<String, Bytes> kafkaProducer;
-  @Autowired
-  KafkaConsumer<String, String> kafkaConsumer;
-  @Autowired
-  PartitionStatisticsRepository partitionStatisticsRepository;
-  @Autowired
-  ApplicationProperties properties;
-  @Autowired
-  ExecutorService executor;
-  @Autowired
-  PartitionStatisticsRepository repository;
-
-  Consumer<ConsumerRecord<String, String>> testHandler;
-  EndlessConsumer endlessConsumer;
-  Map<TopicPartition, Long> oldOffsets;
-  Map<TopicPartition, Long> newOffsets;
-  Set<ConsumerRecord<String, String>> receivedRecords;
-
-
-  /** Tests methods */
-
-  @Test
-  void commitsCurrentOffsetsOnSuccess() throws ExecutionException, InterruptedException
-  {
-    send100Messages(i -> new Bytes(valueSerializer.serialize(TOPIC, i)));
-
-    await("100 records received")
-        .atMost(Duration.ofSeconds(30))
-        .until(() -> receivedRecords.size() >= 100);
-
-    await("Offsets committed")
-        .atMost(Duration.ofSeconds(10))
-        .untilAsserted(() ->
-        {
-          checkSeenOffsetsForProgress();
-          compareToCommitedOffsets(newOffsets);
-        });
-
-    assertThatExceptionOfType(IllegalStateException.class)
-        .isThrownBy(() -> endlessConsumer.exitStatus())
-        .describedAs("Consumer should still be running");
-  }
-
-
-  /** Helper methods for the verification of expectations */
-
-  void compareToCommitedOffsets(Map<TopicPartition, Long> offsetsToCheck)
-  {
-    doForCurrentOffsets((tp, offset) ->
-    {
-      Long expected = offsetsToCheck.get(tp) + 1;
-      log.debug("Checking, if the offset for {} is {}", tp, expected);
-      assertThat(offset)
-          .describedAs("Committed offset corresponds to the offset of the consumer")
-          .isEqualTo(expected);
-    });
-  }
-
-  void checkSeenOffsetsForProgress()
-  {
-    // Be sure, that some messages were consumed...!
-    Set<TopicPartition> withProgress = new HashSet<>();
-    partitions().forEach(tp ->
-    {
-      Long oldOffset = oldOffsets.get(tp);
-      Long newOffset = newOffsets.get(tp);
-      if (!oldOffset.equals(newOffset))
-      {
-        log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
-        withProgress.add(tp);
-      }
-    });
-    assertThat(withProgress)
-        .describedAs("Some offsets must have changed, compared to the old offset-positions")
-        .isNotEmpty();
-  }
-
-
-  /** Helper methods for setting up and running the tests */
-
-  void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
-  {
-    partitions().forEach(tp ->
-    {
-      String partition = Integer.toString(tp.partition());
-      Optional<Long> offset = partitionStatisticsRepository.findById(partition).map(document -> document.offset);
-      consumer.accept(tp, offset.orElse(0l));
-    });
-  }
-
-  List<TopicPartition> partitions()
-  {
-    return
-        IntStream
-            .range(0, PARTITIONS)
-            .mapToObj(partition -> new TopicPartition(TOPIC, partition))
-            .collect(Collectors.toList());
-  }
-
-
-  void send100Messages(Function<Long, Bytes> messageGenerator)
-  {
-    long i = 0;
-
-    for (int partition = 0; partition < 10; partition++)
-    {
-      for (int key = 0; key < 10; key++)
-      {
-        Bytes value = messageGenerator.apply(++i);
-
-        ProducerRecord<String, Bytes> record =
-            new ProducerRecord<>(
-                TOPIC,
-                partition,
-                Integer.toString(key%2),
-                value);
-
-        kafkaProducer.send(record, (metadata, e) ->
-        {
-          if (metadata != null)
-          {
-            log.debug(
-                "{}|{} - {}={}",
-                metadata.partition(),
-                metadata.offset(),
-                record.key(),
-                record.value());
-          }
-          else
-          {
-            log.warn(
-                "Exception for {}={}: {}",
-                record.key(),
-                record.value(),
-                e.toString());
-          }
-        });
-      }
-    }
-  }
-
-
-  @BeforeEach
-  public void init()
-  {
-    testHandler = record -> {};
-
-    oldOffsets = new HashMap<>();
-    newOffsets = new HashMap<>();
-    receivedRecords = new HashSet<>();
-
-    doForCurrentOffsets((tp, offset) ->
-    {
-      oldOffsets.put(tp, offset - 1);
-      newOffsets.put(tp, offset - 1);
-    });
-
-    Consumer<ConsumerRecord<String, String>> captureOffsetAndExecuteTestHandler =
-        record ->
-        {
-          newOffsets.put(
-              new TopicPartition(record.topic(), record.partition()),
-              record.offset());
-          receivedRecords.add(record);
-          testHandler.accept(record);
-        };
-
-    endlessConsumer =
-        new EndlessConsumer(
-            executor,
-            repository,
-            properties.getClientId(),
-            properties.getTopic(),
-            Clock.systemDefaultZone(),
-            properties.getCommitInterval(),
-            kafkaConsumer);
-
-    endlessConsumer.start();
-  }
-
-  @AfterEach
-  public void deinit()
-  {
-    try
-    {
-      endlessConsumer.stop();
-    }
-    catch (Exception e)
-    {
-      log.info("Exception while stopping the consumer: {}", e.toString());
-    }
-  }
-
-
-  @TestConfiguration
-  @Import(ApplicationConfiguration.class)
-  public static class Configuration
-  {
-    @Bean
-    Serializer<Long> serializer()
-    {
-      return new LongSerializer();
-    }
-
-    @Bean
-    KafkaProducer<String, Bytes> kafkaProducer(ApplicationProperties properties)
-    {
-      Properties props = new Properties();
-      props.put("bootstrap.servers", properties.getBootstrapServer());
-      props.put("linger.ms", 100);
-      props.put("key.serializer", StringSerializer.class.getName());
-      props.put("value.serializer", BytesSerializer.class.getName());
-
-      return new KafkaProducer<>(props);
-    }
-  }
+  /** Repository that holds the state of the adder, used to verify the results */
+  @Autowired
+  StateRepository stateRepository;
+
+
+  public ApplicationTests()
+  {
+    super(new ApplicationTestRecordGenerator());
+    ((ApplicationTestRecordGenerator) recordGenerator).tests = this;
+  }
+
+
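+  /**
+   * Generates the test-load for the adder: 13 users ("seeräuber-1" to
+   * "seeräuber-13") send sequences of numbers as ADD-messages and trigger the
+   * summation with a CALC-message; the expected results are recorded along
+   * the way, so that assertBusinessLogic() can compare them with the state
+   * that the application has actually stored.
+   */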
+  static class ApplicationTestRecordGenerator implements RecordGenerator
+  {
+    ApplicationTests tests;
+
+    // Summation-targets, handed out round-robin to the users
+    final int[] numbers = {1, 77, 33, 2, 66, 666, 11};
+    // The 13 users: seeräuber-1 .. seeräuber-13
+    final String[] dieWilden13 =
+        IntStream
+            .range(1, 14)
+            .mapToObj(i -> "seeräuber-" + i)
+            .toArray(String[]::new);
+    final StringSerializer stringSerializer = new StringSerializer();
+    // Message that triggers the calculation of the current sum
+    final Bytes calculateMessage = new Bytes(stringSerializer.serialize(TOPIC, "{}"));
+
+    int counterMessages;
+    int counterPoisonPills;
+    int counterLogicErrors;
+
+    // Expected results per user, recorded while the test-load is generated
+    Map<String, List<AdderResult>> state;
+
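+    /**
+     * Interleaves the messages of the 13 users over 333 passes: each pass
+     * sends the next summand for every user as an ADD-message; once all
+     * summands for the current target have been sent, a CALC-message is
+     * sent, the expected result is recorded in {@link #state} and the next
+     * target is chosen from {@link #numbers}. Errors are only injected after
+     * pass 300 (see poisonPill() and logicError()), so that valid results
+     * exist in any case.
+     */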
+    @Override
+    public void generate(
+        boolean poisonPills,
+        boolean logicErrors,
+        Consumer<ProducerRecord<Bytes, Bytes>> messageSender)
+    {
+      counterMessages = 0;
+      counterPoisonPills = 0;
+      counterLogicErrors = 0;
+
+      state =
+          Arrays
+              .stream(dieWilden13)
+              .collect(Collectors.toMap(
+                  seeräuber -> seeräuber,
+                  seeräuber -> new LinkedList<>()));
+
+      // Current summation-target and next summand for each of the 13 users
+      int[] number = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
+      int[] message = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
+      int next = 0;
+
+      for (int pass = 0; pass < 333; pass++)
+      {
+        for (int i = 0; i < 13; i++)
+        {
+          String seeräuber = dieWilden13[i];
+          Bytes key = new Bytes(stringSerializer.serialize(TOPIC, seeräuber));
+
+          if (message[i] > number[i])
+          {
+            // All summands have been sent: trigger the calculation...
+            send(
+                key,
+                calculateMessage,
+                Message.Type.CALC,
+                poisonPill(poisonPills, pass, counterMessages),
+                logicError(logicErrors, pass, counterMessages),
+                messageSender);
+            // ...and record the expected result (Gauss: n * (n + 1) / 2)
+            state.get(seeräuber).add(new AdderResult(number[i], (number[i] + 1) * number[i] / 2));
+            // Pick the next number to calculate
+            number[i] = numbers[next++ % numbers.length];
+            message[i] = 1;
+            log.debug("Pirate {} will calculate the sum for {}", seeräuber, number[i]);
+          }
+
+          send(
+              key,
+              new Bytes(stringSerializer.serialize(TOPIC, "{\"next\":" + message[i]++ + "}")),
+              Message.Type.ADD,
+              poisonPill(poisonPills, pass, counterMessages),
+              logicError(logicErrors, pass, counterMessages),
+              messageSender);
+        }
+      }
+    }
+
+    @Override
+    public int getNumberOfMessages()
+    {
+      return counterMessages;
+    }
+
+    @Override
+    public int getNumberOfPoisonPills()
+    {
+      return counterPoisonPills;
+    }
+
+    @Override
+    public int getNumberOfLogicErrors()
+    {
+      return counterLogicErrors;
+    }
+
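+    // Error-injection only starts after a warm-up phase of 300 passes:
+    // every 99th message becomes a poison pill, every 77th a logic-error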
+    boolean poisonPill(boolean poisonPills, int pass, int counter)
+    {
+      return poisonPills && pass > 300 && counter % 99 == 0;
+    }
+
+    boolean logicError(boolean logicErrors, int pass, int counter)
+    {
+      return logicErrors && pass > 300 && counter % 77 == 0;
+    }
+
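+    /**
+     * Counts the generated message and hands it to the message-sender; if
+     * requested, the value is replaced by an unparsable poison pill or by
+     * {"next":-1}, which provokes a logic-error in the business-logic. The
+     * message type is sent along in the __TypeId__ header, which Spring
+     * Kafka's JSON deserialization evaluates for the type-mapping.
+     */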
+    void send(
+        Bytes key,
+        Bytes value,
+        Message.Type type,
+        boolean poisonPill,
+        boolean logicError,
+        Consumer<ProducerRecord<Bytes, Bytes>> messageSender)
+    {
+      counterMessages++;
+
+      if (logicError)
+      {
+        value = new Bytes(stringSerializer.serialize(TOPIC, "{\"next\":-1}"));
+        counterLogicErrors++;
+      }
+      if (poisonPill)
+      {
+        value = new Bytes("BOOM!".getBytes());
+        counterPoisonPills++;
+      }
+
+      ProducerRecord<Bytes, Bytes> record = new ProducerRecord<>(TOPIC, key, value);
+      record.headers().add("__TypeId__", type.toString().getBytes());
+      messageSender.accept(record);
+    }
+
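+    /**
+     * Verifies the state that is stored for each partition against the
+     * locally recorded expectations: every stored result has to match the
+     * expected result at the same position, and no result may have been
+     * stored that was not expected; missing results are tolerated, because
+     * not all messages may have been processed when the test was stopped.
+     */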
+    @Override
+    public void assertBusinessLogic()
+    {
+      for (int i = 0; i < PARTITIONS; i++)
+      {
+        StateDocument stateDocument =
+            tests.stateRepository.findById(Integer.toString(i)).get();
+
+        stateDocument
+            .results
+            .entrySet()
+            .forEach(entry ->
+            {
+              String user = entry.getKey();
+              List<AdderResult> resultsForUser = entry.getValue();
+
+              for (int j = 0; j < resultsForUser.size() && j < state.get(user).size(); j++)
+              {
+                assertThat(resultsForUser.get(j))
+                    .as("Unexpected result of calculation %d for user %s", j, user)
+                    .isEqualTo(state.get(user).get(j));
+              }
+
+              assertThat(state.get(user))
+                  .as("More results were calculated for user %s than expected", user)
+                  .containsAll(resultsForUser);
+            });
+      }
+    }
+  }
}