package de.juplo.kafka;
+import com.mongodb.client.MongoClient;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.*;
import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
+import org.springframework.boot.autoconfigure.mongo.MongoProperties;
+import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.context.annotation.Import;
@SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
@TestPropertySource(
properties = {
- "consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
- "consumer.topic=" + TOPIC,
- "consumer.commit-interval=1s" })
+ "sumup.adder.bootstrap-server=${spring.embedded.kafka.brokers}",
+ "sumup.adder.topic=" + TOPIC,
+ "sumup.adder.commit-interval=500ms",
+ "spring.mongodb.embedded.version=4.4.13" })
@EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
+@EnableAutoConfiguration
+@AutoConfigureDataMongo
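+// the adder keeps its state in MongoDB: boot the auto-configuration and an embedded MongoDB instance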
@Slf4j
abstract class GenericApplicationTests<K, V>
{
ApplicationProperties properties;
@Autowired
ExecutorService executor;
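+ // additional beans needed by the MongoDB-backed state handling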
+ @Autowired
+ StateRepository stateRepository;
+ @Autowired
+ MongoClient mongoClient;
+ @Autowired
+ MongoProperties mongoProperties;
+ @Autowired
+ PollIntervalAwareConsumerRebalanceListener rebalanceListener;
+ @Autowired
+ RecordHandler<K, V> recordHandler;
KafkaProducer<Bytes, Bytes> testRecordProducer;
KafkaConsumer<Bytes, Bytes> offsetConsumer;
@Test
void commitsCurrentOffsetsOnSuccess()
{
- recordGenerator.generate(100, Set.of(), Set.of(), messageSender);
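+ // the generator now decides how many records it produces and returns that count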
+ int numberOfGeneratedMessages =
+ recordGenerator.generate(false, false, messageSender);
- await("100 records received")
+ await(numberOfGeneratedMessages + " records received")
.atMost(Duration.ofSeconds(30))
.pollInterval(Duration.ofSeconds(1))
- .until(() -> receivedRecords.size() >= 100);
+ .until(() -> receivedRecords.size() >= numberOfGeneratedMessages);
await("Offsets committed")
.atMost(Duration.ofSeconds(10))
assertThatExceptionOfType(IllegalStateException.class)
.isThrownBy(() -> endlessConsumer.exitStatus())
.describedAs("Consumer should still be running");
+
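+ // new hook: the generator verifies the outcome of its business logic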
+ recordGenerator.assertBusinessLogic();
}
@Test
@SkipWhenErrorCannotBeGenerated(poisonPill = true)
void commitsOffsetOfErrorForReprocessingOnDeserializationError()
{
- recordGenerator.generate(100, Set.of(77), Set.of(), messageSender);
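+ // first flag true: mix a poison pill into the generated records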
+ int numberOfGeneratedMessages =
+ recordGenerator.generate(true, false, messageSender);
await("Consumer failed")
.atMost(Duration.ofSeconds(30))
compareToCommitedOffsets(newOffsets);
assertThat(receivedRecords.size())
.describedAs("Received not all sent events")
- .isLessThan(100);
+ .isLessThan(numberOfGeneratedMessages);
assertThatNoException()
.describedAs("Consumer should not be running")
assertThat(endlessConsumer.exitStatus())
.describedAs("Consumer should have exited abnormally")
.containsInstanceOf(RecordDeserializationException.class);
+
+ recordGenerator.assertBusinessLogic();
}
@Test
@SkipWhenErrorCannotBeGenerated(logicError = true)
void doesNotCommitOffsetsOnLogicError()
{
- recordGenerator.generate(100, Set.of(), Set.of(77), messageSender);
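+ // second flag true: mix in a record that triggers an error in the business logic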
+ int numberOfGeneratedMessages =
+ recordGenerator.generate(false, true, messageSender);
await("Consumer failed")
.atMost(Duration.ofSeconds(30))
checkSeenOffsetsForProgress();
compareToCommitedOffsets(oldOffsets);
- assertThat(receivedRecords.size())
- .describedAs("Received not all sent events")
- .isLessThan(100);
assertThatNoException()
.describedAs("Consumer should not be running")
assertThat(endlessConsumer.exitStatus())
.describedAs("Consumer should have exited abnormally")
.containsInstanceOf(RuntimeException.class);
+
+ recordGenerator.assertBusinessLogic();
}
void seekToEnd()
{
offsetConsumer.assign(partitions());
- offsetConsumer.seekToEnd(partitions());
partitions().forEach(tp ->
{
- // seekToEnd() works lazily: it only takes effect on poll()/position()
Long offset = offsetConsumer.position(tp);
log.info("New position for {}: {}", tp, offset);
+ Integer partition = tp.partition();
+ StateDocument document =
+ stateRepository
+ .findById(partition.toString())
+ .orElse(new StateDocument(partition));
+ document.offset = offset;
+ stateRepository.save(document);
});
- // The new positions must be commited!
- offsetConsumer.commitSync();
offsetConsumer.unsubscribe();
}
void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
{
- offsetConsumer.assign(partitions());
- partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
- offsetConsumer.unsubscribe();
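+ // read the offsets from the MongoDB state documents instead of asking Kafka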
+ partitions().forEach(tp ->
+ {
+ String partition = Integer.toString(tp.partition());
+ Optional<Long> offset = stateRepository.findById(partition).map(document -> document.offset);
+ consumer.accept(tp, offset.orElse(0L));
+ });
}
List<TopicPartition> partitions()
public interface RecordGenerator
{
- void generate(
- int numberOfMessages,
- Set<Integer> poisonPills,
- Set<Integer> logicErrors,
+ int generate(
+ boolean poisonPills,
+ boolean logicErrors,
Consumer<ProducerRecord<Bytes, Bytes>> messageSender);
- default boolean canGeneratePoisionPill()
+ default boolean canGeneratePoisonPill()
{
return true;
}
default boolean canGenerateLogicError()
{
return true;
}
+
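+ // no-op by default; generators with verifiable business logic override this method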
+ default void assertBusinessLogic()
+ {
+ log.debug("No business-logic to assert");
+ }
}
void sendMessage(ProducerRecord<Bytes, Bytes> record)
props.put("value.deserializer", BytesDeserializer.class.getName());
offsetConsumer = new KafkaConsumer<>(props);
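+ // drop the state database, so that documents from earlier test runs cannot distort the result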
+ mongoClient.getDatabase(mongoProperties.getDatabase()).drop();
seekToEnd();
oldOffsets = new HashMap<>();
newOffsets.put(tp, offset - 1);
});
- Consumer<ConsumerRecord<K, V>> captureOffsetAndExecuteTestHandler =
- record ->
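+ // wrap the application's RecordHandler, so that the test observes every record without bypassing the business logic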
+ TestRecordHandler<K, V> captureOffsetAndExecuteTestHandler =
+ new TestRecordHandler<K, V>(recordHandler)
{
- newOffsets.put(
- new TopicPartition(record.topic(), record.partition()),
- record.offset());
- receivedRecords.add(record);
- consumer.accept(record);
+ @Override
+ public void onNewRecord(ConsumerRecord<K, V> record)
+ {
+ newOffsets.put(
+ new TopicPartition(record.topic(), record.partition()),
+ record.offset());
+ receivedRecords.add(record);
+ }
};
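+ // the rebalance listener is now an explicit collaborator of the EndlessConsumer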
endlessConsumer =
properties.getClientId(),
properties.getTopic(),
kafkaConsumer,
+ rebalanceListener,
captureOffsetAndExecuteTestHandler);
endlessConsumer.start();