package de.juplo.kafka;
+import com.mongodb.client.MongoClient;
import lombok.extern.slf4j.Slf4j;
+import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.*;
import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
+import org.springframework.boot.autoconfigure.kafka.KafkaAutoConfiguration;
+import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
+import org.springframework.boot.autoconfigure.mongo.MongoProperties;
+import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.context.annotation.Import;
import static org.awaitility.Awaitility.*;
-@SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
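+// Boot the test context through Spring Boot's Kafka auto-configuration, so that
+// the consumer under test is built from the standard spring.kafka.* properties.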
+@SpringJUnitConfig(
+ initializers = ConfigDataApplicationContextInitializer.class,
+ classes = {
+ KafkaAutoConfiguration.class,
+ ApplicationTests.Configuration.class })
@TestPropertySource(
properties = {
- "consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
- "consumer.topic=" + TOPIC,
- "consumer.commit-interval=500ms" })
+ "spring.kafka.bootstrap-servers=${spring.embedded.kafka.brokers}",
+ "sumup.adder.topic=" + TOPIC,
+ "spring.kafka.consumer.auto-commit-interval=500ms",
+ "spring.mongodb.embedded.version=4.4.13" })
@EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
+@EnableAutoConfiguration
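+// @AutoConfigureDataMongo starts an embedded MongoDB for the test; the version
+// that is downloaded and started is pinned via spring.mongodb.embedded.version.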
+@AutoConfigureDataMongo
@Slf4j
abstract class GenericApplicationTests<K, V>
{
@Autowired
- KafkaConsumer<K, V> kafkaConsumer;
+ org.apache.kafka.clients.consumer.Consumer<K, V> kafkaConsumer;
@Autowired
Consumer<ConsumerRecord<K, V>> consumer;
@Autowired
- ApplicationProperties properties;
+ ApplicationProperties applicationProperties;
+ @Autowired
+ KafkaProperties kafkaProperties;
@Autowired
ExecutorService executor;
+ @Autowired
+ MongoClient mongoClient;
+ @Autowired
+ MongoProperties mongoProperties;
+ @Autowired
+ ConsumerRebalanceListener rebalanceListener;
+ @Autowired
+ RecordHandler<K, V> recordHandler;
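// Plain Bytes producer/consumer: the producer writes the generated test records,
// the consumer is only used to look up the offsets committed by the group under test.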
KafkaProducer<Bytes, Bytes> testRecordProducer;
KafkaConsumer<Bytes, Bytes> offsetConsumer;
/** Test methods */
@Test
- void commitsCurrentOffsetsOnSuccess()
+ void commitsCurrentOffsetsOnSuccess() throws Exception
{
int numberOfGeneratedMessages =
recordGenerator.generate(false, false, messageSender);
// exitStatus() is assumed to throw an IllegalStateException as long as the
// consumer is still running.
assertThatExceptionOfType(IllegalStateException.class)
  .describedAs("Consumer should still be running")
  .isThrownBy(() -> endlessConsumer.exitStatus());
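+ // The consumer is now stopped by the test itself instead of by the tear-down
+ // method (the corresponding call there is removed further down).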
+ endlessConsumer.stop();
recordGenerator.assertBusinessLogic();
}
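// Note: TestRecordHandler, which is used in the set-up further down, is not part
// of this change-set. From its usage it is assumed to be an abstract decorator
// for the application's RecordHandler, roughly like the following sketch (names
// and details are assumptions, not necessarily the actual implementation):
//
//   public abstract class TestRecordHandler<K, V> implements RecordHandler<K, V>
//   {
//     private final RecordHandler<K, V> handler;
//
//     public TestRecordHandler(RecordHandler<K, V> handler)
//     {
//       this.handler = handler;
//     }
//
//     // Hook for the test: called once for every record before delegation.
//     public abstract void onNewRecord(ConsumerRecord<K, V> record);
//
//     @Override
//     public void accept(ConsumerRecord<K, V> record)
//     {
//       this.onNewRecord(record);
//       handler.accept(record);
//     }
//   }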
Long expected = offsetsToCheck.get(tp) + 1;
log.debug("Checking, if the offset {} for {} is at most {}", offset, tp, expected);
assertThat(offset)
- .describedAs("Committed offset corresponds to the offset of the consumer")
+ .describedAs("Committed offset must be at most equal to the offset of the consumer")
.isLessThanOrEqualTo(expected);
isOffsetBehindSeen.add(offset < expected);
});
{
Properties props;
props = new Properties();
- props.put("bootstrap.servers", properties.getBootstrapServer());
+ props.put("bootstrap.servers", kafkaProperties.getBootstrapServers());
props.put("linger.ms", 100);
props.put("key.serializer", BytesSerializer.class.getName());
props.put("value.serializer", BytesSerializer.class.getName());
testRecordProducer = new KafkaProducer<>(props);
props = new Properties();
- props.put("bootstrap.servers", properties.getBootstrapServer());
+ props.put("bootstrap.servers", kafkaProperties.getBootstrapServers());
props.put("client.id", "OFFSET-CONSUMER");
- props.put("group.id", properties.getGroupId());
+ props.put("group.id", kafkaProperties.getConsumer().getGroupId());
props.put("key.deserializer", BytesDeserializer.class.getName());
props.put("value.deserializer", BytesDeserializer.class.getName());
offsetConsumer = new KafkaConsumer<>(props);
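+ // Drop the MongoDB database, so that no state from a previous test run can
+ // leak into the current test.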
+ mongoClient.getDatabase(mongoProperties.getDatabase()).drop();
seekToEnd();
oldOffsets = new HashMap<>();
seenOffsets.put(tp, offset - 1);
});
- Consumer<ConsumerRecord<K, V>> captureOffsetAndExecuteTestHandler =
- record ->
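+ // Wrap the application's RecordHandler in a TestRecordHandler that records the
+ // offset of every received record before delegating; the explicit call to
+ // consumer.accept(record) below is dropped, because the delegation now happens
+ // inside TestRecordHandler.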
+ TestRecordHandler<K, V> captureOffsetAndExecuteTestHandler =
+ new TestRecordHandler<K, V>(recordHandler)
{
- seenOffsets.put(
- new TopicPartition(record.topic(), record.partition()),
- record.offset());
- receivedRecords.add(record);
- consumer.accept(record);
+ @Override
+ public void onNewRecord(ConsumerRecord<K, V> record)
+ {
+ seenOffsets.put(
+ new TopicPartition(record.topic(), record.partition()),
+ record.offset());
+ receivedRecords.add(record);
+ }
};
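// The client-id is now taken from the Spring Boot Kafka properties, and the
// application's rebalance listener is passed to the EndlessConsumer explicitly.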
endlessConsumer =
new EndlessConsumer<>(
executor,
- properties.getClientId(),
- properties.getTopic(),
+ kafkaProperties.getClientId(),
+ applicationProperties.getTopic(),
kafkaConsumer,
+ rebalanceListener,
captureOffsetAndExecuteTestHandler);
endlessConsumer.start();
{
try
{
- endlessConsumer.stop();
testRecordProducer.close();
offsetConsumer.close();
}