import org.junit.jupiter.api.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
-import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.context.annotation.Bean;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
-import org.apache.kafka.common.serialization.LongSerializer;
+import org.apache.kafka.common.serialization.IntegerSerializer;
-import static de.juplo.kafka.ApplicationTests.PARTITIONS;
-import static de.juplo.kafka.ApplicationTests.TOPIC;
+import static de.juplo.kafka.ApplicationTests.*;
import static org.assertj.core.api.Assertions.*;
import static org.awaitility.Awaitility.*;
@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
@TestPropertySource(
properties = {
- "consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
- "consumer.topic=" + TOPIC,
- "consumer.commit-interval=1s",
- "spring.mongodb.embedded.version=4.4.13" })
-@EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
+ "sumup.requests.bootstrap-server=${spring.embedded.kafka.brokers}",
+ "sumup.requests.topic-in=" + INPUT_TOPIC,
+ "sumup.requests.commit-interval=1s" })
+@EmbeddedKafka(topics = { INPUT_TOPIC, OUTPUT_TOPIC }, partitions = PARTITIONS)
@EnableAutoConfiguration
-@AutoConfigureDataMongo
@Slf4j
class ApplicationTests
{
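+ // the service under test reads from INPUT_TOPIC; OUTPUT_TOPIC is provisioned for the results it is expected to write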
- public static final String TOPIC = "FOO";
+ public static final String INPUT_TOPIC = "FOO";
+ public static final String OUTPUT_TOPIC = "BAR";
public static final int PARTITIONS = 10;
@Autowired
Serializer valueSerializer;
@Autowired
- KafkaProducer<String, Bytes> kafkaProducer;
+ KafkaProducer<String, Bytes> testProducer;
@Autowired
- KafkaConsumer<String, Long> kafkaConsumer;
+ KafkaConsumer<String, Integer> kafkaConsumer;
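+ // helper consumer the test uses to inspect and manipulate the committed offsets directly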
@Autowired
KafkaConsumer<Bytes, Bytes> offsetConsumer;
@Autowired
- PartitionStatisticsRepository partitionStatisticsRepository;
- @Autowired
ApplicationProperties properties;
@Autowired
ExecutorService executor;
@Autowired
- PartitionStatisticsRepository repository;
- @Autowired
- KeyCountingRebalanceListener keyCountingRebalanceListener;
- @Autowired
- KeyCountingRecordHandler keyCountingRecordHandler;
+ RecordHandler noopRecordHandler;
- EndlessConsumer<String, Long> endlessConsumer;
+ EndlessConsumer<String, Integer> endlessConsumer;
Map<TopicPartition, Long> oldOffsets;
Map<TopicPartition, Long> newOffsets;
- Set<ConsumerRecord<String, Long>> receivedRecords;
+ Set<ConsumerRecord<String, Integer>> receivedRecords;
/** Test methods */
{
send100Messages((partition, key, counter) ->
{
- Bytes value = new Bytes(valueSerializer.serialize(TOPIC, counter));
- return new ProducerRecord<>(TOPIC, partition, key, value);
+ Bytes value = new Bytes(valueSerializer.serialize(INPUT_TOPIC, counter));
+ return new ProducerRecord<>(INPUT_TOPIC, partition, key, value);
});
await("100 records received")
send100Messages((partition, key, counter) ->
{
Bytes value = counter == 77
- ? new Bytes(stringSerializer.serialize(TOPIC, "BOOM!"))
- : new Bytes(valueSerializer.serialize(TOPIC, counter));
- return new ProducerRecord<>(TOPIC, partition, key, value);
+ ? new Bytes(stringSerializer.serialize(INPUT_TOPIC, "BOOM!"))
+ : new Bytes(valueSerializer.serialize(INPUT_TOPIC, counter));
+ return new ProducerRecord<>(INPUT_TOPIC, partition, key, value);
});
await("Consumer failed")
void seekToEnd()
{
offsetConsumer.assign(partitions());
+ offsetConsumer.seekToEnd(partitions());
partitions().forEach(tp ->
{
+ // seekToEnd() is evaluated lazily: the seek only takes effect on the next poll() or position() call
Long offset = offsetConsumer.position(tp);
log.info("New position for {}: {}", tp, offset);
- Integer partition = tp.partition();
- StatisticsDocument document =
- partitionStatisticsRepository
- .findById(partition.toString())
- .orElse(new StatisticsDocument(partition));
- document.offset = offset;
- partitionStatisticsRepository.save(document);
});
+ // The new positions must be committed, otherwise the consumer group will resume from the old ones!
+ offsetConsumer.commitSync();
offsetConsumer.unsubscribe();
}
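+ // reads the current offsets for all partitions directly from Kafka (replacing the former lookup in MongoDB)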
void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
{
- partitions().forEach(tp ->
- {
- String partition = Integer.toString(tp.partition());
- Optional<Long> offset = partitionStatisticsRepository.findById(partition).map(document -> document.offset);
- consumer.accept(tp, offset.orElse(0l));
- });
- }
+ offsetConsumer.assign(partitions());
+ partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
+ offsetConsumer.unsubscribe();
+ }
List<TopicPartition> partitions()
{
return
IntStream
.range(0, PARTITIONS)
- .mapToObj(partition -> new TopicPartition(TOPIC, partition))
+ .mapToObj(partition -> new TopicPartition(INPUT_TOPIC, partition))
.collect(Collectors.toList());
}
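+ // callback that lets each test decide how the 100 records are built (e.g. for injecting a poison pill)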
public interface RecordGenerator<K, V>
{
- public ProducerRecord<String, Bytes> generate(int partition, String key, long counter);
+ public ProducerRecord<String, Bytes> generate(int partition, String key, int counter);
}
void send100Messages(RecordGenerator recordGenerator)
{
- long i = 0;
+ int i = 0;
for (int partition = 0; partition < 10; partition++)
{
for (int key = 0; key < 10; key++)
{
ProducerRecord<String, Bytes> record =
recordGenerator.generate(partition, Integer.toString(partition*10+key%2), ++i);
- kafkaProducer.send(record, (metadata, e) ->
+ testProducer.send(record, (metadata, e) ->
{
if (metadata != null)
{
newOffsets.put(tp, offset - 1);
});
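+ // wrap the no-op handler, so that the test can capture every received record together with its offset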
- TestRecordHandler<String, Long> captureOffsetAndExecuteTestHandler =
- new TestRecordHandler<String, Long>(keyCountingRecordHandler) {
+ TestRecordHandler<String, Integer> captureOffsetAndExecuteTestHandler =
+ new TestRecordHandler<String, Integer>(noopRecordHandler) {
@Override
- public void onNewRecord(ConsumerRecord<String, Long> record)
+ public void onNewRecord(ConsumerRecord<String, Integer> record)
{
newOffsets.put(
new TopicPartition(record.topic(), record.partition()),
record.offset());
receivedRecords.add(record);
}
};

endlessConsumer =
new EndlessConsumer<>(
executor,
properties.getClientId(),
- properties.getTopic(),
+ properties.getTopicIn(),
kafkaConsumer,
- keyCountingRebalanceListener,
captureOffsetAndExecuteTestHandler);
endlessConsumer.start();
@TestConfiguration
public static class Configuration
{
@Bean
- Serializer<Long> serializer()
+ Serializer<Integer> valueSerializer()
{
- return new LongSerializer();
+ return new IntegerSerializer();
}
@Bean
- KafkaProducer<String, Bytes> kafkaProducer(ApplicationProperties properties)
+ KafkaProducer<String, Bytes> testProducer(ApplicationProperties properties)
{
Properties props = new Properties();
props.put("bootstrap.servers", properties.getBootstrapServer());
Properties props = new Properties();
props.put("bootstrap.servers", properties.getBootstrapServer());
props.put("client.id", "OFFSET-CONSUMER");
- props.put("enable.auto.commit", false);
- props.put("auto.offset.reset", "latest");
+ props.put("group.id", properties.getGroupId());
props.put("key.deserializer", BytesDeserializer.class.getName());
props.put("value.deserializer", BytesDeserializer.class.getName());
return new KafkaConsumer<>(props);
}
+
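+ // plain String/String producer, presumably for tests that need to send raw (unparsable) input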
+ @Bean
+ KafkaProducer<String, String> kafkaProducer(ApplicationProperties properties)
+ {
+ Properties props = new Properties();
+ props.put("bootstrap.servers", properties.getBootstrapServer());
+ props.put("key.serializer", StringSerializer.class.getName());
+ props.put("value.serializer", StringSerializer.class.getName());
+
+ return new KafkaProducer<>(props);
+ }
}
}