import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.errors.RecordDeserializationException;
import org.apache.kafka.common.serialization.*;
import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.*;
import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
+import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.context.annotation.Bean;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.function.BiConsumer;
-import java.util.function.Consumer;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
properties = {
"consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
"consumer.topic=" + TOPIC,
- "consumer.commit-interval=1s" })
+ "consumer.commit-interval=1s",
+ "spring.mongodb.embedded.version=4.4.13" })
@EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
+@EnableAutoConfiguration
+@AutoConfigureDataMongo
@Slf4j
class ApplicationTests
{
@Autowired
KafkaProducer<String, Bytes> kafkaProducer;
@Autowired
- KafkaConsumer<String, Long> kafkaConsumer;
+ KafkaConsumer<String, String> kafkaConsumer;
@Autowired
KafkaConsumer<Bytes, Bytes> offsetConsumer;
@Autowired
+ PartitionStatisticsRepository partitionStatisticsRepository;
+ @Autowired
ApplicationProperties properties;
@Autowired
ExecutorService executor;
+ @Autowired
+ PartitionStatisticsRepository repository;
+ @Autowired
+ WordcountRebalanceListener wordcountRebalanceListener;
+ @Autowired
+ WordcountRecordHandler wordcountRecordHandler;
- Consumer<ConsumerRecord<String, Long>> testHandler;
- EndlessConsumer<String, Long> endlessConsumer;
+ EndlessConsumer<String, String> endlessConsumer;
Map<TopicPartition, Long> oldOffsets;
Map<TopicPartition, Long> newOffsets;
- Set<ConsumerRecord<String, Long>> receivedRecords;
+ Set<ConsumerRecord<String, String>> receivedRecords;
/** Test methods */
.describedAs("Consumer should still be running");
}
- @Test
- void commitsOffsetOfErrorForReprocessingOnDeserializationError()
- {
- send100Messages((partition, key, counter) ->
- {
- Bytes value = counter == 77
- ? new Bytes(stringSerializer.serialize(TOPIC, "BOOM!"))
- : new Bytes(valueSerializer.serialize(TOPIC, counter));
- return new ProducerRecord<>(TOPIC, partition, key, value);
- });
-
- await("Consumer failed")
- .atMost(Duration.ofSeconds(30))
- .pollInterval(Duration.ofSeconds(1))
- .until(() -> !endlessConsumer.running());
-
- checkSeenOffsetsForProgress();
- compareToCommitedOffsets(newOffsets);
-
- endlessConsumer.start();
- await("Consumer failed")
- .atMost(Duration.ofSeconds(30))
- .pollInterval(Duration.ofSeconds(1))
- .until(() -> !endlessConsumer.running());
-
- checkSeenOffsetsForProgress();
- compareToCommitedOffsets(newOffsets);
- assertThat(receivedRecords.size())
- .describedAs("Received not all sent events")
- .isLessThan(100);
-
- assertThatNoException()
- .describedAs("Consumer should not be running")
- .isThrownBy(() -> endlessConsumer.exitStatus());
- assertThat(endlessConsumer.exitStatus())
- .describedAs("Consumer should have exited abnormally")
- .containsInstanceOf(RecordDeserializationException.class);
- }
-
/** Helper methods for the verification of expectations */
void seekToEnd()
{
offsetConsumer.assign(partitions());
- offsetConsumer.seekToEnd(partitions());
partitions().forEach(tp ->
{
- // seekToEnd() works lazily: it only takes effect on poll()/position()
Long offset = offsetConsumer.position(tp);
log.info("New position for {}: {}", tp, offset);
+ Integer partition = tp.partition();
+ StatisticsDocument document =
+ partitionStatisticsRepository
+ .findById(partition.toString())
+ .orElse(new StatisticsDocument(partition));
+ document.offset = offset;
+ partitionStatisticsRepository.save(document);
});
- // The new positions must be commited!
- offsetConsumer.commitSync();
offsetConsumer.unsubscribe();
}
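The repository calls above assume a Spring Data MongoDB document with a String id derived from the partition number, a public offset field, and a constructor taking the partition. Those classes are not part of this diff; the following is a minimal sketch inferred from the calls to findById() and save() — the field layout and collection name are assumptions:

import org.springframework.data.annotation.Id;
import org.springframework.data.mongodb.core.mapping.Document;
import org.springframework.data.mongodb.repository.MongoRepository;

// Hypothetical sketch: only the members used by seekToEnd()/doForCurrentOffsets()
@Document(collection = "statistics")
public class StatisticsDocument
{
  @Id
  public String id;     // the partition number as String
  public long offset;   // last position written by seekToEnd()

  public StatisticsDocument() {}

  public StatisticsDocument(Integer partition)
  {
    this.id = Integer.toString(partition);
  }
}

// Plain Spring Data repository; findById() and save() are inherited from MongoRepository
public interface PartitionStatisticsRepository
    extends MongoRepository<StatisticsDocument, String>
{
}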
void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
{
- offsetConsumer.assign(partitions());
- partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
- offsetConsumer.unsubscribe();
+ partitions().forEach(tp ->
+ {
+ String partition = Integer.toString(tp.partition());
+ Optional<Long> offset = partitionStatisticsRepository.findById(partition).map(document -> document.offset);
+      consumer.accept(tp, offset.orElse(0L));
+ });
}
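For orientation: doForCurrentOffsets() is driven from init() further down, but that hunk only shows the tail of the lambda. A sketch of how the invocation presumably looks, assuming the two offset maps declared in the fields above:

doForCurrentOffsets((tp, offset) ->
{
  oldOffsets.put(tp, offset - 1);
  newOffsets.put(tp, offset - 1);
});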
List<TopicPartition> partitions()
@BeforeEach
public void init()
{
- testHandler = record -> {} ;
-
seekToEnd();
oldOffsets = new HashMap<>();
newOffsets.put(tp, offset - 1);
});
- Consumer<ConsumerRecord<String, Long>> captureOffsetAndExecuteTestHandler =
- record ->
- {
- newOffsets.put(
- new TopicPartition(record.topic(), record.partition()),
- record.offset());
- receivedRecords.add(record);
- testHandler.accept(record);
+ TestRecordHandler<String, String> captureOffsetAndExecuteTestHandler =
+ new TestRecordHandler<String, String>(wordcountRecordHandler) {
+ @Override
+ public void onNewRecord(ConsumerRecord<String, String> record)
+ {
+ newOffsets.put(
+ new TopicPartition(record.topic(), record.partition()),
+ record.offset());
+ receivedRecords.add(record);
+ }
};
endlessConsumer =
properties.getClientId(),
properties.getTopic(),
kafkaConsumer,
+ wordcountRebalanceListener,
captureOffsetAndExecuteTestHandler);
endlessConsumer.start();
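TestRecordHandler is referenced above but not shown in this diff; presumably it wraps the real WordcountRecordHandler and lets the test hook into every consumed record. A sketch under that assumption — the Consumer supertype, the accept() signature and the requirement that WordcountRecordHandler implements Consumer<ConsumerRecord<String, String>> are all inferred from the usage, not confirmed by the diff:

import org.apache.kafka.clients.consumer.ConsumerRecord;
import java.util.function.Consumer;

// Hypothetical base class: forwards every record to the wrapped handler
// and additionally calls onNewRecord(), which the test overrides above.
public abstract class TestRecordHandler<K, V> implements Consumer<ConsumerRecord<K, V>>
{
  private final Consumer<ConsumerRecord<K, V>> delegate;

  public TestRecordHandler(Consumer<ConsumerRecord<K, V>> delegate)
  {
    this.delegate = delegate;
  }

  public abstract void onNewRecord(ConsumerRecord<K, V> record);

  @Override
  public void accept(ConsumerRecord<K, V> record)
  {
    onNewRecord(record);
    delegate.accept(record);
  }
}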
Properties props = new Properties();
props.put("bootstrap.servers", properties.getBootstrapServer());
props.put("client.id", "OFFSET-CONSUMER");
- props.put("group.id", properties.getGroupId());
+ props.put("enable.auto.commit", false);
+ props.put("auto.offset.reset", "latest");
props.put("key.deserializer", BytesDeserializer.class.getName());
props.put("value.deserializer", BytesDeserializer.class.getName());