+import java.util.Optional;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.function.BiConsumer;
-import java.util.function.Consumer;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
properties = {
"consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
"consumer.topic=" + TOPIC,
+ "consumer.commit-interval=1s",
"spring.mongodb.embedded.version=4.4.13" })
@EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
@EnableAutoConfiguration
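The new consumer.commit-interval entry only takes effect if ApplicationProperties
actually binds it. A minimal sketch of the assumed binding, with the prefix and
field names inferred from the property keys above (not confirmed by this diff);
Spring Boot converts the literal "1s" into a java.time.Duration:

import java.time.Duration;
import org.springframework.boot.context.properties.ConfigurationProperties;

// Hypothetical sketch; the real ApplicationProperties is not part of this diff.
@ConfigurationProperties(prefix = "consumer")
public class ApplicationProperties
{
    private String bootstrapServer;
    private String groupId;
    private String clientId;
    private String topic;
    private Duration commitInterval; // bound from consumer.commit-interval=1s

    public String getClientId() { return clientId; }
    public String getTopic() { return topic; }
    public Duration getCommitInterval() { return commitInterval; }
    // Remaining getters and the setters needed for binding omitted for brevity.
}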
@Autowired
KafkaConsumer<String, Long> kafkaConsumer;
@Autowired
- KafkaConsumer<Bytes, Bytes> offsetConsumer;
+ PartitionStatisticsRepository partitionStatisticsRepository;
@Autowired
ApplicationProperties properties;
@Autowired
ExecutorService executor;
@Autowired
PartitionStatisticsRepository repository;
+ @Autowired
+ KeyCountingRebalanceListener keyCountingRebalanceListener;
+ @Autowired
+ KeyCountingRecordHandler keyCountingRecordHandler;
- Consumer<ConsumerRecord<String, Long>> testHandler;
EndlessConsumer<String, Long> endlessConsumer;
Map<TopicPartition, Long> oldOffsets;
Map<TopicPartition, Long> newOffsets;
void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
{
- offsetConsumer.assign(partitions());
- partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
- offsetConsumer.unsubscribe();
- }
+ partitions().forEach(tp ->
+ {
+ String partition = Integer.toString(tp.partition());
+ Optional<Long> offset = partitionStatisticsRepository.findById(partition).map(document -> document.offset);
+ consumer.accept(tp, offset.orElse(0L));
+ });
+ }
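For reference: doForCurrentOffsets() now reads the offsets from MongoDB instead
of asking a separate offset consumer. A rough sketch of the assumed repository
and document shapes, inferred from findById(partition) and document.offset
above; the collection name and the statistics field are guesses:

import java.util.Map;
import org.springframework.data.annotation.Id;
import org.springframework.data.mongodb.core.mapping.Document;
import org.springframework.data.mongodb.repository.MongoRepository;

// Hypothetical sketch, inferred from the usage in this test.
public interface PartitionStatisticsRepository
    extends MongoRepository<StatisticsDocument, String>
{
}

@Document(collection = "statistics")
class StatisticsDocument
{
    @Id
    public String id;                    // partition number, see Integer.toString(tp.partition())
    public long offset;                  // next offset to consume on this partition
    public Map<String, Long> statistics; // per-key counts (assumed)
}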
List<TopicPartition> partitions()
{
    return IntStream
        .range(0, PARTITIONS)
        .mapToObj(partition -> new TopicPartition(TOPIC, partition))
        .collect(Collectors.toList());
}
@BeforeEach
public void init()
{
- testHandler = record -> {} ;
-
oldOffsets = new HashMap<>();
newOffsets = new HashMap<>();
receivedRecords = new HashSet<>();
doForCurrentOffsets((tp, offset) ->
{
    oldOffsets.put(tp, offset - 1);
    newOffsets.put(tp, offset - 1);
});
- Consumer<ConsumerRecord<String, Long>> captureOffsetAndExecuteTestHandler =
- record ->
- {
- newOffsets.put(
- new TopicPartition(record.topic(), record.partition()),
- record.offset());
- receivedRecords.add(record);
- testHandler.accept(record);
+ TestRecordHandler<String, Long> captureOffsetAndExecuteTestHandler =
+ new TestRecordHandler<String, Long>(keyCountingRecordHandler) {
+ @Override
+ public void onNewRecord(ConsumerRecord<String, Long> record)
+ {
+ newOffsets.put(
+ new TopicPartition(record.topic(), record.partition()),
+ record.offset());
+ receivedRecords.add(record);
+ }
};
endlessConsumer =
new EndlessConsumer<>(
executor,
- repository,
properties.getClientId(),
properties.getTopic(),
kafkaConsumer,
+ keyCountingRebalanceListener,
captureOffsetAndExecuteTestHandler);
endlessConsumer.start();
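The anonymous subclass above presupposes a TestRecordHandler that decorates the
production handler and exposes a per-record hook. A sketch of that assumed
decorator; it also assumes RecordHandler is a Consumer<ConsumerRecord<K, V>>-style
interface:

import java.util.function.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;

// Assumed shape of the production handler interface.
interface RecordHandler<K, V> extends Consumer<ConsumerRecord<K, V>> {}

// Hypothetical sketch of the decorator the test subclasses.
public abstract class TestRecordHandler<K, V> implements RecordHandler<K, V>
{
    private final RecordHandler<K, V> handler;

    public TestRecordHandler(RecordHandler<K, V> handler)
    {
        this.handler = handler;
    }

    // Hook overridden by the test to capture offsets and records.
    public abstract void onNewRecord(ConsumerRecord<K, V> record);

    @Override
    public void accept(ConsumerRecord<K, V> record)
    {
        this.onNewRecord(record);    // let the test observe the record first...
        this.handler.accept(record); // ...then delegate to the real handler
    }
}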
return new KafkaProducer<>(props);
}
-
- @Bean
- KafkaConsumer<Bytes, Bytes> offsetConsumer(ApplicationProperties properties)
- {
- Properties props = new Properties();
- props.put("bootstrap.servers", properties.getBootstrapServer());
- props.put("client.id", "OFFSET-CONSUMER");
- props.put("group.id", properties.getGroupId());
- props.put("key.deserializer", BytesDeserializer.class.getName());
- props.put("value.deserializer", BytesDeserializer.class.getName());
-
- return new KafkaConsumer<>(props);
- }
}
}
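Finally, the keyCountingRebalanceListener handed to EndlessConsumer is what
replaces the deleted offsetConsumer bean: on assignment it seeks each partition
to the offset stored in MongoDB, on revocation it writes the current positions
back. A sketch under those assumptions; constructor wiring is simplified and
the persistence of the counted keys is omitted:

import java.util.Collection;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.common.TopicPartition;

// Hypothetical sketch; reuses the StatisticsDocument sketched above.
public class KeyCountingRebalanceListener implements ConsumerRebalanceListener
{
    private final Consumer<?, ?> consumer;
    private final PartitionStatisticsRepository repository;

    public KeyCountingRebalanceListener(
        Consumer<?, ?> consumer,
        PartitionStatisticsRepository repository)
    {
        this.consumer = consumer;
        this.repository = repository;
    }

    @Override
    public void onPartitionsAssigned(Collection<TopicPartition> partitions)
    {
        partitions.forEach(tp ->
        {
            // Resume from the stored offset; unknown partitions start at 0.
            long offset = repository
                .findById(Integer.toString(tp.partition()))
                .map(document -> document.offset)
                .orElse(0L);
            consumer.seek(tp, offset);
        });
    }

    @Override
    public void onPartitionsRevoked(Collection<TopicPartition> partitions)
    {
        partitions.forEach(tp ->
        {
            // Persist the next offset to consume so a restart resumes here.
            StatisticsDocument document = repository
                .findById(Integer.toString(tp.partition()))
                .orElseGet(StatisticsDocument::new);
            document.id = Integer.toString(tp.partition());
            document.offset = consumer.position(tp);
            repository.save(document);
        });
    }
}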