package de.juplo.kafka;

import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.common.TopicPartition;

import java.util.Collection;
import java.util.Map;
@RequiredArgsConstructor
+@Slf4j
public class WordcountRebalanceListener implements ConsumerRebalanceListener
{
- private final RecordHandler<String, String> handler;
+ private final WordcountRecordHandler handler;
+ private final PartitionStatisticsRepository repository;
+ private final String id;
+ private final Consumer<String, String> consumer;
@Override
public void onPartitionsAssigned(Collection<TopicPartition> partitions)
{
- partitions.forEach(tp -> handler.onPartitionAssigned(tp));
+ partitions.forEach(tp ->
+ {
+ Integer partition = tp.partition();
+ Long offset = consumer.position(tp);
+ log.info("{} - adding partition: {}, offset={}", id, partition, offset);
+ StatisticsDocument document =
+ repository
+ .findById(Integer.toString(partition))
+ .orElse(new StatisticsDocument(partition));
+ if (document.offset >= 0)
+ {
+ // Only seek, if a stored offset was found
+ // Otherwise: Use initial offset, generated by Kafka
+ consumer.seek(tp, document.offset);
+ }
+ handler.addPartition(partition, document.statistics);
+ });
}
@Override
public void onPartitionsRevoked(Collection<TopicPartition> partitions)
{
- partitions.forEach(tp -> handler.onPartitionRevoked(tp));
+ partitions.forEach(tp ->
+ {
+ Integer partition = tp.partition();
+ Long newOffset = consumer.position(tp);
+ log.info(
+ "{} - removing partition: {}, offset of next message {})",
+ id,
+ partition,
+ newOffset);
+ Map<String, Map<String, Long>> removed = handler.removePartition(partition);
+ repository.save(new StatisticsDocument(partition, removed, consumer.position(tp)));
+ });
}
}