  {
    return new WordcountRecordHandler(
        repository,
-        properties.getClientId(),
        properties.getTopic(),
        Clock.systemDefaultZone(),
        properties.getCommitInterval(),
  @Bean
  public WordcountRebalanceListener wordcountRebalanceListener(
-      WordcountRecordHandler wordcountRecordHandler)
+      WordcountRecordHandler wordcountRecordHandler,
+      PartitionStatisticsRepository repository,
+      Consumer<String, String> consumer,
+      ApplicationProperties properties)
  {
-    return new WordcountRebalanceListener(wordcountRecordHandler);
+    return new WordcountRebalanceListener(
+        wordcountRecordHandler,
+        repository,
+        properties.getClientId(),
+        consumer);
  }

  @Bean
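The reworked bean now expects Spring to inject a `Consumer<String, String>` alongside the repository. That bean is not part of this diff; the following is only a minimal sketch of what it could look like. `getClientId()` appears in the diff above, but `getBootstrapServer()` and `getGroupId()` are assumed getters, and auto-commit is disabled here because the offsets are stored in MongoDB:

  @Bean(destroyMethod = "close")
  public KafkaConsumer<String, String> consumer(ApplicationProperties properties)
  {
    Properties props = new Properties();
    props.put("bootstrap.servers", properties.getBootstrapServer()); // assumed getter
    props.put("group.id", properties.getGroupId());                  // assumed getter
    props.put("client.id", properties.getClientId());
    props.put("auto.offset.reset", "earliest");
    // Offsets are restored from and saved to MongoDB, so Kafka must not commit them itself
    props.put("enable.auto.commit", "false");
    props.put("key.deserializer", StringDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());
    return new KafkaConsumer<>(props);
  }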
package de.juplo.kafka;

import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.common.TopicPartition;

import java.util.function.Consumer;


public interface RecordHandler<K, V> extends Consumer<ConsumerRecord<K,V>>
{
  default void beforeNextPoll() {}
-
-  default void onPartitionAssigned(TopicPartition tp) {}
-
-  default void onPartitionRevoked(TopicPartition tp) {}
}
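With the two partition callbacks gone, implementing the interface again only means consuming records; `beforeNextPoll()` keeps its empty default. A hypothetical minimal implementation for illustration:

public class LoggingRecordHandler implements RecordHandler<String, String>
{
  @Override
  public void accept(ConsumerRecord<String, String> record)
  {
    // topic(), partition(), offset() and value() are standard ConsumerRecord accessors
    System.out.printf(
        "%s-%d@%d: %s%n",
        record.topic(), record.partition(), record.offset(), record.value());
  }
}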
package de.juplo.kafka;

import lombok.RequiredArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.common.TopicPartition;

import java.util.Collection;
+import java.util.Map;


@RequiredArgsConstructor
+@Slf4j
public class WordcountRebalanceListener implements ConsumerRebalanceListener
{
-  private final RecordHandler<String, String> handler;
+  private final WordcountRecordHandler handler;
+  private final PartitionStatisticsRepository repository;
+  private final String id;
+  private final Consumer<String, String> consumer;

  @Override
  public void onPartitionsAssigned(Collection<TopicPartition> partitions)
  {
-    partitions.forEach(tp -> handler.onPartitionAssigned(tp));
+    partitions.forEach(tp ->
+    {
+      Integer partition = tp.partition();
+      Long offset = consumer.position(tp);
+      log.info("{} - adding partition: {}, offset={}", id, partition, offset);
+      StatisticsDocument document =
+          repository
+              .findById(Integer.toString(partition))
+              .orElse(new StatisticsDocument(partition));
+      if (document.offset >= 0)
+      {
+        // Only seek if a stored offset was found
+        // Otherwise: use the initial offset, as determined by Kafka
+        consumer.seek(tp, document.offset);
+      }
+      handler.addPartition(partition, document.statistics);
+    });
  }

  @Override
  public void onPartitionsRevoked(Collection<TopicPartition> partitions)
  {
-    partitions.forEach(tp -> handler.onPartitionRevoked(tp));
+    partitions.forEach(tp ->
+    {
+      Integer partition = tp.partition();
+      Long newOffset = consumer.position(tp);
+      log.info(
+          "{} - removing partition: {}, offset of next message: {}",
+          id,
+          partition,
+          newOffset);
+      Map<String, Map<String, Long>> removed = handler.removePartition(partition);
+      repository.save(new StatisticsDocument(partition, removed, newOffset));
+    });
  }
}
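The listener only takes effect if it is registered when subscribing. The consumer loop itself is outside this diff; a sketch of the expected wiring, assuming the usual poll-loop around the handler:

consumer.subscribe(Arrays.asList(topic), wordcountRebalanceListener);

while (true)
{
  ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
  for (ConsumerRecord<String, String> record : records)
    handler.accept(record);
  // Gives the handler a chance to persist its state once the commit interval has elapsed
  handler.beforeNextPoll();
}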
  private final PartitionStatisticsRepository repository;
-  private final String id;
  private final String topic;
  private final Clock clock;
  private final Duration commitInterval;
    }
  }

-  @Override
-  public void onPartitionAssigned(TopicPartition tp)
+  public void addPartition(Integer partition, Map<String, Map<String, Long>> statistics)
  {
-    Integer partition = tp.partition();
-    Long offset = consumer.position(tp);
-    log.info("{} - adding partition: {}, offset={}", id, partition, offset);
-    StatisticsDocument document =
-        repository
-            .findById(Integer.toString(partition))
-            .orElse(new StatisticsDocument(partition));
-    if (document.offset >= 0)
-    {
-      // Only seek, if a stored offset was found
-      // Otherwise: Use initial offset, generated by Kafka
-      consumer.seek(tp, document.offset);
-    }
-    seen.put(partition, document.statistics);
+    seen.put(partition, statistics);
  }

-  @Override
-  public void onPartitionRevoked(TopicPartition tp)
+  public Map<String, Map<String, Long>> removePartition(Integer partition)
  {
-    Integer partition = tp.partition();
-    Long newOffset = consumer.position(tp);
-    log.info(
-        "{} - removing partition: {}, offset of next message {})",
-        id,
-        partition,
-        newOffset);
-    Map<String, Map<String, Long>> removed = seen.remove(partition);
-    repository.save(new StatisticsDocument(partition, removed, consumer.position(tp)));
+    return seen.remove(partition);
  }
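`addPartition()` and `removePartition()` now only touch the handler's in-memory state; all seeking and persistence has moved to the rebalance listener. The `seen` field is not shown in this diff, but judging from the types involved it presumably looks like this:

// partition -> key -> word -> count
private final Map<Integer, Map<String, Map<String, Long>>> seen = new HashMap<>();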
import lombok.RequiredArgsConstructor;
import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.common.TopicPartition;


@RequiredArgsConstructor
  {
    handler.beforeNextPoll();
  }
-
-  @Override
-  public void onPartitionAssigned(TopicPartition tp)
-  {
-    handler.onPartitionAssigned(tp);
-  }
-
-  @Override
-  public void onPartitionRevoked(TopicPartition tp)
-  {
-    handler.onPartitionRevoked(tp);
-  }
}
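After this change, the delegating wrapper shrinks to the two remaining interface methods. Its class declaration lies outside the hunk, so the name below is made up; the rest follows from the diff:

@RequiredArgsConstructor
public class DelegatingRecordHandler<K, V> implements RecordHandler<K, V>
{
  private final RecordHandler<K, V> handler;

  @Override
  public void accept(ConsumerRecord<K, V> record)
  {
    handler.accept(record);
  }

  @Override
  public void beforeNextPoll()
  {
    handler.beforeNextPoll();
  }
}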