X-Git-Url: https://juplo.de/gitweb/?a=blobdiff_plain;f=src%2Fmain%2Fjava%2Fde%2Fjuplo%2Fkafka%2FApplicationRebalanceListener.java;h=8e8464fbb09237f56257a69922c90c39f8abe09f;hb=348477f6e4ff77e78bf7b6db66e4716663c9512d;hp=9e75112516bb9129720bcbedbcd4c045889c0b3b;hpb=12c1ce703ef76b75d995f94d1689c894dde1406a;p=demos%2Fkafka%2Ftraining

diff --git a/src/main/java/de/juplo/kafka/ApplicationRebalanceListener.java b/src/main/java/de/juplo/kafka/ApplicationRebalanceListener.java
index 9e75112..8e8464f 100644
--- a/src/main/java/de/juplo/kafka/ApplicationRebalanceListener.java
+++ b/src/main/java/de/juplo/kafka/ApplicationRebalanceListener.java
@@ -3,28 +3,32 @@ package de.juplo.kafka;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.kafka.clients.consumer.Consumer;
-import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
 import org.apache.kafka.common.TopicPartition;
 
 import java.time.Clock;
 import java.time.Duration;
 import java.time.Instant;
 import java.util.*;
-import java.util.concurrent.CountDownLatch;
 
 
 @RequiredArgsConstructor
 @Slf4j
-public class ApplicationRebalanceListener implements ConsumerRebalanceListener
+public class ApplicationRebalanceListener implements RebalanceListener
 {
-  private final Consumer<String, String> consumer;
   private final ApplicationRecordHandler recordHandler;
   private final AdderResults adderResults;
   private final StateRepository stateRepository;
   private final String id;
+  private final String topic;
+  private final Clock clock;
+  private final Duration commitInterval;
+  private final Consumer<String, String> consumer;
 
   private final Set<Integer> partitions = new HashSet<>();
 
+  private Instant lastCommit = Instant.EPOCH;
+  private boolean commitsEnabled = true;
+
   @Override
   public void onPartitionsAssigned(Collection<TopicPartition> partitions)
   {
@@ -37,6 +41,17 @@
           stateRepository
               .findById(Integer.toString(partition))
               .orElse(new StateDocument(partition));
+      if (document.offset >= 0)
+      {
+        // Only seek, if a stored offset was found
+        // Otherwise: Use initial offset, generated by Kafka
+        consumer.seek(tp, document.offset);
+        log.info(
+            "{} - Seeking to offset {} for partition {}",
+            id,
+            document.offset,
+            partition);
+      }
       recordHandler.addPartition(partition, document.state);
       for (String user : document.state.keySet())
       {
@@ -54,55 +69,71 @@
   @Override
   public void onPartitionsRevoked(Collection<TopicPartition> partitions)
   {
-    log.info("{} - Commiting offsets for all previously assigned partitions", id);
-    CountDownLatch commitDone = new CountDownLatch(1);
-    consumer.commitAsync((offsets, e) ->
-    {
-      commitDone.countDown();
-      if (e == null)
-      {
-        log.error("{} - Could not commit offsets to Kafka!", id, e);
-      }
-      else
-      {
-        offsets.entrySet().stream().forEach(entry ->
-        {
-          log.info("{} - Commited offset for {}: {}", id, entry.getKey(), entry.getValue());
-        });
-      }
-    });
-
     partitions.forEach(tp ->
     {
       Integer partition = tp.partition();
       log.info("{} - removing partition: {}", id, partition);
       this.partitions.remove(partition);
-      Map<String, Long> state = recordHandler.removePartition(partition);
-      for (String user : state.keySet())
+      Long offset = consumer.position(tp);
+      if (commitsEnabled)
       {
         log.info(
-            "{} - Saved state for partition={}|user={}: {}",
+            "{} - Storing {} as offset of next message for partition {}",
             id,
-            partition,
-            user,
-            state.get(user));
+            offset,
+            partition);
+        Map<String, Long> state = recordHandler.removePartition(partition);
+        for (String user : state.keySet())
+        {
+          log.info(
+              "{} - Saved state for partition={}|user={}: {}",
+              id,
+              partition,
+              user,
+              state.get(user));
+        }
+        Map<String, List<AdderResult>> results = adderResults.removePartition(partition);
+        stateRepository.save(new StateDocument(partition, state, results, offset));
+      }
+      else
+      {
+        log.info("{} - Offset commits are disabled! Last commit: {}", id, lastCommit);
       }
-      Map<String, List<AdderResult>> results = adderResults.removePartition(partition);
-      stateRepository.save(new StateDocument(partition, state, results));
     });
+  }
 
-    try
+
+  @Override
+  public void beforeNextPoll()
+  {
+    if (!commitsEnabled)
     {
-      log.debug("{} - Waiting for async commit to complete", id);
-      commitDone.await();
+      log.info("{} - Offset commits are disabled! Last commit: {}", id, lastCommit);
+      return;
     }
-    catch (InterruptedException e)
+
+    if (lastCommit.plus(commitInterval).isBefore(clock.instant()))
     {
-      log.warn(
-          "{} - Interrupted while waiting for async commit in onPartitionsRevoked({})",
-          id,
-          partitions,
-          e);
+      log.debug("{} - Storing data and offsets, last commit: {}", id, lastCommit);
+      partitions.forEach(partition -> stateRepository.save(
+          new StateDocument(
+              partition,
+              recordHandler.getState(partition).getState(),
+              adderResults.getState(partition),
+              consumer.position(new TopicPartition(topic, partition)))));
+      lastCommit = clock.instant();
     }
   }
+
+  @Override
+  public void enableCommits()
+  {
+    commitsEnabled = true;
+  }
+
+  @Override
+  public void disableCommits()
+  {
+    commitsEnabled = false;
+  }
 }
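
The class no longer implements Kafka's ConsumerRebalanceListener directly; it now implements a project-specific RebalanceListener interface that additionally exposes beforeNextPoll(), enableCommits() and disableCommits(). That interface is not part of this diff. A minimal sketch of what it presumably looks like, assuming it extends ConsumerRebalanceListener (the class still overrides onPartitionsAssigned() and onPartitionsRevoked()):

package de.juplo.kafka;

import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;

// Sketch only: the real interface lives elsewhere in the demos/kafka/training
// repository; extending ConsumerRebalanceListener is an assumption.
public interface RebalanceListener extends ConsumerRebalanceListener
{
  // Hook called by the poll loop before the next poll(); used here to store
  // state and offsets once the configured commitInterval has elapsed.
  void beforeNextPoll();

  // Toggle whether state and offsets are persisted at all.
  void enableCommits();
  void disableCommits();
}

With this change, offsets are no longer committed to Kafka at all: onPartitionsRevoked() stores the position of the next record together with the business state in the StateDocument, onPartitionsAssigned() seeks back to a stored offset if one is found, and beforeNextPoll() persists state and offsets at most once per commitInterval, so the state repository becomes the single source of truth for both data and progress.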
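
How the new methods are driven is also not visible in this diff. The following is a hypothetical poll loop, only to illustrate the intended call order; the class name PollLoopSketch, the record handling and the error handling are assumptions, not code from the repository (it relies on RebalanceListener extending ConsumerRebalanceListener, as sketched above, so the listener can be passed to subscribe()):

package de.juplo.kafka;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;

import java.time.Duration;
import java.util.List;

// Hypothetical driver, only to show how the three new methods fit together.
public class PollLoopSketch
{
  private final Consumer<String, String> consumer;
  private final ApplicationRebalanceListener rebalanceListener;
  private final String topic;
  private volatile boolean running = true;

  public PollLoopSketch(
      Consumer<String, String> consumer,
      ApplicationRebalanceListener rebalanceListener,
      String topic)
  {
    this.consumer = consumer;
    this.rebalanceListener = rebalanceListener;
    this.topic = topic;
  }

  public void run()
  {
    // Register the listener during subscribe(), so that
    // onPartitionsAssigned()/onPartitionsRevoked() fire on rebalances.
    consumer.subscribe(List.of(topic), rebalanceListener);
    rebalanceListener.enableCommits();
    try
    {
      while (running)
      {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
        for (ConsumerRecord<String, String> record : records)
        {
          // hand the record to the business logic (omitted here)
        }
        // Persist state and offsets at most once per commitInterval
        rebalanceListener.beforeNextPoll();
      }
    }
    catch (RuntimeException e)
    {
      // On an unexpected error, stop storing offsets so the last
      // consistent state/offset snapshot in the repository is kept.
      rebalanceListener.disableCommits();
      throw e;
    }
    finally
    {
      consumer.close();
    }
  }
}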