X-Git-Url: http://juplo.de/gitweb/?a=blobdiff_plain;f=src%2Fmain%2Fjava%2Fde%2Fjuplo%2Fkafka%2FApplicationRebalanceListener.java;h=86fe68af6005b8bbf8eccec1a0612acedd98d27f;hb=refs%2Ftags%2Fsumup-adder--drop-duplicates---lvm-2-tage;hp=9e75112516bb9129720bcbedbcd4c045889c0b3b;hpb=12c1ce703ef76b75d995f94d1689c894dde1406a;p=demos%2Fkafka%2Ftraining

diff --git a/src/main/java/de/juplo/kafka/ApplicationRebalanceListener.java b/src/main/java/de/juplo/kafka/ApplicationRebalanceListener.java
index 9e75112..86fe68a 100644
--- a/src/main/java/de/juplo/kafka/ApplicationRebalanceListener.java
+++ b/src/main/java/de/juplo/kafka/ApplicationRebalanceListener.java
@@ -2,22 +2,15 @@ package de.juplo.kafka;
 
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
-import org.apache.kafka.clients.consumer.Consumer;
-import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
 import org.apache.kafka.common.TopicPartition;
 
-import java.time.Clock;
-import java.time.Duration;
-import java.time.Instant;
 import java.util.*;
-import java.util.concurrent.CountDownLatch;
 
 
 @RequiredArgsConstructor
 @Slf4j
-public class ApplicationRebalanceListener implements ConsumerRebalanceListener
+public class ApplicationRebalanceListener implements RebalanceListener
 {
-  private final Consumer consumer;
   private final ApplicationRecordHandler recordHandler;
   private final AdderResults adderResults;
   private final StateRepository stateRepository;
@@ -37,7 +30,12 @@ public class ApplicationRebalanceListener implements ConsumerRebalanceListener
       stateRepository
         .findById(Integer.toString(partition))
         .orElse(new StateDocument(partition));
-      recordHandler.addPartition(partition, document.state);
+      log.info(
+        "{} - Offset of next unseen message for partition {}: {}",
+        id,
+        partition,
+        document.offset);
+      recordHandler.addPartition(partition, document.state, document.offset);
       for (String user : document.state.keySet())
       {
         log.info(
@@ -54,55 +52,66 @@ public class ApplicationRebalanceListener implements ConsumerRebalanceListener
   @Override
   public void onPartitionsRevoked(Collection<TopicPartition> partitions)
   {
-    log.info("{} - Commiting offsets for all previously assigned partitions", id);
-    CountDownLatch commitDone = new CountDownLatch(1);
-    consumer.commitAsync((offsets, e) ->
-    {
-      commitDone.countDown();
-      if (e == null)
-      {
-        log.error("{} - Could not commit offsets to Kafka!", id, e);
-      }
-      else
-      {
-        offsets.entrySet().stream().forEach(entry ->
-        {
-          log.info("{} - Commited offset for {}: {}", id, entry.getKey(), entry.getValue());
-        });
-      }
-    });
-
     partitions.forEach(tp ->
     {
       Integer partition = tp.partition();
       log.info("{} - removing partition: {}", id, partition);
       this.partitions.remove(partition);
-      Map<String, Long> state = recordHandler.removePartition(partition);
-      for (String user : state.keySet())
+      ApplicationState state = recordHandler.removePartition(partition);
+      log.info(
+        "{} - offset of next unseen message for partition {} is {}",
+        id,
+        partition,
+        state.getOffset());
+      for (String user : state.getAdderState().keySet())
       {
         log.info(
           "{} - Saved state for partition={}|user={}: {}",
           id,
           partition,
           user,
-          state.get(user));
+          state.getAdderState().get(user));
       }
       Map<String, List<AdderResult>> results = adderResults.removePartition(partition);
-      stateRepository.save(new StateDocument(partition, state, results));
+      stateRepository.save(
+        new StateDocument(
+          partition,
+          state.getAdderState(),
+          results,
+          state.getOffset()));
     });
+  }
 
-    try
-    {
-      log.debug("{} - Waiting for async commit to complete", id);
-      commitDone.await();
-    }
-    catch (InterruptedException e)
-    {
-      log.warn(
-        "{} - Interrupted while waiting for async commit in onPartitionsRevoked({})",
-        id,
-        partitions,
-        e);
-    }
+  @Override
+  public void beforeNextPoll()
+  {
+    partitions
+      .stream()
+      .forEach(partition ->
+      {
+        log.info("{} - persisting state & offset for partition: {}", id, partition);
+        ApplicationState state = recordHandler.getState(partition);
+        log.info(
+          "{} - offset of next unseen message for partition {} is {}",
+          id,
+          partition,
+          state.getOffset());
+        for (String user : state.getAdderState().keySet())
+        {
+          log.info(
+            "{} - Saved state for partition={}|user={}: {}",
+            id,
+            partition,
+            user,
+            state.getAdderState().get(user));
+        }
+        Map<String, List<AdderResult>> results = adderResults.getState(partition);
+        stateRepository.save(
+          new StateDocument(
+            partition,
+            state.getAdderState(),
            results,
+            state.getOffset()));
+      });
   }
 }
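
Note: the diff above calls into several types whose definitions are not part of this blobdiff, namely the RebalanceListener interface that replaces ConsumerRebalanceListener, the ApplicationState value returned by ApplicationRecordHandler, and the extended StateDocument constructor that additionally stores the offset. What follows is only a minimal sketch of what those collaborators might look like, inferred from the calls visible above; the inheritance from ConsumerRebalanceListener and the Map<String, Long> element type of the adder state are assumptions, not taken from the repository.

// RebalanceListener.java -- assumed shape, inferred from the @Override
// annotations and the new beforeNextPoll() callback in the diff above.
package de.juplo.kafka;

import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;

public interface RebalanceListener extends ConsumerRebalanceListener
{
  // Hook that the consumer loop is assumed to call once per poll cycle, so
  // the listener can persist state and the offset of the next unseen message.
  void beforeNextPoll();
}


// ApplicationState.java -- assumed shape of the value returned by
// ApplicationRecordHandler.removePartition()/getState() in the diff above.
package de.juplo.kafka;

import java.util.Map;

public class ApplicationState
{
  private final long offset;                  // offset of the next unseen message
  private final Map<String, Long> adderState; // per-user sums (element type assumed)

  public ApplicationState(long offset, Map<String, Long> adderState)
  {
    this.offset = offset;
    this.adderState = adderState;
  }

  public long getOffset()
  {
    return offset;
  }

  public Map<String, Long> getAdderState()
  {
    return adderState;
  }
}

With collaborators of this shape, the listener can write the adder state, the collected results, and the offset into a single StateDocument both when a partition is revoked and periodically via beforeNextPoll(), which is what allows onPartitionsAssigned() to resume from the offset of the next unseen message instead of relying on offsets committed to Kafka.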