import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;

import java.time.Clock;
import java.time.Duration;
import java.time.Instant;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
@RequiredArgsConstructor
public class ApplicationRebalanceListener implements PollIntervalAwareConsumerRebalanceListener
{
private final ApplicationRecordHandler recordHandler;
+ private final AdderResults adderResults;
private final StateRepository stateRepository;
private final String id;
+ private final String topic;
private final Clock clock;
private final Duration commitInterval;
+ private final Consumer<String, String> consumer;
+
+ private final Set<Integer> partitions = new HashSet<>();
private Instant lastCommit = Instant.EPOCH;
+ private boolean commitsEnabled = true;
@Override
public void onPartitionsAssigned(Collection<TopicPartition> partitions)
partitions.forEach(tp ->
{
Integer partition = tp.partition();
- log.info("{} - adding partition: {}", id, partition);
+ this.partitions.add(partition);
StateDocument document =
stateRepository
.findById(Integer.toString(partition))
.orElse(new StateDocument(partition));
+ log.info("{} - adding partition: {}, offset={}", id, partition, document.offset);
+ if (document.offset >= 0)
+ {
+ // Only seek, if a stored offset was found
+ // Otherwise: Use initial offset, generated by Kafka
+ consumer.seek(tp, document.offset);
+ }
recordHandler.addPartition(partition, document.state);
+ adderResults.addPartition(partition, document.results);
});
}
partitions.forEach(tp ->
{
Integer partition = tp.partition();
- log.info("{} - removing partition: {}", id, partition);
- Map<String, Long> removed = recordHandler.removePartition(partition);
- for (String key : removed.keySet())
+ this.partitions.remove(partition);
+ Long offset = consumer.position(tp);
+ log.info(
+ "{} - removing partition: {}, offset of next message {})",
+ id,
+ partition,
+ offset);
+ if (commitsEnabled)
+ {
+ Map<String, AdderResult> state = recordHandler.removePartition(partition);
+ Map<String, List<AdderResult>> results = adderResults.removePartition(partition);
+ stateRepository.save(new StateDocument(partition, state, results, offset));
+ }
+ else
{
- log.info(
- "{} - Seen {} messages for partition={}|key={}",
- id,
- removed.get(key),
- partition,
- key);
+ log.info("Offset commits are disabled! Last commit: {}", lastCommit);
}
- stateRepository.save(new StateDocument(partition, removed));
});
}
@Override
public void beforeNextPoll()
{
+ if (!commitsEnabled)
+ {
+ log.info("Offset commits are disabled! Last commit: {}", lastCommit);
+ return;
+ }
+
if (lastCommit.plus(commitInterval).isBefore(clock.instant()))
{
- log.debug("Storing data, last commit: {}", lastCommit);
- recordHandler.getState().forEach((partiton, adder) -> stateRepository.save(
+ log.debug("Storing data and offsets, last commit: {}", lastCommit);
+ partitions.forEach(partition -> stateRepository.save(
new StateDocument(
- partiton,
- adder.getState())));
+ partition,
+ recordHandler.getState(partition).getState(),
+ adderResults.getState(partition),
+ consumer.position(new TopicPartition(topic, partition)))));
lastCommit = clock.instant();
}
}
+
+ @Override
+ public void enableCommits()
+ {
+ commitsEnabled = true;
+ }
+
+ @Override
+ public void disableCommits()
+ {
+ commitsEnabled = false;
+ }
}