package de.juplo.kafka;
-import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
public ApplicationRebalanceListener rebalanceListener(
ApplicationRecordHandler recordHandler,
StateRepository stateRepository,
- Consumer<String, String> consumer,
ApplicationProperties properties)
{
return new ApplicationRebalanceListener(
recordHandler,
stateRepository,
properties.getClientId(),
- properties.getTopic(),
Clock.systemDefaultZone(),
- properties.getCommitInterval(),
- consumer);
+ properties.getCommitInterval());
}
@Bean
props.put("partition.assignment.strategy", "org.apache.kafka.clients.consumer.CooperativeStickyAssignor");
props.put("group.id", properties.getGroupId());
props.put("client.id", properties.getClientId());
- props.put("enable.auto.commit", false);
props.put("auto.offset.reset", properties.getAutoOffsetReset());
props.put("metadata.max.age.ms", "1000");
props.put("key.deserializer", StringDeserializer.class.getName());
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
-import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;
import java.time.Clock;
private final ApplicationRecordHandler recordHandler;
private final StateRepository stateRepository;
private final String id;
- private final String topic;
private final Clock clock;
private final Duration commitInterval;
- private final Consumer<String, String> consumer;
private Instant lastCommit = Instant.EPOCH;
- private boolean commitsEnabled = true;
@Override
public void onPartitionsAssigned(Collection<TopicPartition> partitions)
partitions.forEach(tp ->
{
Integer partition = tp.partition();
+ log.info("{} - adding partition: {}", id, partition);
StateDocument document =
stateRepository
.findById(Integer.toString(partition))
.orElse(new StateDocument(partition));
- log.info("{} - adding partition: {}, offset={}", id, partition, document.offset);
- if (document.offset >= 0)
- {
- // Only seek, if a stored offset was found
- // Otherwise: Use initial offset, generated by Kafka
- consumer.seek(tp, document.offset);
- }
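+ // No seek any more: the position is derived from the offsets committed to Kafka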
recordHandler.addPartition(partition, document.state);
});
}
partitions.forEach(tp ->
{
Integer partition = tp.partition();
- Long offset = consumer.position(tp);
- log.info(
- "{} - removing partition: {}, offset of next message {})",
- id,
- partition,
- offset);
- if (commitsEnabled)
- {
- Map<String, Long> removed = recordHandler.removePartition(partition);
- stateRepository.save(new StateDocument(partition, removed, offset));
- }
- else
+ log.info("{} - removing partition: {}", id, partition);
+ Map<String, Long> removed = recordHandler.removePartition(partition);
+ for (String key : removed.keySet())
{
- log.info("Offset commits are disabled! Last commit: {}", lastCommit);
+ log.info(
+ "{} - Seen {} messages for partition={}|key={}",
+ id,
+ removed.get(key),
+ partition,
+ key);
}
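+ // Only the aggregated state is persisted in MongoDB; offsets are no longer stored there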
+ stateRepository.save(new StateDocument(partition, removed));
});
}
@Override
public void beforeNextPoll()
{
- if (!commitsEnabled)
- {
- log.info("Offset commits are disabled! Last commit: {}", lastCommit);
- return;
- }
-
if (lastCommit.plus(commitInterval).isBefore(clock.instant()))
{
- log.debug("Storing data and offsets, last commit: {}", lastCommit);
+ log.debug("Storing data, last commit: {}", lastCommit);
recordHandler.getState().forEach((partition, adder) -> stateRepository.save(
new StateDocument(
partition,
- adder.getState(),
- consumer.position(new TopicPartition(topic, partition)))));
+ adder.getState())));
lastCommit = clock.instant();
}
}
-
- @Override
- public void enableCommits()
- {
- commitsEnabled = true;
- }
-
- @Override
- public void disableCommits()
- {
- commitsEnabled = false;
- }
}
try
{
log.info("{} - Subscribing to topic {}", id, topic);
- rebalanceListener.enableCommits();
consumer.subscribe(Arrays.asList(topic), rebalanceListener);
while (true)
catch(WakeupException e)
{
log.info("{} - RIIING! Request to stop consumption - commiting current offsets!", id);
+ consumer.commitSync();
shutdown();
}
catch(RecordDeserializationException e)
offset,
e.getCause().toString());
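+ // Commit the current offsets synchronously, so that records processed before the poison pill are not consumed again after a restart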
+ consumer.commitSync();
shutdown(e);
}
catch(Exception e)
{
- log.error("{} - Unexpected error: {}, disabling commits", id, e.toString(), e);
- rebalanceListener.disableCommits();
+ log.error("{} - Unexpected error: {}", id, e.toString(), e);
shutdown(e);
}
finally
public interface PollIntervalAwareConsumerRebalanceListener extends ConsumerRebalanceListener
{
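// Hook invoked before the consumer's next poll()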
default void beforeNextPoll() {}
-
- default void enableCommits() {}
- default void disableCommits() {}
}
{
@Id
public String id;
- public long offset = -1l;
public Map<String, Long> state;
public StateDocument()
public StateDocument(
Integer partition,
- Map<String, Long> state,
- long offset)
+ Map<String, Long> state)
{
this.id = Integer.toString(partition);
this.state = state;
- this.offset = offset;
}
}
@Test
public void testApplicationStartup()
{
restTemplate.getForObject(
"http://localhost:" + port + "/actuator/health",
@Autowired
ExecutorService executor;
@Autowired
- StateRepository stateRepository;
- @Autowired
PollIntervalAwareConsumerRebalanceListener rebalanceListener;
@Autowired
RecordHandler<K, V> recordHandler;
void seekToEnd()
{
offsetConsumer.assign(partitions());
+ offsetConsumer.seekToEnd(partitions());
partitions().forEach(tp ->
{
+ // seekToEnd() works lazily: it only takes effect on poll()/position()
Long offset = offsetConsumer.position(tp);
log.info("New position for {}: {}", tp, offset);
- Integer partition = tp.partition();
- StateDocument document =
- stateRepository
- .findById(partition.toString())
- .orElse(new StateDocument(partition));
- document.offset = offset;
- stateRepository.save(document);
});
+ // The new positions must be committed!
+ offsetConsumer.commitSync();
offsetConsumer.unsubscribe();
}
void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
{
- partitions().forEach(tp ->
- {
- String partition = Integer.toString(tp.partition());
- Optional<Long> offset = stateRepository.findById(partition).map(document -> document.offset);
- consumer.accept(tp, offset.orElse(0l));
- });
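+ // position() resolves to the committed offset (or the auto.offset.reset position) for each assigned partition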
+ offsetConsumer.assign(partitions());
+ partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
+ offsetConsumer.unsubscribe();
}
List<TopicPartition> partitions()