package de.juplo.kafka;
-import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
ApplicationRecordHandler recordHandler,
AdderResults adderResults,
StateRepository stateRepository,
- Consumer<String, String> consumer,
ApplicationProperties properties)
{
return new ApplicationRebalanceListener(
adderResults,
stateRepository,
properties.getClientId(),
- properties.getTopic(),
Clock.systemDefaultZone(),
- properties.getCommitInterval(),
- consumer);
+ properties.getCommitInterval());
}
@Bean
props.put("partition.assignment.strategy", "org.apache.kafka.clients.consumer.CooperativeStickyAssignor");
props.put("group.id", properties.getGroupId());
props.put("client.id", properties.getClientId());
- props.put("enable.auto.commit", false);
props.put("auto.offset.reset", properties.getAutoOffsetReset());
+ props.put("auto.commit.interval.ms", (int)properties.getCommitInterval().toMillis());
props.put("metadata.max.age.ms", "1000");
props.put("key.deserializer", StringDeserializer.class.getName());
props.put("value.deserializer", StringDeserializer.class.getName());
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
-import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;
import java.time.Clock;
private final AdderResults adderResults;
private final StateRepository stateRepository;
private final String id;
- private final String topic;
private final Clock clock;
private final Duration commitInterval;
- private final Consumer<String, String> consumer;
private final Set<Integer> partitions = new HashSet<>();
private Instant lastCommit = Instant.EPOCH;
- private boolean commitsEnabled = true;
@Override
public void onPartitionsAssigned(Collection<TopicPartition> partitions)
partitions.forEach(tp ->
{
Integer partition = tp.partition();
+ log.info("{} - adding partition: {}", id, partition);
this.partitions.add(partition);
StateDocument document =
stateRepository
.findById(Integer.toString(partition))
.orElse(new StateDocument(partition));
- log.info("{} - adding partition: {}, offset={}", id, partition, document.offset);
- if (document.offset >= 0)
- {
- // Only seek, if a stored offset was found
- // Otherwise: Use initial offset, generated by Kafka
- consumer.seek(tp, document.offset);
- }
recordHandler.addPartition(partition, document.state);
adderResults.addPartition(partition, document.results);
});
partitions.forEach(tp ->
{
Integer partition = tp.partition();
+ log.info("{} - removing partition: {}", id, partition);
this.partitions.remove(partition);
- Long offset = consumer.position(tp);
- log.info(
- "{} - removing partition: {}, offset of next message {})",
- id,
- partition,
- offset);
- if (commitsEnabled)
- {
- Map<String, AdderResult> state = recordHandler.removePartition(partition);
- Map<String, List<AdderResult>> results = adderResults.removePartition(partition);
- stateRepository.save(new StateDocument(partition, state, results, offset));
- }
- else
+ Map<String, AdderResult> state = recordHandler.removePartition(partition);
+ for (String key : state.keySet())
{
- log.info("Offset commits are disabled! Last commit: {}", lastCommit);
+ log.info(
+ "{} - Seen {} messages for partition={}|key={}",
+ id,
+ state.get(key),
+ partition,
+ key);
}
+ Map<String, List<AdderResult>> results = adderResults.removePartition(partition);
+ stateRepository.save(new StateDocument(partition, state, results));
});
}
@Override
public void beforeNextPoll()
{
- if (!commitsEnabled)
- {
- log.info("Offset commits are disabled! Last commit: {}", lastCommit);
- return;
- }
-
if (lastCommit.plus(commitInterval).isBefore(clock.instant()))
{
- log.debug("Storing data and offsets, last commit: {}", lastCommit);
+ log.debug("Storing data, last commit: {}", lastCommit);
partitions.forEach(partition -> stateRepository.save(
new StateDocument(
partition,
recordHandler.getState(partition).getState(),
- adderResults.getState(partition),
- consumer.position(new TopicPartition(topic, partition)))));
+ adderResults.getState(partition))));
lastCommit = clock.instant();
}
}
-
- @Override
- public void enableCommits()
- {
- commitsEnabled = true;
- }
-
- @Override
- public void disableCommits()
- {
- commitsEnabled = false;
- }
}
try
{
log.info("{} - Subscribing to topic {}", id, topic);
- rebalanceListener.enableCommits();
consumer.subscribe(Arrays.asList(topic), rebalanceListener);
while (true)
catch(WakeupException e)
{
log.info("{} - RIIING! Request to stop consumption - commiting current offsets!", id);
+ consumer.commitSync();
shutdown();
}
catch(RecordDeserializationException e)
offset,
e.getCause().toString());
+ consumer.commitSync();
shutdown(e);
}
catch(Exception e)
{
- log.error("{} - Unexpected error: {}, disabling commits", id, e.toString(), e);
- rebalanceListener.disableCommits();
+ log.error("{} - Unexpected error: {}", id, e.toString(), e);
shutdown(e);
}
finally
public interface PollIntervalAwareConsumerRebalanceListener extends ConsumerRebalanceListener
{
default void beforeNextPoll() {}
-
- default void enableCommits() {}
- default void disableCommits() {}
}
{
@Id
public String id;
- public long offset = -1l;
public Map<String, AdderResult> state;
public Map<String, List<AdderResult>> results;
public StateDocument(
Integer partition,
Map<String, AdderResult> state,
- Map<String, List<AdderResult>> results,
- long offset)
+ Map<String, List<AdderResult>> results)
{
this.id = Integer.toString(partition);
this.state = state;
this.results = results;
- this.offset = offset;
}
}
@Test
- public void testApplicationStartup()
+ public void testApplicationStartup()
{
restTemplate.getForObject(
"http://localhost:" + port + "/actuator/health",
package de.juplo.kafka;
+import com.mongodb.client.MongoClient;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.junit.jupiter.api.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
+import org.springframework.boot.autoconfigure.mongo.MongoProperties;
import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
import org.springframework.boot.test.context.TestConfiguration;
@Autowired
ExecutorService executor;
@Autowired
- StateRepository stateRepository;
+ MongoClient mongoClient;
+ @Autowired
+ MongoProperties mongoProperties;
@Autowired
PollIntervalAwareConsumerRebalanceListener rebalanceListener;
@Autowired
void seekToEnd()
{
offsetConsumer.assign(partitions());
+ offsetConsumer.seekToEnd(partitions());
partitions().forEach(tp ->
{
+ // seekToEnd() works lazily: it only takes effect on poll()/position()
Long offset = offsetConsumer.position(tp);
log.info("New position for {}: {}", tp, offset);
- Integer partition = tp.partition();
- StateDocument document =
- stateRepository
- .findById(partition.toString())
- .orElse(new StateDocument(partition));
- document.offset = offset;
- stateRepository.save(document);
});
+ // The new positions must be committed!
+ offsetConsumer.commitSync();
offsetConsumer.unsubscribe();
}
void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
{
- partitions().forEach(tp ->
- {
- String partition = Integer.toString(tp.partition());
- Optional<Long> offset = stateRepository.findById(partition).map(document -> document.offset);
- consumer.accept(tp, offset.orElse(0l));
- });
+ offsetConsumer.assign(partitions());
+ partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
+ offsetConsumer.unsubscribe();
}
List<TopicPartition> partitions()
}
};
+ mongoClient.getDatabase(mongoProperties.getDatabase()).drop();
+
endlessConsumer =
new EndlessConsumer<>(
executor,