import org.apache.kafka.common.errors.WakeupException;
import javax.annotation.PreDestroy;
+import java.time.Clock;
import java.time.Duration;
+import java.time.Instant;
import java.util.*;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
public class EndlessConsumer<K, V> implements ConsumerRebalanceListener, Runnable
{
private final ExecutorService executor;
+ private final PartitionStatisticsRepository repository;
private final String id;
private final String topic;
+ private final Clock clock;
+ private final Duration commitInterval;
private final Consumer<K, V> consumer;
private final java.util.function.Consumer<ConsumerRecord<K, V>> handler;
private long consumed = 0;
private final Map<Integer, Map<String, Long>> seen = new HashMap<>();
- private final Map<Integer, Long> offsets = new HashMap<>();
@Override
{
Integer partition = tp.partition();
Long newOffset = consumer.position(tp);
- Long oldOffset = offsets.remove(partition);
log.info(
- "{} - removing partition: {}, consumed {} records (offset {} -> {})",
+ "{} - removing partition: {}, offset of next message {})",
id,
partition,
- newOffset - oldOffset,
- oldOffset,
newOffset);
Map<String, Long> removed = seen.remove(partition);
for (String key : removed.keySet())
partition,
key);
}
+ repository.save(new StatisticsDocument(partition, removed, consumer.position(tp)));
});
}
Integer partition = tp.partition();
Long offset = consumer.position(tp);
log.info("{} - adding partition: {}, offset={}", id, partition, offset);
- offsets.put(partition, offset);
- seen.put(partition, new HashMap<>());
+ StatisticsDocument document =
+ repository
+ .findById(Integer.toString(partition))
+ .orElse(new StatisticsDocument(partition));
+ if (document.offset >= 0)
+ {
+ // Only seek, if a stored offset was found
+ // Otherwise: Use initial offset, generated by Kafka
+ consumer.seek(tp, document.offset);
+ }
+ seen.put(partition, document.statistics);
});
}
log.info("{} - Subscribing to topic {}", id, topic);
consumer.subscribe(Arrays.asList(topic), this);
+ Instant lastCommit = clock.instant();
+
while (true)
{
ConsumerRecords<K, V> records =
seenByKey++;
byKey.put(key, seenByKey);
}
+
+ if (lastCommit.plus(commitInterval).isBefore(clock.instant()))
+ {
+ log.debug("Storing data and offsets, last commit: {}", lastCommit);
+ seen.forEach((partiton, statistics) -> repository.save(
+ new StatisticsDocument(
+ partiton,
+ statistics,
+ consumer.position(new TopicPartition(topic, partiton)))));
+ lastCommit = clock.instant();
+ }
}
}
catch(WakeupException e)
{
log.info("{} - RIIING! Request to stop consumption - commiting current offsets!", id);
- consumer.commitSync();
shutdown();
}
catch(RecordDeserializationException e)
offset,
e.getCause().toString());
- consumer.commitSync();
shutdown(e);
}
catch(Exception e)
// Lifecycle teardown hook (the javax.annotation.PreDestroy import at the top of the
// file suggests this is invoked by the container on shutdown — confirm the annotation
// is present just above this method in the full file).
// This span is in unified-diff form: '-' lines are removed by the patch, '+' lines added.
public void destroy() throws ExecutionException, InterruptedException
{
log.info("{} - Destroy!", id);
// NOTE(review): the patch below deletes the stop()-with-fallback logic entirely, so
// after this change destroy() only logs and no longer attempts to stop the consumer.
// Verify that stopping is now triggered elsewhere (e.g. the WakeupException path in
// the poll loop calls shutdown()) — otherwise the consumer thread may outlive the bean.
- try
- {
- stop();
- }
- catch (IllegalStateException e)
- {
- log.info("{} - Was already stopped", id);
- }
- catch (Exception e)
- {
- log.error("{} - Unexpected exception while trying to stop the consumer", id, e);
- }
- finally
- {
- log.info("{}: Consumed {} messages in total, exiting!", id, consumed);
- }
// The summary log line formerly in the finally-block is kept as the method's only action.
+ log.info("{}: Consumed {} messages in total, exiting!", id, consumed);
}
public boolean running()