import org.apache.kafka.common.errors.WakeupException;
import javax.annotation.PreDestroy;
-import java.time.Clock;
import java.time.Duration;
-import java.time.Instant;
import java.util.*;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
-import java.util.regex.Pattern;
@Slf4j
@RequiredArgsConstructor
-public class EndlessConsumer implements ConsumerRebalanceListener, Runnable
+public class EndlessConsumer<K, V> implements Runnable
{
- final static Pattern PATTERN = Pattern.compile("\\W+");
-
-
private final ExecutorService executor;
- private final PartitionStatisticsRepository repository;
private final String id;
private final String topic;
- private final Clock clock;
- private final Duration commitInterval;
- private final Consumer<String, String> consumer;
+ private final Consumer<K, V> consumer;
+ private final RebalanceListener rebalanceListener;
+ private final RecordHandler<K, V> recordHandler;
private final Lock lock = new ReentrantLock();
private final Condition condition = lock.newCondition();
private Exception exception;
private long consumed = 0;
- private final Map<Integer, Map<String, Map<String, Long>>> seen = new HashMap<>();
-
-
- @Override
- public void onPartitionsRevoked(Collection<TopicPartition> partitions)
- {
- partitions.forEach(tp ->
- {
- Integer partition = tp.partition();
- Long newOffset = consumer.position(tp);
- log.info(
- "{} - removing partition: {}, offset of next message {})",
- id,
- partition,
- newOffset);
- Map<String, Map<String, Long>> removed = seen.remove(partition);
- repository.save(new StatisticsDocument(partition, removed, consumer.position(tp)));
- });
- }
-
- @Override
- public void onPartitionsAssigned(Collection<TopicPartition> partitions)
- {
- partitions.forEach(tp ->
- {
- Integer partition = tp.partition();
- Long offset = consumer.position(tp);
- log.info("{} - adding partition: {}, offset={}", id, partition, offset);
- StatisticsDocument document =
- repository
- .findById(Integer.toString(partition))
- .orElse(new StatisticsDocument(partition));
- if (document.offset >= 0)
- {
- // Only seek, if a stored offset was found
- // Otherwise: Use initial offset, generated by Kafka
- consumer.seek(tp, document.offset);
- }
- seen.put(partition, document.statistics);
- });
- }
 @Override
 public void run()
 {
   try
   {
log.info("{} - Subscribing to topic {}", id, topic);
- consumer.subscribe(Arrays.asList(topic), this);
-
- Instant lastCommit = clock.instant();
+ rebalanceListener.enableCommits();
+ consumer.subscribe(Arrays.asList(topic), rebalanceListener);
while (true)
{
- ConsumerRecords<String, String> records =
+ ConsumerRecords<K, V> records =
consumer.poll(Duration.ofSeconds(1));
// Do something with the data...
log.info("{} - Received {} messages", id, records.count());
- for (ConsumerRecord<String, String> record : records)
+ for (ConsumerRecord<K, V> record : records)
{
       log.info(
           "{} - {}: {}/{} - {}={}",
           id,
           record.topic(),
           record.partition(),
           record.offset(),
           record.key(),
           record.value()
       );
- consumed++;
+ recordHandler.accept(record);
- Integer partition = record.partition();
- String user = record.key();
- Map<String, Map<String, Long>> users = seen.get(partition);
-
- Map<String, Long> words = users.get(user);
- if (words == null)
- {
- words = new HashMap<>();
- users.put(user, words);
- }
-
- for (String word : PATTERN.split(record.value()))
- {
- Long num = words.get(word);
- if (num == null)
- {
- num = 1l;
- }
- else
- {
- num++;
- }
- words.put(word, num);
- }
- }
-
- if (lastCommit.plus(commitInterval).isBefore(clock.instant()))
- {
- log.debug("Storing data and offsets, last commit: {}", lastCommit);
- seen.forEach((partiton, statistics) -> repository.save(
- new StatisticsDocument(
- partiton,
- statistics,
- consumer.position(new TopicPartition(topic, partiton)))));
- lastCommit = clock.instant();
+ consumed++;
}
}
}
catch(WakeupException e)
{
- log.info("{} - RIIING! Request to stop consumption - commiting current offsets!", id);
+ log.info("{} - RIIING! Request to stop consumption.", id);
shutdown();
}
   catch(RecordDeserializationException e)
   {
     TopicPartition tp = e.topicPartition();
     long offset = e.offset();
     log.error(
         "{} - Could not deserialize message on topic {} with offset={}: {}",
         id,
         tp,
         offset,
         e.getCause().toString());
     shutdown(e);
   }
catch(Exception e)
{
- log.error("{} - Unexpected error: {}", id, e.toString(), e);
+ log.error("{} - Unexpected error: {}, disabling commits", id, e.toString(), e);
+ rebalanceListener.disableCommits();
shutdown(e);
}
   finally
   {
     log.info("{} - Consumer-Thread exiting", id);
   }
}
- public Map<Integer, Map<String, Map<String, Long>>> getSeen()
- {
- return seen;
- }
-
public void start()
{
lock.lock();