X-Git-Url: http://juplo.de/gitweb/?a=blobdiff_plain;f=src%2Fmain%2Fjava%2Fde%2Fjuplo%2Fkafka%2FEndlessConsumer.java;h=3d154c2cbd066e430d7f6f2c8ab49f41a3c9d0eb;hb=ce840f48340d55613291fca468bf10b834c473db;hp=2310ccd7fe7306f4b0e7f5f5081fdd16c5846380;hpb=f9c0ba7779552d8fcfc9cb29c8b689e20c314904;p=demos%2Fkafka%2Ftraining

diff --git a/src/main/java/de/juplo/kafka/EndlessConsumer.java b/src/main/java/de/juplo/kafka/EndlessConsumer.java
index 2310ccd..3d154c2 100644
--- a/src/main/java/de/juplo/kafka/EndlessConsumer.java
+++ b/src/main/java/de/juplo/kafka/EndlessConsumer.java
@@ -1,19 +1,15 @@
 package de.juplo.kafka;
 
+import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.clients.consumer.ConsumerRecords;
-import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.clients.consumer.*;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.errors.RecordDeserializationException;
 import org.apache.kafka.common.errors.WakeupException;
-import org.apache.kafka.common.serialization.StringDeserializer;
 
 import javax.annotation.PreDestroy;
 import java.time.Duration;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Optional;
-import java.util.Properties;
+import java.util.*;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.locks.Condition;
@@ -22,73 +18,86 @@ import java.util.concurrent.locks.ReentrantLock;
 
 
 @Slf4j
-public class EndlessConsumer implements Runnable
+@RequiredArgsConstructor
+public class EndlessConsumer<K, V> implements ConsumerRebalanceListener, Runnable
 {
   private final ExecutorService executor;
-  private final String bootstrapServer;
-  private final String groupId;
+  private final PartitionStatisticsRepository repository;
   private final String id;
   private final String topic;
-  private final String autoOffsetReset;
+  private final Consumer<K, V> consumer;
+  private final java.util.function.Consumer<ConsumerRecord<K, V>> handler;
 
   private final Lock lock = new ReentrantLock();
   private final Condition condition = lock.newCondition();
   private boolean running = false;
   private Exception exception;
   private long consumed = 0;
 
-  private KafkaConsumer<String, String> consumer = null;
+  private final Map<Integer, Map<String, Long>> seen = new HashMap<>();
 
-  private Map<Integer, Map<String, Integer>> seen;
 
+  @Override
+  public void onPartitionsRevoked(Collection<TopicPartition> partitions)
+  {
+    partitions.forEach(tp ->
+    {
+      Integer partition = tp.partition();
+      Long newOffset = consumer.position(tp);
+      log.info(
+          "{} - removing partition: {}, offset of next message {}",
+          id,
+          partition,
+          newOffset);
+      Map<String, Long> removed = seen.remove(partition);
+      for (String key : removed.keySet())
+      {
+        log.info(
+            "{} - Seen {} messages for partition={}|key={}",
+            id,
+            removed.get(key),
+            partition,
+            key);
+      }
+      repository.save(new StatisticsDocument(partition, removed, consumer.position(tp)));
+    });
+  }
 
-  public EndlessConsumer(
-      ExecutorService executor,
-      String bootstrapServer,
-      String groupId,
-      String clientId,
-      String topic,
-      String autoOffsetReset)
+  @Override
+  public void onPartitionsAssigned(Collection<TopicPartition> partitions)
   {
-    this.executor = executor;
-    this.bootstrapServer = bootstrapServer;
-    this.groupId = groupId;
-    this.id = clientId;
-    this.topic = topic;
-    this.autoOffsetReset = autoOffsetReset;
+    partitions.forEach(tp ->
+    {
+      Integer partition = tp.partition();
+      Long offset = consumer.position(tp);
+      log.info("{} - adding partition: {}, offset={}", id, partition, offset);
+      StatisticsDocument document =
+          repository
+              .findById(Integer.toString(partition))
+              .orElse(new StatisticsDocument(partition));
+      consumer.seek(tp, document.offset);
+      seen.put(partition, document.statistics);
+    });
   }
 
+  @Override
   public void run()
   {
     try
     {
-      Properties props = new Properties();
-      props.put("bootstrap.servers", bootstrapServer);
-      props.put("group.id", groupId);
-      props.put("client.id", id);
-      props.put("auto.offset.reset", autoOffsetReset);
-      props.put("metadata.max.age.ms", "1000");
-      props.put("key.deserializer", StringDeserializer.class.getName());
-      props.put("value.deserializer", StringDeserializer.class.getName());
-
-      this.consumer = new KafkaConsumer<>(props);
-
       log.info("{} - Subscribing to topic {}", id, topic);
-      consumer.subscribe(Arrays.asList(topic));
-
-      seen = new HashMap<>();
+      consumer.subscribe(Arrays.asList(topic), this);
 
       while (true)
      {
-        ConsumerRecords<String, String> records =
+        ConsumerRecords<K, V> records =
             consumer.poll(Duration.ofSeconds(1));
 
         // Do something with the data...
         log.info("{} - Received {} messages", id, records.count());
-        for (ConsumerRecord<String, String> record : records)
+        for (ConsumerRecord<K, V> record : records)
         {
-          consumed++;
           log.info(
               "{} - {}: {}/{} - {}={}",
               id,
@@ -99,28 +108,47 @@ public class EndlessConsumer implements Runnable
               record.value()
           );
 
-          Integer partition = record.partition();
-          String key = record.key() == null ? "NULL" : record.key();
+          handler.accept(record);
 
-          if (!seen.containsKey(partition))
-            seen.put(partition, new HashMap<>());
+          consumed++;
 
-          Map<String, Integer> byKey = seen.get(partition);
+          Integer partition = record.partition();
+          String key = record.key() == null ? "NULL" : record.key().toString();
+          Map<String, Long> byKey = seen.get(partition);
 
           if (!byKey.containsKey(key))
-            byKey.put(key, 0);
+            byKey.put(key, 0L);
 
-          int seenByKey = byKey.get(key);
+          long seenByKey = byKey.get(key);
           seenByKey++;
           byKey.put(key, seenByKey);
         }
+
+        seen.forEach((partition, statistics) -> repository.save(
+            new StatisticsDocument(
+                partition,
+                statistics,
+                consumer.position(new TopicPartition(topic, partition)))));
       }
     }
     catch(WakeupException e)
     {
-      log.info("{} - RIIING!", id);
+      log.info("{} - RIIING! Request to stop consumption - committing current offsets!", id);
       shutdown();
     }
+    catch(RecordDeserializationException e)
+    {
+      TopicPartition tp = e.topicPartition();
+      long offset = e.offset();
+      log.error(
+          "{} - Could not deserialize message on topic {} with offset={}: {}",
+          id,
+          tp,
+          offset,
+          e.getCause().toString());
+
+      shutdown(e);
+    }
     catch(Exception e)
     {
       log.error("{} - Unexpected error: {}", id, e.toString(), e);
@@ -128,33 +156,10 @@ public class EndlessConsumer implements Runnable
     }
     finally
     {
-      log.info("{} - Closing the KafkaConsumer", id);
-      consumer.close();
-
-      for (Integer partition : seen.keySet())
-      {
-        Map<String, Integer> byKey = seen.get(partition);
-        for (String key : byKey.keySet())
-        {
-          log.info(
-              "{} - Seen {} messages for partition={}|key={}",
-              id,
-              byKey.get(key),
-              partition,
-              key);
-        }
-      }
-      seen = null;
-
       log.info("{} - Consumer-Thread exiting", id);
     }
   }
 
-  public Map<Integer, Map<String, Integer>> getSeen()
-  {
-    return seen;
-  }
-
   private void shutdown()
   {
     shutdown(null);
   }
@@ -165,9 +170,25 @@ public class EndlessConsumer implements Runnable
     lock.lock();
     try
     {
-      running = false;
-      exception = e;
-      condition.signal();
+      try
+      {
+        log.info("{} - Unsubscribing from topic {}", id, topic);
+        consumer.unsubscribe();
+      }
+      catch (Exception ue)
+      {
+        log.error(
+            "{} - Error while unsubscribing from topic {}: {}",
+            id,
+            topic,
+            ue.toString());
+      }
+      finally
+      {
+        running = false;
+        exception = e;
+        condition.signal();
+      }
     }
     finally
     {
@@ -175,6 +196,11 @@ public class EndlessConsumer implements Runnable
     }
   }
 
+  public Map<Integer, Map<String, Long>> getSeen()
+  {
+    return seen;
+  }
+
   public void start()
   {
     lock.lock();
@@ -217,22 +243,7 @@ public class EndlessConsumer implements Runnable
   public void destroy() throws ExecutionException, InterruptedException
   {
     log.info("{} - Destroy!", id);
-    try
-    {
-      stop();
-    }
-    catch (IllegalStateException e)
-    {
-      log.info("{} - Was already stopped", id);
-    }
-    catch (Exception e)
-    {
-      log.error("{} - Unexpected exception while trying to stop the consumer", id, e);
-    }
-    finally
-    {
-      log.info("{}: Consumed {} messages in total, exiting!", id, consumed);
-    }
+    log.info("{}: Consumed {} messages in total, exiting!", id, consumed);
   }
 
   public boolean running()