X-Git-Url: https://juplo.de/gitweb/?a=blobdiff_plain;f=src%2Fmain%2Fjava%2Fde%2Fjuplo%2Fkafka%2FEndlessConsumer.java;h=9ea944b9039812dd3c0a6d4f1dcd10286d1f67bf;hb=eba11d4859d1e2b936bcd9e8075986b5179b32ea;hp=bc3d3571ad4ab7c450c5c7408f5ebeafe32ed22e;hpb=262d13bfa0274005b050c7b271f867acd9809f8b;p=demos%2Fkafka%2Ftraining

diff --git a/src/main/java/de/juplo/kafka/EndlessConsumer.java b/src/main/java/de/juplo/kafka/EndlessConsumer.java
index bc3d357..9ea944b 100644
--- a/src/main/java/de/juplo/kafka/EndlessConsumer.java
+++ b/src/main/java/de/juplo/kafka/EndlessConsumer.java
@@ -1,120 +1,53 @@
 package de.juplo.kafka;
 
+import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
-import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.clients.consumer.ConsumerRecords;
-import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.clients.consumer.*;
 import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.errors.RecordDeserializationException;
 import org.apache.kafka.common.errors.WakeupException;
-import org.apache.kafka.common.serialization.StringDeserializer;
+import org.springframework.context.ConfigurableApplicationContext;
 
-import javax.annotation.PreDestroy;
 import java.time.Duration;
 import java.util.*;
-import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
-import java.util.concurrent.locks.Condition;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
 
 
 @Slf4j
-public class EndlessConsumer implements Runnable
+@RequiredArgsConstructor
+public class EndlessConsumer<K, V> implements Runnable
 {
   private final ExecutorService executor;
-  private final String bootstrapServer;
-  private final String groupId;
+  private final ConfigurableApplicationContext applicationContext;
   private final String id;
   private final String topic;
-  private final String autoOffsetReset;
+  private final Consumer<K, V> consumer;
+  private final ConsumerRebalanceListener rebalanceListener;
+  private final RecordHandler<K, V> recordHandler;
 
-  private final Lock lock = new ReentrantLock();
-  private final Condition condition = lock.newCondition();
   private boolean running = false;
-  private Exception exception;
+  private Exception exception = null;
   private long consumed = 0;
 
-  private KafkaConsumer<String, String> consumer = null;
-
-  private final Map<Integer, Map<String, Long>> seen = new HashMap<>();
-
-
-  public EndlessConsumer(
-      ExecutorService executor,
-      String bootstrapServer,
-      String groupId,
-      String clientId,
-      String topic,
-      String autoOffsetReset)
-  {
-    this.executor = executor;
-    this.bootstrapServer = bootstrapServer;
-    this.groupId = groupId;
-    this.id = clientId;
-    this.topic = topic;
-    this.autoOffsetReset = autoOffsetReset;
-  }
 
   @Override
   public void run()
   {
     try
     {
-      Properties props = new Properties();
-      props.put("bootstrap.servers", bootstrapServer);
-      props.put("group.id", groupId);
-      props.put("client.id", id);
-      props.put("auto.offset.reset", autoOffsetReset);
-      props.put("metadata.max.age.ms", "1000");
-      props.put("key.deserializer", StringDeserializer.class.getName());
-      props.put("value.deserializer", StringDeserializer.class.getName());
-
-      this.consumer = new KafkaConsumer<>(props);
-
       log.info("{} - Subscribing to topic {}", id, topic);
-      consumer.subscribe(Arrays.asList(topic), new ConsumerRebalanceListener()
-      {
-        @Override
-        public void onPartitionsRevoked(Collection<TopicPartition> partitions)
-        {
-          partitions.forEach(tp ->
-          {
-            log.info("{} - removing partition: {}", id, tp);
-            Map<String, Long> removed = seen.remove(tp.partition());
-            for (String key : removed.keySet())
-            {
-              log.info(
-                  "{} - Seen {} messages for partition={}|key={}",
-                  id,
-                  removed.get(key),
-                  tp.partition(),
-                  key);
-            }
-          });
-        }
-
-        @Override
-        public void onPartitionsAssigned(Collection<TopicPartition> partitions)
-        {
-          partitions.forEach(tp ->
-          {
-            log.info("{} - adding partition: {}", id, tp);
-            seen.put(tp.partition(), new HashMap<>());
-          });
-        }
-      });
+      consumer.subscribe(Arrays.asList(topic), rebalanceListener);
 
       while (true)
       {
-        ConsumerRecords<String, String> records =
+        ConsumerRecords<K, V> records =
             consumer.poll(Duration.ofSeconds(1));
 
         // Do something with the data...
         log.info("{} - Received {} messages", id, records.count());
-        for (ConsumerRecord<String, String> record : records)
+        for (ConsumerRecord<K, V> record : records)
         {
-          consumed++;
           log.info(
               "{} - {}: {}/{} - {}={}",
               id,
@@ -125,148 +58,70 @@ public class EndlessConsumer implements Runnable
               record.value()
           );
 
-          Integer partition = record.partition();
-          String key = record.key() == null ? "NULL" : record.key();
-          Map<String, Long> byKey = seen.get(partition);
+          recordHandler.accept(record);
 
-          if (!byKey.containsKey(key))
-            byKey.put(key, 0l);
-
-          long seenByKey = byKey.get(key);
-          seenByKey++;
-          byKey.put(key, seenByKey);
+          consumed++;
         }
       }
     }
     catch(WakeupException e)
     {
-      log.info("{} - RIIING!", id);
-      shutdown();
+      log.info("{} - RIIING! Request to stop consumption - commiting current offsets!", id);
+    }
+    catch(RecordDeserializationException e)
+    {
+      TopicPartition tp = e.topicPartition();
+      long offset = e.offset();
+      log.error(
+          "{} - Could not deserialize message on topic {} with offset={}: {}",
+          id,
+          tp,
+          offset,
+          e.getCause().toString());
+      this.exception = e;
     }
     catch(Exception e)
     {
       log.error("{} - Unexpected error: {}", id, e.toString(), e);
-      shutdown(e);
+      this.exception = e;
+      log.info("{} - Unsubscribing...", id);
+      consumer.unsubscribe();
     }
     finally
     {
-      log.info("{} - Closing the KafkaConsumer", id);
+      running = false;
+      log.info("{} - Closing the consumer...", id);
       consumer.close();
+      log.info("{} - Shutting down the app...", id);
+      applicationContext.close();
       log.info("{} - Consumer-Thread exiting", id);
     }
   }
 
-  private void shutdown()
-  {
-    shutdown(null);
-  }
-
-  private void shutdown(Exception e)
-  {
-    lock.lock();
-    try
-    {
-      running = false;
-      exception = e;
-      condition.signal();
-    }
-    finally
-    {
-      lock.unlock();
-    }
-  }
-
-  public Map<Integer, Map<String, Long>> getSeen()
-  {
-    return seen;
-  }
-
   public void start()
   {
-    lock.lock();
-    try
-    {
-      if (running)
-        throw new IllegalStateException("Consumer instance " + id + " is already running!");
+    if (running)
+      throw new IllegalStateException("Consumer instance " + id + " is already running!");
 
-      log.info("{} - Starting - consumed {} messages before", id, consumed);
-      running = true;
-      exception = null;
-      executor.submit(this);
-    }
-    finally
-    {
-      lock.unlock();
-    }
-  }
-
-  public synchronized void stop() throws ExecutionException, InterruptedException
-  {
-    lock.lock();
-    try
-    {
-      if (!running)
-        throw new IllegalStateException("Consumer instance " + id + " is not running!");
-
-      log.info("{} - Stopping", id);
-      consumer.wakeup();
-      condition.await();
-      log.info("{} - Stopped - consumed {} messages so far", id, consumed);
-    }
-    finally
-    {
-      lock.unlock();
-    }
+    log.info("{} - Starting - consumed {} messages before", id, consumed);
+    running = true;
+    executor.submit(this);
   }
 
-  @PreDestroy
-  public void destroy() throws ExecutionException, InterruptedException
+  public void stop()
   {
-    log.info("{} - Destroy!", id);
-    try
-    {
-      stop();
-    }
-    catch (IllegalStateException e)
-    {
-      log.info("{} - Was already stopped", id);
-    }
-    catch (Exception e)
-    {
-      log.error("{} - Unexpected exception while trying to stop the consumer", id, e);
-    }
-    finally
-    {
-      log.info("{}: Consumed {} messages in total, exiting!", id, consumed);
-    }
+    consumer.wakeup();
   }
 
-
   public boolean running()
   {
-    lock.lock();
-    try
-    {
-      return running;
-    }
-    finally
-    {
-      lock.unlock();
-    }
+    return running;
   }
 
   public Optional<Exception> exitStatus()
   {
-    lock.lock();
-    try
-    {
-      if (running)
-        throw new IllegalStateException("No exit-status available: Consumer instance " + id + " is running!");
+    if (running)
+      throw new IllegalStateException("No exit-status available: Consumer instance " + id + " is running!");
 
-      return Optional.ofNullable(exception);
-    }
-    finally
-    {
-      lock.unlock();
-    }
+    return Optional.ofNullable(exception);
   }
 }
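
For orientation, here is a minimal wiring sketch for the refactored class; it is not part of the commit above. It assumes a Spring Boot setup in which a RecordHandler<String, String> bean and a ConsumerRebalanceListener bean already exist, and that RecordHandler only needs the accept(record) method the run() loop calls. It rebuilds the consumer from the same kind of settings the removed code used to assemble inside run(); the configuration class name, broker address, group-id, client-id, topic name and auto.offset.reset value are invented for the example.

package de.juplo.kafka;

import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import java.util.Properties;
import java.util.concurrent.Executors;

@Configuration
public class ExampleConfiguration // hypothetical name, not part of the diff
{
  @Bean
  public EndlessConsumer<String, String> endlessConsumer(
      ConfigurableApplicationContext applicationContext,
      ConsumerRebalanceListener rebalanceListener,       // assumed to exist as a bean
      RecordHandler<String, String> recordHandler)       // assumed to exist as a bean
  {
    // The consumer is now created outside of EndlessConsumer and handed in;
    // these properties mirror what the removed code used to set inside run().
    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092");    // assumed address
    props.put("group.id", "my-group");                   // assumed group-id
    props.put("client.id", "consumer-1");                // assumed client-id
    props.put("auto.offset.reset", "earliest");          // assumed value
    props.put("key.deserializer", StringDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());

    // Constructor arguments follow the field order of the
    // Lombok-generated @RequiredArgsConstructor.
    EndlessConsumer<String, String> endlessConsumer =
        new EndlessConsumer<>(
            Executors.newSingleThreadExecutor(),
            applicationContext,
            "consumer-1",                                // id used in the log output
            "test",                                      // topic, assumed name
            new KafkaConsumer<>(props),
            rebalanceListener,
            recordHandler);

    endlessConsumer.start();
    return endlessConsumer;
  }
}

With this wiring, stop() only calls consumer.wakeup(); the WakeupException thrown by the next poll() ends the loop, and the finally block marks the consumer as no longer running, closes it, and closes the ApplicationContext, so the application shuts down together with its consumer thread.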