X-Git-Url: http://juplo.de/gitweb/?a=blobdiff_plain;f=src%2Fmain%2Fjava%2Fde%2Fjuplo%2Fkafka%2FEndlessConsumer.java;h=04a0a3af8e112e55f7c34a60fe785713c27283b8;hb=1709f0e4f41be7e3b955d19769697a517633827d;hp=22dce95ee17a7c6c9f089bbe5e9377fe8bdcdba3;hpb=e87f4bb2bc188252955fb4932ddd99161ba621d3;p=demos%2Fkafka%2Ftraining

diff --git a/src/main/java/de/juplo/kafka/EndlessConsumer.java b/src/main/java/de/juplo/kafka/EndlessConsumer.java
index 22dce95..04a0a3a 100644
--- a/src/main/java/de/juplo/kafka/EndlessConsumer.java
+++ b/src/main/java/de/juplo/kafka/EndlessConsumer.java
@@ -1,145 +1,130 @@
 package de.juplo.kafka;
 
+import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.clients.consumer.ConsumerRecords;
-import org.apache.kafka.clients.consumer.KafkaConsumer;
-import org.apache.kafka.common.errors.WakeupException;
-import org.apache.kafka.common.serialization.StringDeserializer;
+import org.apache.kafka.common.TopicPartition;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.kafka.annotation.KafkaListener;
+import org.springframework.kafka.config.KafkaListenerEndpointRegistry;
+import org.springframework.kafka.listener.ConsumerAwareRebalanceListener;
+import org.springframework.stereotype.Component;
 
-import javax.annotation.PreDestroy;
-import java.time.Duration;
-import java.util.Arrays;
-import java.util.Properties;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
-import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.function.Consumer;
 
 
+@Component
 @Slf4j
-public class EndlessConsumer implements Runnable
+@RequiredArgsConstructor
+public class EndlessConsumer implements ConsumerAwareRebalanceListener
 {
-  private final ExecutorService executor;
-  private final String bootstrapServer;
-  private final String groupId;
-  private final String id;
-  private final String topic;
+  @Autowired
+  private KafkaListenerEndpointRegistry registry;
+  @Value("${spring.kafka.consumer.client-id}")
+  String id;
+  @Autowired
+  Consumer<ConsumerRecord<String, String>> handler;
 
-  private AtomicBoolean running = new AtomicBoolean();
   private long consumed = 0;
-  private KafkaConsumer<String, String> consumer = null;
-  private Future<?> future = null;
-
-  public EndlessConsumer(
-      ExecutorService executor,
-      String bootstrapServer,
-      String groupId,
-      String clientId,
-      String topic)
-  {
-    this.executor = executor;
-    this.bootstrapServer = bootstrapServer;
-    this.groupId = groupId;
-    this.id = clientId;
-    this.topic = topic;
-  }
 
-  @Override
-  public void run()
-  {
-    Properties props = new Properties();
-    props.put("bootstrap.servers", bootstrapServer);
-    props.put("group.id", groupId);
-    props.put("client.id", id);
-    props.put("auto.offset.reset", "earliest");
-    props.put("key.deserializer", StringDeserializer.class.getName());
-    props.put("value.deserializer", StringDeserializer.class.getName());
+  private final Map<Integer, Map<String, Long>> seen = new HashMap<>();
+  private final Map<Integer, Long> offsets = new HashMap<>();
 
-    this.consumer = new KafkaConsumer<>(props);
 
-    try
+  @Override
+  public void onPartitionsRevokedBeforeCommit(
+      org.apache.kafka.clients.consumer.Consumer<?, ?> consumer,
+      Collection<TopicPartition> partitions)
+  {
+    partitions.forEach(tp ->
     {
-      log.info("{} - Subscribing to topic {}", id, topic);
-      consumer.subscribe(Arrays.asList(topic));
-
-      while (true)
+      Integer partition = tp.partition();
+      Long newOffset = consumer.position(tp);
+      Long oldOffset = offsets.remove(partition);
+      log.info(
+          "{} - removing partition: {}, consumed {} records (offset {} -> {})",
+          id,
+          partition,
+          newOffset - oldOffset,
+          oldOffset,
+          newOffset);
+      Map<String, Long> removed = seen.remove(partition);
+      for (String key : removed.keySet())
       {
-        ConsumerRecords<String, String> records =
-            consumer.poll(Duration.ofSeconds(1));
-
-        // Do something with the data...
-        log.info("{} - Received {} messages", id, records.count());
-        for (ConsumerRecord<String, String> record : records)
-        {
-          consumed++;
-          log.info(
-              "{} - {}: {}/{} - {}={}",
-              id,
-              record.offset(),
-              record.topic(),
-              record.partition(),
-              record.key(),
-              record.value()
-          );
-        }
+        log.info(
+            "{} - Seen {} messages for partition={}|key={}",
+            id,
+            removed.get(key),
+            partition,
+            key);
       }
-    }
-    catch(WakeupException e)
-    {
-      log.info("{} - RIIING!", id);
-    }
-    catch(Exception e)
-    {
-      log.error("{} - Unexpected error: {}", id, e.toString());
-      running.set(false); // Mark the instance as not running
-    }
-    finally
+    });
+  }
+
+  @Override
+  public void onPartitionsAssigned(
+      org.apache.kafka.clients.consumer.Consumer<?, ?> consumer,
+      Collection<TopicPartition> partitions)
+  {
+    partitions.forEach(tp ->
     {
-      log.info("{} - Closing the KafkaConsumer", id);
-      consumer.close();
-      log.info("{} - Consumer-Thread exiting", id);
-    }
+      Integer partition = tp.partition();
+      Long offset = consumer.position(tp);
+      log.info("{} - adding partition: {}, offset={}", id, partition, offset);
+      offsets.put(partition, offset);
+      seen.put(partition, new HashMap<>());
+    });
+  }
+
+
+  @KafkaListener(
+      id = "${spring.kafka.consumer.client-id}",
+      idIsGroup = false,
+      topics = "${consumer.topic}",
+      autoStartup = "false")
+  public void receive(ConsumerRecord<String, String> record)
+  {
+    log.info(
+        "{} - {}: {}/{} - {}={}",
+        id,
+        record.offset(),
+        record.topic(),
+        record.partition(),
+        record.key(),
+        record.value()
+    );
+
+    handler.accept(record);
+
+    consumed++;
   }
 
   public synchronized void start()
   {
-    boolean stateChanged = running.compareAndSet(false, true);
-    if (!stateChanged)
-      throw new RuntimeException("Consumer instance " + id + " is already running!");
+    if (registry.getListenerContainer(id).isChildRunning())
+      throw new IllegalStateException("Consumer instance " + id + " is already running!");
 
     log.info("{} - Starting - consumed {} messages before", id, consumed);
-    future = executor.submit(this);
+    registry.getListenerContainer(id).start();
   }
 
-  public synchronized void stop() throws ExecutionException, InterruptedException
+  public synchronized void stop()
   {
-    boolean stateChanged = running.compareAndSet(true, false);
-    if (!stateChanged)
-      throw new RuntimeException("Consumer instance " + id + " is not running!");
+    if (!registry.getListenerContainer(id).isChildRunning())
+      throw new IllegalStateException("Consumer instance " + id + " is not running!");
 
     log.info("{} - Stopping", id);
-    consumer.wakeup();
-    future.get();
+    registry.getListenerContainer(id).stop();
     log.info("{} - Stopped - consumed {} messages so far", id, consumed);
   }
 
-  @PreDestroy
-  public void destroy() throws ExecutionException, InterruptedException
+  public synchronized boolean isRunning()
   {
-    log.info("{} - Destroy!", id);
-    try
-    {
-      stop();
-    }
-    catch (IllegalStateException e)
-    {
-      log.info("{} - Was already stopped", id);
-    }
-    finally
-    {
-      log.info("{}: Consumed {} messages in total, exiting!", id, consumed);
-    }
+    return registry.getListenerContainer(id).isChildRunning();
   }
 }
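
Note: after this change the class no longer builds its own KafkaConsumer. The poll loop becomes a @KafkaListener method, start/stop are delegated to the KafkaListenerEndpointRegistry, and per-record processing is handed to an injected Consumer<ConsumerRecord<String, String>>. The diff does not show where that handler bean or the referenced properties (spring.kafka.consumer.client-id, consumer.topic) are defined. The following is only a minimal sketch of how the surrounding Spring Boot application might supply the handler; the class name ApplicationConfiguration and the bean method name recordHandler are assumptions, not part of this commit.

    package de.juplo.kafka;

    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.springframework.context.annotation.Bean;
    import org.springframework.context.annotation.Configuration;

    import java.util.function.Consumer;

    @Configuration
    public class ApplicationConfiguration
    {
      // Hypothetical handler bean: EndlessConsumer autowires a
      // Consumer<ConsumerRecord<String, String>> and calls handler.accept(record)
      // for every record delivered to its @KafkaListener method.
      @Bean
      public Consumer<ConsumerRecord<String, String>> recordHandler()
      {
        return record -> {}; // no-op; a real application would process the record here
      }
    }

Because the listener is declared with autoStartup = "false", the container only runs once EndlessConsumer.start() calls registry.getListenerContainer(id).start().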