From 0b5724a5f250e739db9f019e1d141cc6fa4f6246 Mon Sep 17 00:00:00 2001
From: Kai Moritz
Date: Thu, 26 Sep 2024 15:21:01 +0200
Subject: [PATCH] Converted `simple-producer` into `simple-consumer`

---
 Dockerfile                                    |   2 +-
 README.sh                                     |  15 +-
 docker/docker-compose.yml                     |   4 +
 pom.xml                                       |   8 +-
 .../java/de/juplo/kafka/ExampleConsumer.java  | 137 ++++++++++++++++
 .../java/de/juplo/kafka/ExampleProducer.java  | 154 ------------------
 6 files changed, 155 insertions(+), 165 deletions(-)
 create mode 100644 src/main/java/de/juplo/kafka/ExampleConsumer.java
 delete mode 100644 src/main/java/de/juplo/kafka/ExampleProducer.java

diff --git a/Dockerfile b/Dockerfile
index 74e66ed..22819af 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -3,4 +3,4 @@ VOLUME /tmp
 COPY target/*.jar /opt/app.jar
 COPY target/libs /opt/libs
 ENTRYPOINT [ "java", "-jar", "/opt/app.jar" ]
-CMD [ "kafka:9092", "test", "DCKR" ]
+CMD [ "kafka:9092", "test", "my-group", "DCKR" ]
diff --git a/README.sh b/README.sh
index 3d98ace..85b8f96 100755
--- a/README.sh
+++ b/README.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-IMAGE=juplo/simple-producer:1.0-SNAPSHOT
+IMAGE=juplo/simple-consumer:1.0-SNAPSHOT
 
 if [ "$1" = "cleanup" ]
 then
@@ -10,7 +10,7 @@ then
 fi
 
 docker compose -f docker/docker-compose.yml up -d --remove-orphans kafka-1 kafka-2 kafka-3
-docker compose -f docker/docker-compose.yml rm -svf producer
+docker compose -f docker/docker-compose.yml rm -svf consumer
 
 if [[
   $(docker image ls -q $IMAGE) == "" ||
@@ -27,10 +27,13 @@ docker compose -f docker/docker-compose.yml up --remove-orphans setup || exit 1
 
 docker compose -f docker/docker-compose.yml up -d producer
 
+docker compose -f docker/docker-compose.yml up -d consumer
+
 sleep 5
+docker compose -f docker/docker-compose.yml stop consumer
 
-docker compose -f docker/docker-compose.yml exec cli kafkacat -b kafka:9092 -t test -c 20 -f'topic=%t\tpartition=%p\toffset=%o\tkey=%k\tvalue=%s\n'
+docker compose -f docker/docker-compose.yml start consumer
+sleep 5
 
-docker compose -f docker/docker-compose.yml stop producer
-docker compose -f docker/docker-compose.yml exec cli kafkacat -b kafka:9092 -t test -e -f'topic=%t\tpartition=%p\toffset=%o\tkey=%k\tvalue=%s\n'
-docker compose -f docker/docker-compose.yml logs producer
+docker compose -f docker/docker-compose.yml stop producer consumer
+docker compose -f docker/docker-compose.yml logs consumer
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
index cd00617..de76c79 100644
--- a/docker/docker-compose.yml
+++ b/docker/docker-compose.yml
@@ -190,6 +190,10 @@ services:
     image: juplo/simple-producer:1.0-SNAPSHOT
     command: kafka:9092 test producer
 
+  consumer:
+    image: juplo/simple-consumer:1.0-SNAPSHOT
+    command: kafka:9092 test my-group consumer
+
 volumes:
   zookeeper-data:
   zookeeper-log:
diff --git a/pom.xml b/pom.xml
index ad7f17a..2d81d24 100644
--- a/pom.xml
+++ b/pom.xml
@@ -12,9 +12,9 @@
 
   <groupId>de.juplo.kafka</groupId>
-  <artifactId>simple-producer</artifactId>
-  <name>Super Simple Producer</name>
-  <description>A Simple Producer, programmed with pure Java, that sends messages via Kafka</description>
+  <artifactId>simple-consumer</artifactId>
+  <name>Simple Consumer-Group</name>
+  <description>A super simple consumer group, implemented as a plain Java program</description>
   <version>1.0-SNAPSHOT</version>
@@ -62,7 +62,7 @@
           <addClasspath>true</addClasspath>
           <classpathPrefix>libs/</classpathPrefix>
-          <mainClass>de.juplo.kafka.ExampleProducer</mainClass>
+          <mainClass>de.juplo.kafka.ExampleConsumer</mainClass>
diff --git a/src/main/java/de/juplo/kafka/ExampleConsumer.java b/src/main/java/de/juplo/kafka/ExampleConsumer.java
new file mode 100644
index 0000000..1715813
--- /dev/null
+++ b/src/main/java/de/juplo/kafka/ExampleConsumer.java
@@ -0,0 +1,137 @@
+package de.juplo.kafka;
+
+import lombok.extern.slf4j.Slf4j;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.clients.consumer.Consumer;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.common.errors.WakeupException;
+import org.apache.kafka.common.serialization.StringDeserializer;
+
+import java.time.Duration;
+import java.util.Arrays;
+import java.util.Properties;
+
+
+@Slf4j
+public class ExampleConsumer
+{
+  private final String id;
+  private final String topic;
+  private final Consumer<String, String> consumer;
+
+  private volatile boolean running = false;
+  private long consumed = 0;
+
+  public ExampleConsumer(String broker, String topic, String groupId, String clientId)
+  {
+    Properties props = new Properties();
+    props.put("bootstrap.servers", broker);
+    props.put("group.id", groupId); // ID used for the offset-commits
+    props.put("client.id", clientId); // Only used for identification
+    props.put("auto.offset.reset", "earliest"); // Read the topic from the beginning
+    props.put("partition.assignment.strategy", "org.apache.kafka.clients.consumer.CooperativeStickyAssignor");
+    props.put("key.deserializer", StringDeserializer.class.getName());
+    props.put("value.deserializer", StringDeserializer.class.getName());
+
+    this.id = clientId;
+    this.topic = topic;
+    consumer = new KafkaConsumer<>(props);
+  }
+
+
+  public void run()
+  {
+    try
+    {
+      log.info("{} - Subscribing to topic {}", id, topic);
+      consumer.subscribe(Arrays.asList(topic));
+      running = true;
+
+      while (true)
+      {
+        ConsumerRecords<String, String> records =
+            consumer.poll(Duration.ofSeconds(1));
+
+        log.info("{} - Received {} messages", id, records.count());
+        for (ConsumerRecord<String, String> record : records)
+        {
+          consumed++;
+          log.info(
+              "{} - {}: {}/{} - {}={}",
+              id,
+              record.offset(),
+              record.topic(),
+              record.partition(),
+              record.key(),
+              record.value()
+          );
+        }
+      }
+    }
+    catch(WakeupException e)
+    {
+      log.info("{} - Consumer was signaled to finish its work", id);
+    }
+    catch(Exception e)
+    {
+      log.error("{} - Unexpected error: {}, unsubscribing!", id, e.toString());
+      consumer.unsubscribe();
+    }
+    finally
+    {
+      running = false;
+      log.info("{} - Closing the KafkaConsumer", id);
+      consumer.close();
+      log.info("{}: Consumed {} messages in total, exiting!", id, consumed);
+    }
+  }
+
+
+  public static void main(String[] args) throws Exception
+  {
+    String broker = ":9092";
+    String topic = "test";
+    String groupId = "my-group";
+    String clientId = "DEV";
+
+    switch (args.length)
+    {
+      case 4:
+        clientId = args[3];
+      case 3:
+        groupId = args[2];
+      case 2:
+        topic = args[1];
+      case 1:
+        broker = args[0];
+    }
+
+
+    ExampleConsumer instance = new ExampleConsumer(broker, topic, groupId, clientId);
+
+    Runtime.getRuntime().addShutdownHook(new Thread(() ->
+    {
+      instance.consumer.wakeup();
+
+      while (instance.running)
+      {
+        log.info("Waiting for main-thread...");
+        try
+        {
+          Thread.sleep(1000);
+        }
+        catch (InterruptedException e) {}
+      }
+      log.info("Shutdown completed.");
+    }));
+
+    log.info(
+        "Running ExampleConsumer: broker={}, topic={}, group-id={}, client-id={}",
+        broker,
+        topic,
+        groupId,
+        clientId);
+    instance.run();
+  }
+}
diff --git a/src/main/java/de/juplo/kafka/ExampleProducer.java b/src/main/java/de/juplo/kafka/ExampleProducer.java
deleted file mode 100644
index a629c64..0000000
--- a/src/main/java/de/juplo/kafka/ExampleProducer.java
+++ /dev/null
@@ -1,154 +0,0 @@
-package de.juplo.kafka;
-
-import lombok.extern.slf4j.Slf4j;
-import org.apache.kafka.clients.producer.Producer;
-import org.apache.kafka.clients.producer.KafkaProducer;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.common.serialization.StringSerializer;
-
-import java.util.Properties;
-
-
-@Slf4j
-public class ExampleProducer
-{
-  private final String id;
-  private final String topic;
-  private final Producer<String, String> producer;
-
-  private volatile boolean running = true;
-  private volatile boolean done = false;
-  private long produced = 0;
-
-  public ExampleProducer(String broker, String topic, String clientId)
-  {
-    Properties props = new Properties();
-    props.put("bootstrap.servers", broker);
-    props.put("client.id", clientId); // Nur zur Wiedererkennung
-    props.put("key.serializer", StringSerializer.class.getName());
-    props.put("value.serializer", StringSerializer.class.getName());
-    props.put("metadata.maxage.ms", 5000);
-
-    this.id = clientId;
-    this.topic = topic;
-    producer = new KafkaProducer<>(props);
-  }
-
-  public void run()
-  {
-    long i = 0;
-
-    try
-    {
-      for (; running; i++)
-      {
-        send(Long.toString(i%10), Long.toString(i));
-        Thread.sleep(500);
-      }
-    }
-    catch (Exception e)
-    {
-      log.error("{} - Unexpected error: {}!", id, e.toString());
-    }
-    finally
-    {
-      log.info("{}: Closing the KafkaProducer", id);
-      producer.close();
-      log.info("{}: Produced {} messages in total, exiting!", id, produced);
-      done = true;
-    }
-  }
-
-  void send(String key, String value)
-  {
-    final long time = System.currentTimeMillis();
-
-    final ProducerRecord<String, String> record = new ProducerRecord<>(
-        topic,  // Topic
-        key,    // Key
-        value   // Value
-    );
-
-    producer.send(record, (metadata, e) ->
-    {
-      long now = System.currentTimeMillis();
-      if (e == null)
-      {
-        // HANDLE SUCCESS
-        produced++;
-        log.debug(
-            "{} - Sent key={} message={} partition={}/{} timestamp={} latency={}ms",
-            id,
-            record.key(),
-            record.value(),
-            metadata.partition(),
-            metadata.offset(),
-            metadata.timestamp(),
-            now - time
-        );
-      }
-      else
-      {
-        // HANDLE ERROR
-        log.error(
-            "{} - ERROR key={} timestamp={} latency={}ms: {}",
-            id,
-            record.key(),
-            metadata == null ? -1 : metadata.timestamp(),
-            now - time,
-            e.toString()
-        );
-      }
-    });
-
-    long now = System.currentTimeMillis();
-    log.trace(
-        "{} - Queued message with key={} latency={}ms",
-        id,
-        record.key(),
-        now - time
-    );
-  }
-
-
-  public static void main(String[] args) throws Exception
-  {
-    String broker = ":9092";
-    String topic = "test";
-    String clientId = "DEV";
-
-    switch (args.length)
-    {
-      case 3:
-        clientId = args[2];
-      case 2:
-        topic = args[1];
-      case 1:
-        broker = args[0];
-    }
-
-    ExampleProducer instance = new ExampleProducer(broker, topic, clientId);
-
-    Runtime.getRuntime().addShutdownHook(new Thread(() ->
-    {
-      instance.running = false;
-      while (!instance.done)
-      {
-        log.info("Waiting for main-thread...");
-        try
-        {
-          Thread.sleep(1000);
-        }
-        catch (InterruptedException e) {}
-      }
-      log.info("Shutdown completed.");
-    }));
-
-    log.info(
-        "Running ExampleProducer: broker={}, topic={}, client-id={}",
-        broker,
-        topic,
-        clientId);
-    instance.run();
-  }
-}
-- 
2.20.1