Vorlage (template)
refs: grundlagen/simple-consumer--vorlage, grundlagen/simple-consumer--vorlage--2025-03-18--19-42, grundlagen/simple-consumer--vorlage--2025-03-signal, grundlagen/simple-consumer--vorlage--2025-04-signal
author    Kai Moritz <kai@juplo.de>
Tue, 29 Oct 2024 15:14:31 +0000 (16:14 +0100)
committer Kai Moritz <kai@juplo.de>
Fri, 14 Mar 2025 15:51:25 +0000 (16:51 +0100)
Dockerfile [deleted file]
README.sh [deleted file]
docker/docker-compose.yml
src/main/java/de/juplo/kafka/ExampleConsumer.java

diff --git a/Dockerfile b/Dockerfile
deleted file mode 100644
index 22819af..0000000
--- a/Dockerfile
+++ /dev/null
@@ -1,6 +0,0 @@
-FROM eclipse-temurin:21-jre
-VOLUME /tmp
-COPY target/*.jar /opt/app.jar
-COPY target/libs /opt/libs
-ENTRYPOINT [ "java", "-jar", "/opt/app.jar" ]
-CMD [ "kafka:9092", "test", "my-group", "DCKR" ]
diff --git a/README.sh b/README.sh
deleted file mode 100755
index 85b8f96..0000000
--- a/README.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash
-
-IMAGE=juplo/simple-consumer:1.0-SNAPSHOT
-
-if [ "$1" = "cleanup" ]
-then
-  docker compose -f docker/docker-compose.yml down -t0 -v --remove-orphans
-  mvn clean
-  exit
-fi
-
-docker compose -f docker/docker-compose.yml up -d --remove-orphans kafka-1 kafka-2 kafka-3
-docker compose -f docker/docker-compose.yml rm -svf consumer
-
-if [[
-  $(docker image ls -q $IMAGE) == "" ||
-  "$1" = "build"
-]]
-then
-  mvn clean install || exit
-else
-  echo "Using image existing images:"
-  docker image ls $IMAGE
-fi
-
-docker compose -f docker/docker-compose.yml up --remove-orphans setup || exit 1
-
-
-docker compose -f docker/docker-compose.yml up -d producer
-docker compose -f docker/docker-compose.yml up -d consumer
-
-sleep 5
-docker compose -f docker/docker-compose.yml stop consumer
-
-docker compose -f docker/docker-compose.yml start consumer
-sleep 5
-
-docker compose -f docker/docker-compose.yml stop producer consumer
-docker compose -f docker/docker-compose.yml logs consumer
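
Note on the deleted script: it stops the consumer container while the producer keeps writing, then starts it again. Because the (likewise removed) ExampleConsumer registers under group.id my-group and relies on the default enable.auto.commit=true, the restarted consumer resumes from the last committed offsets. Purely as an illustration of that mechanism, and not part of this commit, a hypothetical helper along the following lines could print the committed offsets between the stop and the start (the class name CommittedOffsets and the bootstrap address localhost:9092 are assumptions):

package de.juplo.kafka;

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.util.Properties;
import java.util.Set;
import java.util.stream.Collectors;

// Hypothetical helper, NOT part of the original sources: prints the offsets
// that the consumer group "my-group" has committed for the topic "test",
// i.e. the positions a restarted ExampleConsumer would resume from.
public class CommittedOffsets
{
  public static void main(String[] args)
  {
    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092"); // assumption: broker reachable here
    props.put("group.id", "my-group");
    props.put("key.deserializer", StringDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());

    try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props))
    {
      // Look up all partitions of the topic...
      Set<TopicPartition> partitions = consumer
        .partitionsFor("test")
        .stream()
        .map(info -> new TopicPartition(info.topic(), info.partition()))
        .collect(Collectors.toSet());

      // ...and ask the broker for the offsets committed by the group
      consumer
        .committed(partitions)
        .forEach((partition, offset) ->
          System.out.println(
            partition + ": " + (offset == null ? "no commit yet" : offset.offset())));
    }
  }
}
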
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
index 9553900..5b19de7 100644
--- a/docker/docker-compose.yml
+++ b/docker/docker-compose.yml
@@ -139,10 +139,6 @@ services:
     image: juplo/simple-producer:1.0-SNAPSHOT
     command: kafka:9092 test producer
 
-  consumer:
-    image: juplo/simple-consumer:1.0-SNAPSHOT
-    command: kafka:9092 test my-group consumer
-
 volumes:
   zookeeper-data:
   zookeeper-log:
diff --git a/src/main/java/de/juplo/kafka/ExampleConsumer.java b/src/main/java/de/juplo/kafka/ExampleConsumer.java
index 891af89..0452522 100644
--- a/src/main/java/de/juplo/kafka/ExampleConsumer.java
+++ b/src/main/java/de/juplo/kafka/ExampleConsumer.java
 package de.juplo.kafka;
 
 import lombok.extern.slf4j.Slf4j;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.clients.consumer.ConsumerRecords;
-import org.apache.kafka.clients.consumer.Consumer;
-import org.apache.kafka.clients.consumer.KafkaConsumer;
-import org.apache.kafka.common.errors.WakeupException;
-import org.apache.kafka.common.serialization.StringDeserializer;
-
-import java.time.Duration;
-import java.util.Arrays;
-import java.util.Properties;
 
 
 @Slf4j
 public class ExampleConsumer
 {
-  private final String id;
-  private final String topic;
-  private final Consumer<String, String> consumer;
-
-  private volatile boolean running = false;
-  private long consumed = 0;
-
-  public ExampleConsumer(
-    String broker,
-    String topic,
-    String groupId,
-    String clientId)
-  {
-    Properties props = new Properties();
-    props.put("bootstrap.servers", broker);
-    props.put("group.id", groupId); // ID für die Offset-Commits
-    props.put("client.id", clientId); // Nur zur Wiedererkennung
-    props.put("key.deserializer", StringDeserializer.class.getName());
-    props.put("value.deserializer", StringDeserializer.class.getName());
-
-    this.id = clientId;
-    this.topic = topic;
-    consumer = new KafkaConsumer<>(props);
-  }
-
-
-  public void run()
-  {
-    try
-    {
-      log.info("{} - Subscribing to topic {}", id, topic);
-      consumer.subscribe(Arrays.asList(topic));
-      running = true;
-
-      while (true)
-      {
-        ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
-
-        log.info("{} - Received {} messages", id, records.count());
-        for (ConsumerRecord<String, String> record : records)
-        {
-          handleRecord(
-            record.topic(),
-            record.partition(),
-            record.offset(),
-            record.key(),
-            record.value());
-        }
-      }
-    }
-    catch(WakeupException e)
-    {
-      log.info("{} - Consumer was signaled to finish its work", id);
-    }
-    catch(Exception e)
-    {
-      log.error("{} - Unexpected error, unsubscribing!", id, e);
-      consumer.unsubscribe();
-    }
-    finally
-    {
-      running = false;
-      log.info("{} - Closing the KafkaConsumer", id);
-      consumer.close();
-      log.info("{}: Consumed {} messages in total, exiting!", id, consumed);
-    }
-  }
-
-  private void handleRecord(
-    String topic,
-    Integer partition,
-    Long offset,
-    String key,
-    String value)
-  {
-    consumed++;
-    log.info("{} - partition={}-{}, offset={}: {}={}", id, topic, partition, offset, key, value);
-  }
-
-
   public static void main(String[] args) throws Exception
   {
-    String broker = ":9092";
-    String topic = "test";
-    String groupId = "my-group";
-    String clientId = "DEV";
-
-    switch (args.length)
-    {
-      case 4:
-        clientId = args[3];
-      case 3:
-        groupId = args[2];
-      case 2:
-        topic = args[1];
-      case 1:
-        broker = args[0];
-    }
-
-
-    ExampleConsumer instance = new ExampleConsumer(broker, topic, groupId, clientId);
-
-    Runtime.getRuntime().addShutdownHook(new Thread(() ->
-    {
-      instance.consumer.wakeup();
-
-      while (instance.running)
-      {
-        log.info("Waiting for main-thread...");
-        try
-        {
-          Thread.sleep(1000);
-        }
-        catch (InterruptedException e) {}
-      }
-      log.info("Shutdown completed.");
-    }));
-
-    log.info(
-      "Running ExampleConsumer: broker={}, topic={}, group-id={}, client-id={}",
-      broker,
-      topic,
-      groupId,
-      clientId);
-    instance.run();
   }
 }
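
After this template ("Vorlage") commit is applied, the consumer logic is gone and only an empty skeleton remains. Reconstructed from the unchanged context lines above, the resulting ExampleConsumer.java should look roughly like this:

package de.juplo.kafka;

import lombok.extern.slf4j.Slf4j;


@Slf4j
public class ExampleConsumer
{
  public static void main(String[] args) throws Exception
  {
  }
}
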