COPY target/*.jar /opt/app.jar
COPY target/libs /opt/libs
ENTRYPOINT [ "java", "-jar", "/opt/app.jar" ]
-CMD [ "kafka:9092", "test", "DCKR" ]
+CMD [ "kafka:9092", "test", "my-group", "DCKR" ]
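+# CMD supplies the default arguments for the ENTRYPOINT: <broker> <topic> <group-id> <client-id>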
#!/bin/bash
-IMAGE=juplo/simple-producer:1.0-SNAPSHOT
+IMAGE=juplo/simple-consumer:1.0-SNAPSHOT
if [ "$1" = "cleanup" ]
then
fi
docker compose -f docker/docker-compose.yml up -d --remove-orphans kafka-1 kafka-2 kafka-3
-docker compose -f docker/docker-compose.yml rm -svf producer
+docker compose -f docker/docker-compose.yml rm -svf consumer
if [[
$(docker image ls -q $IMAGE) == "" ||
docker compose -f docker/docker-compose.yml up -d producer
+docker compose -f docker/docker-compose.yml up -d consumer
+
sleep 5
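+# Stop and later restart the consumer: with the same group.id it resumes at its committed offsets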
+docker compose -f docker/docker-compose.yml stop consumer
-docker compose -f docker/docker-compose.yml exec cli kafkacat -b kafka:9092 -t test -c 20 -f'topic=%t\tpartition=%p\toffset=%o\tkey=%k\tvalue=%s\n'
+docker compose -f docker/docker-compose.yml start consumer
+sleep 5
-docker compose -f docker/docker-compose.yml stop producer
-docker compose -f docker/docker-compose.yml exec cli kafkacat -b kafka:9092 -t test -e -f'topic=%t\tpartition=%p\toffset=%o\tkey=%k\tvalue=%s\n'
-docker compose -f docker/docker-compose.yml logs producer
+docker compose -f docker/docker-compose.yml stop producer consumer
+docker compose -f docker/docker-compose.yml logs consumer
    image: juplo/simple-producer:1.0-SNAPSHOT
    command: kafka:9092 test producer
+  consumer:
+    image: juplo/simple-consumer:1.0-SNAPSHOT
+    command: kafka:9092 test my-group consumer
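+    # command arguments: <broker> <topic> <group-id> <client-id>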
+
volumes:
  zookeeper-data:
  zookeeper-log:
</parent>
<groupId>de.juplo.kafka</groupId>
- <artifactId>simple-producer</artifactId>
- <name>Super Simple Producer</name>
- <description>A Simple Producer, programmed with pure Java, that sends messages via Kafka</description>
+ <artifactId>simple-consumer</artifactId>
+ <name>Simple Consumer-Group</name>
+ <description>Super Simple Consumer-Group, implemented as a plain Java program</description>
<version>1.0-SNAPSHOT</version>
<properties>
<manifest>
<addClasspath>true</addClasspath>
<classpathPrefix>libs/</classpathPrefix>
- <mainClass>de.juplo.kafka.ExampleProducer</mainClass>
+ <mainClass>de.juplo.kafka.ExampleConsumer</mainClass>
</manifest>
</archive>
</configuration>
--- /dev/null
+package de.juplo.kafka;
+
+import lombok.extern.slf4j.Slf4j;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.clients.consumer.Consumer;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.common.errors.WakeupException;
+import org.apache.kafka.common.serialization.StringDeserializer;
+
+import java.time.Duration;
+import java.util.Arrays;
+import java.util.Properties;
+
+
+@Slf4j
+public class ExampleConsumer
+{
+ private final String id;
+ private final String topic;
+ private final Consumer<String, String> consumer;
+
+ private volatile boolean running = false;
+ private long consumed = 0;
+
+ public ExampleConsumer(String broker, String topic, String groupId, String clientId)
+ {
+ Properties props = new Properties();
+ props.put("bootstrap.servers", broker);
+ props.put("group.id", groupId); // ID used for the offset-commits
+ props.put("client.id", clientId); // Only used to identify the client
+ props.put("auto.offset.reset", "earliest"); // Read from the beginning, if no offsets have been committed yet
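+ // Incremental cooperative rebalancing: only partitions that are actually reassigned get revoked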
+ props.put("partition.assignment.strategy", "org.apache.kafka.clients.consumer.CooperativeStickyAssignor");
+ props.put("key.deserializer", StringDeserializer.class.getName());
+ props.put("value.deserializer", StringDeserializer.class.getName());
+
+ this.id = clientId;
+ this.topic = topic;
+ consumer = new KafkaConsumer<>(props);
+ }
+
+
+ public void run()
+ {
+ try
+ {
+ log.info("{} - Subscribing to topic {}", id, topic);
+ consumer.subscribe(Arrays.asList(topic));
+ running = true;
+
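+ // Endless poll-loop; offsets are committed automatically, because enable.auto.commit defaults to true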
+ while (true)
+ {
+ ConsumerRecords<String, String> records =
+ consumer.poll(Duration.ofSeconds(1));
+
+ log.info("{} - Received {} messages", id, records.count());
+ for (ConsumerRecord<String, String> record : records)
+ {
+ consumed++;
+ log.info(
+ "{} - {}: {}/{} - {}={}",
+ id,
+ record.offset(),
+ record.topic(),
+ record.partition(),
+ record.key(),
+ record.value()
+ );
+ }
+ }
+ }
+ catch(WakeupException e)
+ {
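+ // Thrown by poll() after consumer.wakeup() was called from the shutdown-hook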
+ log.info("{} - Consumer was signaled to finish its work", id);
+ }
+ catch(Exception e)
+ {
+ log.error("{} - Unexpected error: {}, unsubscribing!", id, e.toString());
+ consumer.unsubscribe();
+ }
+ finally
+ {
+ running = false;
+ log.info("{} - Closing the KafkaConsumer", id);
+ consumer.close();
+ log.info("{}: Consumed {} messages in total, exiting!", id, consumed);
+ }
+ }
+
+
+ public static void main(String[] args) throws Exception
+ {
+ String broker = ":9092";
+ String topic = "test";
+ String groupId = "my-group";
+ String clientId = "DEV";
+
+ switch (args.length)
+ {
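+ // Intentional fall-through: every argument that is given overrides the corresponding default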
+ case 4:
+ clientId = args[3];
+ case 3:
+ groupId = args[2];
+ case 2:
+ topic = args[1];
+ case 1:
+ broker = args[0];
+ }
+
+
+ ExampleConsumer instance = new ExampleConsumer(broker, topic, groupId, clientId);
+
+ Runtime.getRuntime().addShutdownHook(new Thread(() ->
+ {
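+ // wakeup() makes a blocking poll() in the main-thread throw a WakeupException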
+ instance.consumer.wakeup();
+
+ while (instance.running)
+ {
+ log.info("Waiting for main-thread...");
+ try
+ {
+ Thread.sleep(1000);
+ }
+ catch (InterruptedException e) {}
+ }
+ log.info("Shutdown completed.");
+ }));
+
+ log.info(
+ "Running SimpleConsumer: broker={}, topic={}, group-id={}, client-id={}",
+ broker,
+ topic,
+ groupId,
+ clientId);
+ instance.run();
+ }
+}
+++ /dev/null
-package de.juplo.kafka;
-
-import lombok.extern.slf4j.Slf4j;
-import org.apache.kafka.clients.producer.Producer;
-import org.apache.kafka.clients.producer.KafkaProducer;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.common.serialization.StringSerializer;
-
-import java.util.Properties;
-
-
-@Slf4j
-public class ExampleProducer
-{
- private final String id;
- private final String topic;
- private final Producer<String, String> producer;
-
- private volatile boolean running = true;
- private volatile boolean done = false;
- private long produced = 0;
-
- public ExampleProducer(String broker, String topic, String clientId)
- {
- Properties props = new Properties();
- props.put("bootstrap.servers", broker);
- props.put("client.id", clientId); // Only used to identify the client
- props.put("key.serializer", StringSerializer.class.getName());
- props.put("value.serializer", StringSerializer.class.getName());
- props.put("metadata.maxage.ms", 5000);
-
- this.id = clientId;
- this.topic = topic;
- producer = new KafkaProducer<>(props);
- }
-
- public void run()
- {
- long i = 0;
-
- try
- {
- for (; running; i++)
- {
- send(Long.toString(i%10), Long.toString(i));
- Thread.sleep(500);
- }
- }
- catch (Exception e)
- {
- log.error("{} - Unexpected error: {}!", id, e.toString());
- }
- finally
- {
- log.info("{}: Closing the KafkaProducer", id);
- producer.close();
- log.info("{}: Produced {} messages in total, exiting!", id, produced);
- done = true;
- }
- }
-
- void send(String key, String value)
- {
- final long time = System.currentTimeMillis();
-
- final ProducerRecord<String, String> record = new ProducerRecord<>(
- topic, // Topic
- key, // Key
- value // Value
- );
-
- producer.send(record, (metadata, e) ->
- {
- long now = System.currentTimeMillis();
- if (e == null)
- {
- // HANDLE SUCCESS
- produced++;
- log.debug(
- "{} - Sent key={} message={} partition={}/{} timestamp={} latency={}ms",
- id,
- record.key(),
- record.value(),
- metadata.partition(),
- metadata.offset(),
- metadata.timestamp(),
- now - time
- );
- }
- else
- {
- // HANDLE ERROR
- log.error(
- "{} - ERROR key={} timestamp={} latency={}ms: {}",
- id,
- record.key(),
- metadata == null ? -1 : metadata.timestamp(),
- now - time,
- e.toString()
- );
- }
- });
-
- long now = System.currentTimeMillis();
- log.trace(
- "{} - Queued message with key={} latency={}ms",
- id,
- record.key(),
- now - time
- );
- }
-
-
- public static void main(String[] args) throws Exception
- {
- String broker = ":9092";
- String topic = "test";
- String clientId = "DEV";
-
- switch (args.length)
- {
- case 3:
- clientId = args[2];
- case 2:
- topic = args[1];
- case 1:
- broker = args[0];
- }
-
- ExampleProducer instance = new ExampleProducer(broker, topic, clientId);
-
- Runtime.getRuntime().addShutdownHook(new Thread(() ->
- {
- instance.running = false;
- while (!instance.done)
- {
- log.info("Waiting for main-thread...");
- try
- {
- Thread.sleep(1000);
- }
- catch (InterruptedException e) {}
- }
- log.info("Shutdown completed.");
- }));
-
- log.info(
- "Running ExampleProducer: broker={}, topic={}, client-id={}",
- broker,
- topic,
- clientId);
- instance.run();
- }
-}