Version of the `spring-consumer` that works with `assign()`
Refs: consumer/spring-consumer--assign, consumer/spring-consumer--assign--2025-03-18--19-42, consumer/spring-consumer--assign--2025-03-signal, consumer/spring-consumer--assign--2025-04-signal
author     Kai Moritz <kai@juplo.de>    Sat, 9 Nov 2024 17:36:27 +0000 (18:36 +0100)
committer  Kai Moritz <kai@juplo.de>    Sat, 15 Mar 2025 18:25:38 +0000 (19:25 +0100)
* Additionally, added the `spickzettel` service for reading out the offsets topic.
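
In essence, the consumer no longer lets the group coordinator hand out partitions via `subscribe()`, but picks them itself. A minimal sketch of the difference (topic and partition numbers taken from the configuration in this commit, the surrounding code assumed):

    // before: group management, partitions are distributed by the coordinator
    consumer.subscribe(List.of("test"));

    // after: static assignment, no group protocol, no rebalancing
    consumer.assign(List.of(
        new TopicPartition("test", 0),
        new TopicPartition("test", 1)));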

README.sh
build.gradle
docker/docker-compose.yml
pom.xml
src/main/java/de/juplo/kafka/ApplicationConfiguration.java
src/main/java/de/juplo/kafka/ApplicationProperties.java
src/main/java/de/juplo/kafka/ExampleConsumer.java
src/main/resources/application.yml
src/test/java/de/juplo/kafka/ApplicationTests.java

index b46e235..c504e10 100755 (executable)
--- a/README.sh
+++ b/README.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-IMAGE=juplo/spring-consumer:1.1-SNAPSHOT
+IMAGE=juplo/spring-consumer:1.1-assign-SNAPSHOT
 
 if [ "$1" = "cleanup" ]
 then
index a8614fd..68a37df 100644 (file)
--- a/build.gradle
+++ b/build.gradle
@@ -8,7 +8,7 @@ plugins {
 }
 
 group = 'de.juplo.kafka'
-version = '1.1-SNAPSHOT'
+version = '1.1-assign-SNAPSHOT'
 
 java {
        toolchain {
index 4fa2ead..26ef4b3 100644 (file)
--- a/docker/docker-compose.yml
+++ b/docker/docker-compose.yml
@@ -113,6 +113,7 @@ services:
     stop_grace_period: 0s
     depends_on:
       - cli
+      - spickzettel
 
   akhq:
     image: tchiotludo/akhq:0.23.0
@@ -145,11 +146,21 @@ services:
       juplo.producer.throttle-ms: 100
 
   consumer:
-    image: juplo/spring-consumer:1.1-SNAPSHOT
+    image: juplo/spring-consumer:1.1-assign-SNAPSHOT
     environment:
       juplo.bootstrap-server: kafka:9092
       juplo.client-id: consumer
-      juplo.consumer.topic: test
+      juplo.consumer.partitions: test:0,test:1
+
+  spickzettel:
+    image: juplo/toolbox
+    command: >
+      bash -c '
+        kafka-console-consumer \
+          --bootstrap-server kafka:9092 \
+          --topic __consumer_offsets --from-beginning \
+          --formatter "kafka.coordinator.group.GroupMetadataManager\$$OffsetsMessageFormatter"
+      '
 
   peter:
     image: juplo/spring-consumer:1.1-SNAPSHOT
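
The `spickzettel` ("cheat sheet") service merely tails the internal `__consumer_offsets` topic: even though the `assign()`-based consumer takes no part in rebalancing, it still commits its offsets under the configured `group.id`, and that is what this service makes visible. The same information could also be queried from the consumer itself, for example (a sketch, assuming the `consumer` instance, logger and imports from the classes further down):

    // committed offsets are still stored per group.id, even with assign()
    Map<TopicPartition, OffsetAndMetadata> committed =
        consumer.committed(Set.of(
            new TopicPartition("test", 0),
            new TopicPartition("test", 1)));
    committed.forEach((partition, offset) ->
        log.info("Committed offset for {}: {}", partition, offset));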
diff --git a/pom.xml b/pom.xml
index dd96d00..8898423 100644 (file)
--- a/pom.xml
+++ b/pom.xml
@@ -15,7 +15,7 @@
   <artifactId>spring-consumer</artifactId>
   <name>Spring Consumer</name>
   <description>Super Simple Consumer-Group, that is implemented as Spring-Boot application and configured by Spring Kafka</description>
-  <version>1.1-SNAPSHOT</version>
+  <version>1.1-assign-SNAPSHOT</version>
 
   <properties>
     <java.version>21</java.version>
index d2b8e05..860cd82 100644 (file)
--- a/src/main/java/de/juplo/kafka/ApplicationConfiguration.java
+++ b/src/main/java/de/juplo/kafka/ApplicationConfiguration.java
@@ -3,12 +3,14 @@ package de.juplo.kafka;
 import org.apache.kafka.clients.consumer.Consumer;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
 import org.apache.kafka.clients.consumer.StickyAssignor;
+import org.apache.kafka.common.TopicPartition;
 import org.apache.kafka.common.serialization.StringDeserializer;
 import org.springframework.boot.context.properties.EnableConfigurationProperties;
 import org.springframework.context.ConfigurableApplicationContext;
 import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Configuration;
 
+import java.util.Arrays;
 import java.util.Properties;
 
 
@@ -25,7 +27,14 @@ public class ApplicationConfiguration
     return
       new ExampleConsumer<>(
         properties.getClientId(),
-        properties.getConsumerProperties().getTopic(),
+        Arrays
+          .stream(properties.getConsumerProperties().getPartitions())
+          .map(partition ->
+          {
+            String[] parts = partition.split(":");
+            return new TopicPartition(parts[0], Integer.parseInt(parts[1]));
+          })
+          .toList(),
         kafkaConsumer,
         () -> applicationContext.close());
   }
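
Two notations are involved here: the property value uses `topic:partition` (e.g. `test:0`), while `TopicPartition.toString()` renders `topic-partition` (e.g. `test-0`), which is the form printed by the log statement added to `ExampleConsumer` further down. For illustration only:

    TopicPartition tp = new TopicPartition("test", 0);  // parsed from "test:0"
    System.out.println(tp);                             // prints "test-0"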
index c8193c9..9e6f5e3 100644 (file)
--- a/src/main/java/de/juplo/kafka/ApplicationProperties.java
+++ b/src/main/java/de/juplo/kafka/ApplicationProperties.java
@@ -43,7 +43,7 @@ public class ApplicationProperties
     private String groupId;
     @NotNull
     @NotEmpty
-    private String topic;
+    private String[] partitions;
     private OffsetReset autoOffsetReset;
     private Duration autoCommitInterval;
 
index 101abd1..8245ccf 100644 (file)
--- a/src/main/java/de/juplo/kafka/ExampleConsumer.java
+++ b/src/main/java/de/juplo/kafka/ExampleConsumer.java
@@ -4,17 +4,19 @@ import lombok.extern.slf4j.Slf4j;
 import org.apache.kafka.clients.consumer.Consumer;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.common.TopicPartition;
 import org.apache.kafka.common.errors.WakeupException;
 
 import java.time.Duration;
-import java.util.Arrays;
+import java.util.List;
+import java.util.stream.Collectors;
 
 
 @Slf4j
 public class ExampleConsumer<K, V> implements Runnable
 {
   private final String id;
-  private final String topic;
+  private final List<TopicPartition> partitions;
   private final Consumer<K, V> consumer;
   private final Thread workerThread;
   private final Runnable closeCallback;
@@ -24,12 +26,12 @@ public class ExampleConsumer<K, V> implements Runnable
 
   public ExampleConsumer(
     String clientId,
-    String topic,
+    List<TopicPartition> partitions,
     Consumer<K, V> consumer,
     Runnable closeCallback)
   {
     this.id = clientId;
-    this.topic = topic;
+    this.partitions = partitions;
     this.consumer = consumer;
 
     workerThread = new Thread(this, "ExampleConsumer Worker-Thread");
@@ -44,8 +46,14 @@ public class ExampleConsumer<K, V> implements Runnable
   {
     try
     {
-      log.info("{} - Subscribing to topic {}", id, topic);
-      consumer.subscribe(Arrays.asList(topic));
+      log.info(
+        "{} - Assigning to partitions: {}",
+        id,
+        partitions
+          .stream()
+          .map(TopicPartition::toString)
+          .collect(Collectors.joining(", ")));
+      consumer.assign(partitions);
 
       while (true)
       {
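
Taken together, the changed worker loop boils down to a plain `assign()`-based consumer. A self-contained sketch under the same assumptions (bootstrap server, topic, partitions and group id as configured in this commit; class name and output format made up for the example):

    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.TopicPartition;
    import org.apache.kafka.common.serialization.StringDeserializer;

    import java.time.Duration;
    import java.util.List;
    import java.util.Properties;

    public class AssignExample
    {
      public static void main(String[] args)
      {
        Properties props = new Properties();
        props.put("bootstrap.servers", "kafka:9092");    // as in docker-compose.yml
        props.put("group.id", "my-group");               // offsets are still committed under this id
        props.put("auto.offset.reset", "earliest");
        props.put("enable.auto.commit", "true");
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props))
        {
          // explicit assignment instead of subscribe(): no group protocol, no rebalancing
          consumer.assign(List.of(
              new TopicPartition("test", 0),
              new TopicPartition("test", 1)));

          while (true)
          {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
            records.forEach(record ->
                System.out.printf(
                    "%s-%d@%d: %s%n",
                    record.topic(), record.partition(), record.offset(), record.value()));
          }
        }
      }
    }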
index 7a06731..c5f21ec 100644 (file)
--- a/src/main/resources/application.yml
+++ b/src/main/resources/application.yml
@@ -3,7 +3,7 @@ juplo:
   client-id: DEV
   consumer:
     group-id: my-group
-    topic: test
+    partitions: test:0,test:1
     auto-offset-reset: earliest
     auto-commit-interval: 5s
 management:
@@ -25,7 +25,7 @@ info:
     client-id: ${juplo.client-id}
     consumer:
       group-id: ${juplo.consumer.group-id}
-      topic: ${juplo.consumer.topic}
+      partitions: ${juplo.consumer.partitions}
       auto-offset-reset: ${juplo.consumer.auto-offset-reset}
       auto-commit-interval: ${juplo.consumer.auto-commit-interval}
 logging:
index ae119bf..959dc4e 100644 (file)
--- a/src/test/java/de/juplo/kafka/ApplicationTests.java
+++ b/src/test/java/de/juplo/kafka/ApplicationTests.java
@@ -20,7 +20,7 @@ import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.
 @SpringBootTest(
   properties = {
     "juplo.bootstrap-server=${spring.embedded.kafka.brokers}",
-    "juplo.consumer.topic=" + TOPIC })
+    "juplo.consumer.partitions=" + TOPIC + ":0" })
 @AutoConfigureMockMvc
 @EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
 public class ApplicationTests