Merge der überarbeiteten Compose-Konfiguration ('endless-stream-consumer')
author Kai Moritz <kai@juplo.de>
Sat, 23 Jul 2022 11:49:23 +0000 (13:49 +0200)
committer Kai Moritz <kai@juplo.de>
Sat, 23 Jul 2022 11:49:23 +0000 (13:49 +0200)
* Die letzten Änderungen an 'endless-stream-consumer' sind länger nicht
  mehr gemerged worden.

README.sh
docker-compose.yml
pom.xml
src/main/java/de/juplo/kafka/DriverController.java
src/main/java/de/juplo/kafka/EndlessConsumer.java

index 900270a..c14f45b 100755 (executable)
--- a/README.sh
+++ b/README.sh
@@ -24,8 +24,64 @@ fi
 
 echo "Waiting for the Kafka-Cluster to become ready..."
 docker-compose exec cli cub kafka-ready -b kafka:9092 1 60 > /dev/null 2>&1 || exit 1
-docker-compose up setup
-docker-compose up -d producer consumer
-sleep 15
+docker-compose up -d kafka-ui
+
+docker-compose exec -T cli bash << 'EOF'
+echo "Creating topic with 3 partitions..."
+kafka-topics --bootstrap-server kafka:9092 --delete --if-exists --topic test
+# tag::createtopic[]
+kafka-topics --bootstrap-server kafka:9092 --create --topic test --partitions 3
+# end::createtopic[]
+kafka-topics --bootstrap-server kafka:9092 --describe --topic test
+EOF
+
+docker-compose up -d consumer
+
+docker-compose up -d producer
+sleep 10
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+
+docker-compose exec -T cli bash << 'EOF'
+echo "Altering number of partitions from 3 to 7..."
+# tag::altertopic[]
+kafka-topics --bootstrap-server kafka:9092 --alter --topic test --partitions 7
+kafka-topics --bootstrap-server kafka:9092 --describe --topic test
+# end::altertopic[]
+EOF
+
+docker-compose restart producer
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
 docker-compose stop producer consumer
-docker-compose logs consumer
index 159f9cb..f9eeedd 100644 (file)
@@ -24,13 +24,13 @@ services:
     depends_on:
       - zookeeper
 
-  setup:
-    image: juplo/toolbox
-    command: >
-      bash -c "
-        kafka-topics --bootstrap-server kafka:9092 --delete --if-exists --topic test
-        kafka-topics --bootstrap-server kafka:9092 --create --topic test --partitions 2
-      "
+  kafka-ui:
+    image: provectuslabs/kafka-ui:0.3.3
+    ports:
+      - 8080:8080
+    environment:
+      KAFKA_CLUSTERS_0_NAME: local
+      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:9092
 
   cli:
     image: juplo/toolbox
@@ -39,13 +39,13 @@ services:
   producer:
     image: juplo/endless-producer:1.0-SNAPSHOT
     ports:
-      - 8080:8080
+      - 8000:8080
     environment:
       server.port: 8080
       producer.bootstrap-server: kafka:9092
       producer.client-id: producer
       producer.topic: test
-      producer.throttle-ms: 200
+      producer.throttle-ms: 10
 
 
   consumer:
diff --git a/pom.xml b/pom.xml
index 29c1851..9db9d9d 100644 (file)
--- a/pom.xml
+++ b/pom.xml
@@ -14,7 +14,7 @@
   <groupId>de.juplo.kafka</groupId>
   <artifactId>endless-consumer</artifactId>
   <version>1.0-SNAPSHOT</version>
-  <name>Endless Consumer: a Simple Consumer-Group that reads and print the topic</name>
+  <name>Endless Consumer: a Simple Consumer-Group that reads and prints the topic and counts the received messages for each key by topic</name>
 
   <dependencies>
     <dependency>
index a02fd2c..06e562c 100644 (file)
@@ -1,12 +1,14 @@
 package de.juplo.kafka;
 
 import lombok.RequiredArgsConstructor;
+import org.springframework.web.bind.annotation.GetMapping;
 import org.springframework.http.HttpStatus;
 import org.springframework.web.bind.annotation.ExceptionHandler;
 import org.springframework.web.bind.annotation.PostMapping;
 import org.springframework.web.bind.annotation.ResponseStatus;
 import org.springframework.web.bind.annotation.RestController;
 
+import java.util.Map;
 import java.util.concurrent.ExecutionException;
 
 
@@ -29,6 +31,14 @@ public class DriverController
     consumer.stop();
   }
 
+
+  @GetMapping("seen")
+  public Map<Integer, Map<String, Integer>> seen()
+  {
+    return consumer.getSeen();
+  }
+
+
   @ExceptionHandler
   @ResponseStatus(HttpStatus.BAD_REQUEST)
   public ErrorResponse illegalStateException(IllegalStateException e)
index adebff1..2310ccd 100644 (file)
@@ -10,6 +10,8 @@ import org.apache.kafka.common.serialization.StringDeserializer;
 import javax.annotation.PreDestroy;
 import java.time.Duration;
 import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
 import java.util.Optional;
 import java.util.Properties;
 import java.util.concurrent.ExecutionException;
@@ -37,6 +39,9 @@ public class EndlessConsumer implements Runnable
   private KafkaConsumer<String, String> consumer = null;
 
 
+  private Map<Integer, Map<String, Integer>> seen;
+
+
   public EndlessConsumer(
       ExecutorService executor,
       String bootstrapServer,
@@ -63,6 +68,7 @@ public class EndlessConsumer implements Runnable
       props.put("group.id", groupId);
       props.put("client.id", id);
       props.put("auto.offset.reset", autoOffsetReset);
+      props.put("metadata.max.age.ms", "1000");
       props.put("key.deserializer", StringDeserializer.class.getName());
       props.put("value.deserializer", StringDeserializer.class.getName());
 
@@ -71,6 +77,8 @@ public class EndlessConsumer implements Runnable
       log.info("{} - Subscribing to topic {}", id, topic);
       consumer.subscribe(Arrays.asList(topic));
 
+      seen = new HashMap<>();
+
       while (true)
       {
         ConsumerRecords<String, String> records =
@@ -90,6 +98,21 @@ public class EndlessConsumer implements Runnable
               record.key(),
               record.value()
           );
+
+          Integer partition = record.partition();
+          String key = record.key() == null ? "NULL" : record.key();
+
+          if (!seen.containsKey(partition))
+            seen.put(partition, new HashMap<>());
+
+          Map<String, Integer> byKey = seen.get(partition);
+
+          if (!byKey.containsKey(key))
+            byKey.put(key, 0);
+
+          int seenByKey = byKey.get(key);
+          seenByKey++;
+          byKey.put(key, seenByKey);
         }
       }
     }
@@ -107,10 +130,31 @@ public class EndlessConsumer implements Runnable
     {
       log.info("{} - Closing the KafkaConsumer", id);
       consumer.close();
+
+      for (Integer partition : seen.keySet())
+      {
+        Map<String, Integer> byKey = seen.get(partition);
+        for (String key : byKey.keySet())
+        {
+          log.info(
+              "{} - Seen {} messages for partition={}|key={}",
+              id,
+              byKey.get(key),
+              partition,
+              key);
+        }
+      }
+      seen = null;
+
       log.info("{} - Consumer-Thread exiting", id);
     }
   }
 
+  public Map<Integer, Map<String, Integer>> getSeen()
+  {
+    return seen;
+  }
+
   private void shutdown()
   {
     shutdown(null);