From: Kai Moritz
Date: Sat, 23 Jul 2022 11:49:23 +0000 (+0200)
Subject: Merge of the reworked Compose configuration ('endless-stream-consumer')
X-Git-Tag: wip-DEPRECATED~12^2~1^2^2
X-Git-Url: http://juplo.de/gitweb/?a=commitdiff_plain;h=f9c0ba7779552d8fcfc9cb29c8b689e20c314904;hp=d115070b9cbc56f4ec9d47ec658f49527fbeb35e;p=demos%2Fkafka%2Ftraining

Merge of the reworked Compose configuration ('endless-stream-consumer')

* The latest changes to 'endless-stream-consumer' had not been merged for
  quite some time.
---
diff --git a/README.sh b/README.sh
index 900270a..c14f45b 100755
--- a/README.sh
+++ b/README.sh
@@ -24,8 +24,64 @@ fi
 echo "Waiting for the Kafka-Cluster to become ready..."
 docker-compose exec cli cub kafka-ready -b kafka:9092 1 60 > /dev/null 2>&1 || exit 1
-docker-compose up setup
-docker-compose up -d producer consumer
-sleep 15
+docker-compose up -d kafka-ui
+
+docker-compose exec -T cli bash << 'EOF'
+echo "Creating topic with 3 partitions..."
+kafka-topics --bootstrap-server kafka:9092 --delete --if-exists --topic test
+# tag::createtopic[]
+kafka-topics --bootstrap-server kafka:9092 --create --topic test --partitions 3
+# end::createtopic[]
+kafka-topics --bootstrap-server kafka:9092 --describe --topic test
+EOF
+
+docker-compose up -d consumer
+
+docker-compose up -d producer
+sleep 10
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+
+docker-compose exec -T cli bash << 'EOF'
+echo "Altering number of partitions from 3 to 7..."
+# tag::altertopic[]
+kafka-topics --bootstrap-server kafka:9092 --alter --topic test --partitions 7
+kafka-topics --bootstrap-server kafka:9092 --describe --topic test
+# end::altertopic[]
+EOF
+
+docker-compose restart producer
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
 
 docker-compose stop producer consumer
-docker-compose logs consumer
diff --git a/docker-compose.yml b/docker-compose.yml
index 159f9cb..f9eeedd 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -24,13 +24,13 @@ services:
     depends_on:
       - zookeeper
 
-  setup:
-    image: juplo/toolbox
-    command: >
-      bash -c "
-        kafka-topics --bootstrap-server kafka:9092 --delete --if-exists --topic test
-        kafka-topics --bootstrap-server kafka:9092 --create --topic test --partitions 2
-      "
+  kafka-ui:
+    image: provectuslabs/kafka-ui:0.3.3
+    ports:
+      - 8080:8080
+    environment:
+      KAFKA_CLUSTERS_0_NAME: local
+      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:9092
 
   cli:
     image: juplo/toolbox
@@ -39,13 +39,13 @@ services:
   producer:
     image: juplo/endless-producer:1.0-SNAPSHOT
     ports:
-      - 8080:8080
+      - 8000:8080
     environment:
       server.port: 8080
       producer.bootstrap-server: kafka:9092
       producer.client-id: producer
       producer.topic: test
-      producer.throttle-ms: 200
+      producer.throttle-ms: 10
 
   consumer:
diff --git a/pom.xml b/pom.xml
index 29c1851..9db9d9d 100644
--- a/pom.xml
+++ b/pom.xml
@@ -14,7 +14,7 @@
   <groupId>de.juplo.kafka</groupId>
   <artifactId>endless-consumer</artifactId>
   <version>1.0-SNAPSHOT</version>
-  <name>Endless Consumer: a Simple Consumer-Group that reads and print the topic
+  <name>Endless Consumer: a Simple Consumer-Group that reads and prints the topic
     and counts the received messages for each key by topic</name>
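Note: README.sh above drives the demo with the kafka-topics CLI from inside the
toolbox container (create the topic 'test' with 3 partitions, then alter it to 7
while producer and consumer keep running). As a hedged illustration only -- not
part of this commit -- the same two steps could be scripted from Java with the
Kafka AdminClient; the class name TopicSetupSketch, the replication factor of 1
and the blocking get() calls are assumptions:

import java.util.Map;
import java.util.Properties;
import java.util.Set;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.NewPartitions;
import org.apache.kafka.clients.admin.NewTopic;

// Hypothetical sketch, not part of this commit.
public class TopicSetupSketch
{
  public static void main(String[] args) throws Exception
  {
    Properties props = new Properties();
    props.put("bootstrap.servers", "kafka:9092");

    try (AdminClient admin = AdminClient.create(props))
    {
      // Like "kafka-topics --create --topic test --partitions 3":
      // create the topic with 3 partitions (replication factor 1 is an assumption)
      admin.createTopics(Set.of(new NewTopic("test", 3, (short) 1))).all().get();

      // Like "kafka-topics --alter --topic test --partitions 7":
      // raise the partition count from 3 to 7
      admin.createPartitions(Map.of("test", NewPartitions.increaseTo(7))).all().get();
    }
  }
}

Run against the cluster from docker-compose.yml, this mirrors the tagged
createtopic/altertopic blocks of README.sh.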
diff --git a/src/main/java/de/juplo/kafka/DriverController.java b/src/main/java/de/juplo/kafka/DriverController.java
index a02fd2c..06e562c 100644
--- a/src/main/java/de/juplo/kafka/DriverController.java
+++ b/src/main/java/de/juplo/kafka/DriverController.java
@@ -1,12 +1,14 @@
 package de.juplo.kafka;
 
 import lombok.RequiredArgsConstructor;
+import org.springframework.web.bind.annotation.GetMapping;
 import org.springframework.http.HttpStatus;
 import org.springframework.web.bind.annotation.ExceptionHandler;
 import org.springframework.web.bind.annotation.PostMapping;
 import org.springframework.web.bind.annotation.ResponseStatus;
 import org.springframework.web.bind.annotation.RestController;
 
+import java.util.Map;
 import java.util.concurrent.ExecutionException;
 
 
@@ -29,6 +31,14 @@ public class DriverController
     consumer.stop();
   }
 
+
+  @GetMapping("seen")
+  public Map<Integer, Map<String, Integer>> seen()
+  {
+    return consumer.getSeen();
+  }
+
+
   @ExceptionHandler
   @ResponseStatus(HttpStatus.BAD_REQUEST)
   public ErrorResponse illegalStateException(IllegalStateException e)
diff --git a/src/main/java/de/juplo/kafka/EndlessConsumer.java b/src/main/java/de/juplo/kafka/EndlessConsumer.java
index adebff1..2310ccd 100644
--- a/src/main/java/de/juplo/kafka/EndlessConsumer.java
+++ b/src/main/java/de/juplo/kafka/EndlessConsumer.java
@@ -10,6 +10,8 @@ import org.apache.kafka.common.serialization.StringDeserializer;
 import javax.annotation.PreDestroy;
 import java.time.Duration;
 import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
 import java.util.Optional;
 import java.util.Properties;
 import java.util.concurrent.ExecutionException;
@@ -37,6 +39,9 @@ public class EndlessConsumer implements Runnable
 
   private KafkaConsumer<String, String> consumer = null;
 
+  private Map<Integer, Map<String, Integer>> seen;
+
+
   public EndlessConsumer(
     ExecutorService executor,
     String bootstrapServer,
@@ -63,6 +68,7 @@ public class EndlessConsumer implements Runnable
       props.put("group.id", groupId);
       props.put("client.id", id);
       props.put("auto.offset.reset", autoOffsetReset);
+      props.put("metadata.max.age.ms", "1000");
       props.put("key.deserializer", StringDeserializer.class.getName());
       props.put("value.deserializer", StringDeserializer.class.getName());
 
@@ -71,6 +77,8 @@ public class EndlessConsumer implements Runnable
       log.info("{} - Subscribing to topic {}", id, topic);
       consumer.subscribe(Arrays.asList(topic));
 
+      seen = new HashMap<>();
+
       while (true)
       {
         ConsumerRecords<String, String> records =
@@ -90,6 +98,21 @@ public class EndlessConsumer implements Runnable
             record.key(),
             record.value()
           );
+
+          Integer partition = record.partition();
+          String key = record.key() == null ? "NULL" : record.key();
+
+          if (!seen.containsKey(partition))
+            seen.put(partition, new HashMap<>());
+
+          Map<String, Integer> byKey = seen.get(partition);
+
+          if (!byKey.containsKey(key))
+            byKey.put(key, 0);
+
+          int seenByKey = byKey.get(key);
+          seenByKey++;
+          byKey.put(key, seenByKey);
         }
       }
     }
@@ -107,10 +130,31 @@ public class EndlessConsumer implements Runnable
     {
       log.info("{} - Closing the KafkaConsumer", id);
       consumer.close();
+
+      for (Integer partition : seen.keySet())
+      {
+        Map<String, Integer> byKey = seen.get(partition);
+        for (String key : byKey.keySet())
+        {
+          log.info(
+              "{} - Seen {} messages for partition={}|key={}",
+              id,
+              byKey.get(key),
+              partition,
+              key);
+        }
+      }
+      seen = null;
+
       log.info("{} - Consumer-Thread exiting", id);
     }
   }
 
+  public Map<Integer, Map<String, Integer>> getSeen()
+  {
+    return seen;
+  }
+
   private void shutdown()
   {
     shutdown(null);
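Note: the counting added to EndlessConsumer.run() above keeps a nested map
partition -> (key -> count) and exposes it through getSeen() and the /seen
endpoint. As a sketch only -- not part of this commit -- the same bookkeeping
can be written more compactly with computeIfAbsent() and merge(); the class
name SeenCounterSketch is an assumption:

import java.util.HashMap;
import java.util.Map;

// Hypothetical sketch, not part of this commit: same counting as in
// EndlessConsumer.run(), expressed with computeIfAbsent()/merge().
public class SeenCounterSketch
{
  private final Map<Integer, Map<String, Integer>> seen = new HashMap<>();

  // Called once per consumed record, mirroring the loop body in EndlessConsumer.run()
  public void count(int partition, String recordKey)
  {
    String key = recordKey == null ? "NULL" : recordKey;
    seen
      .computeIfAbsent(partition, p -> new HashMap<>()) // create the per-partition map on first use
      .merge(key, 1, Integer::sum);                      // increment the counter for this key
  }

  public Map<Integer, Map<String, Integer>> getSeen()
  {
    return seen;
  }
}

As in the commit itself, getSeen() hands out the map without synchronization,
even though it is written on the consumer thread and read on the web thread;
that concern is left untouched by this sketch.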