if [ "$1" = "cleanup" ]
then
- docker-compose -f docker/docker-compose.yml down -t0 -v --remove-orphans
+ docker compose -f docker/docker-compose.yml down -t0 -v --remove-orphans
mvn clean
exit
fi
-docker-compose -f docker/docker-compose.yml up -d --remove-orphans kafka-1 kafka-2 kafka-3
+docker compose -f docker/docker-compose.yml up -d --remove-orphans kafka-1 kafka-2 kafka-3
+docker compose -f docker/docker-compose.yml rm -svf producer
if [[
$(docker image ls -q $IMAGE) == "" ||
docker image ls $IMAGE
fi
-docker-compose -f docker/docker-compose.yml up --remove-orphans setup || exit 1
+docker compose -f docker/docker-compose.yml up --remove-orphans setup || exit 1
-docker-compose -f docker/docker-compose.yml up -t0 -d cli
-docker-compose -f docker/docker-compose.yml ps
-docker-compose -f docker/docker-compose.yml up producer
-# tag::kafkacat[]
-kafkacat -b :9092 -t test -e -f'topic=%t\tpartition=%p\toffset=%o\tkey=%k\tvalue=%s\n'
-# end::kafkacat[]
+docker compose -f docker/docker-compose.yml up -d producer
+docker compose -f docker/docker-compose.yml up -d consumer-1 consumer-2
+sleep 15
+
+docker compose -f docker/docker-compose.yml stop producer
+
+echo
+echo "Von consumer-1 empfangen:"
+docker compose -f docker/docker-compose.yml logs consumer-1 | grep '\ test\/.'
+echo
+echo "Von consumer-2 empfangen:"
+docker compose -f docker/docker-compose.yml logs consumer-2 | grep '\ test\/.'
+
+docker compose -f docker/docker-compose.yml stop consumer-1 consumer-2
-version: '3.2'
services:
zookeeper:
- image: confluentinc/cp-zookeeper:7.5.1
+ image: confluentinc/cp-zookeeper:7.7.1
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ports:
- zookeeper-log:/var/lib/zookeeper/log
kafka-1:
- image: confluentinc/cp-kafka:7.5.1
+ image: confluentinc/cp-kafka:7.7.1
environment:
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
KAFKA_LISTENERS: BROKER://:9092, LOCALHOST://:9081
KAFKA_INTER_BROKER_LISTENER_NAME: BROKER
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
KAFKA_AUTO_CREATE_TOPICS_ENABLE: "false"
- KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.authorizer.AclAuthorizer
- KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true"
- KAFKA_OPTS:
- -javaagent:/usr/share/java/cp-base-new/jmx_prometheus_javaagent-0.18.0.jar=0.0.0.0:80:/etc/jmx-exporter.yml
- -Dcom.sun.management.jmxremote=true
- -Dcom.sun.management.jmxremote.port=9101
- -Dcom.sun.management.jmxremote.authenticate=false
+      KAFKA_LOG_RETENTION_CHECK_INTERVAL_MS: 10000
volumes:
- - ./jmx-exporter.yml:/etc/jmx-exporter.yml:ro
- kafka-1-data:/var/lib/kafka/data
ports:
- 9081:9081
- zookeeper
kafka-2:
- image: confluentinc/cp-kafka:7.5.1
+ image: confluentinc/cp-kafka:7.7.1
environment:
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
KAFKA_LISTENERS: BROKER://:9092, LOCALHOST://:9082
KAFKA_INTER_BROKER_LISTENER_NAME: BROKER
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
KAFKA_AUTO_CREATE_TOPICS_ENABLE: "false"
- KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.authorizer.AclAuthorizer
- KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true"
- KAFKA_OPTS:
- -javaagent:/usr/share/java/cp-base-new/jmx_prometheus_javaagent-0.18.0.jar=0.0.0.0:80:/etc/jmx-exporter.yml
- -Dcom.sun.management.jmxremote=true
- -Dcom.sun.management.jmxremote.port=9101
- -Dcom.sun.management.jmxremote.authenticate=false
+ KAFKA_LOG_RETENTION_CHECK_INTERVAL_MS: 10000
volumes:
- - ./jmx-exporter.yml:/etc/jmx-exporter.yml:ro
- kafka-2-data:/var/lib/kafka/data
ports:
    - 9082:9082
- zookeeper
kafka-3:
- image: confluentinc/cp-kafka:7.5.1
+ image: confluentinc/cp-kafka:7.7.1
environment:
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
KAFKA_LISTENERS: BROKER://:9092, LOCALHOST://:9083
KAFKA_INTER_BROKER_LISTENER_NAME: BROKER
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
KAFKA_AUTO_CREATE_TOPICS_ENABLE: "false"
- KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.authorizer.AclAuthorizer
- KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true"
- KAFKA_OPTS:
- -javaagent:/usr/share/java/cp-base-new/jmx_prometheus_javaagent-0.18.0.jar=0.0.0.0:80:/etc/jmx-exporter.yml
- -Dcom.sun.management.jmxremote=true
- -Dcom.sun.management.jmxremote.port=9101
- -Dcom.sun.management.jmxremote.authenticate=false
+ KAFKA_LOG_RETENTION_CHECK_INTERVAL_MS: 10000
volumes:
- - ./jmx-exporter.yml:/etc/jmx-exporter.yml:ro
- kafka-3-data:/var/lib/kafka/data
ports:
- 9083:9083
- zookeeper
schema-registry:
- image: confluentinc/cp-schema-registry:7.5.1
+ image: confluentinc/cp-schema-registry:7.7.1
environment:
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: kafka-1:9092,kafka-2:9092,kafka-3:9092
SCHEMA_REGISTRY_HOST_NAME: schema-registry
- kafka-3
connect:
- image: confluentinc/cp-kafka-connect:7.5.1
+ image: confluentinc/cp-kafka-connect:7.7.1
environment:
CONNECT_BOOTSTRAP_SERVERS: kafka-1:9092,kafka-2:9092,kafka-3:9092
CONNECT_REST_PORT: 8083
spring.kafka.client-id: producer
spring.kafka.template.default-topic: test
+ consumer-1:
+ image: juplo/simple-consumer:1.0-SNAPSHOT
+    command: kafka-1:9092,kafka-2:9092,kafka-3:9092 test my-group consumer-1
+
+ consumer-2:
+ image: juplo/simple-consumer:1.0-SNAPSHOT
+    command: kafka-1:9092,kafka-2:9092,kafka-3:9092 test my-group consumer-2
+
volumes:
zookeeper-data:
zookeeper-log:
<parent>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId>
- <version>2.7.2</version>
+ <version>3.3.4</version>
<relativePath/> <!-- lookup parent from repository -->
</parent>
<version>1.0-SNAPSHOT</version>
<properties>
- <java.version>17</java.version>
+ <java.version>21</java.version>
</properties>
<dependencies>
</execution>
</executions>
</plugin>
+ <plugin>
+ <groupId>pl.project13.maven</groupId>
+ <artifactId>git-commit-id-plugin</artifactId>
+ </plugin>
<plugin>
<groupId>io.fabric8</groupId>
<artifactId>docker-maven-plugin</artifactId>
- <version>0.33.0</version>
+ <version>0.45.0</version>
<configuration>
<images>
<image>
</execution>
</executions>
</plugin>
- <plugin>
- <artifactId>maven-failsafe-plugin</artifactId>
- </plugin>
</plugins>
</build>