X-Git-Url: http://juplo.de/gitweb/?a=blobdiff_plain;f=README.sh;h=56d1990b607d7d0a9c4f4a2e454de9239a9f0339;hb=dec8425c67cc2662e06f3a386e48bef9781f440e;hp=95aae0b264ea7611224ed1dc3e13e7eef97a52f5;hpb=d4bf2b7400df95df6c3f2534519671e596163be2;p=demos%2Fkafka%2Ftraining

diff --git a/README.sh b/README.sh
index 95aae0b..56d1990 100755
--- a/README.sh
+++ b/README.sh
@@ -2,32 +2,40 @@
 
 if [ "$1" = "cleanup" ]
 then
-  docker-compose down -v
-  mvn clean
+  docker-compose -f docker/docker-compose.yml down -t0 -v --remove-orphans
   exit
 fi
 
-mvn package || exit 1
-if [ "$1" = "build" ]; then exit; fi
-
-trap 'kill $(jobs -p) 2>/dev/null' EXIT
-
-docker-compose up -d
-
+docker-compose -f docker/docker-compose.yml up -d --remove-orphans kafka-1 kafka-2 kafka-3
+docker-compose -f docker/docker-compose.yml ps
 echo "Waiting for the Kafka-Cluster to become ready..."
-docker-compose exec kafka cub kafka-ready -b kafka:9092 1 60 > /dev/null 2>&1 || exit 1
-
-echo "Producing messages"
-mvn exec:java@producer
-
-echo "Reading messages"
-mvn exec:java@consumer &
-sleep 7
-kill $(jobs -p)
-sleep 2
-
-echo "Re-Reading messages"
-mvn exec:java@consumer &
-sleep 7
-kill $(jobs -p)
-sleep 2
+docker-compose -f docker/docker-compose.yml run --rm cli cub kafka-ready -b kafka:9092 3 60 > /dev/null 2>&1 || exit 1
+
+docker-compose -f docker/docker-compose.yml up -t0 -d cli
+sleep 1
+docker-compose -f docker/docker-compose.yml logs setup
+
+# --throughput -1 : Kein Throttling zur "künstlichen Kappung" des maximalen Durchsatzes
+# --record-size : Größe der einzelnen Nachricht in Bytes
+# batch.size : Größe der Batches in Bytes
+# --num-records : Variierende passend ausprobierte Werte, damit für die unterschiedlichen
+#                 Batch-Size ungefähr gleich viele Zwischen-Schätzungen auflaufen
+
+docker-compose -f docker/docker-compose.yml exec -T cli bash << EOF
+echo "------------------- batch.size = 0 bytes"
+kafka-producer-perf-test --topic test --record-size 1024 --num-records 100000 --throughput -1 --producer-props bootstrap.servers=kafka:9092 batch.size=0
+echo
+echo "------------------- batch.size = 8192 bytes"
+kafka-producer-perf-test --topic test --record-size 1024 --num-records 1000000 --throughput -1 --producer-props bootstrap.servers=kafka:9092 batch.size=8192
+echo
+echo "------------------- batch.size = 16384 bytes"
+kafka-producer-perf-test --topic test --record-size 1024 --num-records 1000000 --throughput -1 --producer-props bootstrap.servers=kafka:9092 batch.size=16384
+echo
+echo "------------------- batch.size = 32768 bytes"
+kafka-producer-perf-test --topic test --record-size 1024 --num-records 1000000 --throughput -1 --producer-props bootstrap.servers=kafka:9092 batch.size=32768
+echo
+echo "------------------- batch.size = 524288 bytes (0.5 MB)"
+kafka-producer-perf-test --topic test --record-size 1024 --num-records 5000000 --throughput -1 --producer-props bootstrap.servers=kafka:9092 batch.size=524288
+EOF
+
+# Default-Batchgröße: 16384 bytes = 16 kibibytes = ca. 16 kilobytes