exit
fi
-docker-compose up -d zookeeper kafka cli
+docker-compose up -d zookeeper kafka cli mongo express
if [[
$(docker image ls -q $IMAGE) == "" ||
echo "Waiting for the Kafka-Cluster to become ready..."
docker-compose exec cli cub kafka-ready -b kafka:9092 1 60 > /dev/null 2>&1 || exit 1
-docker-compose up setup
-docker-compose up -d producer consumer
-sleep 5
+docker-compose up -d kafka-ui
+
docker-compose exec -T cli bash << 'EOF'
-echo "Writing poison pill into topic test..."
-# tag::poisonpill[]
-echo 'BOOM!' | kafkacat -P -b kafka:9092 -t test
-# end::poisonpill[]
+echo "Creating topic with 3 partitions..."
+kafka-topics --bootstrap-server kafka:9092 --delete --if-exists --topic test
+# tag::createtopic[]
+kafka-topics --bootstrap-server kafka:9092 --create --topic test --partitions 3
+# end::createtopic[]
+kafka-topics --bootstrap-server kafka:9092 --describe --topic test
EOF
-while [[ $(http 0:8081/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "Consumer is still running..."; sleep 1; done
-http -v :8081/actuator/health
-echo "Restarting consumer"
-http -v post :8081/start
-while ! [[ $(http 0:8081/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "Waiting for consumer..."; sleep 1; done
-while [[ $(http 0:8081/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "Consumer is still running..."; sleep 1; done
-http -v :8081/actuator/health
-http -v post :8081/actuator/shutdown
+
+docker-compose up -d consumer
+
+docker-compose up -d producer
+sleep 10
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+
docker-compose stop producer
-docker-compose ps
-docker-compose logs --tail=100 consumer
+docker-compose exec -T cli bash << 'EOF'
+echo "Altering number of partitions from 3 to 7..."
+# tag::altertopic[]
+kafka-topics --bootstrap-server kafka:9092 --alter --topic test --partitions 7
+kafka-topics --bootstrap-server kafka:9092 --describe --topic test
+# end::altertopic[]
+EOF
+
+docker-compose start producer
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+sleep 1
+http -v :8081/seen
+docker-compose stop producer consumer
depends_on:
- zookeeper
- setup:
- image: juplo/toolbox
- command: >
- bash -c "
- kafka-topics --bootstrap-server kafka:9092 --delete --if-exists --topic test
- kafka-topics --bootstrap-server kafka:9092 --create --topic test --partitions 2
- "
+ mongo:
+ image: mongo:4.4.13
+ ports:
+ - 27017:27017
+ environment:
+ MONGO_INITDB_ROOT_USERNAME: juplo
+ MONGO_INITDB_ROOT_PASSWORD: training
+
+ express:
+ image: mongo-express
+ ports:
+ - 8090:8081
+ environment:
+ ME_CONFIG_MONGODB_ADMINUSERNAME: juplo
+ ME_CONFIG_MONGODB_ADMINPASSWORD: training
+ ME_CONFIG_MONGODB_URL: mongodb://juplo:training@mongo:27017/
+
+ kafka-ui:
+ image: provectuslabs/kafka-ui:0.3.3
+ ports:
+ - 8080:8080
+ environment:
+ KAFKA_CLUSTERS_0_NAME: local
+ KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:9092
cli:
image: juplo/toolbox
producer.bootstrap-server: kafka:9092
producer.client-id: producer
producer.topic: test
- producer.throttle-ms: 200
+ producer.throttle-ms: 10
consumer:
environment:
server.port: 8080
consumer.bootstrap-server: kafka:9092
- consumer.client-id: my-group
consumer.client-id: consumer
consumer.topic: test
+ spring.data.mongodb.uri: mongodb://juplo:training@mongo:27017
+ spring.data.mongodb.database: juplo
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.springframework.boot</groupId>
+ <artifactId>spring-boot-starter-data-mongodb</artifactId>
+ </dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-validation</artifactId>
<artifactId>awaitility</artifactId>
<scope>test</scope>
</dependency>
+ <dependency>
+ <groupId>de.flapdoodle.embed</groupId>
+ <artifactId>de.flapdoodle.embed.mongo</artifactId>
+ <scope>test</scope>
+ </dependency>
</dependencies>
<build>
import org.springframework.boot.autoconfigure.SpringBootApplication;
import javax.annotation.PreDestroy;
-import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
KafkaConsumer<String, Long> kafkaConsumer,
ExecutorService executor,
Consumer<ConsumerRecord<String, Long>> handler,
+ PartitionStatisticsRepository repository,
ApplicationProperties properties)
{
return
new EndlessConsumer<>(
executor,
+ repository,
properties.getClientId(),
properties.getTopic(),
kafkaConsumer,
public class EndlessConsumer<K, V> implements ConsumerRebalanceListener, Runnable
{
private final ExecutorService executor;
+ private final PartitionStatisticsRepository repository;
private final String id;
private final String topic;
private final Consumer<K, V> consumer;
partition,
key);
}
+ repository.save(new StatisticsDocument(partition, removed));
});
}
Long offset = consumer.position(tp);
log.info("{} - adding partition: {}, offset={}", id, partition, offset);
offsets.put(partition, offset);
- seen.put(partition, new HashMap<>());
+ seen.put(
+ partition,
+ repository
+ .findById(Integer.toString(tp.partition()))
+ .map(document -> document.statistics)
+ .orElse(new HashMap<>()));
});
}
--- /dev/null
+package de.juplo.kafka;
+
+import org.springframework.data.mongodb.repository.MongoRepository;
+
+import java.util.Optional;
+
+
+/**
+ * Spring Data MongoDB repository for {@link StatisticsDocument}: persists the
+ * per-partition message counters so they survive consumer restarts/rebalances.
+ */
+public interface PartitionStatisticsRepository extends MongoRepository<StatisticsDocument, String>
+{
+  // NOTE(review): this re-declares the findById(String) already inherited from
+  // CrudRepository with the same signature — redundant, but kept to make the
+  // lookup-by-partition-id usage explicit. The id is the partition number as a String.
+  public Optional<StatisticsDocument> findById(String partition);
+}
--- /dev/null
+package de.juplo.kafka;
+
+import lombok.ToString;
+import org.springframework.data.annotation.Id;
+import org.springframework.data.mongodb.core.mapping.Document;
+
+import java.util.HashMap;
+import java.util.Map;
+
+
+/**
+ * MongoDB document holding the message statistics of one topic-partition, so
+ * that counters gathered by the consumer survive a restart or rebalance.
+ */
+@Document(collection = "statistics")
+@ToString
+public class StatisticsDocument
+{
+  // Document id: the partition number rendered as a String.
+  @Id
+  public String id;
+  // Per-key message counters seen on this partition (message key -> count).
+  public Map<String, Long> statistics;
+
+  // No-args constructor — presumably required by Spring Data for
+  // deserialization from MongoDB; TODO confirm it is not otherwise used.
+  public StatisticsDocument()
+  {
+  }
+
+  // Creates the document for the given partition with the given counters.
+  public StatisticsDocument(Integer partition, Map<String, Long> statistics)
+  {
+    this.id = Integer.toString(partition);
+    this.statistics = statistics;
+  }
+}
group-id: ${consumer.group-id}
topic: ${consumer.topic}
auto-offset-reset: ${consumer.auto-offset-reset}
+spring:
+ data:
+ mongodb:
+ uri: mongodb://juplo:training@localhost:27017
+ database: juplo
logging:
level:
root: INFO
import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.*;
import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
+import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.context.annotation.Bean;
@TestPropertySource(
properties = {
"consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
- "consumer.topic=" + TOPIC })
+ "consumer.topic=" + TOPIC,
+ "spring.mongodb.embedded.version=4.4.13" })
@EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
+@EnableAutoConfiguration
+@AutoConfigureDataMongo
@Slf4j
class ApplicationTests
{
ApplicationProperties properties;
@Autowired
ExecutorService executor;
+ @Autowired
+ PartitionStatisticsRepository repository;
Consumer<ConsumerRecord<String, Long>> testHandler;
EndlessConsumer<String, Long> endlessConsumer;
endlessConsumer =
new EndlessConsumer<>(
executor,
+ repository,
properties.getClientId(),
properties.getTopic(),
kafkaConsumer,