* Taken over from `springified-consumer--config` via `git cherry-pick`.
* Conflicts:
** src/main/java/de/juplo/kafka/ApplicationConfiguration.java
** src/main/java/de/juplo/kafka/ApplicationProperties.java
** src/main/resources/application.yml
** src/test/java/de/juplo/kafka/ApplicationTests.java
* Caught up on the adjustments to `README.sh`, `docker-compose.yml`, and `pom.xml`.
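For orientation: the change replaces the hand-rolled `sumup.adder.*` consumer settings with Spring Boot's auto-configured `KafkaProperties`, which binds all `spring.kafka.*` keys. A minimal, self-contained sketch of that binding (the class name `BindingSketch` is made up for illustration; it assumes `spring-boot-starter` plus `spring-kafka` on the classpath, so that `KafkaAutoConfiguration` registers the `KafkaProperties` bean):

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.context.ConfigurableApplicationContext;

@SpringBootApplication
public class BindingSketch
{
  public static void main(String[] args)
  {
    // The same keys that docker-compose.yml now sets as environment entries:
    ConfigurableApplicationContext context = SpringApplication.run(
        BindingSketch.class,
        "--spring.kafka.bootstrap-servers=kafka:9092",
        "--spring.kafka.client-id=adder-1",
        "--spring.kafka.consumer.auto-commit-interval=1s");

    KafkaProperties kafka = context.getBean(KafkaProperties.class);
    System.out.println(kafka.getBootstrapServers());                 // [kafka:9092]
    System.out.println(kafka.getClientId());                         // adder-1
    System.out.println(kafka.getConsumer().getAutoCommitInterval()); // PT1S
  }
}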
#!/bin/bash
-IMAGE=juplo/sumup-adder-json:1.0-SNAPSHOT
+IMAGE=juplo/sumup-adder-springified:1.0-SNAPSHOT
if [ "$1" = "cleanup" ]
then
command: sleep infinity
gateway:
- image: juplo/sumup-gateway:1.0-SNAPSHOT
+ image: juplo/sumup-gateway--springified:1.0-SNAPSHOT
ports:
- 8080:8080
environment:
server.port: 8080
- sumup.gateway.bootstrap-server: kafka:9092
- sumup.gateway.client-id: gateway
+ spring.kafka.bootstrap-servers: kafka:9092
+ spring.kafka.client-id: gateway
sumup.gateway.topic: in
requests-1:
sumup.requests.client-id: requests-2
adder-1:
- image: juplo/sumup-adder-json:1.0-SNAPSHOT
+ image: juplo/sumup-adder-springified:1.0-SNAPSHOT
ports:
- 8091:8080
environment:
server.port: 8080
- sumup.adder.bootstrap-server: kafka:9092
- sumup.adder.client-id: adder-1
- sumup.adder.commit-interval: 1s
+ spring.kafka.bootstrap-servers: kafka:9092
+ spring.kafka.client-id: adder-1
+ spring.kafka.auto-commit-interval: 1s
sumup.adder.throttle: 3ms
spring.data.mongodb.uri: mongodb://juplo:training@mongo:27017
spring.data.mongodb.database: juplo
logging.level.org.apache.kafka.clients.consumer: DEBUG
adder-2:
- image: juplo/sumup-adder-json:1.0-SNAPSHOT
+ image: juplo/sumup-adder-springified:1.0-SNAPSHOT
ports:
- 8092:8080
environment:
server.port: 8080
- sumup.adder.bootstrap-server: kafka:9092
- sumup.adder.client-id: adder-2
- sumup.adder.commit-interval: 1s
+ spring.kafka.bootstrap-servers: kafka:9092
+ spring.kafka.client-id: adder-2
+ spring.kafka.auto-commit-interval: 1s
sumup.adder.throttle: 3ms
spring.data.mongodb.uri: mongodb://juplo:training@mongo:27017
spring.data.mongodb.database: juplo
</parent>
<groupId>de.juplo.kafka</groupId>
- <artifactId>sumup-adder-json</artifactId>
+ <artifactId>sumup-adder-springified</artifactId>
<version>1.0-SNAPSHOT</version>
<name>SumUp Adder</name>
<description>Calculates the sum of the sent messages. This version consumes JSON messages.</description>
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;
+import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
@Configuration
-@EnableConfigurationProperties(ApplicationProperties.class)
+@EnableConfigurationProperties({ KafkaProperties.class, ApplicationProperties.class })
public class ApplicationConfiguration
{
@Bean
public ApplicationRecordHandler recordHandler(
AdderResults adderResults,
- ApplicationProperties properties)
+ KafkaProperties kafkaProperties,
+ ApplicationProperties applicationProperties)
{
return new ApplicationRecordHandler(
adderResults,
- Optional.ofNullable(properties.getThrottle()),
- properties.getClientId());
+ Optional.ofNullable(applicationProperties.getThrottle()),
+ kafkaProperties.getClientId());
}
@Bean
ApplicationRecordHandler recordHandler,
AdderResults adderResults,
StateRepository stateRepository,
- ApplicationProperties properties)
+ KafkaProperties kafkaProperties,
+ ApplicationProperties applicationProperties)
{
return new ApplicationRebalanceListener(
recordHandler,
adderResults,
stateRepository,
- properties.getClientId());
+ kafkaProperties.getClientId());
}
@Bean
ExecutorService executor,
ApplicationRebalanceListener rebalanceListener,
ApplicationRecordHandler recordHandler,
- ApplicationProperties properties)
+ KafkaProperties kafkaProperties,
+ ApplicationProperties applicationProperties)
{
return
new EndlessConsumer<>(
executor,
- properties.getClientId(),
- properties.getTopic(),
+ kafkaProperties.getClientId(),
+ applicationProperties.getTopic(),
kafkaConsumer,
rebalanceListener,
recordHandler);
}
@Bean(destroyMethod = "close")
- public KafkaConsumer<String, Message> kafkaConsumer(ApplicationProperties properties)
+ public KafkaConsumer<String, Message> kafkaConsumer(KafkaProperties kafkaProperties)
{
Properties props = new Properties();
- props.put("bootstrap.servers", properties.getBootstrapServer());
+ props.put("bootstrap.servers", kafkaProperties.getBootstrapServers());
props.put("partition.assignment.strategy", "org.apache.kafka.clients.consumer.StickyAssignor");
- props.put("group.id", properties.getGroupId());
- props.put("client.id", properties.getClientId());
- props.put("auto.offset.reset", properties.getAutoOffsetReset());
- props.put("auto.commit.interval.ms", (int)properties.getCommitInterval().toMillis());
- props.put("metadata.max.age.ms", "1000");
+ props.put("group.id", kafkaProperties.getConsumer().getGroupId());
+ props.put("client.id", kafkaProperties.getClientId());
+ props.put("auto.offset.reset", kafkaProperties.getConsumer().getAutoOffsetReset());
+ props.put("auto.commit.interval.ms", (int)kafkaProperties.getConsumer().getAutoCommitInterval().toMillis());
+ props.put("metadata.max.age.ms", kafkaProperties.getConsumer().getProperties().get("metadata.max.age.ms"));
props.put("key.deserializer", StringDeserializer.class.getName());
props.put("value.deserializer", JsonDeserializer.class.getName());
props.put(JsonDeserializer.TRUSTED_PACKAGES, "de.juplo.kafka");
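A possible shortcut, not used in this commit: `KafkaProperties` can assemble the whole consumer map itself. A sketch under the assumption of Spring Boot 2.x, where `buildConsumerProperties()` takes no arguments and returns a `Map<String, Object>` (an import for `java.util.Map` would be needed as well):

@Bean(destroyMethod = "close")
public KafkaConsumer<String, Message> kafkaConsumer(KafkaProperties kafkaProperties)
{
  // buildConsumerProperties() merges spring.kafka.* and
  // spring.kafka.consumer.* (including the properties map, and thus
  // metadata.max.age.ms) into a single configuration map.
  Map<String, Object> props = kafkaProperties.buildConsumerProperties();
  props.put("partition.assignment.strategy",
      "org.apache.kafka.clients.consumer.StickyAssignor");
  props.put("key.deserializer", StringDeserializer.class.getName());
  props.put("value.deserializer", JsonDeserializer.class.getName());
  props.put(JsonDeserializer.TRUSTED_PACKAGES, "de.juplo.kafka");
  return new KafkaConsumer<>(props);
}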
@Setter
public class ApplicationProperties
{
- @NotNull
- @NotEmpty
- private String bootstrapServer;
- @NotNull
- @NotEmpty
- private String groupId;
- @NotNull
- @NotEmpty
- private String clientId;
@NotNull
@NotEmpty
private String topic;
- @NotNull
- @NotEmpty
- private String autoOffsetReset;
- @NotNull
- private Duration commitInterval;
private Duration throttle;
}
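After the removal, the class shrinks to the two domain-specific settings. For orientation, a sketch of the remaining class; the `@ConfigurationProperties` prefix `sumup.adder` follows from application.yml, while the Lombok `@Getter` next to the `@Setter` shown above and the `javax.validation` imports (Spring Boot 2.x) are assumptions:

import java.time.Duration;
import javax.validation.constraints.NotEmpty;
import javax.validation.constraints.NotNull;
import lombok.Getter;
import lombok.Setter;
import org.springframework.boot.context.properties.ConfigurationProperties;

@ConfigurationProperties(prefix = "sumup.adder")
@Getter
@Setter
public class ApplicationProperties
{
  @NotNull
  @NotEmpty
  private String topic;

  // Optional; the configuration wraps it in Optional.ofNullable(...) above.
  private Duration throttle;
}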
sumup:
adder:
- bootstrap-server: :9092
- group-id: my-group
- client-id: DEV
topic: out
- auto-offset-reset: earliest
- commit-interval: 5s
management:
endpoint:
shutdown:
enabled: true
info:
kafka:
- bootstrap-server: ${consumer.bootstrap-server}
- client-id: ${consumer.client-id}
- group-id: ${consumer.group-id}
+ bootstrap-server: ${spring.kafka.bootstrap-servers}
+ client-id: ${spring.kafka.client-id}
+ group-id: ${spring.kafka.consumer.group-id}
- topic: ${consumer.topic}
+ topic: ${sumup.adder.topic}
- auto-offset-reset: ${consumer.auto-offset-reset}
+ auto-offset-reset: ${spring.kafka.consumer.auto-offset-reset}
spring:
data:
mongodb:
uri: mongodb://juplo:training@localhost:27017
database: juplo
+ kafka:
+ bootstrap-servers: :9092
+ client-id: DEV
+ consumer:
+ group-id: my-group
+ auto-offset-reset: earliest
+ auto-commit-interval: 5s
+ properties:
+ metadata.max.age.ms: 1000
logging:
level:
root: INFO
@SpringBootTest(
webEnvironment = SpringBootTest.WebEnvironment.RANDOM_PORT,
properties = {
- "sumup.adder.bootstrap-server=${spring.embedded.kafka.brokers}",
+ "spring.kafka.bootstrap-servers=${spring.embedded.kafka.brokers}",
"sumup.adder.topic=" + TOPIC,
"spring.mongodb.embedded.version=4.4.13" })
@EmbeddedKafka(topics = TOPIC)
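The `${spring.embedded.kafka.brokers}` placeholder is filled in by spring-kafka-test's `@EmbeddedKafka` (property name as of spring-kafka 2.x). A minimal sketch of the mechanism, independent of this project; class and test names are made up, and it assumes a Spring Boot test module where a boot configuration is discoverable:

import static org.assertj.core.api.Assertions.assertThat;

import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.kafka.test.context.EmbeddedKafka;

@SpringBootTest(properties = "spring.kafka.bootstrap-servers=${spring.embedded.kafka.brokers}")
@EmbeddedKafka(topics = "sketch")
class EmbeddedBrokerSketch
{
  @Autowired
  KafkaProperties kafkaProperties;

  @Test
  void placeholderResolvesToTheEmbeddedBroker()
  {
    // @EmbeddedKafka exports its broker list under
    // spring.embedded.kafka.brokers before the context starts,
    // so the placeholder above resolves to the embedded broker.
    assertThat(kafkaProperties.getBootstrapServers()).isNotEmpty();
  }
}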
import org.junit.jupiter.api.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
+import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.boot.autoconfigure.mongo.MongoProperties;
import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
@SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
@TestPropertySource(
properties = {
- "sumup.adder.bootstrap-server=${spring.embedded.kafka.brokers}",
+ "spring.kafka.bootstrap-servers=${spring.embedded.kafka.brokers}",
"sumup.adder.topic=" + TOPIC,
- "sumup.adder.commit-interval=500ms",
+ "spring.kafka.consumer.auto-commit-interval=500ms",
"spring.mongodb.embedded.version=4.4.13" })
@EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
@EnableAutoConfiguration
@Autowired
Consumer<ConsumerRecord<K, V>> consumer;
@Autowired
- ApplicationProperties properties;
+ ApplicationProperties applicationProperties;
+ @Autowired
+ KafkaProperties kafkaProperties;
@Autowired
ExecutorService executor;
@Autowired
{
Properties props;
props = new Properties();
- props.put("bootstrap.servers", properties.getBootstrapServer());
+ props.put("bootstrap.servers", kafkaProperties.getBootstrapServers());
props.put("linger.ms", 100);
props.put("key.serializer", BytesSerializer.class.getName());
props.put("value.serializer", BytesSerializer.class.getName());
testRecordProducer = new KafkaProducer<>(props);
props = new Properties();
- props.put("bootstrap.servers", properties.getBootstrapServer());
+ props.put("bootstrap.servers", kafkaProperties.getBootstrapServers());
props.put("client.id", "OFFSET-CONSUMER");
- props.put("group.id", properties.getGroupId());
+ props.put("group.id", kafkaProperties.getConsumer().getGroupId());
props.put("key.deserializer", BytesDeserializer.class.getName());
props.put("value.deserializer", BytesDeserializer.class.getName());
offsetConsumer = new KafkaConsumer<>(props);
endlessConsumer =
new EndlessConsumer<>(
executor,
- properties.getClientId(),
- properties.getTopic(),
+ kafkaProperties.getClientId(),
+ applicationProperties.getTopic(),
kafkaConsumer,
rebalanceListener,
captureOffsetAndExecuteTestHandler);