package de.juplo.kafka;

-import org.apache.kafka.clients.consumer.Consumer;
-import org.apache.kafka.clients.consumer.KafkaConsumer;
-import org.apache.kafka.common.serialization.StringDeserializer;
+import org.apache.kafka.common.serialization.ByteArraySerializer;
+import org.apache.kafka.common.serialization.StringSerializer;
+import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
-import java.time.Clock;
-import java.util.Properties;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
+import java.util.Map;
+import java.util.Optional;
+
+import org.springframework.kafka.config.KafkaListenerEndpointRegistry;
+import org.springframework.kafka.core.DefaultKafkaProducerFactory;
+import org.springframework.kafka.core.KafkaOperations;
+import org.springframework.kafka.core.KafkaTemplate;
+import org.springframework.kafka.core.ProducerFactory;
+import org.springframework.kafka.listener.DeadLetterPublishingRecoverer;
+import org.springframework.kafka.listener.DefaultErrorHandler;
+import org.springframework.kafka.support.serializer.DelegatingByTypeSerializer;
+import org.springframework.kafka.support.serializer.JsonSerializer;
+import org.springframework.util.backoff.FixedBackOff;

@Configuration
-@EnableConfigurationProperties(ApplicationProperties.class)
+@EnableConfigurationProperties({ KafkaProperties.class, ApplicationProperties.class })
public class ApplicationConfiguration
{
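+ // Processes the consumed records; the throttle is optional and only applied if configured.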
@Bean
- public WordcountRecordHandler wordcountRecordHandler(
- PartitionStatisticsRepository repository,
- Consumer<String, String> consumer,
- ApplicationProperties properties)
+ public ApplicationRecordHandler applicationRecordHandler(
+ AdderResults adderResults,
+ KafkaProperties kafkaProperties,
+ ApplicationProperties applicationProperties)
+ {
+ return new ApplicationRecordHandler(
+ adderResults,
+ Optional.ofNullable(applicationProperties.getThrottle()),
+ kafkaProperties.getClientId());
+ }
+
+ @Bean
+ public AdderResults adderResults()
{
- return new WordcountRecordHandler(
- repository,
- properties.getTopic(),
- Clock.systemDefaultZone(),
- properties.getCommitInterval(),
- consumer);
+ return new AdderResults();
}
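
+ // Saves the per-partition state and results through the StateRepository when
+ // partitions are revoked and restores them when partitions are assigned.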
@Bean
- public WordcountRebalanceListener wordcountRebalanceListener(
- WordcountRecordHandler wordcountRecordHandler,
- PartitionStatisticsRepository repository,
- Consumer<String, String> consumer,
- ApplicationProperties properties)
+ public ApplicationRebalanceListener rebalanceListener(
+ ApplicationRecordHandler recordHandler,
+ AdderResults adderResults,
+ StateRepository stateRepository,
+ KafkaProperties kafkaProperties)
{
- return new WordcountRebalanceListener(
- wordcountRecordHandler,
- repository,
- properties.getClientId(),
- consumer);
+ return new ApplicationRebalanceListener(
+ recordHandler,
+ adderResults,
+ stateRepository,
+ kafkaProperties.getClientId());
}
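
+ // EndlessConsumer no longer polls itself: it only starts and stops the
+ // listener containers managed by Spring Kafka via the KafkaListenerEndpointRegistry.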
@Bean
- public EndlessConsumer<String, String> endlessConsumer(
- KafkaConsumer<String, String> kafkaConsumer,
- ExecutorService executor,
- WordcountRebalanceListener wordcountRebalanceListener,
- WordcountRecordHandler wordcountRecordHandler,
- ApplicationProperties properties)
+ public EndlessConsumer endlessConsumer(
+ RecordHandler recordHandler,
+ KafkaProperties kafkaProperties,
+ KafkaListenerEndpointRegistry endpointRegistry)
{
return
- new EndlessConsumer<>(
- executor,
- properties.getClientId(),
- properties.getTopic(),
- kafkaConsumer,
- wordcountRebalanceListener,
- wordcountRecordHandler);
+ new EndlessConsumer(
+ kafkaProperties.getClientId(),
+ endpointRegistry,
+ recordHandler);
}
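
+ // The DelegatingByTypeSerializer picks the value serializer by payload type:
+ // byte[] values (e.g. records re-published as-is by the dead-letter recoverer)
+ // pass through unchanged, the message types are serialized as JSON.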
@Bean
- public ExecutorService executor()
+ public ProducerFactory<String, Object> producerFactory(
+ KafkaProperties properties)
{
- return Executors.newSingleThreadExecutor();
+ return new DefaultKafkaProducerFactory<>(
+ properties.getProducer().buildProperties(),
+ new StringSerializer(),
+ new DelegatingByTypeSerializer(
+ Map.of(
+ byte[].class, new ByteArraySerializer(),
+ MessageAddNumber.class, new JsonSerializer<>(),
+ MessageCalculateSum.class, new JsonSerializer<>())));
}

- @Bean(destroyMethod = "close")
- public KafkaConsumer<String, String> kafkaConsumer(ApplicationProperties properties)
+ @Bean
+ public KafkaTemplate<String, Object> kafkaTemplate(
+ ProducerFactory<String, Object> producerFactory)
{
- Properties props = new Properties();
+ return new KafkaTemplate<>(producerFactory);
+ }

- props.put("bootstrap.servers", properties.getBootstrapServer());
- props.put("partition.assignment.strategy", "org.apache.kafka.clients.consumer.CooperativeStickyAssignor");
- props.put("group.id", properties.getGroupId());
- props.put("client.id", properties.getClientId());
- props.put("enable.auto.commit", false);
- props.put("auto.offset.reset", properties.getAutoOffsetReset());
- props.put("metadata.max.age.ms", "1000");
- props.put("key.deserializer", StringDeserializer.class.getName());
- props.put("value.deserializer", StringDeserializer.class.getName());
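+ // Publishes records that could not be processed to a dead-letter topic;
+ // by default the target topic is the source topic name suffixed with ".DLT".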
+ @Bean
+ public DeadLetterPublishingRecoverer deadLetterPublishingRecoverer(
+ KafkaOperations<?, ?> kafkaTemplate)
+ {
+ return new DeadLetterPublishingRecoverer(kafkaTemplate);
+ }

- return new KafkaConsumer<>(props);
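+ // FixedBackOff with zero retries: failing records are not retried but handed
+ // straight to the dead-letter recoverer.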
+ @Bean
+ public DefaultErrorHandler errorHandler(
+ DeadLetterPublishingRecoverer recoverer)
+ {
+ return new DefaultErrorHandler(
+ recoverer,
+ new FixedBackOff(0L, 0L));
}
}