refactor: Handling der Partitionen in WordcountRebalanceListener
[demos/kafka/training] / src / main / java / de / juplo / kafka / ApplicationConfiguration.java
1 package de.juplo.kafka;
2
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.CooperativeStickyAssignor;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import java.time.Clock;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
14
15
16 @Configuration
17 @EnableConfigurationProperties(ApplicationProperties.class)
18 public class ApplicationConfiguration
19 {
20   @Bean
21   public WordcountRecordHandler wordcountRecordHandler(
22       PartitionStatisticsRepository repository,
23       Consumer<String, String> consumer,
24       ApplicationProperties properties)
25   {
26     return new WordcountRecordHandler(
27         repository,
28         properties.getTopic(),
29         Clock.systemDefaultZone(),
30         properties.getCommitInterval(),
31         consumer);
32   }
33
34   @Bean
35   public WordcountRebalanceListener wordcountRebalanceListener(
36       WordcountRecordHandler wordcountRecordHandler,
37       PartitionStatisticsRepository repository,
38       Consumer<String, String> consumer,
39       ApplicationProperties properties)
40   {
41     return new WordcountRebalanceListener(
42         wordcountRecordHandler,
43         repository,
44         properties.getClientId(),
45         consumer);
46   }
47
48   @Bean
49   public EndlessConsumer<String, String> endlessConsumer(
50       KafkaConsumer<String, String> kafkaConsumer,
51       ExecutorService executor,
52       WordcountRebalanceListener wordcountRebalanceListener,
53       WordcountRecordHandler wordcountRecordHandler,
54       ApplicationProperties properties)
55   {
56     return
57         new EndlessConsumer<>(
58             executor,
59             properties.getClientId(),
60             properties.getTopic(),
61             kafkaConsumer,
62             wordcountRebalanceListener,
63             wordcountRecordHandler);
64   }
65
66   @Bean
67   public ExecutorService executor()
68   {
69     return Executors.newSingleThreadExecutor();
70   }
71
72   @Bean(destroyMethod = "close")
73   public KafkaConsumer<String, String> kafkaConsumer(ApplicationProperties properties)
74   {
75     Properties props = new Properties();
76
77     props.put("bootstrap.servers", properties.getBootstrapServer());
78     props.put("partition.assignment.strategy", "org.apache.kafka.clients.consumer.CooperativeStickyAssignor");
79     props.put("group.id", properties.getGroupId());
80     props.put("client.id", properties.getClientId());
81     props.put("enable.auto.commit", false);
82     props.put("auto.offset.reset", properties.getAutoOffsetReset());
83     props.put("metadata.max.age.ms", "1000");
84     props.put("key.deserializer", StringDeserializer.class.getName());
85     props.put("value.deserializer", StringDeserializer.class.getName());
86
87     return new KafkaConsumer<>(props);
88   }
89 }