<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.springframework.kafka</groupId>
+ <artifactId>spring-kafka</artifactId>
+ </dependency>
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
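The only change to the pom.xml is the new spring-kafka dependency; kafka-clients and lombok stay as they are, and no version tag is needed because spring-kafka is covered by Spring Boot's dependency management. Pulling it in activates Spring Boot's KafkaAutoConfiguration, which builds a ConsumerFactory and a listener-container factory from the spring.kafka.* properties shown further down. As a minimal sketch (hypothetical usage, not part of this commit), the auto-configured factory can even hand out a plain consumer where one is still needed:

    @Autowired
    ConsumerFactory<?, ?> consumerFactory;

    // createConsumer() returns a ready-to-use org.apache.kafka.clients.consumer.Consumer
    org.apache.kafka.clients.consumer.Consumer<?, ?> consumer =
        consumerFactory.createConsumer();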
package de.juplo.kafka;
import lombok.extern.slf4j.Slf4j;
-import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.boot.ApplicationArguments;
-import org.springframework.boot.ApplicationRunner;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
-import javax.annotation.PreDestroy;
-import java.util.List;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.TimeUnit;
-
@SpringBootApplication
@Slf4j
-public class Application implements ApplicationRunner
+public class Application
{
- @Autowired
- EndlessConsumer endlessConsumer;
- @Autowired
- ExecutorService executor;
-
-
- @Override
- public void run(ApplicationArguments args) throws Exception
- {
- log.info("Starting EndlessConsumer");
- endlessConsumer.start();
- }
-
- @PreDestroy
- public void stopExecutor()
- {
- try
- {
- log.info("Shutting down the ExecutorService.");
- executor.shutdown();
- log.info("Waiting 5 seconds for the ExecutorService to terminate...");
- executor.awaitTermination(5, TimeUnit.SECONDS);
- }
- catch (InterruptedException e)
- {
- log.error("Exception while waiting for the termination of the ExecutorService: {}", e.toString());
- }
- finally
- {
- if (!executor.isShutdown())
- {
- log.warn("Forcing shutdown of ExecutorService!");
- executor
- .shutdownNow()
- .forEach(runnable -> log.warn("Unfinished task: {}", runnable.getClass().getSimpleName()));
- }
- log.info("Shutdow of ExecutorService finished");
- }
- }
-
-
public static void main(String[] args)
{
SpringApplication.run(Application.class, args);
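Since the listener container provided by spring-kafka manages the consumer thread itself, all hand-rolled lifecycle code can go: Application no longer implements ApplicationRunner, and the @PreDestroy hook that shut down the ExecutorService disappears with it. What remains after the change is just the canonical Spring Boot entry point, reconstructed here from the diff above:

    @SpringBootApplication
    @Slf4j
    public class Application
    {
      public static void main(String[] args)
      {
        SpringApplication.run(Application.class, args);
      }
    }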
package de.juplo.kafka;
import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.clients.consumer.KafkaConsumer;
-import org.apache.kafka.common.serialization.LongDeserializer;
-import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
-import java.util.Properties;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
import java.util.function.Consumer;
// Handle record
};
}
-
- @Bean
- public EndlessConsumer<String, Long> endlessConsumer(
- KafkaConsumer<String, Long> kafkaConsumer,
- ExecutorService executor,
- Consumer<ConsumerRecord<String, Long>> handler,
- ApplicationProperties properties)
- {
- return
- new EndlessConsumer<>(
- executor,
- properties.getClientId(),
- properties.getTopic(),
- kafkaConsumer,
- handler);
- }
-
- @Bean
- public ExecutorService executor()
- {
- return Executors.newSingleThreadExecutor();
- }
-
- @Bean(destroyMethod = "close")
- public KafkaConsumer<String, Long> kafkaConsumer(ApplicationProperties properties)
- {
- Properties props = new Properties();
-
- props.put("bootstrap.servers", properties.getBootstrapServer());
- props.put("group.id", properties.getGroupId());
- props.put("client.id", properties.getClientId());
- props.put("auto.offset.reset", properties.getAutoOffsetReset());
- props.put("metadata.max.age.ms", "1000");
- props.put("key.deserializer", StringDeserializer.class.getName());
- props.put("value.deserializer", LongDeserializer.class.getName());
-
- return new KafkaConsumer<>(props);
- }
}
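All Kafka plumbing vanishes from ApplicationConfiguration: the hand-built KafkaConsumer with its Properties block, the single-thread ExecutorService and the EndlessConsumer bean are removed, because Spring Boot now constructs the consumer from the spring.kafka.consumer.* properties. The only bean left is the record handler that the listener delegates to. A sketch of the reduced class, assuming the bean method keeps the shape visible in the remaining context lines (the bean name is an assumption):

    @Configuration
    @EnableConfigurationProperties(ApplicationProperties.class)
    public class ApplicationConfiguration
    {
      @Bean
      public Consumer<ConsumerRecord<String, Long>> consumer()
      {
        return (record) ->
        {
          // Handle record
        };
      }
    }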
@RequiredArgsConstructor
public class ApplicationHealthIndicator implements HealthIndicator
{
- private final EndlessConsumer<String, Long> consumer;
-
-
@Override
public Health health()
{
try
{
- return consumer
- .exitStatus()
- .map(Health::down)
- .orElse(Health.outOfService())
- .build();
+ return Health.up().build();
}
catch (IllegalStateException e)
{
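Because the EndlessConsumer no longer exposes an exit status, ApplicationHealthIndicator degenerates to a constant UP for now. A hedged alternative (not part of this commit) would be to derive health from the auto-configured listener containers, whose lifecycle state spring-kafka exposes through the KafkaListenerEndpointRegistry:

    import org.springframework.kafka.config.KafkaListenerEndpointRegistry;
    import org.springframework.kafka.listener.MessageListenerContainer;

    @RequiredArgsConstructor
    public class ApplicationHealthIndicator implements HealthIndicator
    {
      private final KafkaListenerEndpointRegistry registry;

      @Override
      public Health health()
      {
        // Report DOWN as soon as any registered listener container has stopped
        return registry.getListenerContainers().stream()
            .allMatch(MessageListenerContainer::isRunning)
                ? Health.up().build()
                : Health.down().build();
      }
    }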
import org.springframework.web.bind.annotation.ResponseStatus;
import org.springframework.web.bind.annotation.RestController;
+import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutionException;
@RequiredArgsConstructor
public class DriverController
{
- private final EndlessConsumer consumer;
-
-
@PostMapping("start")
public void start()
{
- consumer.start();
}
@PostMapping("stop")
public void stop() throws ExecutionException, InterruptedException
{
- consumer.stop();
}
@GetMapping("seen")
public Map<Integer, Map<String, Long>> seen()
{
- return consumer.getSeen();
+ return new HashMap<>();
}
@ExceptionHandler
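With the start()/stop() API of the EndlessConsumer gone, the REST endpoints of the DriverController become no-ops and seen() returns an empty map: this commit knowingly trades the remote-control and statistics features for the simpler listener. If the endpoints were to keep working, one option (hypothetical, not in this commit) is to give the @KafkaListener an explicit id and drive the container through the KafkaListenerEndpointRegistry:

    // In EndlessConsumer -- hypothetical id for the listener; note that by
    // default the id also becomes the consumer group id, unless
    // idIsGroup = false is set on the annotation.
    @KafkaListener(id = "endless-consumer", topics = "${consumer.topic}")

    // In DriverController:
    private final KafkaListenerEndpointRegistry registry;

    @PostMapping("start")
    public void start()
    {
      registry.getListenerContainer("endless-consumer").start();
    }

    @PostMapping("stop")
    public void stop()
    {
      registry.getListenerContainer("endless-consumer").stop();
    }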
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
-import org.apache.kafka.clients.consumer.*;
-import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.errors.RecordDeserializationException;
-import org.apache.kafka.common.errors.WakeupException;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.kafka.annotation.KafkaListener;
+import org.springframework.stereotype.Component;
-import javax.annotation.PreDestroy;
-import java.time.Duration;
-import java.util.*;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.locks.Condition;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
+import java.util.function.Consumer;
+@Component
@Slf4j
@RequiredArgsConstructor
-public class EndlessConsumer<K, V> implements Runnable
+public class EndlessConsumer<K, V>
{
- private final ExecutorService executor;
- private final String id;
- private final String topic;
- private final Consumer<K, V> consumer;
- private final java.util.function.Consumer<ConsumerRecord<K, V>> handler;
-
- private final Lock lock = new ReentrantLock();
- private final Condition condition = lock.newCondition();
- private boolean running = false;
- private Exception exception;
- private long consumed = 0;
-
- private final Map<Integer, Map<String, Long>> seen = new HashMap<>();
- private final Map<Integer, Long> offsets = new HashMap<>();
-
-
- @Override
- public void run()
- {
- try
- {
- log.info("{} - Subscribing to topic {}", id, topic);
- consumer.subscribe(Arrays.asList(topic), new ConsumerRebalanceListener()
- {
- @Override
- public void onPartitionsRevoked(Collection<TopicPartition> partitions)
- {
- partitions.forEach(tp ->
- {
- Integer partition = tp.partition();
- Long newOffset = consumer.position(tp);
- Long oldOffset = offsets.remove(partition);
- log.info(
- "{} - removing partition: {}, consumed {} records (offset {} -> {})",
- id,
- partition,
- newOffset - oldOffset,
- oldOffset,
- newOffset);
- Map<String, Long> removed = seen.remove(partition);
- for (String key : removed.keySet())
- {
- log.info(
- "{} - Seen {} messages for partition={}|key={}",
- id,
- removed.get(key),
- partition,
- key);
- }
- });
- }
-
- @Override
- public void onPartitionsAssigned(Collection<TopicPartition> partitions)
- {
- partitions.forEach(tp ->
- {
- Integer partition = tp.partition();
- Long offset = consumer.position(tp);
- log.info("{} - adding partition: {}, offset={}", id, partition, offset);
- offsets.put(partition, offset);
- seen.put(partition, new HashMap<>());
- });
- }
- });
-
- while (true)
- {
- ConsumerRecords<K, V> records =
- consumer.poll(Duration.ofSeconds(1));
-
- // Do something with the data...
- log.info("{} - Received {} messages", id, records.count());
- for (ConsumerRecord<K, V> record : records)
- {
- log.info(
- "{} - {}: {}/{} - {}={}",
- id,
- record.offset(),
- record.topic(),
- record.partition(),
- record.key(),
- record.value()
- );
-
- handler.accept(record);
-
- consumed++;
-
- Integer partition = record.partition();
- String key = record.key() == null ? "NULL" : record.key().toString();
- Map<String, Long> byKey = seen.get(partition);
-
- if (!byKey.containsKey(key))
- byKey.put(key, 0l);
-
- long seenByKey = byKey.get(key);
- seenByKey++;
- byKey.put(key, seenByKey);
- }
- }
- }
- catch(WakeupException e)
- {
- log.info("{} - RIIING! Request to stop consumption - commiting current offsets!", id);
- consumer.commitSync();
- shutdown();
- }
- catch(RecordDeserializationException e)
- {
- TopicPartition tp = e.topicPartition();
- long offset = e.offset();
- log.error(
- "{} - Could not deserialize message on topic {} with offset={}: {}",
- id,
- tp,
- offset,
- e.getCause().toString());
-
- consumer.commitSync();
- shutdown(e);
- }
- catch(Exception e)
- {
- log.error("{} - Unexpected error: {}", id, e.toString(), e);
- shutdown(e);
- }
- finally
- {
- log.info("{} - Consumer-Thread exiting", id);
- }
- }
-
- private void shutdown()
- {
- shutdown(null);
- }
-
- private void shutdown(Exception e)
- {
- lock.lock();
- try
- {
- try
- {
- log.info("{} - Unsubscribing from topic {}", id, topic);
- consumer.unsubscribe();
- }
- catch (Exception ue)
- {
- log.error(
- "{} - Error while unsubscribing from topic {}: {}",
- id,
- topic,
- ue.toString());
- }
- finally
- {
- running = false;
- exception = e;
- condition.signal();
- }
- }
- finally
- {
- lock.unlock();
- }
- }
-
- public Map<Integer, Map<String, Long>> getSeen()
- {
- return seen;
- }
-
- public void start()
- {
- lock.lock();
- try
- {
- if (running)
- throw new IllegalStateException("Consumer instance " + id + " is already running!");
-
- log.info("{} - Starting - consumed {} messages before", id, consumed);
- running = true;
- exception = null;
- executor.submit(this);
- }
- finally
- {
- lock.unlock();
- }
- }
-
- public synchronized void stop() throws ExecutionException, InterruptedException
- {
- lock.lock();
- try
- {
- if (!running)
- throw new IllegalStateException("Consumer instance " + id + " is not running!");
-
- log.info("{} - Stopping", id);
- consumer.wakeup();
- condition.await();
- log.info("{} - Stopped - consumed {} messages so far", id, consumed);
- }
- finally
- {
- lock.unlock();
- }
- }
-
- @PreDestroy
- public void destroy() throws ExecutionException, InterruptedException
- {
- log.info("{} - Destroy!", id);
- try
- {
- stop();
- }
- catch (IllegalStateException e)
- {
- log.info("{} - Was already stopped", id);
- }
- catch (Exception e)
- {
- log.error("{} - Unexpected exception while trying to stop the consumer", id, e);
- }
- finally
- {
- log.info("{}: Consumed {} messages in total, exiting!", id, consumed);
- }
- }
-
- public boolean running()
- {
- lock.lock();
- try
- {
- return running;
- }
- finally
- {
- lock.unlock();
- }
- }
-
- public Optional<Exception> exitStatus()
- {
- lock.lock();
- try
- {
- if (running)
- throw new IllegalStateException("No exit-status available: Consumer instance " + id + " is running!");
-
- return Optional.ofNullable(exception);
- }
- finally
- {
- lock.unlock();
- }
+ @Value("${consumer.client-id}")
+ String id;
+ @Autowired
+ Consumer<ConsumerRecord<K, V>> handler;
+
+
+ @KafkaListener(topics = "${consumer.topic}")
+ public void receive(ConsumerRecord<K, V> record)
+ {
+ log.info(
+ "{} - {}: {}/{} - {}={}",
+ id,
+ record.offset(),
+ record.topic(),
+ record.partition(),
+ record.key(),
+ record.value()
+ );
+
+ handler.accept(record);
}
}
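This is the heart of the change: the entire poll loop, including subscribe(), the ConsumerRebalanceListener, the lock/condition based start/stop machinery and the per-partition statistics, collapses into a single @KafkaListener method. Polling, deserialization, offset commits and wakeup handling now happen inside the listener container; the method only ever sees one ConsumerRecord at a time. For completeness, spring-kafka can also inject key and metadata individually instead of the full record. A sketch of that variant for the same class, using the spring-kafka 2.x annotations (the header-constant names vary between versions):

    @KafkaListener(topics = "${consumer.topic}")
    public void receive(
        @Payload V value,
        @Header(KafkaHeaders.RECEIVED_MESSAGE_KEY) K key,
        @Header(KafkaHeaders.RECEIVED_TOPIC) String topic,
        @Header(KafkaHeaders.RECEIVED_PARTITION_ID) int partition,
        @Header(KafkaHeaders.OFFSET) long offset)
    {
      log.info("{} - {}: {}/{} - {}={}", id, offset, topic, partition, key, value);
    }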
group-id: ${consumer.group-id}
topic: ${consumer.topic}
auto-offset-reset: ${consumer.auto-offset-reset}
+spring:
+ kafka:
+ consumer:
+ bootstrap-servers: ${consumer.bootstrap-server}
+ client-id: ${consumer.client-id}
+ auto-offset-reset: ${consumer.auto-offset-reset}
+ group-id: ${consumer.group-id}
+ value-deserializer: org.apache.kafka.common.serialization.LongDeserializer
logging:
level:
root: INFO
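The new spring.kafka.consumer block is what feeds Spring Boot's auto-configured ConsumerFactory. Only the value deserializer has to be declared, because Spring Boot defaults both deserializers to StringDeserializer. Note that the metadata.max.age.ms=1000 setting from the removed Properties block is silently dropped here; if it is still wanted, arbitrary client settings can be passed through (a sketch, not part of this commit):

    spring:
      kafka:
        consumer:
          properties:
            metadata.max.age.ms: 1000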
<logger name="de.juplo" level="TRACE"/>
<!-- logger name="org.apache.kafka.clients" level="DEBUG" / -->
+ <logger name="org.springframework.kafka" level="DEBUG"/>
<root level="INFO">
<appender-ref ref="STDOUT" />
import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.*;
import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.autoconfigure.kafka.KafkaAutoConfiguration;
import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Import;
+import org.springframework.context.annotation.Primary;
import org.springframework.kafka.test.context.EmbeddedKafka;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
import java.time.Duration;
import java.util.*;
import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Function;
import static org.awaitility.Awaitility.*;
-@SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
+@SpringJUnitConfig(
+ initializers = ConfigDataApplicationContextInitializer.class,
+ classes = {
+ EndlessConsumer.class,
+ KafkaAutoConfiguration.class,
+ ApplicationTests.Configuration.class })
@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
@TestPropertySource(
properties = {
@Autowired
KafkaProducer<String, Bytes> kafkaProducer;
@Autowired
- KafkaConsumer<String, Long> kafkaConsumer;
- @Autowired
KafkaConsumer<Bytes, Bytes> offsetConsumer;
@Autowired
ApplicationProperties properties;
@Autowired
- ExecutorService executor;
+ RecordHandler recordHandler;
- Consumer<ConsumerRecord<String, Long>> testHandler;
- EndlessConsumer<String, Long> endlessConsumer;
Map<TopicPartition, Long> oldOffsets;
Map<TopicPartition, Long> newOffsets;
send100Messages(i -> new Bytes(longSerializer.serialize(TOPIC, i)));
Set<ConsumerRecord<String, Long>> received = new HashSet<>();
- testHandler = record -> received.add(record);
+ recordHandler.testHandler = record -> received.add(record);
await("100 records received")
.atMost(Duration.ofSeconds(30))
await("Consumer failed")
.atMost(Duration.ofSeconds(30))
- .until(() -> !endlessConsumer.running());
+ .untilAsserted(() -> checkSeenOffsetsForProgress());
- checkSeenOffsetsForProgress();
- compareToCommitedOffsets(newOffsets);
-
- endlessConsumer.start();
- await("Consumer failed")
- .atMost(Duration.ofSeconds(30))
- .until(() -> !endlessConsumer.running());
-
- checkSeenOffsetsForProgress();
compareToCommitedOffsets(newOffsets);
}
@BeforeEach
public void init()
{
- testHandler = record -> {} ;
+ recordHandler.testHandler = (record) -> {};
oldOffsets = new HashMap<>();
newOffsets = new HashMap<>();
newOffsets.put(tp, offset - 1);
});
- Consumer<ConsumerRecord<String, Long>> captureOffsetAndExecuteTestHandler =
+ recordHandler.captureOffsets =
record ->
- {
newOffsets.put(
new TopicPartition(record.topic(), record.partition()),
record.offset());
- testHandler.accept(record);
- };
-
- endlessConsumer =
- new EndlessConsumer<>(
- executor,
- properties.getClientId(),
- properties.getTopic(),
- kafkaConsumer,
- captureOffsetAndExecuteTestHandler);
-
- endlessConsumer.start();
}
- @AfterEach
- public void deinit()
+
+ public static class RecordHandler implements Consumer<ConsumerRecord<String, Long>>
{
- try
- {
- endlessConsumer.stop();
- }
- catch (Exception e)
+ Consumer<ConsumerRecord<String, Long>> captureOffsets;
+ Consumer<ConsumerRecord<String, Long>> testHandler;
+
+
+ @Override
+ public void accept(ConsumerRecord<String, Long> record)
{
- log.info("Exception while stopping the consumer: {}", e.toString());
+ captureOffsets
+ .andThen(testHandler)
+ .accept(record);
}
}
-
@TestConfiguration
@Import(ApplicationConfiguration.class)
public static class Configuration
{
+ @Primary
+ @Bean
+ public Consumer<ConsumerRecord<String, Long>> testHandler()
+ {
+ return new RecordHandler();
+ }
+
@Bean
KafkaProducer<String, Bytes> kafkaProducer(ApplicationProperties properties)
{