depends_on:
- zookeeper
- mongo:
- image: mongo:4.4.13
- ports:
- - 27017:27017
- environment:
- MONGO_INITDB_ROOT_USERNAME: juplo
- MONGO_INITDB_ROOT_PASSWORD: training
-
- express:
- image: mongo-express
- ports:
- - 8090:8081
- environment:
- ME_CONFIG_MONGODB_ADMINUSERNAME: juplo
- ME_CONFIG_MONGODB_ADMINPASSWORD: training
- ME_CONFIG_MONGODB_URL: mongodb://juplo:training@mongo:27017/
- depends_on:
- - mongo
-
kafka-ui:
image: provectuslabs/kafka-ui:0.3.3
ports:
command: sleep infinity
producer:
- image: juplo/endless-producer:1.0-SNAPSHOT
+ image: juplo/endless-long-producer:1.0-SNAPSHOT
ports:
- 8080:8080
environment:
server.port: 8080
producer.bootstrap-server: kafka:9092
producer.client-id: producer
- producer.topic: test
producer.throttle-ms: 10
environment:
server.port: 8080
consumer.bootstrap-server: kafka:9092
- consumer.client-id: my-group
+ consumer.client-id: consumer
- consumer.topic: test
- spring.data.mongodb.uri: mongodb://juplo:training@mongo:27017
- spring.data.mongodb.database: juplo
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
</dependency>
- <dependency>
- <groupId>org.springframework.boot</groupId>
- <artifactId>spring-boot-starter-data-mongodb</artifactId>
- </dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-validation</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-test</artifactId>
<scope>test</scope>
</dependency>
- <dependency>
- <groupId>de.flapdoodle.embed</groupId>
- <artifactId>de.flapdoodle.embed.mongo</artifactId>
- <scope>test</scope>
- </dependency>
+ <dependency>
+ <groupId>org.springframework.kafka</groupId>
+ <artifactId>spring-kafka-test</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.awaitility</groupId>
+ <artifactId>awaitility</artifactId>
+ <scope>test</scope>
+ </dependency>
</dependencies>
<build>
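The embedded-MongoDB test dependency is replaced by two new test helpers: spring-kafka-test provides the embedded broker that the tests below run against, and Awaitility polls for asynchronous conditions instead of sleeping. A minimal, self-contained sketch of the Awaitility pattern used in the tests (class and condition are hypothetical):

package de.juplo.kafka;

import org.awaitility.Awaitility;

import java.time.Duration;
import java.util.concurrent.atomic.AtomicInteger;


// Hypothetical mini-example, not part of the commit: poll until an
// asynchronous condition holds, with an upper bound on the waiting time.
public class AwaitilitySketch
{
  public static void main(String[] args)
  {
    AtomicInteger counter = new AtomicInteger();
    new Thread(() -> { for (int i = 0; i < 100; i++) counter.incrementAndGet(); }).start();

    Awaitility
        .await("100 increments")
        .atMost(Duration.ofSeconds(5))
        .pollInterval(Duration.ofMillis(10))
        .until(() -> counter.get() >= 100);
  }
}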
--- /dev/null
+ package de.juplo.kafka;
+
+ import org.apache.kafka.clients.consumer.KafkaConsumer;
+ import org.apache.kafka.common.serialization.LongDeserializer;
+ import org.apache.kafka.common.serialization.StringDeserializer;
+ import org.springframework.boot.context.properties.EnableConfigurationProperties;
+ import org.springframework.context.annotation.Bean;
+ import org.springframework.context.annotation.Configuration;
+
-import java.time.Clock;
+ import java.util.Properties;
+ import java.util.concurrent.ExecutorService;
+ import java.util.concurrent.Executors;
+
+
+ @Configuration
+ @EnableConfigurationProperties(ApplicationProperties.class)
+ public class ApplicationConfiguration
+ {
+ @Bean
+ public KeyCountingRecordHandler keyCountingRecordHandler()
+ {
+ return new KeyCountingRecordHandler();
+ }
+
+ @Bean
+ public KeyCountingRebalanceListener keyCountingRebalanceListener(
+ KeyCountingRecordHandler keyCountingRecordHandler,
- PartitionStatisticsRepository repository,
+ ApplicationProperties properties)
+ {
+ return new KeyCountingRebalanceListener(
+ keyCountingRecordHandler,
- repository,
- properties.getClientId(),
- Clock.systemDefaultZone(),
- properties.getCommitInterval());
++ properties.getClientId());
+ }
+
+ @Bean
+ public EndlessConsumer<String, Long> endlessConsumer(
+ KafkaConsumer<String, Long> kafkaConsumer,
+ ExecutorService executor,
+ KeyCountingRebalanceListener keyCountingRebalanceListener,
+ KeyCountingRecordHandler keyCountingRecordHandler,
+ ApplicationProperties properties)
+ {
+ return
+ new EndlessConsumer<>(
+ executor,
+ properties.getClientId(),
+ properties.getTopic(),
+ kafkaConsumer,
+ keyCountingRebalanceListener,
+ keyCountingRecordHandler);
+ }
+
+ @Bean
+ public ExecutorService executor()
+ {
+ return Executors.newSingleThreadExecutor();
+ }
+
+ @Bean(destroyMethod = "close")
+ public KafkaConsumer<String, Long> kafkaConsumer(ApplicationProperties properties)
+ {
+ Properties props = new Properties();
+
+ props.put("bootstrap.servers", properties.getBootstrapServer());
+ props.put("partition.assignment.strategy", "org.apache.kafka.clients.consumer.CooperativeStickyAssignor");
+ props.put("group.id", properties.getGroupId());
+ props.put("client.id", properties.getClientId());
+ props.put("auto.offset.reset", properties.getAutoOffsetReset());
+ props.put("auto.commit.interval.ms", (int)properties.getCommitInterval().toMillis());
+ props.put("metadata.max.age.ms", "1000");
+ props.put("key.deserializer", StringDeserializer.class.getName());
+ props.put("value.deserializer", LongDeserializer.class.getName());
+
+ return new KafkaConsumer<>(props);
+ }
+ }
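For readability, the kafkaConsumer bean uses the raw configuration keys. The same settings can also be written with the ConsumerConfig constants from kafka-clients; a sketch with identical behaviour (the raw strings above are exactly what these constants resolve to):

package de.juplo.kafka;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.CooperativeStickyAssignor;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.LongDeserializer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.util.Properties;


// Sketch only: the consumer configuration from above, expressed with the
// ConsumerConfig constants instead of raw string keys.
public class KafkaConsumerFactorySketch
{
  public static KafkaConsumer<String, Long> create(ApplicationProperties properties)
  {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, properties.getBootstrapServer());
    props.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, CooperativeStickyAssignor.class.getName());
    props.put(ConsumerConfig.GROUP_ID_CONFIG, properties.getGroupId());
    props.put(ConsumerConfig.CLIENT_ID_CONFIG, properties.getClientId());
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, properties.getAutoOffsetReset());
    props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, (int) properties.getCommitInterval().toMillis());
    props.put(ConsumerConfig.METADATA_MAX_AGE_CONFIG, "1000");
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class.getName());
    return new KafkaConsumer<>(props);
  }
}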
package de.juplo.kafka;
+ import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
- import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
- import org.apache.kafka.clients.consumer.ConsumerRecord;
- import org.apache.kafka.clients.consumer.ConsumerRecords;
- import org.apache.kafka.clients.consumer.KafkaConsumer;
+ import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
+ import org.apache.kafka.common.errors.RecordDeserializationException;
import org.apache.kafka.common.errors.WakeupException;
- import org.apache.kafka.common.serialization.StringDeserializer;
import javax.annotation.PreDestroy;
import java.time.Duration;
@Slf4j
- public class EndlessConsumer implements Runnable
+ @RequiredArgsConstructor
+ public class EndlessConsumer<K, V> implements Runnable
{
private final ExecutorService executor;
- private final String bootstrapServer;
- private final String groupId;
private final String id;
private final String topic;
- private final String autoOffsetReset;
+ private final Consumer<K, V> consumer;
- private final PollIntervalAwareConsumerRebalanceListener pollIntervalAwareRebalanceListener;
++ private final ConsumerRebalanceListener consumerRebalanceListener;
+ private final RecordHandler<K, V> handler;
private final Lock lock = new ReentrantLock();
private final Condition condition = lock.newCondition();
private boolean running = false;
private Exception exception;
private long consumed = 0;
- private KafkaConsumer<String, String> consumer = null;
- private final Map<Integer, Map<String, Integer>> seen = new HashMap<>();
-
-
- public EndlessConsumer(
- ExecutorService executor,
- String bootstrapServer,
- String groupId,
- String clientId,
- String topic,
- String autoOffsetReset)
- {
- this.executor = executor;
- this.bootstrapServer = bootstrapServer;
- this.groupId = groupId;
- this.id = clientId;
- this.topic = topic;
- this.autoOffsetReset = autoOffsetReset;
- }
@Override
public void run()
{
try
{
- Properties props = new Properties();
- props.put("bootstrap.servers", bootstrapServer);
- props.put("group.id", groupId);
- props.put("client.id", id);
- props.put("auto.offset.reset", autoOffsetReset);
- props.put("metadata.max.age.ms", "1000");
- props.put("key.deserializer", StringDeserializer.class.getName());
- props.put("value.deserializer", StringDeserializer.class.getName());
-
- this.consumer = new KafkaConsumer<>(props);
-
log.info("{} - Subscribing to topic {}", id, topic);
- consumer.subscribe(Arrays.asList(topic), new ConsumerRebalanceListener()
- {
- @Override
- public void onPartitionsRevoked(Collection<TopicPartition> partitions)
- {
- partitions.forEach(tp ->
- {
- log.info("{} - removing partition: {}", id, tp);
- Map<String, Integer> removed = seen.remove(tp.partition());
- for (String key : removed.keySet())
- {
- log.info(
- "{} - Seen {} messages for partition={}|key={}",
- id,
- removed.get(key),
- tp.partition(),
- key);
- }
- });
- }
-
- @Override
- public void onPartitionsAssigned(Collection<TopicPartition> partitions)
- {
- partitions.forEach(tp ->
- {
- log.info("{} - adding partition: {}", id, tp);
- seen.put(tp.partition(), new HashMap<>());
- });
- }
- });
- consumer.subscribe(Arrays.asList(topic), pollIntervalAwareRebalanceListener);
++ consumer.subscribe(Arrays.asList(topic), consumerRebalanceListener);
while (true)
{
- ConsumerRecords<String, String> records =
+ ConsumerRecords<K, V> records =
consumer.poll(Duration.ofSeconds(1));
// Do something with the data...
log.info("{} - Received {} messages", id, records.count());
- for (ConsumerRecord<String, String> record : records)
+ for (ConsumerRecord<K, V> record : records)
{
- consumed++;
log.info(
"{} - {}: {}/{} - {}={}",
id,
record.offset(),
record.topic(),
record.partition(),
record.key(),
record.value()
);
- Integer partition = record.partition();
- String key = record.key() == null ? "NULL" : record.key();
- Map<String, Integer> byKey = seen.get(partition);
+ handler.accept(record);
- if (!byKey.containsKey(key))
- byKey.put(key, 0);
-
- int seenByKey = byKey.get(key);
- seenByKey++;
- byKey.put(key, seenByKey);
+ consumed++;
}
-
- pollIntervalAwareRebalanceListener.beforeNextPoll();
}
}
catch(WakeupException e)
{
- log.info("{} - RIIING!", id);
+ log.info("{} - RIIING! Request to stop consumption - commiting current offsets!", id);
+ consumer.commitSync();
shutdown();
}
+ catch(RecordDeserializationException e)
+ {
+ TopicPartition tp = e.topicPartition();
+ long offset = e.offset();
+ log.error(
+ "{} - Could not deserialize message on topic {} with offset={}: {}",
+ id,
+ tp,
+ offset,
+ e.getCause().toString());
+
+ consumer.commitSync();
+ shutdown(e);
+ }
catch(Exception e)
{
log.error("{} - Unexpected error: {}", id, e.toString(), e);
shutdown(e);
}
finally
{
- log.info("{} - Closing the KafkaConsumer", id);
- consumer.close();
log.info("{} - Consumer-Thread exiting", id);
}
}
lock.lock();
try
{
- running = false;
- exception = e;
- condition.signal();
+ try
+ {
+ log.info("{} - Unsubscribing from topic {}", id, topic);
+ consumer.unsubscribe();
+ }
+ catch (Exception ue)
+ {
+ log.error(
+ "{} - Error while unsubscribing from topic {}: {}",
+ id,
+ topic,
+ ue.toString());
+ }
+ finally
+ {
+ running = false;
+ exception = e;
+ condition.signal();
+ }
}
finally
{
lock.unlock();
}
}
- public Map<Integer, Map<String, Integer>> getSeen()
- {
- return seen;
- }
-
public void start()
{
lock.lock();
}
}
- public synchronized void stop() throws ExecutionException, InterruptedException
+ public synchronized void stop() throws InterruptedException
{
lock.lock();
try
public void destroy() throws ExecutionException, InterruptedException
{
log.info("{} - Destroy!", id);
- try
- {
- stop();
- }
- catch (IllegalStateException e)
- {
- log.info("{} - Was already stopped", id);
- }
- catch (Exception e)
- {
- log.error("{} - Unexpected exception while trying to stop the consumer", id, e);
- }
- finally
- {
- log.info("{}: Consumed {} messages in total, exiting!", id, consumed);
- }
+ log.info("{}: Consumed {} messages in total, exiting!", id, consumed);
}
public boolean running()
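Because all collaborators are now constructor arguments (via @RequiredArgsConstructor), the generic EndlessConsumer<K, V> can also be wired without Spring; a sketch mirroring the bean definitions from ApplicationConfiguration (the client-id and topic values are placeholders):

package de.juplo.kafka;

import org.apache.kafka.clients.consumer.Consumer;

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;


// Sketch only: manual wiring of the refactored, generic consumer-runner.
public class EndlessConsumerWiringSketch
{
  public static EndlessConsumer<String, Long> wire(Consumer<String, Long> kafkaConsumer)
  {
    ExecutorService executor = Executors.newSingleThreadExecutor();
    KeyCountingRecordHandler handler = new KeyCountingRecordHandler();
    KeyCountingRebalanceListener rebalanceListener =
        new KeyCountingRebalanceListener(handler, "consumer");

    EndlessConsumer<String, Long> endlessConsumer =
        new EndlessConsumer<>(
            executor,
            "consumer", // client-id, only used as logging prefix here
            "test",     // topic
            kafkaConsumer,
            rebalanceListener,
            handler);
    endlessConsumer.start();
    return endlessConsumer;
  }
}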
--- /dev/null
+ package de.juplo.kafka;
+
+ import lombok.RequiredArgsConstructor;
+ import lombok.extern.slf4j.Slf4j;
++import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
+ import org.apache.kafka.common.TopicPartition;
+
-import java.time.Clock;
-import java.time.Duration;
-import java.time.Instant;
+ import java.util.Collection;
++import java.util.HashMap;
+ import java.util.Map;
+
+
+ @RequiredArgsConstructor
+ @Slf4j
-public class KeyCountingRebalanceListener implements PollIntervalAwareConsumerRebalanceListener
++public class KeyCountingRebalanceListener implements ConsumerRebalanceListener
+ {
+ private final KeyCountingRecordHandler handler;
- private final PartitionStatisticsRepository repository;
- private final Clock clock;
- private final Duration commitInterval;
-
- private Instant lastCommit = Instant.EPOCH;
+ private final String id;
+
+ @Override
+ public void onPartitionsAssigned(Collection<TopicPartition> partitions)
+ {
+ partitions.forEach(tp ->
+ {
+ Integer partition = tp.partition();
+ log.info("{} - adding partition: {}", id, partition);
- StatisticsDocument document =
- repository
- .findById(Integer.toString(partition))
- .orElse(new StatisticsDocument(partition));
- handler.addPartition(partition, document.statistics);
++ handler.addPartition(partition, new HashMap<>());
+ });
+ }
+
+ @Override
+ public void onPartitionsRevoked(Collection<TopicPartition> partitions)
+ {
+ partitions.forEach(tp ->
+ {
+ Integer partition = tp.partition();
+ log.info("{} - removing partition: {}", id, partition);
+ Map<String, Long> removed = handler.removePartition(partition);
+ for (String key : removed.keySet())
+ {
+ log.info(
+ "{} - Seen {} messages for partition={}|key={}",
+ id,
+ removed.get(key),
+ partition,
+ key);
+ }
- repository.save(new StatisticsDocument(partition, removed));
-
-
- @Override
- public void beforeNextPoll()
- {
- if (lastCommit.plus(commitInterval).isBefore(clock.instant()))
- {
- log.debug("Storing data, last commit: {}", lastCommit);
- handler.getSeen().forEach((partiton, statistics) -> repository.save(
- new StatisticsDocument(
- partiton,
- statistics)));
- lastCommit = clock.instant();
- }
- }
+ });
+ }
+ }
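KeyCountingRecordHandler itself is not part of this diff. Inferred from the calls above (accept(), addPartition(), removePartition()), it presumably looks roughly like the following sketch:

package de.juplo.kafka;

import org.apache.kafka.clients.consumer.ConsumerRecord;

import java.util.HashMap;
import java.util.Map;


// Sketch inferred from usage: counts the seen messages per partition and key.
public class KeyCountingRecordHandler implements RecordHandler<String, Long>
{
  private final Map<Integer, Map<String, Long>> seen = new HashMap<>();

  @Override
  public void accept(ConsumerRecord<String, Long> record)
  {
    String key = record.key() == null ? "NULL" : record.key();
    seen
        .get(record.partition())
        .compute(key, (k, count) -> count == null ? 1L : count + 1);
  }

  public void addPartition(Integer partition, Map<String, Long> statistics)
  {
    seen.put(partition, statistics);
  }

  public Map<String, Long> removePartition(Integer partition)
  {
    return seen.remove(partition);
  }
}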
--- /dev/null
+ package de.juplo.kafka;
+
+ import org.apache.kafka.clients.consumer.ConsumerRecord;
+
+ import java.util.function.Consumer;
+
+
+ public interface RecordHandler<K, V> extends Consumer<ConsumerRecord<K,V>>
+ {
- default void beforeNextPoll() {}
+ }
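Since RecordHandler only narrows java.util.function.Consumer and adds no methods of its own, it remains a functional interface; a hypothetical example of an inline, lambda-based handler:

package de.juplo.kafka;


// Hypothetical example: a lambda is already a complete RecordHandler.
public class RecordHandlerLambdaSketch
{
  public static final RecordHandler<String, Long> LOGGING_HANDLER =
      record ->
          System.out.printf(
              "%s/%d - %s=%d%n",
              record.topic(),
              record.partition(),
              record.key(),
              record.value());
}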
client-id: DEV
topic: test
auto-offset-reset: earliest
+ commit-interval: 5s
management:
endpoint:
shutdown:
group-id: ${consumer.group-id}
topic: ${consumer.topic}
auto-offset-reset: ${consumer.auto-offset-reset}
-spring:
- data:
- mongodb:
- uri: mongodb://juplo:training@localhost:27017
- database: juplo
logging:
level:
root: INFO
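The consumer.* keys above are bound to the ApplicationProperties bean enabled in ApplicationConfiguration. That class is not part of this diff; inferred from the getters used above, it presumably looks roughly like this (the Lombok accessors are an assumption):

package de.juplo.kafka;

import lombok.Getter;
import lombok.Setter;
import org.springframework.boot.context.properties.ConfigurationProperties;

import java.time.Duration;


// Sketch inferred from the getters used in ApplicationConfiguration.
@ConfigurationProperties(prefix = "consumer")
@Getter
@Setter
public class ApplicationProperties
{
  private String bootstrapServer;
  private String groupId;
  private String clientId;
  private String topic;
  private String autoOffsetReset;
  private Duration commitInterval; // bound from "commit-interval: 5s"
}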
--- /dev/null
+ package de.juplo.kafka;
+
+ import org.junit.jupiter.api.Test;
+ import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
+ import org.springframework.boot.test.context.SpringBootTest;
+ import org.springframework.boot.test.web.client.TestRestTemplate;
+ import org.springframework.boot.test.web.server.LocalServerPort;
+ import org.springframework.kafka.test.context.EmbeddedKafka;
+
+ import static de.juplo.kafka.ApplicationIT.TOPIC;
+ import static org.assertj.core.api.Assertions.assertThat;
+
+
+ @SpringBootTest(
+ webEnvironment = SpringBootTest.WebEnvironment.RANDOM_PORT,
+ properties = {
+ "consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
+ "consumer.topic=" + TOPIC,
+ "spring.mongodb.embedded.version=4.4.13" })
+ @EmbeddedKafka(topics = TOPIC)
-@AutoConfigureDataMongo
+ public class ApplicationIT
+ {
+ public static final String TOPIC = "FOO";
+
+ @LocalServerPort
+ private int port;
+
+ @Autowired
+ private TestRestTemplate restTemplate;
+
+
+
+ @Test
+ public void testApplicationStartup()
+ {
+ String health =
+ restTemplate.getForObject(
+ "http://localhost:" + port + "/actuator/health",
+ String.class);
+ assertThat(health).contains("UP");
+ }
+ }
--- /dev/null
+ package de.juplo.kafka;
+
+ import lombok.extern.slf4j.Slf4j;
+ import org.apache.kafka.clients.consumer.ConsumerRecord;
+ import org.apache.kafka.clients.consumer.KafkaConsumer;
+ import org.apache.kafka.clients.producer.KafkaProducer;
+ import org.apache.kafka.clients.producer.ProducerRecord;
+ import org.apache.kafka.common.TopicPartition;
+ import org.apache.kafka.common.errors.RecordDeserializationException;
+ import org.apache.kafka.common.serialization.*;
+ import org.apache.kafka.common.utils.Bytes;
+ import org.junit.jupiter.api.*;
+ import org.springframework.beans.factory.annotation.Autowired;
+ import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
- "consumer.commit-interval=1s",
- "spring.mongodb.embedded.version=4.4.13" })
+ import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
+ import org.springframework.boot.test.context.TestConfiguration;
+ import org.springframework.context.annotation.Bean;
+ import org.springframework.context.annotation.Import;
+ import org.springframework.kafka.test.context.EmbeddedKafka;
+ import org.springframework.test.context.TestPropertySource;
+ import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
+
+ import java.time.Duration;
+ import java.util.*;
+ import java.util.concurrent.ExecutionException;
+ import java.util.concurrent.ExecutorService;
+ import java.util.function.BiConsumer;
+ import java.util.stream.Collectors;
+ import java.util.stream.IntStream;
+
+ import static de.juplo.kafka.ApplicationTests.PARTITIONS;
+ import static de.juplo.kafka.ApplicationTests.TOPIC;
+ import static org.assertj.core.api.Assertions.*;
+ import static org.awaitility.Awaitility.*;
+
+
+ @SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
+ @TestMethodOrder(MethodOrderer.OrderAnnotation.class)
+ @TestPropertySource(
+ properties = {
+ "consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
+ "consumer.topic=" + TOPIC,
- "consumer.commit-interval=1s",
- "spring.mongodb.embedded.version=4.4.13" })
++ "consumer.commit-interval=1s" })
+ @EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
+ @EnableAutoConfiguration
-@AutoConfigureDataMongo
+ @Slf4j
+ class ApplicationTests
+ {
+ public static final String TOPIC = "FOO";
+ public static final int PARTITIONS = 10;
+
+
+ StringSerializer stringSerializer = new StringSerializer();
+
+ @Autowired
+ Serializer valueSerializer;
+ @Autowired
+ KafkaProducer<String, Bytes> kafkaProducer;
+ @Autowired
+ KafkaConsumer<String, Long> kafkaConsumer;
+ @Autowired
+ KafkaConsumer<Bytes, Bytes> offsetConsumer;
+ @Autowired
+ ApplicationProperties properties;
+ @Autowired
+ ExecutorService executor;
+ @Autowired
+ KeyCountingRebalanceListener keyCountingRebalanceListener;
+ @Autowired
+ KeyCountingRecordHandler keyCountingRecordHandler;
+
+ EndlessConsumer<String, Long> endlessConsumer;
+ Map<TopicPartition, Long> oldOffsets;
+ Map<TopicPartition, Long> newOffsets;
+ Set<ConsumerRecord<String, Long>> receivedRecords;
+
+
+ /** Test methods */
+
+ @Test
+ void commitsCurrentOffsetsOnSuccess() throws ExecutionException, InterruptedException
+ {
+ send100Messages((partition, key, counter) ->
+ {
+ Bytes value = new Bytes(valueSerializer.serialize(TOPIC, counter));
+ return new ProducerRecord<>(TOPIC, partition, key, value);
+ });
+
+ await("100 records received")
+ .atMost(Duration.ofSeconds(30))
+ .pollInterval(Duration.ofSeconds(1))
+ .until(() -> receivedRecords.size() >= 100);
+
+ await("Offsets committed")
+ .atMost(Duration.ofSeconds(10))
+ .pollInterval(Duration.ofSeconds(1))
+ .untilAsserted(() ->
+ {
+ checkSeenOffsetsForProgress();
+ compareToCommitedOffsets(newOffsets);
+ });
+
+ assertThatExceptionOfType(IllegalStateException.class)
+ .describedAs("Consumer should still be running")
+ .isThrownBy(() -> endlessConsumer.exitStatus());
+ }
+
+ @Test
+ void commitsOffsetOfErrorForReprocessingOnDeserializationError()
+ {
+ send100Messages((partition, key, counter) ->
+ {
+ Bytes value = counter == 77
+ ? new Bytes(stringSerializer.serialize(TOPIC, "BOOM!"))
+ : new Bytes(valueSerializer.serialize(TOPIC, counter));
+ return new ProducerRecord<>(TOPIC, partition, key, value);
+ });
+
+ await("Consumer failed")
+ .atMost(Duration.ofSeconds(30))
+ .pollInterval(Duration.ofSeconds(1))
+ .until(() -> !endlessConsumer.running());
+
+ checkSeenOffsetsForProgress();
+ compareToCommitedOffsets(newOffsets);
+
+ endlessConsumer.start();
+ await("Consumer failed")
+ .atMost(Duration.ofSeconds(30))
+ .pollInterval(Duration.ofSeconds(1))
+ .until(() -> !endlessConsumer.running());
+
+ checkSeenOffsetsForProgress();
+ compareToCommitedOffsets(newOffsets);
+ assertThat(receivedRecords.size())
+ .describedAs("Received not all sent events")
+ .isLessThan(100);
+
+ assertThatNoException()
+ .describedAs("Consumer should not be running")
+ .isThrownBy(() -> endlessConsumer.exitStatus());
+ assertThat(endlessConsumer.exitStatus())
+ .describedAs("Consumer should have exited abnormally")
+ .containsInstanceOf(RecordDeserializationException.class);
+ }
+
+
+ /** Helper methods for the verification of expectations */
+
+ void compareToCommitedOffsets(Map<TopicPartition, Long> offsetsToCheck)
+ {
+ doForCurrentOffsets((tp, offset) ->
+ {
+ Long expected = offsetsToCheck.get(tp) + 1;
+ log.debug("Checking, if the offset for {} is {}", tp, expected);
+ assertThat(offset)
+ .describedAs("Committed offset corresponds to the offset of the consumer")
+ .isEqualTo(expected);
+ });
+ }
+
+ void checkSeenOffsetsForProgress()
+ {
+ // Make sure that some messages were consumed!
+ Set<TopicPartition> withProgress = new HashSet<>();
+ partitions().forEach(tp ->
+ {
+ Long oldOffset = oldOffsets.get(tp) + 1;
+ Long newOffset = newOffsets.get(tp) + 1;
+ if (!oldOffset.equals(newOffset))
+ {
+ log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
+ withProgress.add(tp);
+ }
+ });
+ assertThat(withProgress)
+ .describedAs("Some offsets must have changed, compared to the old offset-positions")
+ .isNotEmpty();
+ }
+
+
+ /** Helper methods for setting up and running the tests */
+
+ void seekToEnd()
+ {
+ offsetConsumer.assign(partitions());
+ offsetConsumer.seekToEnd(partitions());
+ partitions().forEach(tp ->
+ {
+ // seekToEnd() works lazily: it only takes effect on poll()/position()
+ Long offset = offsetConsumer.position(tp);
+ log.info("New position for {}: {}", tp, offset);
+ });
+ // The new positions must be committed!
+ offsetConsumer.commitSync();
+ offsetConsumer.unsubscribe();
+ }
+
+ void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
+ {
+ offsetConsumer.assign(partitions());
+ partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
+ offsetConsumer.unsubscribe();
+ }
+
+ List<TopicPartition> partitions()
+ {
+ return
+ IntStream
+ .range(0, PARTITIONS)
+ .mapToObj(partition -> new TopicPartition(TOPIC, partition))
+ .collect(Collectors.toList());
+ }
+
+
+ public interface RecordGenerator<K, V>
+ {
+ public ProducerRecord<String, Bytes> generate(int partition, String key, long counter);
+ }
+
+ void send100Messages(RecordGenerator recordGenerator)
+ {
+ long i = 0;
+
+ for (int partition = 0; partition < 10; partition++)
+ {
+ for (int key = 0; key < 10; key++)
+ {
+ ProducerRecord<String, Bytes> record =
+ recordGenerator.generate(partition, Integer.toString(partition*10+key%2), ++i);
+
+ kafkaProducer.send(record, (metadata, e) ->
+ {
+ if (metadata != null)
+ {
+ log.debug(
+ "{}|{} - {}={}",
+ metadata.partition(),
+ metadata.offset(),
+ record.key(),
+ record.value());
+ }
+ else
+ {
+ log.warn(
+ "Exception for {}={}: {}",
+ record.key(),
+ record.value(),
+ e.toString());
+ }
+ });
+ }
+ }
+ }
+
+
+ @BeforeEach
+ public void init()
+ {
+ seekToEnd();
+
+ oldOffsets = new HashMap<>();
+ newOffsets = new HashMap<>();
+ receivedRecords = new HashSet<>();
+
+ doForCurrentOffsets((tp, offset) ->
+ {
+ oldOffsets.put(tp, offset - 1);
+ newOffsets.put(tp, offset - 1);
+ });
+
+ TestRecordHandler<String, Long> captureOffsetAndExecuteTestHandler =
+ new TestRecordHandler<String, Long>(keyCountingRecordHandler) {
+ @Override
+ public void onNewRecord(ConsumerRecord<String, Long> record)
+ {
+ newOffsets.put(
+ new TopicPartition(record.topic(), record.partition()),
+ record.offset());
+ receivedRecords.add(record);
+ }
+ };
+
+ endlessConsumer =
+ new EndlessConsumer<>(
+ executor,
+ properties.getClientId(),
+ properties.getTopic(),
+ kafkaConsumer,
+ keyCountingRebalanceListener,
+ captureOffsetAndExecuteTestHandler);
+
+ endlessConsumer.start();
+ }
+
+ @AfterEach
+ public void deinit()
+ {
+ try
+ {
+ endlessConsumer.stop();
+ }
+ catch (Exception e)
+ {
+ log.info("Exception while stopping the consumer: {}", e.toString());
+ }
+ }
+
+
+ @TestConfiguration
+ @Import(ApplicationConfiguration.class)
+ public static class Configuration
+ {
+ @Bean
+ Serializer<Long> serializer()
+ {
+ return new LongSerializer();
+ }
+
+ @Bean
+ KafkaProducer<String, Bytes> kafkaProducer(ApplicationProperties properties)
+ {
+ Properties props = new Properties();
+ props.put("bootstrap.servers", properties.getBootstrapServer());
+ props.put("linger.ms", 100);
+ props.put("key.serializer", StringSerializer.class.getName());
+ props.put("value.serializer", BytesSerializer.class.getName());
+
+ return new KafkaProducer<>(props);
+ }
+
+ @Bean
+ KafkaConsumer<Bytes, Bytes> offsetConsumer(ApplicationProperties properties)
+ {
+ Properties props = new Properties();
+ props.put("bootstrap.servers", properties.getBootstrapServer());
+ props.put("client.id", "OFFSET-CONSUMER");
+ props.put("group.id", properties.getGroupId());
+ props.put("key.deserializer", BytesDeserializer.class.getName());
+ props.put("value.deserializer", BytesDeserializer.class.getName());
+
+ return new KafkaConsumer<>(props);
+ }
+ }
+ }
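doForCurrentOffsets() reads the committed offsets indirectly, via assign() and position(); note that position() falls back to the auto.offset.reset policy when nothing has been committed yet. A hypothetical alternative queries them directly with committed():

package de.juplo.kafka;

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.utils.Bytes;

import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.function.BiConsumer;


// Hypothetical variant: read the committed offsets directly; partitions
// without a commit are reported as null instead of a reset position.
public class CommittedOffsetsSketch
{
  public static void doForCommittedOffsets(
      KafkaConsumer<Bytes, Bytes> offsetConsumer,
      Collection<TopicPartition> partitions,
      BiConsumer<TopicPartition, Long> consumer)
  {
    Map<TopicPartition, OffsetAndMetadata> committed =
        offsetConsumer.committed(new HashSet<>(partitions));
    committed.forEach((tp, offsetAndMetadata) ->
        consumer.accept(tp, offsetAndMetadata == null ? null : offsetAndMetadata.offset()));
  }
}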
--- /dev/null
+ package de.juplo.kafka;
+
+ import lombok.RequiredArgsConstructor;
+ import org.apache.kafka.clients.consumer.ConsumerRecord;
+
+
+ @RequiredArgsConstructor
+ public abstract class TestRecordHandler<K, V> implements RecordHandler<K, V>
+ {
+ private final RecordHandler<K, V> handler;
+
+
+ public abstract void onNewRecord(ConsumerRecord<K, V> record);
+
+
+ @Override
+ public void accept(ConsumerRecord<K, V> record)
+ {
+ this.onNewRecord(record);
+ handler.accept(record);
+ }
-
- @Override
- public void beforeNextPoll()
- {
- handler.beforeNextPoll();
- }
+ }