From: Kai Moritz
Date: Sat, 23 Jul 2022 14:15:29 +0000 (+0200)
Subject: Merge of the revised Compose configuration ('rebalance-listener')
X-Git-Tag: wip-DEPRECATED~11^2^2
X-Git-Url: http://juplo.de/gitweb/?a=commitdiff_plain;h=0ac94b34af644f6fa5a0556fc7e2bd322167c608;hp=66863e3169440f73ff450c7ba8ea4b9662b180e0;p=demos%2Fkafka%2Ftraining

Merge of the revised Compose configuration ('rebalance-listener')
---

diff --git a/README.sh b/README.sh
index 13176d2..72f0c60 100755
--- a/README.sh
+++ b/README.sh
@@ -24,65 +24,23 @@ fi
 echo "Waiting for the Kafka-Cluster to become ready..."
 docker-compose exec cli cub kafka-ready -b kafka:9092 1 60 > /dev/null 2>&1 || exit 1
 
-docker-compose up -d kafka-ui
-
+docker-compose up setup
+docker-compose up -d producer consumer
+sleep 5
 docker-compose exec -T cli bash << 'EOF'
-echo "Creating topic with 3 partitions..."
-kafka-topics --bootstrap-server kafka:9092 --delete --if-exists --topic test
-# tag::createtopic[]
-kafka-topics --bootstrap-server kafka:9092 --create --topic test --partitions 3
-# end::createtopic[]
-kafka-topics --bootstrap-server kafka:9092 --describe --topic test
+echo "Writing poison pill into topic test..."
+# tag::poisonpill[]
+echo 'BOOM!' | kafkacat -P -b kafka:9092 -t test
+# end::poisonpill[]
 EOF
-
-docker-compose up -d consumer
-
-docker-compose up -d producer
-sleep 10
-http -v :8081/seen
-sleep 1
-http -v :8081/seen
-sleep 1
-http -v :8081/seen
-sleep 1
-http -v :8081/seen
-
+while [[ $(http 0:8081/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "Consumer is still running..."; sleep 1; done
+http -v :8081/actuator/health
+echo "Restarting consumer"
+http -v post :8081/start
+while ! [[ $(http 0:8081/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "Waiting for consumer..."; sleep 1; done
+while [[ $(http 0:8081/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "Consumer is still running..."; sleep 1; done
+http -v :8081/actuator/health
+http -v post :8081/actuator/shutdown
 docker-compose stop producer
-docker-compose exec -T cli bash << 'EOF'
-echo "Altering number of partitions from 3 to 7..."
-# tag::altertopic[]
-kafka-topics --bootstrap-server kafka:9092 --alter --topic test --partitions 7
-kafka-topics --bootstrap-server kafka:9092 --describe --topic test
-# end::altertopic[]
-EOF
-
-docker-compose start producer
-sleep 1
-http -v :8081/seen
-sleep 1
-http -v :8081/seen
-sleep 1
-http -v :8081/seen
-sleep 1
-http -v :8081/seen
-sleep 1
-http -v :8081/seen
-sleep 1
-http -v :8081/seen
-sleep 1
-http -v :8081/seen
-sleep 1
-http -v :8081/seen
-sleep 1
-http -v :8081/seen
-sleep 1
-http -v :8081/seen
-sleep 1
-http -v :8081/seen
-sleep 1
-http -v :8081/seen
-sleep 1
-http -v :8081/seen
-sleep 1
-http -v :8081/seen
-docker-compose stop producer consumer
+docker-compose ps
+docker-compose logs --tail=100 consumer
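The revised README no longer reshapes the topic; it writes a single value that the consumer cannot deserialize. The consumer in this project reads Long values, and Kafka's LongDeserializer rejects any payload that is not exactly 8 bytes, so the 5 UTF-8 bytes of 'BOOM!' act as a poison pill. A minimal, standalone demonstration of why that record fails (the class name is illustrative, not part of this commit):

    import org.apache.kafka.common.serialization.LongDeserializer;

    public class PoisonPillDemo
    {
      public static void main(String[] args)
      {
        LongDeserializer deserializer = new LongDeserializer();
        // Throws SerializationException, because the payload is 5 bytes
        // instead of the 8 bytes a long requires - exactly what hits the
        // consumer when it polls the record written by the README above.
        deserializer.deserialize("test", "BOOM!".getBytes());
      }
    }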
diff --git a/docker-compose.yml b/docker-compose.yml
index 1b067cd..159f9cb 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -24,13 +24,13 @@ services:
     depends_on:
       - zookeeper
 
-  kafka-ui:
-    image: provectuslabs/kafka-ui:0.3.3
-    ports:
-      - 8080:8080
-    environment:
-      KAFKA_CLUSTERS_0_NAME: local
-      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:9092
+  setup:
+    image: juplo/toolbox
+    command: >
+      bash -c "
+        kafka-topics --bootstrap-server kafka:9092 --delete --if-exists --topic test
+        kafka-topics --bootstrap-server kafka:9092 --create --topic test --partitions 2
+      "
 
   cli:
     image: juplo/toolbox
@@ -45,7 +45,7 @@
       producer.bootstrap-server: kafka:9092
       producer.client-id: producer
       producer.topic: test
-      producer.throttle-ms: 10
+      producer.throttle-ms: 200
 
   consumer:

diff --git a/pom.xml b/pom.xml
index 9db9d9d..1f5caab 100644
--- a/pom.xml
+++ b/pom.xml
@@ -47,6 +47,16 @@
       <artifactId>spring-boot-starter-test</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.springframework.kafka</groupId>
+      <artifactId>spring-kafka-test</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.awaitility</groupId>
+      <artifactId>awaitility</artifactId>
+      <scope>test</scope>
+    </dependency>
   </dependencies>

diff --git a/src/main/java/de/juplo/kafka/Application.java b/src/main/java/de/juplo/kafka/Application.java
index de4b66d..b5bd1b9 100644
--- a/src/main/java/de/juplo/kafka/Application.java
+++ b/src/main/java/de/juplo/kafka/Application.java
@@ -1,39 +1,63 @@
 package de.juplo.kafka;
 
+import lombok.extern.slf4j.Slf4j;
 import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.ApplicationArguments;
+import org.springframework.boot.ApplicationRunner;
 import org.springframework.boot.SpringApplication;
 import org.springframework.boot.autoconfigure.SpringBootApplication;
-import org.springframework.boot.context.properties.EnableConfigurationProperties;
-import org.springframework.context.annotation.Bean;
-import java.util.concurrent.Executors;
+import javax.annotation.PreDestroy;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
 
 
 @SpringBootApplication
-@EnableConfigurationProperties(ApplicationProperties.class)
-public class Application
+@Slf4j
+public class Application implements ApplicationRunner
 {
   @Autowired
-  ApplicationProperties properties;
+  EndlessConsumer<String, Long> endlessConsumer;
+  @Autowired
+  ExecutorService executor;
 
-  @Bean
-  public EndlessConsumer consumer()
+  @Override
+  public void run(ApplicationArguments args) throws Exception
   {
-    EndlessConsumer consumer =
-        new EndlessConsumer(
-            Executors.newFixedThreadPool(1),
-            properties.getBootstrapServer(),
-            properties.getGroupId(),
-            properties.getClientId(),
-            properties.getTopic(),
-            properties.getAutoOffsetReset());
-
-    consumer.start();
-
-    return consumer;
+    log.info("Starting EndlessConsumer");
+    endlessConsumer.start();
   }
 
+  @PreDestroy
+  public void stopExecutor()
+  {
+    try
+    {
+      log.info("Shutting down the ExecutorService.");
+      executor.shutdown();
+      log.info("Waiting 5 seconds for the ExecutorService to terminate...");
+      executor.awaitTermination(5, TimeUnit.SECONDS);
+    }
+    catch (InterruptedException e)
+    {
+      log.error("Exception while waiting for the termination of the ExecutorService: {}", e.toString());
+    }
+    finally
+    {
+      if (!executor.isTerminated())
+      {
+        log.warn("Forcing shutdown of ExecutorService!");
+        executor
+            .shutdownNow()
+            .forEach(runnable -> log.warn("Unprocessed task: {}", runnable.getClass().getSimpleName()));
+      }
+      log.info("Shutdown of ExecutorService finished");
+    }
+  }
+
+
   public static void main(String[] args)
   {
     SpringApplication.run(Application.class, args);
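The @PreDestroy hook above follows the usual two-phase ExecutorService shutdown: an orderly shutdown(), a bounded awaitTermination(), and a forced shutdownNow() only if tasks are still running. A condensed, self-contained sketch of the same pattern (the helper name is mine, not from this commit):

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    public class ShutdownHelper
    {
      // Orderly shutdown first; interrupt stragglers only after the timeout.
      public static void shutdownGracefully(ExecutorService executor, long seconds)
      {
        executor.shutdown();
        try
        {
          if (!executor.awaitTermination(seconds, TimeUnit.SECONDS))
            executor.shutdownNow();
        }
        catch (InterruptedException e)
        {
          executor.shutdownNow();
          Thread.currentThread().interrupt(); // preserve the interrupt flag
        }
      }

      public static void main(String[] args)
      {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        executor.submit(() -> System.out.println("working..."));
        shutdownGracefully(executor, 5);
      }
    }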
ExecutorService."); + executor.shutdown(); + log.info("Waiting 5 seconds for the ExecutorService to terminate..."); + executor.awaitTermination(5, TimeUnit.SECONDS); + } + catch (InterruptedException e) + { + log.error("Exception while waiting for the termination of the ExecutorService: {}", e.toString()); + } + finally + { + if (!executor.isTerminated()) + { + log.warn("Forcing shutdown of ExecutorService!"); + executor + .shutdownNow() + .forEach(runnable -> log.warn("Unprocessed task: {}", runnable.getClass().getSimpleName())); + } + log.info("Shutdow of ExecutorService finished"); + } + } + + public static void main(String[] args) { SpringApplication.run(Application.class, args); diff --git a/src/main/java/de/juplo/kafka/ApplicationConfiguration.java b/src/main/java/de/juplo/kafka/ApplicationConfiguration.java new file mode 100644 index 0000000..4054e93 --- /dev/null +++ b/src/main/java/de/juplo/kafka/ApplicationConfiguration.java @@ -0,0 +1,67 @@ +package de.juplo.kafka; + +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.consumer.KafkaConsumer; +import org.apache.kafka.common.serialization.LongDeserializer; +import org.apache.kafka.common.serialization.StringDeserializer; +import org.springframework.boot.context.properties.EnableConfigurationProperties; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +import java.util.Properties; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.function.Consumer; + + +@Configuration +@EnableConfigurationProperties(ApplicationProperties.class) +public class ApplicationConfiguration +{ + @Bean + public Consumer> consumer() + { + return (record) -> + { + // Handle record + }; + } + + @Bean + public EndlessConsumer endlessConsumer( + KafkaConsumer kafkaConsumer, + ExecutorService executor, + Consumer> handler, + ApplicationProperties properties) + { + return + new EndlessConsumer<>( + executor, + properties.getClientId(), + properties.getTopic(), + kafkaConsumer, + handler); + } + + @Bean + public ExecutorService executor() + { + return Executors.newSingleThreadExecutor(); + } + + @Bean(destroyMethod = "close") + public KafkaConsumer kafkaConsumer(ApplicationProperties properties) + { + Properties props = new Properties(); + + props.put("bootstrap.servers", properties.getBootstrapServer()); + props.put("group.id", properties.getGroupId()); + props.put("client.id", properties.getClientId()); + props.put("auto.offset.reset", properties.getAutoOffsetReset()); + props.put("metadata.max.age.ms", "1000"); + props.put("key.deserializer", StringDeserializer.class.getName()); + props.put("value.deserializer", LongDeserializer.class.getName()); + + return new KafkaConsumer<>(props); + } +} diff --git a/src/main/java/de/juplo/kafka/ApplicationHealthIndicator.java b/src/main/java/de/juplo/kafka/ApplicationHealthIndicator.java index ab9782c..dc3a26e 100644 --- a/src/main/java/de/juplo/kafka/ApplicationHealthIndicator.java +++ b/src/main/java/de/juplo/kafka/ApplicationHealthIndicator.java @@ -10,7 +10,7 @@ import org.springframework.stereotype.Component; @RequiredArgsConstructor public class ApplicationHealthIndicator implements HealthIndicator { - private final EndlessConsumer consumer; + private final EndlessConsumer consumer; @Override diff --git a/src/main/java/de/juplo/kafka/DriverController.java b/src/main/java/de/juplo/kafka/DriverController.java index 1fb2a1b..ed38080 100644 --- 
diff --git a/src/main/java/de/juplo/kafka/ApplicationHealthIndicator.java b/src/main/java/de/juplo/kafka/ApplicationHealthIndicator.java
index ab9782c..dc3a26e 100644
--- a/src/main/java/de/juplo/kafka/ApplicationHealthIndicator.java
+++ b/src/main/java/de/juplo/kafka/ApplicationHealthIndicator.java
@@ -10,7 +10,7 @@ import org.springframework.stereotype.Component;
 @RequiredArgsConstructor
 public class ApplicationHealthIndicator implements HealthIndicator
 {
-  private final EndlessConsumer consumer;
+  private final EndlessConsumer<String, Long> consumer;
 
 
   @Override

diff --git a/src/main/java/de/juplo/kafka/DriverController.java b/src/main/java/de/juplo/kafka/DriverController.java
index 1fb2a1b..ed38080 100644
--- a/src/main/java/de/juplo/kafka/DriverController.java
+++ b/src/main/java/de/juplo/kafka/DriverController.java
@@ -33,7 +33,7 @@ public class DriverController
 
 
   @GetMapping("seen")
-  public Map<Integer, Map<String, Integer>> seen()
+  public Map<Integer, Map<String, Long>> seen()
   {
     return consumer.getSeen();
   }
props.put("client.id", id); - props.put("auto.offset.reset", autoOffsetReset); - props.put("metadata.max.age.ms", "1000"); - props.put("key.deserializer", StringDeserializer.class.getName()); - props.put("value.deserializer", StringDeserializer.class.getName()); - - this.consumer = new KafkaConsumer<>(props); - log.info("{} - Subscribing to topic {}", id, topic); - consumer.subscribe(Arrays.asList(topic), new ConsumerRebalanceListener() - { - @Override - public void onPartitionsRevoked(Collection partitions) - { - partitions.forEach(tp -> - { - log.info("{} - removing partition: {}", id, tp); - Map removed = seen.remove(tp.partition()); - for (String key : removed.keySet()) - { - log.info( - "{} - Seen {} messages for partition={}|key={}", - id, - removed.get(key), - tp.partition(), - key); - } - }); - } - - @Override - public void onPartitionsAssigned(Collection partitions) - { - partitions.forEach(tp -> - { - log.info("{} - adding partition: {}", id, tp); - seen.put(tp.partition(), new HashMap<>()); - }); - } - }); + consumer.subscribe(Arrays.asList(topic), this); while (true) { - ConsumerRecords records = + ConsumerRecords records = consumer.poll(Duration.ofSeconds(1)); // Do something with the data... log.info("{} - Received {} messages", id, records.count()); - for (ConsumerRecord record : records) + for (ConsumerRecord record : records) { - consumed++; log.info( "{} - {}: {}/{} - {}={}", id, @@ -125,14 +106,18 @@ public class EndlessConsumer implements Runnable record.value() ); + handler.accept(record); + + consumed++; + Integer partition = record.partition(); - String key = record.key() == null ? "NULL" : record.key(); - Map byKey = seen.get(partition); + String key = record.key() == null ? "NULL" : record.key().toString(); + Map byKey = seen.get(partition); if (!byKey.containsKey(key)) - byKey.put(key, 0); + byKey.put(key, 0l); - int seenByKey = byKey.get(key); + long seenByKey = byKey.get(key); seenByKey++; byKey.put(key, seenByKey); } @@ -140,9 +125,24 @@ public class EndlessConsumer implements Runnable } catch(WakeupException e) { - log.info("{} - RIIING!", id); + log.info("{} - RIIING! 
diff --git a/src/test/java/de/juplo/kafka/ApplicationTests.java b/src/test/java/de/juplo/kafka/ApplicationTests.java
new file mode 100644
index 0000000..40dc149
--- /dev/null
+++ b/src/test/java/de/juplo/kafka/ApplicationTests.java
@@ -0,0 +1,315 @@
+package de.juplo.kafka;
+
+import lombok.extern.slf4j.Slf4j;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.errors.RecordDeserializationException;
+import org.apache.kafka.common.serialization.*;
+import org.apache.kafka.common.utils.Bytes;
+import org.junit.jupiter.api.*;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
+import org.springframework.boot.test.context.TestConfiguration;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Import;
+import org.springframework.kafka.test.context.EmbeddedKafka;
+import org.springframework.test.context.TestPropertySource;
+import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
+
+import java.time.Duration;
+import java.util.*;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.function.BiConsumer;
+import java.util.function.Consumer;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import static de.juplo.kafka.ApplicationTests.PARTITIONS;
+import static de.juplo.kafka.ApplicationTests.TOPIC;
+import static org.assertj.core.api.Assertions.*;
+import static org.awaitility.Awaitility.*;
+
+
+@SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
+@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
+@TestPropertySource(
+    properties = {
+        "consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
+        "consumer.topic=" + TOPIC })
+@EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
+@Slf4j
+class ApplicationTests
+{
+  public static final String TOPIC = "FOO";
+  public static final int PARTITIONS = 10;
+
+
+  StringSerializer stringSerializer = new StringSerializer();
+
+  @Autowired
+  Serializer<Long> valueSerializer;
+  @Autowired
+  KafkaProducer<String, Bytes> kafkaProducer;
+  @Autowired
+  KafkaConsumer<String, Long> kafkaConsumer;
+  @Autowired
+  KafkaConsumer<Bytes, Bytes> offsetConsumer;
+  @Autowired
+  ApplicationProperties properties;
+  @Autowired
+  ExecutorService executor;
+
+  Consumer<ConsumerRecord<String, Long>> testHandler;
+  EndlessConsumer<String, Long> endlessConsumer;
+  Map<TopicPartition, Long> oldOffsets;
+  Map<TopicPartition, Long> newOffsets;
+  Set<ConsumerRecord<String, Long>> receivedRecords;
+
+
+  /** Test methods */
+
+  @Test
+  @Order(1) // << The poison pill is not skipped. Hence, this test must run first
+  void commitsCurrentOffsetsOnSuccess() throws ExecutionException, InterruptedException
+  {
+    send100Messages(i -> new Bytes(valueSerializer.serialize(TOPIC, i)));
+
+    await("100 records received")
+        .atMost(Duration.ofSeconds(30))
+        .until(() -> receivedRecords.size() >= 100);
+
+    await("Offsets committed")
+        .atMost(Duration.ofSeconds(10))
+        .untilAsserted(() ->
+        {
+          checkSeenOffsetsForProgress();
+          compareToCommitedOffsets(newOffsets);
+        });
+
+    assertThatExceptionOfType(IllegalStateException.class)
+        .isThrownBy(() -> endlessConsumer.exitStatus())
+        .describedAs("Consumer should still be running");
+  }
+
+  @Test
+  @Order(2)
+  void commitsOffsetOfErrorForReprocessingOnError()
+  {
+    send100Messages(counter ->
+        counter == 77
+            ? new Bytes(stringSerializer.serialize(TOPIC, "BOOM!"))
+            : new Bytes(valueSerializer.serialize(TOPIC, counter)));
+
+    await("Consumer failed")
+        .atMost(Duration.ofSeconds(30))
+        .until(() -> !endlessConsumer.running());
+
+    checkSeenOffsetsForProgress();
+    compareToCommitedOffsets(newOffsets);
+
+    endlessConsumer.start();
+    await("Consumer failed")
+        .atMost(Duration.ofSeconds(30))
+        .until(() -> !endlessConsumer.running());
+
+    checkSeenOffsetsForProgress();
+    compareToCommitedOffsets(newOffsets);
+    assertThat(receivedRecords.size())
+        .describedAs("Did not receive all sent events")
+        .isLessThan(100);
+
+    assertThatNoException()
+        .describedAs("Consumer should not be running")
+        .isThrownBy(() -> endlessConsumer.exitStatus());
+    assertThat(endlessConsumer.exitStatus())
+        .describedAs("Consumer should have exited abnormally")
+        .containsInstanceOf(RecordDeserializationException.class);
+  }
+
+
+  /** Helper methods for the verification of expectations */
+
+  void compareToCommitedOffsets(Map<TopicPartition, Long> offsetsToCheck)
+  {
+    doForCurrentOffsets((tp, offset) ->
+    {
+      Long expected = offsetsToCheck.get(tp) + 1;
+      log.debug("Checking, if the offset for {} is {}", tp, expected);
+      assertThat(offset)
+          .describedAs("Committed offset corresponds to the offset of the consumer")
+          .isEqualTo(expected);
+    });
+  }
+
+  void checkSeenOffsetsForProgress()
+  {
+    // Be sure that some messages were consumed...!
+    Set<TopicPartition> withProgress = new HashSet<>();
+    partitions().forEach(tp ->
+    {
+      Long oldOffset = oldOffsets.get(tp);
+      Long newOffset = newOffsets.get(tp);
+      if (!oldOffset.equals(newOffset))
+      {
+        log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
+        withProgress.add(tp);
+      }
+    });
+    assertThat(withProgress)
+        .describedAs("Some offsets must have changed, compared to the old offset-positions")
+        .isNotEmpty();
+  }
+
+
+  /** Helper methods for setting up and running the tests */
+
+  void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
+  {
+    offsetConsumer.assign(partitions());
+    partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
+    offsetConsumer.unsubscribe();
+  }
+
+  List<TopicPartition> partitions()
+  {
+    return
+        IntStream
+            .range(0, PARTITIONS)
+            .mapToObj(partition -> new TopicPartition(TOPIC, partition))
+            .collect(Collectors.toList());
+  }
+
+
+  void send100Messages(Function<Long, Bytes> messageGenerator)
+  {
+    long i = 0;
+
+    for (int partition = 0; partition < 10; partition++)
+    {
+      for (int key = 0; key < 10; key++)
+      {
+        Bytes value = messageGenerator.apply(++i);
+
+        ProducerRecord<String, Bytes> record =
+            new ProducerRecord<>(
+                TOPIC,
+                partition,
+                Integer.toString(key%2),
+                value);
+
+        kafkaProducer.send(record, (metadata, e) ->
+        {
+          if (metadata != null)
+          {
+            log.debug(
+                "{}|{} - {}={}",
+                metadata.partition(),
+                metadata.offset(),
+                record.key(),
+                record.value());
+          }
+          else
+          {
+            log.warn(
+                "Exception for {}={}: {}",
+                record.key(),
+                record.value(),
+                e.toString());
+          }
+        });
+      }
+    }
+  }
+
+
+  @BeforeEach
+  public void init()
+  {
+    testHandler = record -> {} ;
+
+    oldOffsets = new HashMap<>();
+    newOffsets = new HashMap<>();
+    receivedRecords = new HashSet<>();
+
+    doForCurrentOffsets((tp, offset) ->
+    {
+      oldOffsets.put(tp, offset - 1);
+      newOffsets.put(tp, offset - 1);
+    });
+
+    Consumer<ConsumerRecord<String, Long>> captureOffsetAndExecuteTestHandler =
+        record ->
+        {
+          newOffsets.put(
+              new TopicPartition(record.topic(), record.partition()),
+              record.offset());
+          receivedRecords.add(record);
+          testHandler.accept(record);
+        };
+
+    endlessConsumer =
+        new EndlessConsumer<>(
+            executor,
+            properties.getClientId(),
+            properties.getTopic(),
+            kafkaConsumer,
+            captureOffsetAndExecuteTestHandler);
+
+    endlessConsumer.start();
+  }
+
+  @AfterEach
+  public void deinit()
+  {
+    try
+    {
+      endlessConsumer.stop();
+    }
+    catch (Exception e)
+    {
+      log.info("Exception while stopping the consumer: {}", e.toString());
+    }
+  }
+
+
+  @TestConfiguration
+  @Import(ApplicationConfiguration.class)
+  public static class Configuration
+  {
+    @Bean
+    Serializer<Long> serializer()
+    {
+      return new LongSerializer();
+    }
+
+    @Bean
+    KafkaProducer<String, Bytes> kafkaProducer(ApplicationProperties properties)
+    {
+      Properties props = new Properties();
+      props.put("bootstrap.servers", properties.getBootstrapServer());
+      props.put("linger.ms", 100);
+      props.put("key.serializer", StringSerializer.class.getName());
+      props.put("value.serializer", BytesSerializer.class.getName());
+
+      return new KafkaProducer<>(props);
+    }
+
+    @Bean
+    KafkaConsumer<Bytes, Bytes> offsetConsumer(ApplicationProperties properties)
+    {
+      Properties props = new Properties();
+      props.put("bootstrap.servers", properties.getBootstrapServer());
+      props.put("client.id", "OFFSET-CONSUMER");
+      props.put("group.id", properties.getGroupId());
+      props.put("key.deserializer", BytesDeserializer.class.getName());
+      props.put("value.deserializer", BytesDeserializer.class.getName());
+
+      return new KafkaConsumer<>(props);
+    }
+  }
+}
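A detail worth spelling out from compareToCommitedOffsets(): the expected value is the captured offset plus one, because a committed Kafka offset names the next record to consume, not the last one processed. A tiny, hypothetical helper (not part of this commit) that makes the convention explicit:

    import org.apache.kafka.clients.consumer.Consumer;
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.OffsetAndMetadata;
    import org.apache.kafka.common.TopicPartition;

    import java.util.Map;

    public class OffsetConvention
    {
      // After processing the record at offset n, commit n + 1 - the
      // offset the consumer group should resume from after a restart.
      public static <K, V> void commitAfter(Consumer<K, V> consumer, ConsumerRecord<K, V> record)
      {
        TopicPartition tp = new TopicPartition(record.topic(), record.partition());
        consumer.commitSync(Map.of(tp, new OffsetAndMetadata(record.offset() + 1)));
      }
    }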