import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.errors.WakeupException;
-import org.apache.kafka.common.serialization.BytesSerializer;
-import org.apache.kafka.common.serialization.LongSerializer;
-import org.apache.kafka.common.serialization.StringSerializer;
+import org.apache.kafka.common.errors.RecordDeserializationException;
+import org.apache.kafka.common.serialization.*;
import org.apache.kafka.common.utils.Bytes;
-import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Import;
+import org.springframework.kafka.support.serializer.JsonSerializer;
import org.springframework.kafka.test.context.EmbeddedKafka;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
+import java.time.Duration;
+import java.time.LocalDateTime;
import java.util.*;
+import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
-import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import static de.juplo.kafka.ApplicationTests.PARTITIONS;
import static de.juplo.kafka.ApplicationTests.TOPIC;
-import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.*;
+import static org.awaitility.Awaitility.*;
@SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
+@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
@TestPropertySource(
properties = {
"consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
- "consumer.topic=" + TOPIC })
+ "consumer.topic=" + TOPIC,
+ "consumer.commit-interval=1s" })
@EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
@Slf4j
class ApplicationTests
{
StringSerializer stringSerializer = new StringSerializer();
- LongSerializer longSerializer = new LongSerializer();
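+ // Raw type, so the helpers below can serialize the invalid TestFooMessage as well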
+ @Autowired
+ Serializer valueSerializer;
@Autowired
KafkaProducer<String, Bytes> kafkaProducer;
@Autowired
- KafkaConsumer<String, Long> kafkaConsumer;
+ KafkaConsumer<String, ValidMessage> kafkaConsumer;
+ @Autowired
+ KafkaConsumer<Bytes, Bytes> offsetConsumer;
@Autowired
ApplicationProperties properties;
@Autowired
ExecutorService executor;
+ Consumer<ConsumerRecord<String, ValidMessage>> testHandler;
+ EndlessConsumer<String, ValidMessage> endlessConsumer;
+ Map<TopicPartition, Long> oldOffsets;
+ Map<TopicPartition, Long> newOffsets;
+ Set<ConsumerRecord<String, ValidMessage>> receivedRecords;
+
+
+ /** Test methods */
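+ /**
+ * Happy path: all 100 records carry a known type header, the consumer stays
+ * running and the committed offsets match the offsets of the received records.
+ */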
@Test
void commitsCurrentOffsetsOnSuccess()
{
- send100Messages(i -> new Bytes(longSerializer.serialize(TOPIC, i)));
+ send100Messages((partition, key, counter) ->
+ {
+ Bytes value;
+ String type;
+
+ if (counter%3 != 0)
+ {
+ value = serializeClientMessage(key, counter);
+ type = "message";
+ }
+ else {
+ value = serializeGreeting(key);
+ type = "greeting";
+ }
+
+ return toRecord(partition, key, value, Optional.of(type));
+ });
+
+ await("100 records received")
+ .atMost(Duration.ofSeconds(30))
+ .pollInterval(Duration.ofSeconds(1))
+ .until(() -> receivedRecords.size() >= 100);
+
+ await("Offsets committed")
+ .atMost(Duration.ofSeconds(10))
+ .pollInterval(Duration.ofSeconds(1))
+ .untilAsserted(() ->
+ {
+ checkSeenOffsetsForProgress();
+ compareToCommitedOffsets(newOffsets);
+ });
+
+ assertThatExceptionOfType(IllegalStateException.class)
+ .describedAs("Consumer should still be running")
+ .isThrownBy(() -> endlessConsumer.exitStatus());
+ }
- Set<ConsumerRecord<String, Long>> received = new HashSet<>();
- Map<Integer, Long> offsets = runEndlessConsumer(record ->
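+ /**
+ * Record #77 is serialized as TestFooMessage without a type header, so it cannot
+ * be deserialized: the consumer stops with a RecordDeserializationException, the
+ * offset of the poison pill is committed for reprocessing, and a restart fails on
+ * the very same record again.
+ */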
+ @Test
+ void commitsOffsetOfErrorForReprocessingOnDeserializationErrorInvalidMessage()
+ {
+ send100Messages((partition, key, counter) ->
{
- received.add(record);
- if (received.size() == 100)
- throw new WakeupException();
+ Bytes value;
+ String type;
+
+ if (counter == 77)
+ {
+ value = serializeFooMessage(key, counter);
+ type = null;
+ }
+ else
+ {
+ if (counter%3 != 0)
+ {
+ value = serializeClientMessage(key, counter);
+ type = "message";
+ }
+ else {
+ value = serializeGreeting(key);
+ type = "greeting";
+ }
+ }
+
+ return toRecord(partition, key, value, Optional.ofNullable(type));
});
- check(offsets);
+ await("Consumer failed")
+ .atMost(Duration.ofSeconds(30))
+ .pollInterval(Duration.ofSeconds(1))
+ .until(() -> !endlessConsumer.running());
+
+ checkSeenOffsetsForProgress();
+ compareToCommitedOffsets(newOffsets);
+
+ endlessConsumer.start();
+ await("Consumer failed")
+ .atMost(Duration.ofSeconds(30))
+ .pollInterval(Duration.ofSeconds(1))
+ .until(() -> !endlessConsumer.running());
+
+ checkSeenOffsetsForProgress();
+ compareToCommitedOffsets(newOffsets);
+ assertThat(receivedRecords.size())
+ .describedAs("Received not all sent events")
+ .isLessThan(100);
+
+ assertThatNoException()
+ .describedAs("Consumer should not be running")
+ .isThrownBy(() -> endlessConsumer.exitStatus());
+ assertThat(endlessConsumer.exitStatus())
+ .describedAs("Consumer should have exited abnormally")
+ .containsInstanceOf(RecordDeserializationException.class);
}
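+ /**
+ * Same scenario as above, but record #77 carries the unknown type id "foo",
+ * which likewise leads to a RecordDeserializationException on every attempt.
+ */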
@Test
- void commitsNoOffsetsOnError()
+ void commitsOffsetOfErrorForReprocessingOnDeserializationErrorOnUnknownMessage()
{
- send100Messages(counter ->
- counter == 77
- ? new Bytes(stringSerializer.serialize(TOPIC, "BOOM!"))
- : new Bytes(longSerializer.serialize(TOPIC, counter)));
+ send100Messages((partition, key, counter) ->
+ {
+ Bytes value;
+ String type;
- Map<Integer, Long> oldOffsets = new HashMap<>();
- doForCurrentOffsets((tp, offset) -> oldOffsets.put(tp.partition(), offset -1));
- Map<Integer, Long> newOffsets = runEndlessConsumer((record) -> {});
+ if (counter == 77)
+ {
+ value = serializeFooMessage(key, counter);
+ type = "foo";
+ }
+ else
+ {
+ if (counter%3 != 0)
+ {
+ value = serializeClientMessage(key, counter);
+ type = "message";
+ }
+ else {
+ value = serializeGreeting(key);
+ type = "greeting";
+ }
+ }
+
+ return toRecord(partition, key, value, Optional.of(type));
+ });
+
+ await("Consumer failed")
+ .atMost(Duration.ofSeconds(30))
+ .pollInterval(Duration.ofSeconds(1))
+ .until(() -> !endlessConsumer.running());
+
+ checkSeenOffsetsForProgress();
+ compareToCommitedOffsets(newOffsets);
+
+ endlessConsumer.start();
+ await("Consumer failed")
+ .atMost(Duration.ofSeconds(30))
+ .pollInterval(Duration.ofSeconds(1))
+ .until(() -> !endlessConsumer.running());
+
+ checkSeenOffsetsForProgress();
+ compareToCommitedOffsets(newOffsets);
+ assertThat(receivedRecords.size())
+ .describedAs("Received not all sent events")
+ .isLessThan(100);
+
+ assertThatNoException()
+ .describedAs("Consumer should not be running")
+ .isThrownBy(() -> endlessConsumer.exitStatus());
+ assertThat(endlessConsumer.exitStatus())
+ .describedAs("Consumer should have exited abnormally")
+ .containsInstanceOf(RecordDeserializationException.class);
+ }
+
+
+ /** Helper methods for the verification of expectations */
+
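+ /** Asserts that the committed offset of every partition matches the expected position. */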
+ void compareToCommitedOffsets(Map<TopicPartition, Long> offsetsToCheck)
+ {
+ doForCurrentOffsets((tp, offset) ->
+ {
+ Long expected = offsetsToCheck.get(tp) + 1;
+ log.debug("Checking, if the offset for {} is {}", tp, expected);
+ assertThat(offset)
+ .describedAs("Committed offset corresponds to the offset of the consumer")
+ .isEqualTo(expected);
+ });
+ }
- check(oldOffsets);
+ void checkSeenOffsetsForProgress()
+ {
+ // Be sure that some messages were actually consumed!
+ Set<TopicPartition> withProgress = new HashSet<>();
+ partitions().forEach(tp ->
+ {
+ Long oldOffset = oldOffsets.get(tp) + 1;
+ Long newOffset = newOffsets.get(tp) + 1;
+ if (!oldOffset.equals(newOffset))
+ {
+ log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
+ withProgress.add(tp);
+ }
+ });
+ assertThat(withProgress)
+ .describedAs("Some offsets must have changed, compared to the old offset-positions")
+ .isNotEmpty();
}
- void send100Messages(Function<Long, Bytes> messageGenerator)
+ /** Helper methods for setting up and running the tests */
+
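+ /**
+ * Seeks the test group to the current end of all partitions and commits these
+ * positions, so every test starts without left-over records from earlier runs.
+ */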
+ void seekToEnd()
{
- long i = 0;
+ offsetConsumer.assign(partitions());
+ offsetConsumer.seekToEnd(partitions());
+ partitions().forEach(tp ->
+ {
+ // seekToEnd() works lazily: it only takes effect on poll()/position()
+ Long offset = offsetConsumer.position(tp);
+ log.info("New position for {}: {}", tp, offset);
+ });
+ // The new positions must be committed!
+ offsetConsumer.commitSync();
+ offsetConsumer.unsubscribe();
+ }
+
+ void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
+ {
+ offsetConsumer.assign(partitions());
+ partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
+ offsetConsumer.unsubscribe();
+ }
+
+ List<TopicPartition> partitions()
+ {
+ return
+ IntStream
+ .range(0, PARTITIONS)
+ .mapToObj(partition -> new TopicPartition(TOPIC, partition))
+ .collect(Collectors.toList());
+ }
+
+
+ public interface RecordGenerator
+ {
+ ProducerRecord<String, Bytes> generate(int partition, String key, int counter);
+ }
+
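+ /** Sends 100 records (10 keys into each of the 10 partitions), built by the given RecordGenerator. */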
+ void send100Messages(RecordGenerator recordGenerator)
+ {
+ int i = 0;
for (int partition = 0; partition < 10; partition++)
{
for (int key = 0; key < 10; key++)
{
- Bytes value = messageGenerator.apply(++i);
-
ProducerRecord<String, Bytes> record =
- new ProducerRecord<>(
- TOPIC,
- partition,
- Integer.toString(key%2),
- value);
+ recordGenerator.generate(partition, Integer.toString(partition*10+key%2), ++i);
kafkaProducer.send(record, (metadata, e) ->
{
if (e != null)
log.error("Could not send {}: {}", record, e.toString());
});
}
}
}
- Map<Integer, Long> runEndlessConsumer(Consumer<ConsumerRecord<String, Long>> consumer)
- {
- Map<Integer, Long> offsets = new HashMap<>();
- doForCurrentOffsets((tp, offset) -> offsets.put(tp.partition(), offset -1));
- Consumer<ConsumerRecord<String, Long>> captureOffset = record -> offsets.put(record.partition(), record.offset());
- EndlessConsumer<String, Long> endlessConsumer =
- new EndlessConsumer<>(
- executor,
- properties.getClientId(),
- properties.getTopic(),
- kafkaConsumer,
- captureOffset.andThen(consumer));
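+ /**
+ * Wraps the value in a ProducerRecord and, if a type is given, adds it as
+ * "__TypeId__" header, which Spring's JsonDeserializer uses to select the target class.
+ */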
+ ProducerRecord<String, Bytes> toRecord(int partition, String key, Bytes value, Optional<String> type)
+ {
+ ProducerRecord<String, Bytes> record =
+ new ProducerRecord<>(TOPIC, partition, key, value);
- endlessConsumer.run();
+ type.ifPresent(typeId -> record.headers().add("__TypeId__", typeId.getBytes()));
+ return record;
+ }
- return offsets;
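+ // TestClientMessage and TestGreeting are the valid message types; TestFooMessage is the
+ // unknown type used to provoke the deserialization error (all assumed to be simple test fixtures).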
+ Bytes serializeClientMessage(String key, int value)
+ {
+ TestClientMessage message = new TestClientMessage(key, Integer.toString(value));
+ return new Bytes(valueSerializer.serialize(TOPIC, message));
}
- List<TopicPartition> partitions()
+ Bytes serializeGreeting(String key)
{
- return
- IntStream
- .range(0, PARTITIONS)
- .mapToObj(partition -> new TopicPartition(TOPIC, partition))
- .collect(Collectors.toList());
+ TestGreeting message = new TestGreeting(key, LocalDateTime.now());
+ return new Bytes(valueSerializer.serialize(TOPIC, message));
}
- void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
+ Bytes serializeFooMessage(String key, int value)
{
- kafkaConsumer.assign(partitions());
- partitions().forEach(tp -> consumer.accept(tp, kafkaConsumer.position(tp)));
- kafkaConsumer.unsubscribe();
+ TestFooMessage message = new TestFooMessage(key, (long)value);
+ return new Bytes(valueSerializer.serialize(TOPIC, message));
}
- void check(Map<Integer, Long> offsets)
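+ /**
+ * Resets the group's offsets to the end of the topic, remembers them as starting
+ * point and starts a fresh EndlessConsumer that records every seen offset and record.
+ */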
+ @BeforeEach
+ public void init()
{
+ testHandler = record -> {};
+
+ seekToEnd();
+
+ oldOffsets = new HashMap<>();
+ newOffsets = new HashMap<>();
+ receivedRecords = new HashSet<>();
+
doForCurrentOffsets((tp, offset) ->
{
- Long expected = offsets.get(tp.partition()) + 1;
- log.debug("Checking, if the offset for {} is {}", tp, expected);
- assertThat(offset).isEqualTo(expected);
+ oldOffsets.put(tp, offset - 1);
+ newOffsets.put(tp, offset - 1);
});
+
+ Consumer<ConsumerRecord<String, ValidMessage>> captureOffsetAndExecuteTestHandler =
+ record ->
+ {
+ newOffsets.put(
+ new TopicPartition(record.topic(), record.partition()),
+ record.offset());
+ receivedRecords.add(record);
+ testHandler.accept(record);
+ };
+
+ endlessConsumer =
+ new EndlessConsumer<>(
+ executor,
+ properties.getClientId(),
+ properties.getTopic(),
+ kafkaConsumer,
+ captureOffsetAndExecuteTestHandler);
+
+ endlessConsumer.start();
+ }
+
+ @AfterEach
+ public void deinit()
+ {
+ try
+ {
+ endlessConsumer.stop();
+ }
+ catch (Exception e)
+ {
+ log.info("Exception while stopping the consumer: {}", e.toString());
+ }
}
@TestConfiguration
@Import(ApplicationConfiguration.class)
public static class Configuration
{
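+ /** JsonSerializer used by the test helpers to build the Bytes payloads sent by the producer. */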
+ @Bean
+ Serializer<ValidMessage> serializer()
+ {
+ return new JsonSerializer<>();
+ }
+
@Bean
KafkaProducer<String, Bytes> kafkaProducer(ApplicationProperties properties)
{
Properties props = new Properties();
props.put("bootstrap.servers", properties.getBootstrapServer());
props.put("key.serializer", StringSerializer.class.getName());
props.put("value.serializer", BytesSerializer.class.getName());
return new KafkaProducer<>(props);
}
+
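+ /**
+ * Separate consumer in the same group that reads plain Bytes; it is only used to
+ * query and commit offsets (seekToEnd / doForCurrentOffsets) without deserializing records.
+ */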
+ @Bean
+ KafkaConsumer<Bytes, Bytes> offsetConsumer(ApplicationProperties properties)
+ {
+ Properties props = new Properties();
+ props.put("bootstrap.servers", properties.getBootstrapServer());
+ props.put("client.id", "OFFSET-CONSUMER");
+ props.put("group.id", properties.getGroupId());
+ props.put("key.deserializer", BytesDeserializer.class.getName());
+ props.put("value.deserializer", BytesDeserializer.class.getName());
+
+ return new KafkaConsumer<>(props);
+ }
}
}