import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.serialization.BytesDeserializer;
-import org.apache.kafka.common.serialization.BytesSerializer;
-import org.apache.kafka.common.serialization.LongSerializer;
-import org.apache.kafka.common.serialization.StringSerializer;
+import org.apache.kafka.common.errors.RecordDeserializationException;
+import org.apache.kafka.common.serialization.*;
import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.*;
import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.context.annotation.Bean;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.function.BiConsumer;
-import java.util.function.Consumer;
-import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
-import static de.juplo.kafka.ApplicationTests.PARTITIONS;
-import static de.juplo.kafka.ApplicationTests.TOPIC;
-import static org.assertj.core.api.Assertions.assertThat;
+import static de.juplo.kafka.ApplicationTests.*;
+import static org.assertj.core.api.Assertions.*;
import static org.awaitility.Awaitility.*;
@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
@TestPropertySource(
properties = {
- "consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
- "consumer.topic=" + TOPIC })
-@EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
+ "sumup.requests.bootstrap-server=${spring.embedded.kafka.brokers}",
+ "sumup.requests.topic-in=" + INPUT_TOPIC,
+ "sumup.requests.commit-interval=1s" })
+@EmbeddedKafka(topics = { INPUT_TOPIC, OUTPUT_TOPIC }, partitions = PARTITIONS)
+@EnableAutoConfiguration
@Slf4j
class ApplicationTests
{
- public static final String TOPIC = "FOO";
+ public static final String INPUT_TOPIC = "FOO";
+ public static final String OUTPUT_TOPIC = "BAR";
public static final int PARTITIONS = 10;
StringSerializer stringSerializer = new StringSerializer();
- LongSerializer longSerializer = new LongSerializer();
@Autowired
- KafkaProducer<String, Bytes> kafkaProducer;
+ Serializer<Integer> valueSerializer;
@Autowired
- KafkaConsumer<String, Long> kafkaConsumer;
+ KafkaProducer<String, Bytes> testProducer;
+ @Autowired
+ KafkaConsumer<String, Integer> kafkaConsumer;
@Autowired
KafkaConsumer<Bytes, Bytes> offsetConsumer;
@Autowired
ApplicationProperties properties;
@Autowired
ExecutorService executor;
+ @Autowired
+ RecordHandler<String, Integer> noopRecordHandler;
- Consumer<ConsumerRecord<String, Long>> testHandler;
- EndlessConsumer<String, Long> endlessConsumer;
+ EndlessConsumer<String, Integer> endlessConsumer;
Map<TopicPartition, Long> oldOffsets;
Map<TopicPartition, Long> newOffsets;
+ Set<ConsumerRecord<String, Integer>> receivedRecords;
/** Tests methods */
@Test
- @Order(1) // << The poison pill is not skipped. Hence, this test must run first
void commitsCurrentOffsetsOnSuccess() throws ExecutionException, InterruptedException
{
- send100Messages(i -> new Bytes(longSerializer.serialize(TOPIC, i)));
-
- Set<ConsumerRecord<String, Long>> received = new HashSet<>();
- testHandler = record -> received.add(record);
+ send100Messages((partition, key, counter) ->
+ {
+ Bytes value = new Bytes(valueSerializer.serialize(INPUT_TOPIC, counter));
+ return new ProducerRecord<>(INPUT_TOPIC, partition, key, value);
+ });
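+ // All 100 values are valid Integers, so the consumer should receive every record.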
await("100 records received")
.atMost(Duration.ofSeconds(30))
- .until(() -> received.size() >= 100);
+ .pollInterval(Duration.ofSeconds(1))
+ .until(() -> receivedRecords.size() >= 100);
await("Offsets committed")
.atMost(Duration.ofSeconds(10))
+ .pollInterval(Duration.ofSeconds(1))
.untilAsserted(() ->
{
checkSeenOffsetsForProgress();
compareToCommitedOffsets(newOffsets);
});
+
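+ // While the consumer is still running, exitStatus() must throw an IllegalStateException.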
+ assertThatExceptionOfType(IllegalStateException.class)
+ .describedAs("Consumer should still be running")
+ .isThrownBy(() -> endlessConsumer.exitStatus());
}
@Test
- @Order(2)
- void commitsNoOffsetsOnError()
+ void commitsOffsetOfErrorForReprocessingOnDeserializationError()
{
- send100Messages(counter ->
- counter == 77
- ? new Bytes(stringSerializer.serialize(TOPIC, "BOOM!"))
- : new Bytes(longSerializer.serialize(TOPIC, counter)));
+ send100Messages((partition, key, counter) ->
+ {
+ Bytes value = counter == 77
+ ? new Bytes(stringSerializer.serialize(INPUT_TOPIC, "BOOM!"))
+ : new Bytes(valueSerializer.serialize(INPUT_TOPIC, counter));
+ return new ProducerRecord<>(INPUT_TOPIC, partition, key, value);
+ });
+
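+ // Record number 77 carries a String where an Integer is expected:
+ // a poison pill that the consumer cannot deserialize.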
+ await("Consumer failed")
+ .atMost(Duration.ofSeconds(30))
+ .pollInterval(Duration.ofSeconds(1))
+ .until(() -> !endlessConsumer.running());
+
+ checkSeenOffsetsForProgress();
+ compareToCommitedOffsets(newOffsets);
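+ // The offset of the record that could not be deserialized was committed,
+ // so a restarted consumer must fail on the very same record again.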
+ endlessConsumer.start();
await("Consumer failed")
.atMost(Duration.ofSeconds(30))
+ .pollInterval(Duration.ofSeconds(1))
.until(() -> !endlessConsumer.running());
checkSeenOffsetsForProgress();
- compareToCommitedOffsets(oldOffsets);
+ compareToCommitedOffsets(newOffsets);
+ assertThat(receivedRecords.size())
+ .describedAs("Received not all sent events")
+ .isLessThan(100);
+
+ assertThatNoException()
+ .describedAs("Consumer should not be running")
+ .isThrownBy(() -> endlessConsumer.exitStatus());
+ assertThat(endlessConsumer.exitStatus())
+ .describedAs("Consumer should have exited abnormally")
+ .containsInstanceOf(RecordDeserializationException.class);
}
{
Long expected = offsetsToCheck.get(tp) + 1;
log.debug("Checking, if the offset for {} is {}", tp, expected);
- assertThat(offset).isEqualTo(expected);
+ assertThat(offset)
+ .describedAs("Committed offset corresponds to the offset of the consumer")
+ .isEqualTo(expected);
});
}
Set<TopicPartition> withProgress = new HashSet<>();
partitions().forEach(tp ->
{
- Long oldOffset = oldOffsets.get(tp);
- Long newOffset = newOffsets.get(tp);
+ Long oldOffset = oldOffsets.get(tp) + 1;
+ Long newOffset = newOffsets.get(tp) + 1;
if (!oldOffset.equals(newOffset))
{
log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
withProgress.add(tp);
}
});
- assertThat(withProgress).isNotEmpty().describedAs("Found no partitions with any offset-progress");
+ assertThat(withProgress)
+ .describedAs("Some offsets must have changed, compared to the old offset-positions")
+ .isNotEmpty();
}
/** Helper methods for setting up and running the tests */
+ void seekToEnd()
+ {
+ offsetConsumer.assign(partitions());
+ offsetConsumer.seekToEnd(partitions());
+ partitions().forEach(tp ->
+ {
+ // seekToEnd() works lazily: it only takes effect on poll()/position()
+ Long offset = offsetConsumer.position(tp);
+ log.info("New position for {}: {}", tp, offset);
+ });
+ // The new positions must be committed!
+ offsetConsumer.commitSync();
+ offsetConsumer.unsubscribe();
+ }
+
void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
{
offsetConsumer.assign(partitions());
return
IntStream
.range(0, PARTITIONS)
- .mapToObj(partition -> new TopicPartition(TOPIC, partition))
+ .mapToObj(partition -> new TopicPartition(INPUT_TOPIC, partition))
.collect(Collectors.toList());
}
- void send100Messages(Function<Long, Bytes> messageGenerator)
+ @FunctionalInterface
+ public interface RecordGenerator
+ {
+ ProducerRecord<String, Bytes> generate(int partition, String key, int counter);
+ }
+
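+ // Sends 10 records to each of the 10 partitions (100 in total); the
+ // given RecordGenerator decides how each value is serialized.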
+ void send100Messages(RecordGenerator recordGenerator)
{
- long i = 0;
+ int i = 0;
for (int partition = 0; partition < 10; partition++)
{
for (int key = 0; key < 10; key++)
{
- Bytes value = messageGenerator.apply(++i);
-
ProducerRecord<String, Bytes> record =
- new ProducerRecord<>(
- TOPIC,
- partition,
- Integer.toString(key%2),
- value);
+ recordGenerator.generate(partition, Integer.toString(partition*10+key%2), ++i);
- kafkaProducer.send(record, (metadata, e) ->
+ testProducer.send(record, (metadata, e) ->
{
if (metadata != null)
{
@BeforeEach
public void init()
{
- testHandler = record -> {} ;
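+ // Fast-forward the committed offsets to the end of all partitions,
+ // so that leftovers from a previous test cannot influence this one.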
+ seekToEnd();
oldOffsets = new HashMap<>();
newOffsets = new HashMap<>();
+ receivedRecords = new HashSet<>();
doForCurrentOffsets((tp, offset) ->
{
newOffsets.put(tp, offset - 1);
});
- Consumer<ConsumerRecord<String, Long>> captureOffsetAndExecuteTestHandler =
- record ->
- {
- newOffsets.put(
- new TopicPartition(record.topic(), record.partition()),
- record.offset());
- testHandler.accept(record);
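+ // Decorate the no-op handler: capture the offset and the record itself
+ // for the assertions in the test methods.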
+ TestRecordHandler<String, Integer> captureOffsetAndExecuteTestHandler =
+ new TestRecordHandler<String, Integer>(noopRecordHandler) {
+ @Override
+ public void onNewRecord(ConsumerRecord<String, Integer> record)
+ {
+ newOffsets.put(
+ new TopicPartition(record.topic(), record.partition()),
+ record.offset());
+ receivedRecords.add(record);
+ }
};
endlessConsumer =
new EndlessConsumer<>(
executor,
properties.getClientId(),
- properties.getTopic(),
+ properties.getTopicIn(),
kafkaConsumer,
captureOffsetAndExecuteTestHandler);
public static class Configuration
{
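+ // Serializer for valid record values: the consumer under test reads Integers.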
@Bean
- KafkaProducer<String, Bytes> kafkaProducer(ApplicationProperties properties)
+ Serializer<Integer> valueSerializer()
+ {
+ return new IntegerSerializer();
+ }
+
+ @Bean
+ KafkaProducer<String, Bytes> testProducer(ApplicationProperties properties)
{
Properties props = new Properties();
props.put("bootstrap.servers", properties.getBootstrapServer());
return new KafkaConsumer<>(props);
}
+
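+ // Assumption: the application under test writes plain String messages;
+ // this producer is provided for tests that feed it directly.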
+ @Bean
+ KafkaProducer<String, String> kafkaProducer(ApplicationProperties properties)
+ {
+ Properties props = new Properties();
+ props.put("bootstrap.servers", properties.getBootstrapServer());
+ props.put("key.serializer", StringSerializer.class.getName());
+ props.put("value.serializer", StringSerializer.class.getName());
+
+ return new KafkaProducer<>(props);
+ }
}
}