import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.serialization.BytesDeserializer;
-import org.apache.kafka.common.serialization.BytesSerializer;
-import org.apache.kafka.common.serialization.LongSerializer;
-import org.apache.kafka.common.serialization.StringSerializer;
+import org.apache.kafka.common.serialization.*;
import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.*;
import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.autoconfigure.kafka.KafkaAutoConfiguration;
+import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Import;
+import org.springframework.context.annotation.Primary;
+import org.springframework.kafka.support.serializer.JsonSerializer;
import org.springframework.kafka.test.context.EmbeddedKafka;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
import java.time.Duration;
import java.util.*;
import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
import java.util.function.BiConsumer;
+import java.util.function.BiFunction;
import java.util.function.Consumer;
-import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import static de.juplo.kafka.ApplicationTests.PARTITIONS;
import static de.juplo.kafka.ApplicationTests.TOPIC;
-import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.*;
import static org.awaitility.Awaitility.*;
-@SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
-@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
+@SpringJUnitConfig(
+ initializers = ConfigDataApplicationContextInitializer.class,
+ classes = {
+ EndlessConsumer.class,
+ KafkaAutoConfiguration.class,
+ ApplicationTests.Configuration.class })
@TestPropertySource(
properties = {
- "consumer.bootstrap-server=${spring.embedded.kafka.brokers}",
+ "spring.kafka.consumer.bootstrap-servers=${spring.embedded.kafka.brokers}",
+ "spring.kafka.producer.bootstrap-servers=${spring.embedded.kafka.brokers}",
"consumer.topic=" + TOPIC })
@EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
@Slf4j
StringSerializer stringSerializer = new StringSerializer();
- LongSerializer longSerializer = new LongSerializer();
+ @Autowired
+ Serializer<ClientMessage> valueSerializer;
@Autowired
KafkaProducer<String, Bytes> kafkaProducer;
@Autowired
- KafkaConsumer<String, Long> kafkaConsumer;
+ org.apache.kafka.clients.consumer.Consumer<String, ClientMessage> kafkaConsumer;
@Autowired
KafkaConsumer<Bytes, Bytes> offsetConsumer;
@Autowired
- ApplicationProperties properties;
+ ApplicationProperties applicationProperties;
+ @Autowired
+ KafkaProperties kafkaProperties;
+ @Autowired
+ EndlessConsumer endlessConsumer;
@Autowired
- ExecutorService executor;
+ RecordHandler recordHandler;
- Consumer<ConsumerRecord<String, Long>> testHandler;
- EndlessConsumer<String, Long> endlessConsumer;
Map<TopicPartition, Long> oldOffsets;
Map<TopicPartition, Long> newOffsets;
+ Set<ConsumerRecord<String, ClientMessage>> receivedRecords;
/** Tests methods */
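+ /** Happy path: all 100 messages can be deserialized, every record is
+  *  consumed and the corresponding offsets are committed. */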
@Test
- @Order(1) // << The poison pill is not skipped. Hence, this test must run first
void commitsCurrentOffsetsOnSuccess() throws ExecutionException, InterruptedException
{
- send100Messages(i -> new Bytes(longSerializer.serialize(TOPIC, i)));
-
- Set<ConsumerRecord<String, Long>> received = new HashSet<>();
- testHandler = record -> received.add(record);
+ send100Messages((key, counter) -> serialize(key, counter));
await("100 records received")
.atMost(Duration.ofSeconds(30))
- .until(() -> received.size() >= 100);
+ .until(() -> receivedRecords.size() == 100);
await("Offsets committed")
 .atMost(Duration.ofSeconds(10))
 .untilAsserted(() ->
 {
 checkSeenOffsetsForProgress();
compareToCommitedOffsets(newOffsets);
});
+
+ assertThat(endlessConsumer.isRunning())
+ .describedAs("Consumer should still be running")
+ .isTrue();
}
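+ /** Sends one poison pill (a value that cannot be deserialized as a
+  *  ClientMessage) among 99 valid messages: the consumer must keep running
+  *  and still commit the offsets of the valid records. */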
@Test
- @Order(2)
- void commitsNoOffsetsOnError()
+ void commitsCurrentOffsetsOnDeserializationError()
{
- send100Messages(counter ->
+ send100Messages((key, counter) ->
counter == 77
? new Bytes(stringSerializer.serialize(TOPIC, "BOOM!"))
- : new Bytes(longSerializer.serialize(TOPIC, counter)));
+ : serialize(key, counter));
+
+ await("99 records received")
+ .atMost(Duration.ofSeconds(30))
+ .until(() -> receivedRecords.size() == 99);
+
+ await("Offsets committed")
+ .atMost(Duration.ofSeconds(10))
+ .untilAsserted(() ->
+ {
+ // UGLY:
+ // This only works because valid messages are still read after the
+ // message that triggers the deserialization error.
+ // REASON:
+ // The MessageHandler never sees the offset of the faulty message!
+ checkSeenOffsetsForProgress();
+ compareToCommitedOffsets(newOffsets);
+ });
+
+ assertThat(endlessConsumer.isRunning())
+ .describedAs("Consumer should still be running")
+ .isTrue();
+ }
+
+ @Test
+ void commitsOffsetOnProgramLogicErrorFoo()
+ {
+ recordHandler.testHandler = (record) ->
+ {
+ if (Integer.parseInt(record.value().getMessage()) % 10 == 0)
+ throw new RuntimeException("BOOM: " + record.value().getMessage() + " % 10 == 0");
+ };
- await("Consumer failed")
+ send100Messages((key, counter) -> serialize(key, counter));
+
+ await("80 records received")
.atMost(Duration.ofSeconds(30))
- .until(() -> !endlessConsumer.running());
+ .until(() -> receivedRecords.size() == 100);
+
+ await("Offsets committed")
+ .atMost(Duration.ofSeconds(10))
+ .untilAsserted(() ->
+ {
+ checkSeenOffsetsForProgress();
+ compareToCommitedOffsets(newOffsets);
+ });
- checkSeenOffsetsForProgress();
- compareToCommitedOffsets(oldOffsets);
+ assertThat(endlessConsumer.isRunning())
+ .describedAs("Consumer should still be running")
+ .isTrue();
}
doForCurrentOffsets((tp, offset) ->
{
Long expected = offsetsToCheck.get(tp) + 1;
- log.debug("Checking, if the offset for {} is {}", tp, expected);
- assertThat(offset).isEqualTo(expected);
+ log.debug("TEST: Comparing the expected offset of {} for {} to {}", expected, tp, offset);
+ assertThat(offset)
+ .describedAs("Committed offset corresponds to the offset of the consumer")
+ .isEqualTo(expected);
});
}
Long newOffset = newOffsets.get(tp);
if (!oldOffset.equals(newOffset))
{
- log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
+ log.debug("TEST: Progress for {}: {} -> {}", tp, oldOffset, newOffset);
withProgress.add(tp);
}
});
- assertThat(withProgress).isNotEmpty().describedAs("Found no partitions with any offset-progress");
+ log.debug("TEST: Offsets with progress: {}", withProgress);
+ assertThat(withProgress)
+ .describedAs("Some offsets must have changed, compared to the old offset-positions")
+ .isNotEmpty();
}
}
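+ /** Produces 100 messages; the BiFunction decides how each (key, counter)
+  *  pair is turned into the record value, so tests can inject poison pills. */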
- void send100Messages(Function<Long, Bytes> messageGenerator)
+ void send100Messages(BiFunction<Integer, Long, Bytes> messageGenerator)
{
long i = 0;
{
for (int key = 0; key < 10; key++)
{
- Bytes value = messageGenerator.apply(++i);
+ Bytes value = messageGenerator.apply(key, ++i);
 ProducerRecord<String, Bytes> record =
 new ProducerRecord<>(
 TOPIC,
 Integer.toString(key%2),
 value);
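+ // Type-id header evaluated by Spring Kafka's JsonDeserializer to pick the
+ // target class for the JSON payload.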
+ record.headers().add("__TypeId__", "message".getBytes());
kafkaProducer.send(record, (metadata, e) ->
{
if (metadata != null)
{
log.debug(
- "{}|{} - {}={}",
+ "TEST: Sending partition={}, offset={} - {}={}",
metadata.partition(),
metadata.offset(),
record.key(),
else
{
log.warn(
- "Exception for {}={}: {}",
+ "TEST: Exception for {}={}: {}",
record.key(),
record.value(),
e.toString());
}
}
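+ /** Builds a ClientMessage for the given key/counter and serializes it to
+  *  Bytes with the injected JSON serializer. */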
+ Bytes serialize(Integer key, Long value)
+ {
+ ClientMessage message = new ClientMessage();
+ message.setClient(key.toString());
+ message.setMessage(value.toString());
+ return new Bytes(valueSerializer.serialize(TOPIC, message));
+ }
+
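+ /** Resets the per-test handler, remembers the currently committed offsets
+  *  as a baseline and (re-)starts the consumer before each test. */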
@BeforeEach
public void init()
{
- testHandler = record -> {} ;
+ recordHandler.testHandler = (record) -> {};
oldOffsets = new HashMap<>();
newOffsets = new HashMap<>();
+ receivedRecords = new HashSet<>();
doForCurrentOffsets((tp, offset) ->
{
 oldOffsets.put(tp, offset - 1);
 newOffsets.put(tp, offset - 1);
});
- Consumer<ConsumerRecord<String, Long>> captureOffsetAndExecuteTestHandler =
+ recordHandler.captureOffsets =
record ->
{
+ receivedRecords.add(record);
+ log.debug("TEST: Processing record #{}: {}", receivedRecords.size(), record.value());
newOffsets.put(
new TopicPartition(record.topic(), record.partition()),
record.offset());
- testHandler.accept(record);
};
- endlessConsumer =
- new EndlessConsumer<>(
- executor,
- properties.getClientId(),
- properties.getTopic(),
- kafkaConsumer,
- captureOffsetAndExecuteTestHandler);
-
endlessConsumer.start();
}
}
catch (Exception e)
{
- log.info("Exception while stopping the consumer: {}", e.toString());
+ log.info("TEST: Exception while stopping the consumer: {}", e.toString());
}
}
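+ /**
+  * Test double for the application's record consumer: every record is first
+  * captured for the assertions and then passed on to an optional per-test
+  * handler (e.g. one that throws to simulate a logic error).
+  */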
+ public static class RecordHandler implements Consumer<ConsumerRecord<String, ClientMessage>>
+ {
+ Consumer<ConsumerRecord<String, ClientMessage>> captureOffsets;
+ Consumer<ConsumerRecord<String, ClientMessage>> testHandler;
+
+
+ @Override
+ public void accept(ConsumerRecord<String, ClientMessage> record)
+ {
+ captureOffsets
+ .andThen(testHandler)
+ .accept(record);
+ }
+ }
@TestConfiguration
@Import(ApplicationConfiguration.class)
public static class Configuration
{
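+ // Overrides the application's record consumer, so that the tests can
+ // observe every record and install additional per-test behaviour.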
+ @Primary
+ @Bean
+ public RecordHandler testHandler()
+ {
+ return new RecordHandler();
+ }
+
+ @Bean
+ Serializer<ClientMessage> serializer()
+ {
+ return new JsonSerializer<>();
+ }
+
@Bean
- KafkaProducer<String, Bytes> kafkaProducer(ApplicationProperties properties)
+ KafkaProducer<String, Bytes> kafkaProducer(KafkaProperties properties)
{
Properties props = new Properties();
- props.put("bootstrap.servers", properties.getBootstrapServer());
+ props.put("bootstrap.servers", properties.getConsumer().getBootstrapServers());
props.put("linger.ms", 100);
props.put("key.serializer", StringSerializer.class.getName());
props.put("value.serializer", BytesSerializer.class.getName());
 return new KafkaProducer<>(props);
 }
@Bean
- KafkaConsumer<Bytes, Bytes> offsetConsumer(ApplicationProperties properties)
+ KafkaConsumer<Bytes, Bytes> offsetConsumer(KafkaProperties properties)
{
Properties props = new Properties();
- props.put("bootstrap.servers", properties.getBootstrapServer());
+ props.put("bootstrap.servers", properties.getConsumer().getBootstrapServers());
props.put("client.id", "OFFSET-CONSUMER");
- props.put("group.id", properties.getGroupId());
+ props.put("group.id", properties.getConsumer().getGroupId());
props.put("key.deserializer", BytesDeserializer.class.getName());
props.put("value.deserializer", BytesDeserializer.class.getName());