import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.errors.RecordDeserializationException;
import org.apache.kafka.common.serialization.*;
import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.*;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.function.BiConsumer;
-import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
@Autowired
KafkaProducer<String, Bytes> kafkaProducer;
@Autowired
- KafkaConsumer<String, Long> kafkaConsumer;
+ KafkaConsumer<String, String> kafkaConsumer;
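+ // Additional consumer that is only used to look up the current end
+ // offsets of the topic's partitions (see seekToEnd() below)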
+ @Autowired
+ KafkaConsumer<Bytes, Bytes> offsetConsumer;
@Autowired
PartitionStatisticsRepository partitionStatisticsRepository;
@Autowired
ApplicationProperties properties;
@Autowired
ExecutorService executor;
@Autowired
PartitionStatisticsRepository repository;
@Autowired
- KeyCountingRebalanceListener keyCountingRebalanceListener;
+ SumRebalanceListener sumRebalanceListener;
@Autowired
- KeyCountingRecordHandler keyCountingRecordHandler;
+ SumRecordHandler sumRecordHandler;
- EndlessConsumer<String, Long> endlessConsumer;
+ EndlessConsumer<String, String> endlessConsumer;
Map<TopicPartition, Long> oldOffsets;
Map<TopicPartition, Long> newOffsets;
- Set<ConsumerRecord<String, Long>> receivedRecords;
+ Set<ConsumerRecord<String, String>> receivedRecords;
/** Test methods */
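+ /**
+  * Happy path: all 100 records can be consumed, so the consumer keeps
+  * running, sees progress on the offsets and commits them.
+  */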
@Test
- @Order(1) // << The poison pill is not skipped. Hence, this test must run first
void commitsCurrentOffsetsOnSuccess() throws ExecutionException, InterruptedException
{
- send100Messages(i -> new Bytes(valueSerializer.serialize(TOPIC, i)));
+ send100Messages((partition, key, counter) ->
+ {
+ Bytes value = new Bytes(valueSerializer.serialize(TOPIC, counter));
+ return new ProducerRecord<>(TOPIC, partition, key, value);
+ });
await("100 records received")
.atMost(Duration.ofSeconds(30))
+ .pollInterval(Duration.ofSeconds(1))
.until(() -> receivedRecords.size() >= 100);
await("Offsets committed")
.atMost(Duration.ofSeconds(10))
+ .pollInterval(Duration.ofSeconds(1))
.untilAsserted(() ->
{
checkSeenOffsetsForProgress();
compareToCommitedOffsets(newOffsets);
});

assertThatExceptionOfType(IllegalStateException.class)
.isThrownBy(() -> endlessConsumer.exitStatus())
.describedAs("Consumer should still be running");
}
- @Test
- @Order(2)
- void commitsOffsetOfErrorForReprocessingOnError()
- {
- send100Messages(counter ->
- counter == 77
- ? new Bytes(stringSerializer.serialize(TOPIC, "BOOM!"))
- : new Bytes(valueSerializer.serialize(TOPIC, counter)));
-
- await("Consumer failed")
- .atMost(Duration.ofSeconds(30))
- .until(() -> !endlessConsumer.running());
-
- checkSeenOffsetsForProgress();
- compareToCommitedOffsets(newOffsets);
-
- endlessConsumer.start();
- await("Consumer failed")
- .atMost(Duration.ofSeconds(30))
- .until(() -> !endlessConsumer.running());
-
- checkSeenOffsetsForProgress();
- compareToCommitedOffsets(newOffsets);
- assertThat(receivedRecords.size())
- .describedAs("Received not all sent events")
- .isLessThan(100);
-
- assertThatNoException()
- .describedAs("Consumer should not be running")
- .isThrownBy(() -> endlessConsumer.exitStatus());
- assertThat(endlessConsumer.exitStatus())
- .describedAs("Consumer should have exited abnormally")
- .containsInstanceOf(RecordDeserializationException.class);
- }
-
/** Helper methods for the verification of expectations */
void checkSeenOffsetsForProgress()
{
Set<TopicPartition> withProgress = new HashSet<>();
partitions().forEach(tp ->
{
- Long oldOffset = oldOffsets.get(tp);
- Long newOffset = newOffsets.get(tp);
+ Long oldOffset = oldOffsets.get(tp) + 1;
+ Long newOffset = newOffsets.get(tp) + 1;
if (!oldOffset.equals(newOffset))
{
log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
/** Helper methods for setting up and running the tests */
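+ /**
+  * Writes the current end offset of every partition of the topic into
+  * MongoDB, so that the consumer under test starts at the end of the
+  * topic and only sees records that are sent during the test.
+  */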
+ void seekToEnd()
+ {
+ offsetConsumer.assign(partitions());
+ offsetConsumer.seekToEnd(partitions());
+ partitions().forEach(tp ->
+ {
+ Long offset = offsetConsumer.position(tp);
+ log.info("New position for {}: {}", tp, offset);
+ Integer partition = tp.partition();
+ StatisticsDocument document =
+ partitionStatisticsRepository
+ .findById(partition.toString())
+ .orElse(new StatisticsDocument(partition));
+ document.offset = offset;
+ partitionStatisticsRepository.save(document);
+ });
+ offsetConsumer.unsubscribe();
+ }
+
void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
{
partitions().forEach(tp ->
{
String partition = Integer.toString(tp.partition());
Optional<Long> offset = partitionStatisticsRepository.findById(partition).map(document -> document.offset);
consumer.accept(tp, offset.orElse(0L));
});
- }
+ }
List<TopicPartition> partitions()
{
return
IntStream
.range(0, PARTITIONS)
.mapToObj(partition -> new TopicPartition(TOPIC, partition))
.collect(Collectors.toList());
}
- void send100Messages(Function<Long, Bytes> messageGenerator)
+ public interface RecordGenerator
+ {
+ ProducerRecord<String, Bytes> generate(int partition, String key, long counter);
+ }
+
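+ /**
+  * Sends 100 records: ten into each of the ten partitions of the topic.
+  * The record itself is built by the given RecordGenerator, so that a
+  * test can control the key and the serialized value for each counter.
+  */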
+ void send100Messages(RecordGenerator recordGenerator)
{
long i = 0;
for (int partition = 0; partition < 10; partition++)
{
for (int key = 0; key < 10; key++)
{
- Bytes value = messageGenerator.apply(++i);
-
ProducerRecord<String, Bytes> record =
- new ProducerRecord<>(
- TOPIC,
- partition,
- Integer.toString(key%2),
- value);
+ recordGenerator.generate(partition, Integer.toString(partition*10+key%2), ++i);
kafkaProducer.send(record, (metadata, e) ->
{
@BeforeEach
public void init()
{
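+ // Fast-forward the stored offsets to the current end of all partitions,
+ // so that only records that are sent during the test are consumed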
+ seekToEnd();
+
oldOffsets = new HashMap<>();
newOffsets = new HashMap<>();
receivedRecords = new HashSet<>();

doForCurrentOffsets((tp, offset) ->
{
oldOffsets.put(tp, offset - 1);
newOffsets.put(tp, offset - 1);
});
- TestRecordHandler<String, Long> captureOffsetAndExecuteTestHandler =
- new TestRecordHandler<String, Long>(keyCountingRecordHandler) {
+ TestRecordHandler<String, String> captureOffsetAndExecuteTestHandler =
+ new TestRecordHandler<String, String>(sumRecordHandler) {
@Override
- public void onNewRecord(ConsumerRecord<String, Long> record)
+ public void onNewRecord(ConsumerRecord<String, String> record)
{
newOffsets.put(
new TopicPartition(record.topic(), record.partition()),
record.offset());
receivedRecords.add(record);
}
};

endlessConsumer =
new EndlessConsumer<>(
executor,
properties.getClientId(),
properties.getTopic(),
kafkaConsumer,
- keyCountingRebalanceListener,
+ sumRebalanceListener,
captureOffsetAndExecuteTestHandler);
endlessConsumer.start();
return new KafkaProducer<>(props);
}
+
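+ // This consumer is never subscribed to the topic: seekToEnd() only
+ // assign()s it to the partitions to read their current end offsets.
+ // Hence, auto-commit is disabled and no group.id is required.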
+ @Bean
+ KafkaConsumer<Bytes, Bytes> offsetConsumer(ApplicationProperties properties)
+ {
+ Properties props = new Properties();
+ props.put("bootstrap.servers", properties.getBootstrapServer());
+ props.put("client.id", "OFFSET-CONSUMER");
+ props.put("enable.auto.commit", false);
+ props.put("auto.offset.reset", "latest");
+ props.put("key.deserializer", BytesDeserializer.class.getName());
+ props.put("value.deserializer", BytesDeserializer.class.getName());
+
+ return new KafkaConsumer<>(props);
+ }
}
}