import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.errors.RecordDeserializationException;
import org.apache.kafka.common.serialization.*;
import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
-import java.time.Clock;
import java.time.Duration;
import java.util.*;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.function.BiConsumer;
-import java.util.function.Consumer;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

import static org.assertj.core.api.Assertions.*;
import static org.awaitility.Awaitility.*;
@Autowired
KafkaProducer<String, Bytes> kafkaProducer;
@Autowired
- KafkaConsumer<String, String> kafkaConsumer;
+ KafkaConsumer<String, Long> kafkaConsumer;
@Autowired
PartitionStatisticsRepository partitionStatisticsRepository;
@Autowired
ExecutorService executor;
- @Autowired
- PartitionStatisticsRepository repository;
+ @Autowired
+ KeyCountingRebalanceListener keyCountingRebalanceListener;
+ @Autowired
+ KeyCountingRecordHandler keyCountingRecordHandler;
- Consumer<ConsumerRecord<String, String>> testHandler;
- EndlessConsumer<String, String> endlessConsumer;
+ EndlessConsumer<String, Long> endlessConsumer;
Map<TopicPartition, Long> oldOffsets;
Map<TopicPartition, Long> newOffsets;
- Set<ConsumerRecord<String, String>> receivedRecords;
+ Set<ConsumerRecord<String, Long>> receivedRecords;
/** Test methods */
@Test
+ @Order(1) // << The poison pill is not skipped. Hence, this test must run first (see the note on @TestMethodOrder below)
void commitsCurrentOffsetsOnSuccess() throws ExecutionException, InterruptedException
{
  send100Messages(i -> new Bytes(valueSerializer.serialize(TOPIC, i)));

  await("100 records received")
      .atMost(Duration.ofSeconds(30))
      .until(() -> receivedRecords.size() >= 100);

  assertThatExceptionOfType(IllegalStateException.class)
      .isThrownBy(() -> endlessConsumer.exitStatus())
      .describedAs("Consumer should still be running");
}
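// Note: JUnit 5 honors @Order only if ordered execution is enabled on the
// test class. The class header is not part of this diff; a minimal sketch of
// the annotation that is assumed to be present (class name illustrative):
//
//   @TestMethodOrder(MethodOrderer.OrderAnnotation.class)
//   class ApplicationTests { ... }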
+ @Test
+ @Order(2)
+ void commitsOffsetOfErrorForReprocessingOnError()
+ {
+ send100Messages(counter ->
+ counter == 77
+ ? new Bytes(stringSerializer.serialize(TOPIC, "BOOM!"))
+ : new Bytes(valueSerializer.serialize(TOPIC, counter)));
+
+ await("Consumer failed")
+ .atMost(Duration.ofSeconds(30))
+ .until(() -> !endlessConsumer.running());
+
+ checkSeenOffsetsForProgress();
+ compareToCommitedOffsets(newOffsets);
+
+ endlessConsumer.start();
+ await("Consumer failed")
+ .atMost(Duration.ofSeconds(30))
+ .until(() -> !endlessConsumer.running());
+
+ checkSeenOffsetsForProgress();
+ compareToCommitedOffsets(newOffsets);
+ assertThat(receivedRecords.size())
+ .describedAs("Not all sent events should have been received (consumption stops at the poison pill)")
+ .isLessThan(100);
+
+ assertThatNoException()
+ .describedAs("Consumer should not be running")
+ .isThrownBy(() -> endlessConsumer.exitStatus());
+ assertThat(endlessConsumer.exitStatus())
+ .describedAs("Consumer should have exited abnormally")
+ .containsInstanceOf(RecordDeserializationException.class);
+ }
+
/** Helper methods for the verification of expectations */
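// send100Messages(...) is used by both tests, but its definition is not part
// of this diff. A minimal sketch, assuming the topic has 10 partitions and
// that the given generator maps a running counter to the serialized value:
void send100Messages(Function<Long, Bytes> messageGenerator)
{
  long counter = 0;

  for (int partition = 0; partition < 10; partition++)
  {
    for (int key = 0; key < 10; key++)
    {
      Bytes value = messageGenerator.apply(++counter);
      ProducerRecord<String, Bytes> record =
          new ProducerRecord<>(TOPIC, partition, Integer.toString(key % 2), value);
      kafkaProducer.send(record); // fire-and-forget is sufficient for test setup
    }
  }
}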
// The beginning of this helper is cut off in this excerpt; its signature and
// the name doForCurrentOffsets are assumed, matching the call sites in init()
// and compareToCommitedOffsets() below.
void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
{
  partitions().forEach(tp ->
  {
    String partition = Integer.toString(tp.partition());
    Optional<Long> offset = partitionStatisticsRepository.findById(partition).map(document -> document.offset);
    consumer.accept(tp, offset.orElse(0L));
  });
}
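// compareToCommitedOffsets(...) is called by both tests, but its definition
// is not shown in this diff. A minimal sketch of what it presumably asserts
// (the parameter name is illustrative): for every partition, the committed
// offset must be exactly one ahead of the last offset the test has seen,
// i.e. the next record that would be consumed.
void compareToCommitedOffsets(Map<TopicPartition, Long> offsetsToCheck)
{
  doForCurrentOffsets((tp, offset) ->
  {
    Long expected = offsetsToCheck.get(tp) + 1;
    assertThat(offset)
        .describedAs("Committed offset corresponds to the offset of the consumer")
        .isEqualTo(expected);
  });
}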
List<TopicPartition> partitions()
{
  return
      IntStream
          .range(0, 10) // the topic is assumed to have 10 partitions
          .mapToObj(partition -> new TopicPartition(TOPIC, partition))
          .collect(Collectors.toList());
}
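// checkSeenOffsetsForProgress() is likewise called by the tests but not shown
// here. A minimal sketch, assuming it merely verifies that at least one
// partition advanced beyond the offsets recorded in init():
void checkSeenOffsetsForProgress()
{
  Set<TopicPartition> withProgress = new HashSet<>();
  partitions().forEach(tp ->
  {
    Long oldOffset = oldOffsets.get(tp);
    Long newOffset = newOffsets.get(tp);
    if (!oldOffset.equals(newOffset))
      withProgress.add(tp);
  });
  assertThat(withProgress)
      .describedAs("Some offsets must have changed, compared to the old offset-positions")
      .isNotEmpty();
}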
@BeforeEach
public void init()
{
- testHandler = record -> {} ;
-
oldOffsets = new HashMap<>();
newOffsets = new HashMap<>();
receivedRecords = new HashSet<>();
// Remember the last seen offset per partition (committed offset - 1)
doForCurrentOffsets((tp, offset) ->
{
  oldOffsets.put(tp, offset - 1);
  newOffsets.put(tp, offset - 1);
});
- Consumer<ConsumerRecord<String, String>> captureOffsetAndExecuteTestHandler =
- record ->
- {
- newOffsets.put(
- new TopicPartition(record.topic(), record.partition()),
- record.offset());
- receivedRecords.add(record);
- testHandler.accept(record);
+ TestRecordHandler<String, Long> captureOffsetAndExecuteTestHandler =
+ new TestRecordHandler<String, Long>(keyCountingRecordHandler) {
+ @Override
+ public void onNewRecord(ConsumerRecord<String, Long> record)
+ {
+ newOffsets.put(
+ new TopicPartition(record.topic(), record.partition()),
+ record.offset());
+ receivedRecords.add(record);
+ }
};
endlessConsumer =
new EndlessConsumer<>(
executor,
- repository,
properties.getClientId(),
properties.getTopic(),
- Clock.systemDefaultZone(),
- properties.getCommitInterval(),
kafkaConsumer,
+ keyCountingRebalanceListener,
captureOffsetAndExecuteTestHandler);
endlessConsumer.start();
}
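// TestRecordHandler is referenced in init() above, but its definition is not
// part of this diff. A plausible shape, assuming record handlers are plain
// Consumer<ConsumerRecord<K, V>> implementations (an assumption, not the
// verified implementation): a decorator that runs the onNewRecord() test hook
// before delegating each record to the wrapped production handler.
public abstract class TestRecordHandler<K, V>
    implements java.util.function.Consumer<ConsumerRecord<K, V>>
{
  private final java.util.function.Consumer<ConsumerRecord<K, V>> handler;

  public TestRecordHandler(java.util.function.Consumer<ConsumerRecord<K, V>> handler)
  {
    this.handler = handler;
  }

  // Overridden in init() to capture offsets and collect received records
  public abstract void onNewRecord(ConsumerRecord<K, V> record);

  @Override
  public void accept(ConsumerRecord<K, V> record)
  {
    this.onNewRecord(record);    // let the test observe the record first...
    this.handler.accept(record); // ...then run the production logic
  }
}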