import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.serialization.BytesDeserializer;
-import org.apache.kafka.common.serialization.BytesSerializer;
-import org.apache.kafka.common.serialization.LongSerializer;
-import org.apache.kafka.common.serialization.StringSerializer;
+import org.apache.kafka.common.errors.RecordDeserializationException;
+import org.apache.kafka.common.serialization.*;
import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.*;
import org.springframework.beans.factory.annotation.Autowired;
import static de.juplo.kafka.ApplicationTests.TOPIC;
import static org.assertj.core.api.Assertions.assertThat;
import static org.awaitility.Awaitility.*;
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertThrows;
@SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
StringSerializer stringSerializer = new StringSerializer();
- LongSerializer longSerializer = new LongSerializer();
+ @Autowired
+ Serializer<Long> valueSerializer;
@Autowired
KafkaProducer<String, Bytes> kafkaProducer;
@Autowired
EndlessConsumer<String, Long> endlessConsumer;
Map<TopicPartition, Long> oldOffsets;
Map<TopicPartition, Long> newOffsets;
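+ // Records received by the consumer; filled by the record handler and inspected by the tests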
+ Set<ConsumerRecord<String, Long>> receivedRecords;
/** Test methods */
@Test
@Order(1) // << The poison pill is not skipped. Hence, this test must run first
void commitsCurrentOffsetsOnSuccess() throws ExecutionException, InterruptedException
{
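  // All messages sent in this test are valid Long values, so the consumer
  // should receive all 100 records and commit the corresponding offsets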
- send100Messages(i -> new Bytes(longSerializer.serialize(TOPIC, i)));
-
- Set<ConsumerRecord<String, Long>> received = new HashSet<>();
- testHandler = record -> received.add(record);
+ send100Messages(i -> new Bytes(valueSerializer.serialize(TOPIC, i)));
await("100 records received")
.atMost(Duration.ofSeconds(30))
- .until(() -> received.size() >= 100);
+ .until(() -> receivedRecords.size() >= 100);
await("Offsets committed")
    .atMost(Duration.ofSeconds(10))
    .untilAsserted(() ->
    {
      checkSeenOffsetsForProgress();
      compareToCommitedOffsets(newOffsets);
    });
+
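+ // exitStatus() is expected to throw while the consumer is still running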
+ assertThrows(
+ IllegalStateException.class,
+ () -> endlessConsumer.exitStatus(),
+ "Consumer should still be running");
}
@Test
@Order(2)
- void commitsNoOffsetsOnError()
+ void commitsOffsetOfErrorForReprocessingOnError()
{
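  // Message number 77 is sent as a String and cannot be deserialized as a
  // Long: a "poison pill" that the consumer does not skip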
send100Messages(counter ->
counter == 77
? new Bytes(stringSerializer.serialize(TOPIC, "BOOM!"))
- : new Bytes(longSerializer.serialize(TOPIC, counter)));
+ : new Bytes(valueSerializer.serialize(TOPIC, counter)));
+
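+ // First failure: the committed offsets should point at the poison pill,
+ // so that the failed record will be reprocessed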
+ await("Consumer failed")
+ .atMost(Duration.ofSeconds(30))
+ .until(() -> !endlessConsumer.running());
+ checkSeenOffsetsForProgress();
+ compareToCommitedOffsets(newOffsets);
+
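+ // Restart: the consumer resumes at the committed offset and dies on the
+ // same poison pill again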
+ endlessConsumer.start();
await("Consumer failed")
.atMost(Duration.ofSeconds(30))
.until(() -> !endlessConsumer.running());
checkSeenOffsetsForProgress();
- compareToCommitedOffsets(oldOffsets);
+ compareToCommitedOffsets(newOffsets);
+ assertThat(receivedRecords.size())
+ .describedAs("Received not all sent events")
+ .isLessThan(100);
+
+ assertDoesNotThrow(
+ () -> endlessConsumer.exitStatus(),
+ "Consumer should not be running");
+ assertThat(endlessConsumer.exitStatus())
+ .describedAs("Consumer should have exited abnormally")
+ .containsInstanceOf(RecordDeserializationException.class);
}
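  // Note: the assertions above assume that exitStatus() throws an
  // IllegalStateException while the consumer is still running and returns an
  // Optional<Exception> with the causing error once the consumer has stopped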
{
Long expected = offsetsToCheck.get(tp) + 1;
log.debug("Checking, if the offset for {} is {}", tp, expected);
- assertThat(offset).isEqualTo(expected);
+ assertThat(offset)
+ .describedAs("Committed offset corresponds to the offset of the consumer")
+ .isEqualTo(expected);
});
}
withProgress.add(tp);
}
});
- assertThat(withProgress).isNotEmpty().describedAs("Found no partitions with any offset-progress");
+ assertThat(withProgress)
+ .describedAs("Some offsets must have changed, compared to the old offset-positions")
+ .isNotEmpty();
}
oldOffsets = new HashMap<>();
newOffsets = new HashMap<>();
+ receivedRecords = new HashSet<>();
doForCurrentOffsets((tp, offset) ->
{
newOffsets.put(
new TopicPartition(record.topic(), record.partition()),
record.offset());
+ receivedRecords.add(record);
testHandler.accept(record);
};
@Import(ApplicationConfiguration.class)
public static class Configuration
{
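+ // Expose the value serializer as a bean, so that the test class can simply inject it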
+ @Bean
+ Serializer<Long> serializer()
+ {
+ return new LongSerializer();
+ }
+
@Bean
KafkaProducer<String, Bytes> kafkaProducer(ApplicationProperties properties)
{