import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Import;
import org.springframework.context.annotation.Primary;
+import org.springframework.kafka.listener.MessageListenerContainer;
import org.springframework.kafka.support.serializer.JsonSerializer;
import org.springframework.kafka.test.context.EmbeddedKafka;
import org.springframework.test.context.TestPropertySource;
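// The broker started by @EmbeddedKafka (below) publishes its address under
// spring.embedded.kafka.brokers, which the properties below wire into the
// consumer and producer configuration.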
@TestPropertySource(
properties = {
"spring.kafka.consumer.bootstrap-servers=${spring.embedded.kafka.brokers}",
+ "spring.kafka.producer.bootstrap-servers=${spring.embedded.kafka.brokers}",
"consumer.topic=" + TOPIC })
@EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
@Slf4j
await("100 records received")
.atMost(Duration.ofSeconds(30))
- .until(() -> receivedRecords.size() >= 100);
+ .until(() -> receivedRecords.size() == 100);
await("Offsets committed")
.atMost(Duration.ofSeconds(10))
.untilAsserted(() ->
{
compareToCommitedOffsets(newOffsets);
});
- assertThatExceptionOfType(IllegalStateException.class)
- .isThrownBy(() -> endlessConsumer.exitStatus())
- .describedAs("Consumer should still be running");
+ assertThat(endlessConsumer.isRunning())
+ .describedAs("Consumer should still be running")
+ .isTrue();
}
@Test
@Order(2)
- void commitsOffsetOfErrorForReprocessingOnError()
+ void commitsCurrentOffsetsOnError()
{
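// After this change the consumer is expected to survive the poison pill:
// it processes the 99 valid records, commits their offsets and keeps running.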
send100Messages((key, counter) ->
counter == 77
? new Bytes(stringSerializer.serialize(TOPIC, "BOOM!"))
: serialize(key, counter));
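// Record #77 is sent as a raw "BOOM!" payload that cannot be deserialized;
// all other records are serialized normally.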
- await("Consumer failed")
+ await("99 records received")
.atMost(Duration.ofSeconds(30))
- .untilAsserted(() -> checkSeenOffsetsForProgress());
-
- compareToCommitedOffsets(newOffsets);
- assertThat(receivedRecords.size())
- .describedAs("Received not all sent events")
- .isLessThan(100);
-
- assertThatNoException()
- .describedAs("Consumer should not be running")
- .isThrownBy(() -> endlessConsumer.exitStatus());
- assertThat(endlessConsumer.exitStatus())
- .containsInstanceOf(RecordDeserializationException.class)
- .describedAs("Consumer should have exited abnormally");
+ .until(() -> receivedRecords.size() == 99);
+
+ await("Offsets committed")
+ .atMost(Duration.ofSeconds(10))
+ .untilAsserted(() ->
+ {
+ // UGLY:
+ // This only works because valid messages are still read after the
+ // message that triggers the deserialization error.
+ // REASON:
+ // The MessageHandler never sees the offset of the faulty
+ // message!
+ checkSeenOffsetsForProgress();
+ compareToCommitedOffsets(newOffsets);
+ });
+
+ assertThat(endlessConsumer.isRunning())
+ .describedAs("Consumer should still be running")
+ .isTrue();
}
doForCurrentOffsets((tp, offset) ->
{
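// Kafka commits the offset of the next record to be read, hence the
// expected value is the last seen offset plus one.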
Long expected = offsetsToCheck.get(tp) + 1;
- log.debug("Checking, if the offset for {} is {}", tp, expected);
+ log.debug("TEST: Comparing the expected offset of {} for {} to {}", expected, tp, offset);
assertThat(offset)
.describedAs("Committed offset corresponds to the offset of the consumer")
.isEqualTo(expected);
Long newOffset = newOffsets.get(tp);
if (!oldOffset.equals(newOffset))
{
- log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
+ log.debug("TEST: Progress for {}: {} -> {}", tp, oldOffset, newOffset);
withProgress.add(tp);
}
});
+ log.debug("TEST: Offsets with progress: {}", withProgress);
assertThat(withProgress)
.describedAs("Some offsets must have changed, compared to the old offset-positions")
.isNotEmpty();
if (metadata != null)
{
log.debug(
- "{}|{} - {}={}",
+ "TEST: Sending partition={}, offset={} - {}={}",
metadata.partition(),
metadata.offset(),
record.key(),
else
{
log.warn(
- "Exception for {}={}: {}",
+ "TEST: Exception for {}={}: {}",
record.key(),
record.value(),
e.toString());
record ->
{
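// Test handler: collect every consumed record and remember the latest
// offset seen per partition for the offset checks.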
receivedRecords.add(record);
+ log.debug("TEST: Processing record #{}: {}", receivedRecords.size(), record.value());
newOffsets.put(
new TopicPartition(record.topic(), record.partition()),
record.offset());
}
catch (Exception e)
{
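// Stopping may fail if the consumer has already terminated on its own;
// the tear-down only logs this and moves on.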
- log.info("Exception while stopping the consumer: {}", e.toString());
+ log.info("TEST: Exception while stopping the consumer: {}", e.toString());
}
}