EndlessConsumer<String, Long> endlessConsumer;
Map<TopicPartition, Long> oldOffsets;
Map<TopicPartition, Long> newOffsets;
+ Set<ConsumerRecord<String, Long>> receivedRecords;
/** Tests methods */
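
Note: the new receivedRecords field replaces the per-test local "received" sets that the two hunks below remove. It is re-initialized for every test in the setup hunk further down, so the waits and assertions can read shared state instead of each test wiring up its own handler.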
@@ ... @@
{
send100Messages(i -> new Bytes(longSerializer.serialize(TOPIC, i)));
- Set<ConsumerRecord<String, Long>> received = new HashSet<>();
- testHandler = record -> received.add(record);
-
- endlessConsumer.start();
-
await("100 records received")
.atMost(Duration.ofSeconds(30))
- .until(() -> received.size() >= 100);
+ .until(() -> receivedRecords.size() >= 100);
await("Offsets committed")
.atMost(Duration.ofSeconds(10))
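
Both waits use Awaitility's polling API: the condition is re-evaluated until it holds or the timeout expires, at which point the test fails under the given alias. A minimal, self-contained sketch of that pattern, assuming an Awaitility test dependency on the classpath (class and field names here are illustrative, not taken from the project):

    import static org.awaitility.Awaitility.await;

    import java.time.Duration;
    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;

    class AwaitilitySketch
    {
        // Thread-safe set, since records arrive on the consumer thread
        final Set<Long> received = ConcurrentHashMap.newKeySet();

        void waitForHundredRecords()
        {
            // Polls the condition repeatedly and fails the test with the
            // alias "100 records received" if it is still false after 30s
            await("100 records received")
                    .atMost(Duration.ofSeconds(30))
                    .until(() -> received.size() >= 100);
        }
    }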
@@ ... @@
? new Bytes(stringSerializer.serialize(TOPIC, "BOOM!"))
: new Bytes(longSerializer.serialize(TOPIC, counter)));
- Set<ConsumerRecord<String, Long>> received = new HashSet<>();
- testHandler = record -> received.add(record);
-
- endlessConsumer.start();
-
await("Consumer failed")
.atMost(Duration.ofSeconds(30))
.until(() -> !endlessConsumer.running());
checkSeenOffsetsForProgress();
compareToCommitedOffsets(newOffsets);
- assertThat(received.size())
+ assertThat(receivedRecords.size())
.describedAs("Received not all sent events")
.isLessThan(100);
}
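
This test writes one of the 100 values as a String-serialized "BOOM!" instead of a Long. The consuming side, configured for Long values, cannot decode those bytes, so the consumer dies before seeing all records, which is exactly what the isLessThan(100) assertion verifies. A standalone sketch of why such a record acts as a poison pill (illustrative, not project code; needs only kafka-clients):

    import org.apache.kafka.common.serialization.LongDeserializer;
    import org.apache.kafka.common.serialization.StringSerializer;

    class PoisonPillSketch
    {
        public static void main(String[] args)
        {
            // "BOOM!" serializes to 5 bytes; a Long needs exactly 8
            byte[] poison = new StringSerializer().serialize("topic", "BOOM!");

            // Throws org.apache.kafka.common.errors.SerializationException:
            // "Size of data received by LongDeserializer is not 8"
            new LongDeserializer().deserialize("topic", poison);
        }
    }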
@@ ... @@
oldOffsets = new HashMap<>();
newOffsets = new HashMap<>();
+ receivedRecords = new HashSet<>();
doForCurrentOffsets((tp, offset) ->
{
@@ ... @@
newOffsets.put(
new TopicPartition(record.topic(), record.partition()),
record.offset());
+ receivedRecords.add(record);
testHandler.accept(record);
};
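
The lines above, from newOffsets.put( down to the closing };, belong to the captureOffsetAndExecuteTestHandler consumer that is passed to the EndlessConsumer below: it records the offset, and now also the record itself, before delegating to the per-test testHandler. The same decorator idea in isolation (a sketch with illustrative names):

    import java.util.HashSet;
    import java.util.Set;
    import java.util.function.Consumer;

    class CaptureSketch<T>
    {
        final Set<T> seen = new HashSet<>();

        // Wraps a delegate so that every element is recorded before
        // the delegate gets to handle it
        Consumer<T> capturing(Consumer<T> delegate)
        {
            return element ->
            {
                seen.add(element);
                delegate.accept(element);
            };
        }
    }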
@@ ... @@
properties.getTopic(),
kafkaConsumer,
captureOffsetAndExecuteTestHandler);
+
+ endlessConsumer.start();
}
@AfterEach
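
Moving endlessConsumer.start() into the setup method means every test begins with a consumer that was started against freshly reset bookkeeping state, and the @AfterEach method whose annotation closes this excerpt presumably stops it again. A minimal JUnit 5 sketch of that symmetric lifecycle (illustrative stand-ins, not the project's classes):

    import static org.junit.jupiter.api.Assertions.assertTrue;

    import org.junit.jupiter.api.AfterEach;
    import org.junit.jupiter.api.BeforeEach;
    import org.junit.jupiter.api.Test;

    class LifecycleSketch
    {
        Thread consumer;          // stand-in for the consumer under test
        volatile boolean running;

        @BeforeEach
        void setUp()
        {
            running = true;
            consumer = new Thread(() -> { while (running) Thread.onSpinWait(); });
            consumer.start();     // tests never start the consumer themselves
        }

        @AfterEach
        void tearDown() throws Exception
        {
            running = false;      // stop symmetrically after every test
            consumer.join();
        }

        @Test
        void consumerRunsDuringTheTest()
        {
            assertTrue(consumer.isAlive());
        }
    }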