Tests: Refactored - messages are recorded for all tests [demos/kafka/training]
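The commit consolidates message capture: instead of each test creating its own received set, installing a handler, and starting the consumer, the fixture now records every consumed record into a shared receivedRecords set and starts the EndlessConsumer in the common setup. A minimal sketch of the resulting wiring, assuming JUnit 5 - the class name, the setup method name, and the EndlessConsumer construction are abbreviated placeholders here, while receivedRecords, testHandler, and captureOffsetAndExecuteTestHandler follow the diff below:

	import java.util.HashSet;
	import java.util.Set;
	import java.util.function.Consumer;

	import org.apache.kafka.clients.consumer.ConsumerRecord;
	import org.junit.jupiter.api.BeforeEach;

	class ApplicationTestsSketch
	{
		// Installed per test; individual tests only decide how to react to records.
		Consumer<ConsumerRecord<String, Long>> testHandler = record -> {};

		// Filled for every test, regardless of what testHandler does.
		Set<ConsumerRecord<String, Long>> receivedRecords;

		@BeforeEach
		void configureTestHandler()
		{
			receivedRecords = new HashSet<>();

			// Wrapper handler: capture the record first, then delegate to the
			// per-test handler.
			Consumer<ConsumerRecord<String, Long>> captureOffsetAndExecuteTestHandler =
					record ->
					{
						receivedRecords.add(record);
						testHandler.accept(record);
					};

			// ... construct the EndlessConsumer with this wrapper handler and
			// start it, as in the last two hunks below ...
		}
	}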
diff --git a/src/test/java/de/juplo/kafka/ApplicationTests.java b/src/test/java/de/juplo/kafka/ApplicationTests.java
index 61e0a8d..4cc4f91 100644
--- a/src/test/java/de/juplo/kafka/ApplicationTests.java
+++ b/src/test/java/de/juplo/kafka/ApplicationTests.java
@@ -69,6 +69,7 @@ class ApplicationTests
 	EndlessConsumer<String, Long> endlessConsumer;
 	Map<TopicPartition, Long> oldOffsets;
 	Map<TopicPartition, Long> newOffsets;
+	Set<ConsumerRecord<String, Long>> receivedRecords;

 	/** Tests methods */
@@ -79,14 +80,9 @@ class ApplicationTests
 	{
 		send100Messages(i -> new Bytes(longSerializer.serialize(TOPIC, i)));

-		Set<ConsumerRecord<String, Long>> received = new HashSet<>();
-		testHandler = record -> received.add(record);
-
-		endlessConsumer.start();
-
 		await("100 records received")
 				.atMost(Duration.ofSeconds(30))
-				.until(() -> received.size() >= 100);
+				.until(() -> receivedRecords.size() >= 100);

 		await("Offsets committed")
 				.atMost(Duration.ofSeconds(10))
@@ -106,11 +102,6 @@ class ApplicationTests
 						? new Bytes(stringSerializer.serialize(TOPIC, "BOOM!"))
 						: new Bytes(longSerializer.serialize(TOPIC, counter)));

-		Set<ConsumerRecord<String, Long>> received = new HashSet<>();
-		testHandler = record -> received.add(record);
-
-		endlessConsumer.start();
-
 		await("Consumer failed")
 				.atMost(Duration.ofSeconds(30))
 				.until(() -> !endlessConsumer.running());
@@ -125,7 +116,7 @@ class ApplicationTests
 		checkSeenOffsetsForProgress();
 		compareToCommitedOffsets(newOffsets);

-		assertThat(received.size())
+		assertThat(receivedRecords.size())
 				.describedAs("Received not all sent events")
 				.isLessThan(100);
 	}
@@ -233,6 +224,7 @@ class ApplicationTests
 		oldOffsets = new HashMap<>();
 		newOffsets = new HashMap<>();
+		receivedRecords = new HashSet<>();

 		doForCurrentOffsets((tp, offset) ->
 		{
@@ -246,6 +238,7 @@ class ApplicationTests
 			newOffsets.put(
 					new TopicPartition(record.topic(), record.partition()),
 					record.offset());
+			receivedRecords.add(record);
 			testHandler.accept(record);
 		};
@@ -256,6 +249,8 @@ class ApplicationTests
 				properties.getTopic(),
 				kafkaConsumer,
 				captureOffsetAndExecuteTestHandler);
+
+		endlessConsumer.start();
 	}

 	@AfterEach
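With capture and startup moved into the common setup, the individual tests shrink to installing a testHandler where needed and asserting on receivedRecords, as in the Awaitility condition taken from the second hunk:

	await("100 records received")
			.atMost(Duration.ofSeconds(30))
			.until(() -> receivedRecords.size() >= 100);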