Refaktorisierungen des Testfalls gemerged (Branch 'deserialization')
[demos/kafka/training] / src / test / java / de / juplo / kafka / ApplicationTests.java
index fbc668f..62906b3 100644 (file)
@@ -78,7 +78,11 @@ class ApplicationTests
        @Order(1) // << The poison pill is not skipped. Hence, this test must run first
        void commitsCurrentOffsetsOnSuccess() throws ExecutionException, InterruptedException
        {
-               send100Messages((key, counter) -> serialize(key, counter));
+               send100Messages((partition, key, counter) ->
+               {
+                       Bytes value = serialize(key, counter);
+                       return new ProducerRecord<>(TOPIC, partition, key, value);
+               });
 
                await("100 records received")
                                .atMost(Duration.ofSeconds(30))
@@ -101,10 +105,13 @@ class ApplicationTests
        @Order(2)
        void commitsOffsetOfErrorForReprocessingOnError()
        {
-               send100Messages((key, counter) ->
-                               counter == 77
-                                               ? new Bytes(stringSerializer.serialize(TOPIC, "BOOM!"))
-                                               : serialize(key, counter));
+               send100Messages((partition, key, counter) ->
+               {
+                       Bytes value = counter == 77
+                                       ? new Bytes(stringSerializer.serialize(TOPIC, "BOOM!"))
+                                       : serialize(key, counter);
+                       return new ProducerRecord<>(TOPIC, partition, key, value);
+               });
 
                await("Consumer failed")
                                .atMost(Duration.ofSeconds(30))
@@ -186,7 +193,12 @@ class ApplicationTests
        }
 
 
-       void send100Messages(BiFunction<Integer, Long, Bytes> messageGenerator)
+       public interface RecordGenerator<K, V>
+       {
+               public ProducerRecord<String, Bytes> generate(int partition, String key, long counter);
+       }
+
+       void send100Messages(RecordGenerator recordGenerator)
        {
                long i = 0;
 
@@ -194,15 +206,10 @@ class ApplicationTests
                {
                        for (int key = 0; key < 10; key++)
                        {
-                               Bytes value = messageGenerator.apply(key, ++i);
-
                                ProducerRecord<String, Bytes> record =
-                                               new ProducerRecord<>(
-                                                               TOPIC,
-                                                               partition,
-                                                               Integer.toString(key%2),
-                                                               value);
+                                               recordGenerator.generate(partition, Integer.toString(partition*10+key%2), ++i);
 
+                               record.headers().add("__TypeId__", "message".getBytes());
                                kafkaProducer.send(record, (metadata, e) ->
                                {
                                        if (metadata != null)
@@ -227,10 +234,10 @@ class ApplicationTests
                }
        }
 
-       Bytes serialize(Integer key, Long value)
+       Bytes serialize(String key, Long value)
        {
                ClientMessage message = new ClientMessage();
-               message.setClient(key.toString());
+               message.setClient(key);
                message.setMessage(value.toString());
                return new Bytes(valueSerializer.serialize(TOPIC, message));
        }