import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.errors.WakeupException;
+import org.apache.kafka.common.serialization.BytesDeserializer;
import org.apache.kafka.common.serialization.BytesSerializer;
import org.apache.kafka.common.serialization.LongSerializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.common.utils.Bytes;
-import org.junit.jupiter.api.MethodOrderer;
-import org.junit.jupiter.api.Order;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.TestMethodOrder;
+import org.junit.jupiter.api.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
+import java.time.Duration;
import java.util.*;
+import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import static de.juplo.kafka.ApplicationTests.PARTITIONS;
import static de.juplo.kafka.ApplicationTests.TOPIC;
import static org.assertj.core.api.Assertions.assertThat;
+import static org.awaitility.Awaitility.*;
@SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
@Autowired
KafkaConsumer<String, Long> kafkaConsumer;
@Autowired
+ KafkaConsumer<Bytes, Bytes> offsetConsumer;
+ @Autowired
ApplicationProperties properties;
@Autowired
ExecutorService executor;
+ Consumer<ConsumerRecord<String, Long>> testHandler;
+ EndlessConsumer<String, Long> endlessConsumer;
Map<TopicPartition, Long> oldOffsets;
Map<TopicPartition, Long> newOffsets;
@Test
  @Order(1) // << The poison pill is not skipped. Hence, this test must run first
- void commitsCurrentOffsetsOnSuccess()
+ void commitsCurrentOffsetsOnSuccess() throws ExecutionException, InterruptedException
{
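+    // happy path: all 100 records can be processed, so the committed offsets should match the highest seen offsets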
send100Messages(i -> new Bytes(longSerializer.serialize(TOPIC, i)));
Set<ConsumerRecord<String, Long>> received = new HashSet<>();
- runEndlessConsumer(record ->
- {
- received.add(record);
- if (received.size() == 100)
- throw new WakeupException();
- });
+ testHandler = record -> received.add(record);
+
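+    // wait (via Awaitility) until the test handler has collected all 100 records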
+ await("100 records received")
+ .atMost(Duration.ofSeconds(30))
+ .until(() -> received.size() >= 100);
+
+ endlessConsumer.stop();
checkSeenOffsetsForProgress();
compareToCommitedOffsets(newOffsets);
? new Bytes(stringSerializer.serialize(TOPIC, "BOOM!"))
: new Bytes(longSerializer.serialize(TOPIC, counter)));
- runEndlessConsumer((record) -> {});
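+    // wait until the consumer has died on the unreadable "BOOM!" record (the poison pill)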
+ await("Consumer failed")
+ .atMost(Duration.ofSeconds(30))
+ .until(() -> !endlessConsumer.running());
checkSeenOffsetsForProgress();
compareToCommitedOffsets(oldOffsets);
}
}
- EndlessConsumer<String, Long> runEndlessConsumer(Consumer<ConsumerRecord<String, Long>> consumer)
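+  // before each test: reset the handler, snapshot the current offsets and start a fresh consumer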
+ @BeforeEach
+ public void init()
{
+    testHandler = record -> {};
+
oldOffsets = new HashMap<>();
newOffsets = new HashMap<>();
newOffsets.put(tp, offset - 1);
});
- Consumer<ConsumerRecord<String, Long>> captureOffset =
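+    // wrap the handler: record the offset of every consumed record before delegating to the test-specific handler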
+ Consumer<ConsumerRecord<String, Long>> captureOffsetAndExecuteTestHandler =
record ->
- newOffsets.put(
- new TopicPartition(record.topic(), record.partition()),
- record.offset());
+ {
+ newOffsets.put(
+ new TopicPartition(record.topic(), record.partition()),
+ record.offset());
+ testHandler.accept(record);
+ };
- EndlessConsumer<String, Long> endlessConsumer =
+ endlessConsumer =
new EndlessConsumer<>(
executor,
properties.getClientId(),
properties.getTopic(),
kafkaConsumer,
- captureOffset.andThen(consumer));
-
- endlessConsumer.run();
+ captureOffsetAndExecuteTestHandler);
- return endlessConsumer;
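+    // start the consumer under test; it polls in the background (on the injected executor) until stop() is called or an error occurs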
+ endlessConsumer.start();
}
List<TopicPartition> partitions()
void doForCurrentOffsets(BiConsumer<TopicPartition, Long> consumer)
{
- kafkaConsumer.assign(partitions());
- partitions().forEach(tp -> consumer.accept(tp, kafkaConsumer.position(tp)));
- kafkaConsumer.unsubscribe();
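+    // look up the offsets with the separate offsetConsumer: kafkaConsumer is in use by the consumer thread, and KafkaConsumer is not thread-safe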
+ offsetConsumer.assign(partitions());
+ partitions().forEach(tp -> consumer.accept(tp, offsetConsumer.position(tp)));
+ offsetConsumer.unsubscribe();
}
void checkSeenOffsetsForProgress()
}
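+  // stop the consumer after each test; the call may fail if the consumer has already died (e.g. after swallowing the poison pill)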
+ @AfterEach
+ public void deinit()
+ {
+ try
+ {
+ endlessConsumer.stop();
+ }
+ catch (Exception e)
+ {
+ log.info("Exception while stopping the consumer: {}", e.toString());
+ }
+ }
+
@TestConfiguration
@Import(ApplicationConfiguration.class)
public static class Configuration
return new KafkaProducer<>(props);
}
+
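+    // additional consumer in the test's group that is only used to read the current offsets, so the consumer under test is not disturbed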
+ @Bean
+ KafkaConsumer<Bytes, Bytes> offsetConsumer(ApplicationProperties properties)
+ {
+ Properties props = new Properties();
+ props.put("bootstrap.servers", properties.getBootstrapServer());
+ props.put("client.id", "OFFSET-CONSUMER");
+ props.put("group.id", properties.getGroupId());
+ props.put("key.deserializer", BytesDeserializer.class.getName());
+ props.put("value.deserializer", BytesDeserializer.class.getName());
+
+ return new KafkaConsumer<>(props);
+ }
}
}