public class ApplicationConfiguration
{
 @Bean
- public ApplicationRecordHandler recordHandler(
+ public ApplicationRecordHandler applicationRecordHandler(
 AdderResults adderResults,
 KafkaProperties kafkaProperties,
 ApplicationProperties applicationProperties)

 @Bean
 public ApplicationRebalanceListener rebalanceListener(
 ApplicationRecordHandler recordHandler,
 AdderResults adderResults,
 StateRepository stateRepository,
- KafkaProperties kafkaProperties,
- ApplicationProperties applicationProperties)
+ KafkaProperties kafkaProperties)
 {
 return new ApplicationRebalanceListener(
 recordHandler,
 adderResults,
 stateRepository,
 kafkaProperties.getClientId());
 }

 @Bean
 public EndlessConsumer<String, Message> endlessConsumer(
 Consumer<String, Message> kafkaConsumer,
 ExecutorService executor,
 ApplicationRebalanceListener rebalanceListener,
- ApplicationRecordHandler recordHandler,
+ RecordHandler recordHandler,
 KafkaProperties kafkaProperties,
 ApplicationProperties applicationProperties)
 {
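 // The body of this bean is not part of the hunk. Judging from the
 // hand-built construction that is removed from the test's init() further
 // down, it presumably boils down to this sketch:
 return new EndlessConsumer<>(
 executor,
 kafkaProperties.getClientId(),
 applicationProperties.getTopic(),
 kafkaConsumer,
 rebalanceListener,
 recordHandler);
 }
}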

import com.mongodb.client.MongoClient;
import lombok.extern.slf4j.Slf4j;
-import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
import org.springframework.boot.test.context.TestConfiguration;
+import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Import;
import org.springframework.kafka.test.context.EmbeddedKafka;
import org.springframework.test.context.TestPropertySource;
import java.time.Duration;
import java.util.*;
-import java.util.concurrent.ExecutorService;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.stream.Collectors;

@Autowired
org.apache.kafka.clients.consumer.Consumer<K, V> kafkaConsumer;
@Autowired
- Consumer<ConsumerRecord<K, V>> consumer;
- @Autowired
- ApplicationProperties applicationProperties;
- @Autowired
KafkaProperties kafkaProperties;
@Autowired
- ExecutorService executor;
+ ApplicationProperties applicationProperties;
@Autowired
MongoClient mongoClient;
@Autowired
MongoProperties mongoProperties;
@Autowired
- ConsumerRebalanceListener rebalanceListener;
+ TestRecordHandler<K, V> recordHandler;
@Autowired
- RecordHandler<K, V> recordHandler;
+ EndlessConsumer<K, V> endlessConsumer;
KafkaProducer<Bytes, Bytes> testRecordProducer;
KafkaConsumer<Bytes, Bytes> offsetConsumer;
- EndlessConsumer<K, V> endlessConsumer;
Map<TopicPartition, Long> oldOffsets;
- Map<TopicPartition, Long> seenOffsets;
- Set<ConsumerRecord<K, V>> receivedRecords;
final RecordGenerator recordGenerator;
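
The assertions that follow read all bookkeeping through the autowired TestRecordHandler: seenOffsets and receivedRecords, previously plain fields of the test class that were filled by the anonymous subclass removed further down, now live in the handler itself. The class is not shown in this diff; a minimal sketch of what it presumably looks like, assuming RecordHandler is essentially a Consumer of ConsumerRecord:

import java.util.Map;
import java.util.Set;
import java.util.function.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.TopicPartition;

interface RecordHandler<K, V> extends Consumer<ConsumerRecord<K, V>> {}

public class TestRecordHandler<K, V> implements RecordHandler<K, V>
{
  private final RecordHandler<K, V> handler;

  // Re-initialized by the test before each case (see the init-code below)
  Map<TopicPartition, Long> seenOffsets;
  Set<ConsumerRecord<K, V>> receivedRecords;

  public TestRecordHandler(RecordHandler<K, V> handler)
  {
    this.handler = handler;
  }

  @Override
  public void accept(ConsumerRecord<K, V> record)
  {
    // Capture offset and record first, then delegate to the real handler
    seenOffsets.put(
        new TopicPartition(record.topic(), record.partition()),
        record.offset());
    receivedRecords.add(record);
    handler.accept(record);
  }
}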

await(numberOfGeneratedMessages + " records received")
.atMost(Duration.ofSeconds(30))
.pollInterval(Duration.ofSeconds(1))
- .until(() -> receivedRecords.size() >= numberOfGeneratedMessages);
+ .until(() -> recordHandler.receivedRecords.size() >= numberOfGeneratedMessages);
await("Offsets committed")
.atMost(Duration.ofSeconds(10))
.untilAsserted(() ->
{
checkSeenOffsetsForProgress();
- assertSeenOffsetsEqualCommittedOffsets(seenOffsets);
+ assertSeenOffsetsEqualCommittedOffsets(recordHandler.seenOffsets);
});
assertThatExceptionOfType(IllegalStateException.class)
 .isThrownBy(() -> endlessConsumer.exitStatus())
 .describedAs("Consumer should still be running");

await("Consumer failed")
 .until(() -> !endlessConsumer.running());
checkSeenOffsetsForProgress();
- assertSeenOffsetsEqualCommittedOffsets(seenOffsets);
+ assertSeenOffsetsEqualCommittedOffsets(recordHandler.seenOffsets);
endlessConsumer.start();
await("Consumer failed")
.until(() -> !endlessConsumer.running());
checkSeenOffsetsForProgress();
- assertSeenOffsetsEqualCommittedOffsets(seenOffsets);
- assertThat(receivedRecords.size())
+ assertSeenOffsetsEqualCommittedOffsets(recordHandler.seenOffsets);
+ assertThat(recordHandler.receivedRecords.size())
.describedAs("Received not all sent events")
.isLessThan(numberOfGeneratedMessages);

await("Consumer failed")
 .until(() -> !endlessConsumer.running());
checkSeenOffsetsForProgress();
- assertSeenOffsetsAreBehindCommittedOffsets(seenOffsets);
+ assertSeenOffsetsAreBehindCommittedOffsets(recordHandler.seenOffsets);
endlessConsumer.start();
await("Consumer failed")
.pollInterval(Duration.ofSeconds(1))
.until(() -> !endlessConsumer.running());
- assertSeenOffsetsAreBehindCommittedOffsets(seenOffsets);
+ assertSeenOffsetsAreBehindCommittedOffsets(recordHandler.seenOffsets);
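
Both assert-helpers compare, per partition, the offset the handler saw last with the offset actually committed to Kafka. Kafka commits the position of the next record to be read, so after a clean commit the committed offset must be exactly the last seen offset plus one, whereas a smaller committed offset means records will be re-processed. The helpers themselves are not part of this diff; a minimal sketch of the equality variant, assuming the doForCurrentOffsets callback that also appears in the init-code below:

void assertSeenOffsetsEqualCommittedOffsets(Map<TopicPartition, Long> seenOffsets)
{
  doForCurrentOffsets((tp, committed) ->
  {
    // Kafka commits the offset of the next record to consume, hence the + 1
    Long expected = seenOffsets.get(tp) + 1;
    assertThat(committed)
        .describedAs("Committed offset corresponds to the offset of the consumer")
        .isEqualTo(expected);
  });
}

The same convention explains the offset - 1 in the init-code: committed positions are converted back into last-seen offsets there.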
assertThatNoException()
 .describedAs("Consumer should not be running")
 .isThrownBy(() -> endlessConsumer.exitStatus());

void checkSeenOffsetsForProgress()
{
partitions().forEach(tp ->
{
Long oldOffset = oldOffsets.get(tp) + 1;
- Long newOffset = seenOffsets.get(tp) + 1;
+ Long newOffset = recordHandler.seenOffsets.get(tp) + 1;
if (!oldOffset.equals(newOffset))
{
log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);

seekToEnd();
oldOffsets = new HashMap<>();
- seenOffsets = new HashMap<>();
- receivedRecords = new HashSet<>();
+ recordHandler.seenOffsets = new HashMap<>();
+ recordHandler.receivedRecords = new HashSet<>();
doForCurrentOffsets((tp, offset) ->
{
oldOffsets.put(tp, offset - 1);
- seenOffsets.put(tp, offset - 1);
+ recordHandler.seenOffsets.put(tp, offset - 1);
});
- TestRecordHandler<K, V> captureOffsetAndExecuteTestHandler =
- new TestRecordHandler<K, V>(recordHandler)
- {
- @Override
- public void onNewRecord(ConsumerRecord<K, V> record)
- {
- seenOffsets.put(
- new TopicPartition(record.topic(), record.partition()),
- record.offset());
- receivedRecords.add(record);
- }
- };
-
- endlessConsumer =
- new EndlessConsumer<>(
- executor,
- kafkaProperties.getClientId(),
- applicationProperties.getTopic(),
- kafkaConsumer,
- rebalanceListener,
- captureOffsetAndExecuteTestHandler);
-
endlessConsumer.start();
}
@AfterEach
public void deinit()
{
+ try
+ {
+ endlessConsumer.stop();
+ }
+ catch (Exception e)
+ {
+ log.debug("{}", e.toString());
+ }
+
try
{
testRecordProducer.close();
 offsetConsumer.close();
 }
 catch (Exception e)
 {
 log.debug("{}", e.toString());
 }
}

@TestConfiguration
@Import(ApplicationConfiguration.class)
public static class Configuration
{
+ @Bean
+ public RecordHandler recordHandler(RecordHandler applicationRecordHandler)
+ {
+ return new TestRecordHandler(applicationRecordHandler);
+ }
}
}
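
This test bean is presumably the reason for the rename in the very first hunk: with the production handler registered as applicationRecordHandler, the recordHandler bean defined here neither clashes with nor silently overrides it, and Spring resolves the like-named method parameter to the production bean. Since the endlessConsumer bean only asks for a RecordHandler, it now receives the TestRecordHandler, and the @Autowired TestRecordHandler field of the test sees the very same instance; that is what lets the assertions above read seenOffsets and receivedRecords without the manual wiring that was removed from init().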