package de.juplo.kafka;
+import com.mongodb.client.MongoClient;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.junit.jupiter.api.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
+import org.springframework.boot.autoconfigure.mongo.MongoProperties;
import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
import org.springframework.boot.test.context.TestConfiguration;
properties = {
"sumup.adder.bootstrap-server=${spring.embedded.kafka.brokers}",
"sumup.adder.topic=" + TOPIC,
- "sumup.adder.commit-interval=1s",
+ "sumup.adder.commit-interval=500ms",
"spring.mongodb.embedded.version=4.4.13" })
@EmbeddedKafka(topics = TOPIC, partitions = PARTITIONS)
@EnableAutoConfiguration
@Autowired
ExecutorService executor;
@Autowired
- PartitionStatisticsRepository partitionStatisticsRepository;
+ StateRepository stateRepository;
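+ // MongoClient and MongoProperties give the test direct access to the
+ // embedded MongoDB, e.g. for dropping the database between test runs.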
+ @Autowired
+ MongoClient mongoClient;
+ @Autowired
+ MongoProperties mongoProperties;
@Autowired
PollIntervalAwareConsumerRebalanceListener rebalanceListener;
@Autowired
checkSeenOffsetsForProgress();
compareToCommitedOffsets(oldOffsets);
- assertThat(receivedRecords.size())
- .describedAs("Received not all sent events")
- .isLessThan(numberOfGeneratedMessages);
assertThatNoException()
.describedAs("Consumer should not be running")
log.info("New position for {}: {}", tp, offset);
Integer partition = tp.partition();
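// Load the state document for this partition, or create a fresh one,
// and record the new position as the stored offset.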
StateDocument document =
- partitionStatisticsRepository
+ stateRepository
.findById(partition.toString())
.orElse(new StateDocument(partition));
document.offset = offset;
- partitionStatisticsRepository.save(document);
+ stateRepository.save(document);
});
offsetConsumer.unsubscribe();
}
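// Read the stored offset for every assigned partition from MongoDB;
// partitions without a state document start at offset 0.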
partitions().forEach(tp ->
{
String partition = Integer.toString(tp.partition());
- Optional<Long> offset = partitionStatisticsRepository.findById(partition).map(document -> document.offset);
+ Optional<Long> offset = stateRepository.findById(partition).map(document -> document.offset);
consumer.accept(tp, offset.orElse(0L));
});
}
props.put("value.deserializer", BytesDeserializer.class.getName());
offsetConsumer = new KafkaConsumer<>(props);
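+ // Drop the test database, so that offsets and state left over from a
+ // previous test run cannot interfere with the current one.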
+ mongoClient.getDatabase(mongoProperties.getDatabase()).drop();
seekToEnd();
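// Re-initialize the offset bookkeeping used by the test assertions.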
oldOffsets = new HashMap<>();