import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer;
import org.springframework.boot.test.context.TestConfiguration;
+import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Import;
import org.springframework.kafka.test.context.EmbeddedKafka;
import org.springframework.test.context.TestPropertySource;
import java.time.Duration;
import java.util.*;
-import java.util.concurrent.ExecutorService;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.stream.Collectors;
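/** Injected test fixtures */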
@Autowired
- KafkaConsumer<K, V> kafkaConsumer;
+ org.apache.kafka.clients.consumer.Consumer<K, V> kafkaConsumer; // fully qualified, to avoid a clash with java.util.function.Consumer
@Autowired
Consumer<ConsumerRecord<K, V>> consumer;
@Autowired
- ApplicationProperties properties;
- @Autowired
- ExecutorService executor;
+ ApplicationProperties applicationProperties;
@Autowired
MongoClient mongoClient;
@Autowired
MongoProperties mongoProperties;
@Autowired
- PollIntervalAwareConsumerRebalanceListener rebalanceListener;
+ RebalanceListener rebalanceListener;
+ @Autowired
+ TestRecordHandler<K, V> recordHandler;
@Autowired
- RecordHandler<K, V> recordHandler;
+ EndlessConsumer<K, V> endlessConsumer;
KafkaProducer<Bytes, Bytes> testRecordProducer;
KafkaConsumer<Bytes, Bytes> offsetConsumer;
- EndlessConsumer<K, V> endlessConsumer;
Map<TopicPartition, Long> oldOffsets;
- Map<TopicPartition, Long> newOffsets;
- Set<ConsumerRecord<K, V>> receivedRecords;
final RecordGenerator recordGenerator;
/** Test methods */
@Test
- void commitsCurrentOffsetsOnSuccess()
+ void commitsCurrentOffsetsOnSuccess() throws Exception
{
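// The two boolean flags are assumed to toggle error injection in the
// generator (e.g. poison pills / logic errors); both are disabled here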
int numberOfGeneratedMessages =
recordGenerator.generate(false, false, messageSender);
await(numberOfGeneratedMessages + " records received")
.atMost(Duration.ofSeconds(30))
.pollInterval(Duration.ofSeconds(1))
- .until(() -> receivedRecords.size() >= numberOfGeneratedMessages);
+ .until(() -> recordHandler.receivedRecords.size() >= numberOfGeneratedMessages);
await("Offsets committed")
.atMost(Duration.ofSeconds(10))
.untilAsserted(() ->
{
checkSeenOffsetsForProgress();
- compareToCommitedOffsets(newOffsets);
+ assertSeenOffsetsEqualCommittedOffsets(recordHandler.seenOffsets);
});
assertThatExceptionOfType(IllegalStateException.class)
.describedAs("Consumer should still be running")
.isThrownBy(() -> endlessConsumer.exitStatus());
+ endlessConsumer.stop();
recordGenerator.assertBusinessLogic();
}
.until(() -> !endlessConsumer.running());
checkSeenOffsetsForProgress();
- compareToCommitedOffsets(newOffsets);
+ assertSeenOffsetsEqualCommittedOffsets(recordHandler.seenOffsets);
endlessConsumer.start();
await("Consumer failed")
.until(() -> !endlessConsumer.running());
checkSeenOffsetsForProgress();
- compareToCommitedOffsets(newOffsets);
- assertThat(receivedRecords.size())
+ assertSeenOffsetsEqualCommittedOffsets(recordHandler.seenOffsets);
+ assertThat(recordHandler.receivedRecords.size())
.describedAs("Received not all sent events")
.isLessThan(numberOfGeneratedMessages);
.until(() -> !endlessConsumer.running());
checkSeenOffsetsForProgress();
- compareToCommitedOffsets(oldOffsets);
+ assertSeenOffsetsAreBehindCommittedOffsets(recordHandler.seenOffsets);
endlessConsumer.start();
await("Consumer failed")
.pollInterval(Duration.ofSeconds(1))
.until(() -> !endlessConsumer.running());
- checkSeenOffsetsForProgress();
- compareToCommitedOffsets(oldOffsets);
+ assertSeenOffsetsAreBehindCommittedOffsets(recordHandler.seenOffsets);
assertThatNoException()
.describedAs("Consumer should not be running")
.isThrownBy(() -> endlessConsumer.exitStatus());
/** Helper methods for the verification of expectations */
- void compareToCommitedOffsets(Map<TopicPartition, Long> offsetsToCheck)
+ void assertSeenOffsetsEqualCommittedOffsets(Map<TopicPartition, Long> offsetsToCheck)
{
doForCurrentOffsets((tp, offset) ->
{
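// A committed offset always points at the next record to be consumed,
// so the expected value is the last seen offset + 1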
Long expected = offsetsToCheck.get(tp) + 1;
- log.debug("Checking, if the offset for {} is {}", tp, expected);
+ log.debug("Checking if the offset {} for {} is exactly {}", offset, tp, expected);
assertThat(offset)
.describedAs("Committed offset corresponds to the offset of the consumer")
.isEqualTo(expected);
});
}
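+ /**
+ * Asserts that the committed offsets trail the seen offsets: no committed
+ * offset may point beyond the next record to be consumed, and on at least
+ * one partition a record must have been seen without being committed.
+ */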
+ void assertSeenOffsetsAreBehindCommittedOffsets(Map<TopicPartition, Long> offsetsToCheck)
+ {
+ List<Boolean> isOffsetBehindSeen = new LinkedList<>();
+
+ doForCurrentOffsets((tp, offset) ->
+ {
+ Long expected = offsetsToCheck.get(tp) + 1;
+ log.debug("Checking if the offset {} for {} is at most {}", offset, tp, expected);
+ assertThat(offset)
+ .describedAs("Committed offset must be at most equal to the offset of the consumer")
+ .isLessThanOrEqualTo(expected);
+ isOffsetBehindSeen.add(offset < expected);
+ });
+
+ assertThat(isOffsetBehindSeen.contains(true))
+ .describedAs("At least one committed offset must be behind a seen offset")
+ .isTrue();
+ }
+
void checkSeenOffsetsForProgress()
{
// Make sure that some messages were consumed!
partitions().forEach(tp ->
{
Long oldOffset = oldOffsets.get(tp) + 1;
- Long newOffset = newOffsets.get(tp) + 1;
+ Long newOffset = recordHandler.seenOffsets.get(tp) + 1;
if (!oldOffset.equals(newOffset))
{
log.debug("Progress for {}: {} -> {}", tp, oldOffset, newOffset);
{
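// Producer for writing the generated test records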
Properties props;
props = new Properties();
- props.put("bootstrap.servers", properties.getBootstrapServer());
+ props.put("bootstrap.servers", applicationProperties.getBootstrapServer());
props.put("linger.ms", 100);
props.put("key.serializer", BytesSerializer.class.getName());
props.put("value.serializer", BytesSerializer.class.getName());
testRecordProducer = new KafkaProducer<>(props);
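// A second consumer in the same consumer group, used solely to look up the
// offsets currently committed for that group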
props = new Properties();
- props.put("bootstrap.servers", properties.getBootstrapServer());
+ props.put("bootstrap.servers", applicationProperties.getBootstrapServer());
props.put("client.id", "OFFSET-CONSUMER");
- props.put("group.id", properties.getGroupId());
+ props.put("group.id", applicationProperties.getGroupId());
props.put("key.deserializer", BytesDeserializer.class.getName());
props.put("value.deserializer", BytesDeserializer.class.getName());
offsetConsumer = new KafkaConsumer<>(props);
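+ // Drop the database, so that every test case starts with a clean state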
+ mongoClient.getDatabase(mongoProperties.getDatabase()).drop();
seekToEnd();
oldOffsets = new HashMap<>();
- newOffsets = new HashMap<>();
- receivedRecords = new HashSet<>();
+ recordHandler.seenOffsets = new HashMap<>();
+ recordHandler.receivedRecords = new HashSet<>();
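// Seed both offset maps with the offset of the last record consumed before
// the test; the committed offset points at the next record, hence the -1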
doForCurrentOffsets((tp, offset) ->
{
oldOffsets.put(tp, offset - 1);
- newOffsets.put(tp, offset - 1);
+ recordHandler.seenOffsets.put(tp, offset - 1);
});
- TestRecordHandler<K, V> captureOffsetAndExecuteTestHandler =
- new TestRecordHandler<K, V>(recordHandler)
- {
- @Override
- public void onNewRecord(ConsumerRecord<K, V> record)
- {
- newOffsets.put(
- new TopicPartition(record.topic(), record.partition()),
- record.offset());
- receivedRecords.add(record);
- }
- };
-
- mongoClient.getDatabase(mongoProperties.getDatabase()).drop();
-
- endlessConsumer =
- new EndlessConsumer<>(
- executor,
- properties.getClientId(),
- properties.getTopic(),
- kafkaConsumer,
- rebalanceListener,
- captureOffsetAndExecuteTestHandler);
-
endlessConsumer.start();
}
try
{
endlessConsumer.stop();
+ }
+ catch (Exception e)
+ {
+ // The consumer may already have been stopped by the test itself
+ log.debug("Exception while stopping the consumer: {}", e.toString());
+ }
+
+ try
+ {
testRecordProducer.close();
offsetConsumer.close();
}
@Import(ApplicationConfiguration.class)
public static class Configuration
{
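+ // Wraps the RecordHandler of the application in a TestRecordHandler, so
+ // that the test can observe every record and offset the consumer sees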
+ @Bean
+ public RecordHandler recordHandler(RecordHandler applicationRecordHandler)
+ {
+ return new TestRecordHandler(applicationRecordHandler);
+ }
}
}
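// For reference: a minimal sketch of the TestRecordHandler wrapper that the
// test above relies on. The class itself is not part of this diff; the shape
// below is an assumption, derived from the fields and calls used above
// (seenOffsets, receivedRecords, wrapping of the application handler), and it
// presumes that RecordHandler extends java.util.function.Consumer<ConsumerRecord<K, V>>.
public class TestRecordHandler<K, V> implements RecordHandler<K, V>
{
  private final RecordHandler<K, V> handler; // the wrapped application handler

  Map<TopicPartition, Long> seenOffsets;
  Set<ConsumerRecord<K, V>> receivedRecords;

  public TestRecordHandler(RecordHandler<K, V> handler)
  {
    this.handler = handler;
  }

  @Override
  public void accept(ConsumerRecord<K, V> record)
  {
    // Capture the offset and the record for the assertions of the test,
    // then delegate to the wrapped handler, so the business logic still runs
    seenOffsets.put(
        new TopicPartition(record.topic(), record.partition()),
        record.offset());
    receivedRecords.add(record);
    handler.accept(record);
  }
}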