import com.mongodb.client.MongoClient;
import lombok.extern.slf4j.Slf4j;
+import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
@Autowired
MongoProperties mongoProperties;
@Autowired
// Diff hunk: replaces the project-specific PollIntervalAwareConsumerRebalanceListener
// with Kafka's plain ConsumerRebalanceListener interface — the field now programs to
// the interface, decoupling this test class from the concrete listener implementation.
// NOTE(review): assumes no caller relies on PollIntervalAware-specific methods of this
// field — confirm against the rest of the file.
- PollIntervalAwareConsumerRebalanceListener rebalanceListener;
+ ConsumerRebalanceListener rebalanceListener;
@Autowired
RecordHandler<K, V> recordHandler;
/** Tests methods */
@Test
// Diff hunk: the signature gains `throws Exception`, presumably because the body now
// calls a method (likely endlessConsumer.stop(), added below) that declares a checked
// exception — TODO confirm stop()'s throws clause in the consumer class.
- void commitsCurrentOffsetsOnSuccess()
+ void commitsCurrentOffsetsOnSuccess() throws Exception
{
int numberOfGeneratedMessages =
recordGenerator.generate(false, false, messageSender);
// NOTE(review): the receiver of this assertion chain (e.g. an
// assertThatNoException()/assertThatThrownBy(...) call) is elided from this hunk —
// the fragment below is context lines only and is not compilable in isolation.
.isThrownBy(() -> endlessConsumer.exitStatus())
.describedAs("Consumer should still be running");
// Diff hunk: the consumer is now stopped explicitly before the business-logic
// assertions, rather than being left running at the end of the test.
+ endlessConsumer.stop();
recordGenerator.assertBusinessLogic();
}
// Interior of a per-partition verification lambda (its opening is outside this hunk).
// `expected` is one past the offset recorded for topic-partition `tp` — i.e. the
// next offset the consumer would read.
Long expected = offsetsToCheck.get(tp) + 1;
log.debug("Checking, if the offset {} for {} is at most {}", offset, tp, expected);
assertThat(offset)
// Diff hunk: the assertion description is reworded to match the actual check —
// the committed offset need not equal the consumer's offset, it must only not
// exceed it (isLessThanOrEqualTo), since commits may lag behind consumption.
- .describedAs("Committed offset corresponds to the offset of the consumer")
+ .describedAs("Committed offset must be at most equal to the offset of the consumer")
.isLessThanOrEqualTo(expected);
// Records whether the committed offset actually lagged (strictly behind) for this
// partition; presumably inspected later to assert at least one lag was observed —
// TODO confirm against the surrounding method.
isOffsetBehindSeen.add(offset < expected);
});
{
try
{
// Diff hunk: endlessConsumer.stop() is removed from this (presumably @AfterEach)
// cleanup block — consistent with the test above now stopping the consumer itself.
// NOTE(review): confirm every test path stops the consumer, otherwise removing
// this line leaks a running consumer between tests.
- endlessConsumer.stop();
testRecordProducer.close();
offsetConsumer.close();
}