import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.serialization.ByteArraySerializer;
+import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
-import org.springframework.kafka.core.ConsumerFactory;
-import org.springframework.kafka.core.KafkaOperations;
+import org.springframework.kafka.core.*;
import org.springframework.kafka.listener.DeadLetterPublishingRecoverer;
import org.springframework.kafka.listener.DefaultErrorHandler;
+import org.springframework.kafka.support.serializer.DelegatingByTypeSerializer;
+import org.springframework.kafka.support.serializer.JsonSerializer;
import org.springframework.util.backoff.FixedBackOff;
+import java.util.Map;
import java.util.function.Consumer;
};
}
+ @Bean
+ public ProducerFactory<String, Object> producerFactory(KafkaProperties properties) {
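+		// The DLT recoverer re-publishes the raw byte[] of a record that could not be
+		// deserialized, but the already deserialized ClientMessage when the listener itself
+		// failed - hence a value serializer that delegates by the actual value type.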
+ return new DefaultKafkaProducerFactory<>(
+ properties.getProducer().buildProperties(),
+ new StringSerializer(),
+ new DelegatingByTypeSerializer(Map.of(
+ byte[].class, new ByteArraySerializer(),
+ ClientMessage.class, new JsonSerializer<>())));
+ }
+
+ @Bean
+ public KafkaTemplate<String, Object> kafkaTemplate(
+ ProducerFactory<String, Object> producerFactory) {
+
+ return new KafkaTemplate<>(producerFactory);
+ }
+
@Bean
public DeadLetterPublishingRecoverer recoverer(
ApplicationProperties properties,
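A DeadLetterPublishingRecoverer forwards records that still fail after the configured retries to a dead-letter topic, and a DefaultErrorHandler ties it into the consumer's retry behaviour. A minimal sketch of such a wiring, assuming the Spring Kafka default "<topic>.DLT" naming, two retries with a one-second back-off, and an illustrative bean layout (none of this is taken from the project's actual configuration):

import org.apache.kafka.common.TopicPartition;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.core.KafkaOperations;
import org.springframework.kafka.listener.DeadLetterPublishingRecoverer;
import org.springframework.kafka.listener.DefaultErrorHandler;
import org.springframework.util.backoff.FixedBackOff;

@Configuration
class ErrorHandlingSketch
{
  @Bean
  public DeadLetterPublishingRecoverer recoverer(KafkaOperations<String, Object> template)
  {
    // Publish the failed record to "<original topic>.DLT", keeping its original partition
    return new DeadLetterPublishingRecoverer(
        template,
        (record, exception) ->
            new TopicPartition(record.topic() + ".DLT", record.partition()));
  }

  @Bean
  public DefaultErrorHandler errorHandler(DeadLetterPublishingRecoverer recoverer)
  {
    // Retry each failed record twice, one second apart, before handing it to the recoverer
    return new DefaultErrorHandler(recoverer, new FixedBackOff(1000L, 2));
  }
}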
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.errors.RecordDeserializationException;
import org.apache.kafka.common.serialization.*;
import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.*;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Import;
import org.springframework.context.annotation.Primary;
-import org.springframework.kafka.listener.MessageListenerContainer;
import org.springframework.kafka.support.serializer.JsonSerializer;
import org.springframework.kafka.test.context.EmbeddedKafka;
import org.springframework.test.context.TestPropertySource;
EndlessConsumer.class,
KafkaAutoConfiguration.class,
ApplicationTests.Configuration.class })
-@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
@TestPropertySource(
properties = {
"spring.kafka.consumer.bootstrap-servers=${spring.embedded.kafka.brokers}",
	/** Test methods */
@Test
-	@Order(1) // << The poison pill is not skipped. Hence, this test must run first
void commitsCurrentOffsetsOnSuccess() throws ExecutionException, InterruptedException
{
send100Messages((key, counter) -> serialize(key, counter));
}
@Test
- @Order(2)
- void commitsCurrentOffsetsOnError()
+ void commitsCurrentOffsetsOnDeserializationError()
{
send100Messages((key, counter) ->
counter == 77
.isTrue();
}
+ @Test
+	void commitsOffsetOnProgramLogicError()
+ {
+		// Inject an application logic error: every record whose value is divisible by 10 fails
+		recordHandler.testHandler = (record) ->
+		{
+			if (Integer.parseInt(record.value().message) % 10 == 0)
+				throw new RuntimeException("BOOM: " + record.value().message + " % 10 == 0");
+ };
+
+ send100Messages((key, counter) -> serialize(key, counter));
+
+		await("100 records received")
+ .atMost(Duration.ofSeconds(30))
+ .until(() -> receivedRecords.size() == 100);
+
+ await("Offsets committed")
+ .atMost(Duration.ofSeconds(10))
+ .untilAsserted(() ->
+ {
+ checkSeenOffsetsForProgress();
+ compareToCommitedOffsets(newOffsets);
+ });
+
+ assertThat(endlessConsumer.isRunning())
+ .describedAs("Consumer should still be running")
+ .isTrue();
+ }
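The failure above is injected through recordHandler.testHandler. A sketch of what such a pluggable record handler could look like (the class name TestRecordHandler and its shape are assumptions for illustration; the project's actual handler additionally tracks the received records and offsets asserted on above):

import java.util.function.Consumer;

import org.apache.kafka.clients.consumer.ConsumerRecord;

// Delegates every consumed record to a callback that individual tests can swap out,
// e.g. with the "% 10 == 0" failure used in the test above
class TestRecordHandler implements Consumer<ConsumerRecord<String, ClientMessage>>
{
  Consumer<ConsumerRecord<String, ClientMessage>> testHandler = record -> {};

  @Override
  public void accept(ConsumerRecord<String, ClientMessage> record)
  {
    testHandler.accept(record);
  }
}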
+
/** Helper methods for the verification of expectations */