docker-compose stop adder-1
until [ $(http --check-status :8092/results/peter 2> /dev/null) ]; do echo "Waiting for some results for peter to show up on adder-2..."; sleep 1; done
-until [ $(http --check-status :8092/results/klaus 2> /dev/null) ]; do echo "Waiting for some results for peter to show up on adder-2..."; sleep 1; done
+until [ $(http --check-status :8092/results/klaus 2> /dev/null) ]; do echo "Waiting for some results for klaus to show up on adder-2..."; sleep 1; done
echo "Resultate für adder-2"
http -v --pretty none -S :8092/results
echo "Resultate für klaus von adder-2"
http :8092/results/klaus | jq .[].sum | uniq
+docker-compose kill -s 9 adder-2
+docker-compose start adder-1
docker-compose kill -s 9 peter klaus
+while ! [[ $(http 0:8091/actuator/health 2> /dev/null) =~ "UP" ]]; do echo "Waiting for adder-1..."; sleep 1; done
+until [ $(http --check-status :8091/results/peter 2> /dev/null) ]; do echo "Waiting for some results for peter to show up on adder-1..."; sleep 1; done
+until [ $(http --check-status :8091/results/klaus 2> /dev/null) ]; do echo "Waiting for some results for klaus to show up on adder-1..."; sleep 1; done
+
+echo "Resultate für adder-1"
+http -v --pretty none -S :8091/results
+echo
+
+echo "Resultate für peter von adder-1"
+http :8091/results/peter | jq .[].sum | uniq
+echo "Resultate für klaus von adder-1"
+http :8091/results/klaus | jq .[].sum | uniq
+
+sleep 5
+
+echo "Resultate für peter von adder-1"
+http :8091/results/peter | jq .[].sum | uniq
+echo "Resultate für klaus von adder-1"
+http :8091/results/klaus | jq .[].sum | uniq
import org.springframework.boot.autoconfigure.SpringBootApplication;
import javax.annotation.PreDestroy;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.TimeUnit;
@SpringBootApplication
{
@Autowired
EndlessConsumer endlessConsumer;
- @Autowired
- ExecutorService executor;
@Override
{
log.error("Unexpected exception while stopping EndlessConsumer: {}", e);
}
-
- try
- {
- log.info("Shutting down the ExecutorService.");
- executor.shutdown();
- log.info("Waiting 5 seconds for the ExecutorService to terminate...");
- executor.awaitTermination(5, TimeUnit.SECONDS);
- }
- catch (InterruptedException e)
- {
- log.error("Exception while waiting for the termination of the ExecutorService: {}", e);
- }
- finally
- {
- if (!executor.isTerminated())
- {
- log.warn("Forcing shutdown of ExecutorService!");
- executor
- .shutdownNow()
- .forEach(runnable -> log.warn("Unprocessed task: {}", runnable.getClass().getSimpleName()));
- }
- log.info("Shutdow of ExecutorService finished");
- }
}
package de.juplo.kafka;
-import org.apache.kafka.clients.consumer.Consumer;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import java.util.Optional;
-import org.springframework.kafka.core.ConsumerFactory;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
+import org.springframework.kafka.config.KafkaListenerEndpointRegistry;
@Configuration
kafkaProperties.getClientId());
}
+ @Bean
+ public ApplicationErrorHandler applicationErrorHandler()
+ {
+ return new ApplicationErrorHandler();
+ }
+
@Bean
public EndlessConsumer<String, Message> endlessConsumer(
- Consumer<String, Message> kafkaConsumer,
- ExecutorService executor,
- ApplicationRebalanceListener rebalanceListener,
RecordHandler recordHandler,
+ ApplicationErrorHandler errorHandler,
KafkaProperties kafkaProperties,
- ApplicationProperties applicationProperties)
+ KafkaListenerEndpointRegistry endpointRegistry)
{
return
new EndlessConsumer<>(
- executor,
kafkaProperties.getClientId(),
- applicationProperties.getTopic(),
- kafkaConsumer,
- rebalanceListener,
+ endpointRegistry,
+ errorHandler,
recordHandler);
}
-
- @Bean
- public ExecutorService executor()
- {
- return Executors.newSingleThreadExecutor();
- }
-
- @Bean(destroyMethod = "close")
- public Consumer<String, Message> kafkaConsumer(ConsumerFactory<String, Message> factory)
- {
- return factory.createConsumer();
- }
}
--- /dev/null
+package de.juplo.kafka;
+
+import lombok.extern.slf4j.Slf4j;
+import org.apache.kafka.clients.consumer.Consumer;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.springframework.kafka.listener.CommonErrorHandler;
+import org.springframework.kafka.listener.MessageListenerContainer;
+import org.springframework.util.Assert;
+
+import java.util.Optional;
+
+
+/**
+ * A {@link CommonErrorHandler} that stops the listener-container abnormally
+ * on any error and remembers the causing exception, so that it can be
+ * queried via {@link #getException()} after the container has stopped.
+ * Only applicable for batch-listeners (enforced in
+ * {@link #handleOtherException}).
+ * NOTE(review): not thread-safe — assumes a single listener-container
+ * drives this handler; confirm against the container configuration.
+ */
+@Slf4j
+public class ApplicationErrorHandler implements CommonErrorHandler
+{
+ // The exception that triggered the abnormal stop (null, if none occurred)
+ private Exception exception;
+ // Whether the offsets of the handled batch should be committed afterwards;
+ // stays true for non-batch errors, set to false on batch (logic) errors
+ private boolean ack = true;
+
+
+ // Called by Spring Kafka for exceptions raised outside of the processing
+ // of a polled batch; ack is left at true, so offsets are still committed
+ @Override
+ public void handleOtherException(
+ Exception thrownException,
+ Consumer<?, ?> consumer,
+ MessageListenerContainer container,
+ boolean batchListener)
+ {
+ Assert.isTrue(batchListener, getClass().getName() + " is only applicable for Batch-Listeners");
+ rememberExceptionAndStopContainer(thrownException, container);
+ }
+
+ // Called by Spring Kafka when the listener throws while processing a batch
+ @Override
+ public void handleBatch(
+ Exception thrownException,
+ ConsumerRecords<?, ?> data,
+ Consumer<?, ?> consumer,
+ MessageListenerContainer container,
+ Runnable invokeListener)
+ {
+ // Do not commit the polled offsets on a logic-error
+ ack = false;
+ rememberExceptionAndStopContainer(thrownException, container);
+ }
+
+ // Records the exception and initiates an abnormal stop of the container;
+ // the stop-callback only logs completion
+ private void rememberExceptionAndStopContainer(
+ Exception exception,
+ MessageListenerContainer container)
+ {
+ log.error("{}, stopping container {} abnormally", exception, container);
+ this.exception = exception;
+ container.stopAbnormally(() -> log.info("{} is stopped", container));
+ }
+
+ // Tells the container whether to commit the offsets after error handling
+ @Override
+ public boolean isAckAfterHandle()
+ {
+ return ack;
+ }
+
+
+ /** Returns the remembered exception, or empty if none was recorded. */
+ public Optional<Exception> getException()
+ {
+ return Optional.ofNullable(exception);
+ }
+
+ /** Resets the handler to its initial state (no exception, ack enabled). */
+ public void clearState()
+ {
+ this.exception = null;
+ this.ack = true;
+ }
+}
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.common.TopicPartition;
+import org.springframework.kafka.listener.ConsumerAwareRebalanceListener;
import java.util.*;
@RequiredArgsConstructor
@Slf4j
-public class ApplicationRebalanceListener implements ConsumerRebalanceListener
+public class ApplicationRebalanceListener implements ConsumerAwareRebalanceListener
{
private final ApplicationRecordHandler recordHandler;
private final AdderResults adderResults;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
-import org.apache.kafka.clients.consumer.*;
-import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.errors.RecordDeserializationException;
-import org.apache.kafka.common.errors.WakeupException;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.springframework.kafka.annotation.KafkaListener;
+import org.springframework.kafka.config.KafkaListenerEndpointRegistry;
-import javax.annotation.PreDestroy;
-import java.time.Duration;
-import java.util.*;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.locks.Condition;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
+import java.util.List;
+import java.util.Optional;
-@Slf4j
@RequiredArgsConstructor
-public class EndlessConsumer<K, V> implements Runnable
+@Slf4j
+public class EndlessConsumer<K, V>
{
- private final ExecutorService executor;
private final String id;
- private final String topic;
- private final Consumer<K, V> consumer;
- private final ConsumerRebalanceListener rebalanceListener;
+ private final KafkaListenerEndpointRegistry registry;
+ private final ApplicationErrorHandler errorHandler;
private final RecordHandler<K, V> recordHandler;
- private final Lock lock = new ReentrantLock();
- private final Condition condition = lock.newCondition();
- private boolean running = false;
- private Exception exception;
private long consumed = 0;
-
- @Override
- public void run()
+ @KafkaListener(
+ id = "${spring.kafka.client-id}",
+ idIsGroup = false,
+ topics = "${sumup.adder.topic}",
+ batch = "true",
+ autoStartup = "false")
+ public void accept(List<ConsumerRecord<K, V>> records)
{
- try
- {
- log.info("{} - Subscribing to topic {}", id, topic);
- consumer.subscribe(Arrays.asList(topic), rebalanceListener);
-
- while (true)
- {
- ConsumerRecords<K, V> records =
- consumer.poll(Duration.ofSeconds(1));
-
// Do something with the data...
- log.info("{} - Received {} messages", id, records.count());
+ log.info("{} - Received {} messages", id, records.size());
for (ConsumerRecord<K, V> record : records)
{
log.info(
consumed++;
}
- }
- }
- catch(WakeupException e)
- {
- log.info("{} - RIIING! Request to stop consumption - commiting current offsets!", id);
- consumer.commitSync();
- shutdown();
- }
- catch(RecordDeserializationException e)
- {
- TopicPartition tp = e.topicPartition();
- long offset = e.offset();
- log.error(
- "{} - Could not deserialize message on topic {} with offset={}: {}",
- id,
- tp,
- offset,
- e.getCause().toString());
-
- consumer.commitSync();
- shutdown(e);
- }
- catch(Exception e)
- {
- log.error("{} - Unexpected error: {}", id, e.toString(), e);
- shutdown(e);
- }
- finally
- {
- log.info("{} - Consumer-Thread exiting", id);
- }
- }
-
- private void shutdown()
- {
- shutdown(null);
- }
-
- private void shutdown(Exception e)
- {
- lock.lock();
- try
- {
- try
- {
- log.info("{} - Unsubscribing from topic {}", id, topic);
- consumer.unsubscribe();
- }
- catch (Exception ue)
- {
- log.error(
- "{} - Error while unsubscribing from topic {}: {}",
- id,
- topic,
- ue.toString());
- }
- finally
- {
- running = false;
- exception = e;
- condition.signal();
- }
- }
- finally
- {
- lock.unlock();
- }
}
public void start()
{
- lock.lock();
- try
- {
- if (running)
- throw new IllegalStateException("Consumer instance " + id + " is already running!");
-
- log.info("{} - Starting - consumed {} messages before", id, consumed);
- running = true;
- exception = null;
- executor.submit(this);
- }
- finally
- {
- lock.unlock();
- }
- }
+ if (running())
+ throw new IllegalStateException("Consumer instance " + id + " is already running!");
- public synchronized void stop() throws InterruptedException
- {
- lock.lock();
- try
- {
- if (!running)
- throw new IllegalStateException("Consumer instance " + id + " is not running!");
-
- log.info("{} - Stopping", id);
- consumer.wakeup();
- condition.await();
- log.info("{} - Stopped - consumed {} messages so far", id, consumed);
- }
- finally
- {
- lock.unlock();
- }
+ log.info("{} - Starting - consumed {} messages before", id, consumed);
+ errorHandler.clearState();
+ registry.getListenerContainer(id).start();
}
- @PreDestroy
- public void destroy() throws ExecutionException, InterruptedException
+ public void stop()
{
- log.info("{} - Destroy!", id);
- log.info("{}: Consumed {} messages in total, exiting!", id, consumed);
+ if (!running())
+ throw new IllegalStateException("Consumer instance " + id + " is not running!");
+
+ log.info("{} - Stopping", id);
+ registry.getListenerContainer(id).stop();
+ log.info("{} - Stopped - consumed {} messages so far", id, consumed);
}
public boolean running()
{
- lock.lock();
- try
- {
- return running;
- }
- finally
- {
- lock.unlock();
- }
+ return registry.getListenerContainer(id).isRunning();
}
public Optional<Exception> exitStatus()
{
- lock.lock();
- try
- {
- if (running)
- throw new IllegalStateException("No exit-status available: Consumer instance " + id + " is running!");
-
- return Optional.ofNullable(exception);
- }
- finally
- {
- lock.unlock();
- }
+ if (running())
+ throw new IllegalStateException("No exit-status available: Consumer instance " + id + " is running!");
+
+ return errorHandler.getException();
}
}
import com.mongodb.client.MongoClient;
import lombok.extern.slf4j.Slf4j;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.junit.jupiter.api.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
-import org.springframework.boot.autoconfigure.kafka.KafkaAutoConfiguration;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.boot.autoconfigure.mongo.MongoProperties;
import org.springframework.boot.test.autoconfigure.data.mongo.AutoConfigureDataMongo;
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Import;
+import org.springframework.kafka.config.KafkaListenerEndpointRegistry;
+import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.test.context.EmbeddedKafka;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
import static org.awaitility.Awaitility.*;
-@SpringJUnitConfig(
- initializers = ConfigDataApplicationContextInitializer.class,
- classes = {
- KafkaAutoConfiguration.class,
- ApplicationTests.Configuration.class })
+@SpringJUnitConfig(initializers = ConfigDataApplicationContextInitializer.class)
@TestPropertySource(
properties = {
"spring.kafka.bootstrap-servers=${spring.embedded.kafka.brokers}",
@Autowired
MongoProperties mongoProperties;
@Autowired
+ KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry;
+ @Autowired
TestRecordHandler<K, V> recordHandler;
@Autowired
EndlessConsumer<K, V> endlessConsumer;
{
return new TestRecordHandler(applicationRecordHandler);
}
+
+ @Bean(destroyMethod = "close")
+ public org.apache.kafka.clients.consumer.Consumer<String, Message> kafkaConsumer(ConsumerFactory<String, Message> factory)
+ {
+ return factory.createConsumer();
+ }
}
}