X-Git-Url: https://juplo.de/gitweb/?a=blobdiff_plain;f=src%2Fmain%2Fjava%2Fde%2Fjuplo%2Fkafka%2Fchat%2Fbackend%2Fpersistence%2Fkafka%2FKafkaChatHomeService.java;h=4460432254379ca6ca0a420db8e4d795f03aa4d7;hb=394aa7848abcfc7b4510cd2688c00fce01b3c225;hp=7ebf0493056ae19450cbb3496821cc5a9c8d6a18;hpb=f14af7787fd519cffcdcd83cd375fb7477b4ced2;p=demos%2Fkafka%2Fchat

diff --git a/src/main/java/de/juplo/kafka/chat/backend/persistence/kafka/KafkaChatHomeService.java b/src/main/java/de/juplo/kafka/chat/backend/persistence/kafka/KafkaChatHomeService.java
index 7ebf0493..44604322 100644
--- a/src/main/java/de/juplo/kafka/chat/backend/persistence/kafka/KafkaChatHomeService.java
+++ b/src/main/java/de/juplo/kafka/chat/backend/persistence/kafka/KafkaChatHomeService.java
@@ -2,31 +2,47 @@ package de.juplo.kafka.chat.backend.persistence.kafka;
 
 import de.juplo.kafka.chat.backend.domain.ChatHomeService;
 import de.juplo.kafka.chat.backend.domain.ChatRoom;
+import de.juplo.kafka.chat.backend.domain.ShardNotOwnedException;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.kafka.clients.consumer.Consumer;
 import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
 import org.apache.kafka.clients.producer.Producer;
 import org.apache.kafka.common.TopicPartition;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;
+import java.time.Duration;
 import java.time.ZoneId;
 import java.util.*;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 
 @Slf4j
-public class KafkaChatHomeService implements ChatHomeService, ConsumerRebalanceListener
+public class KafkaChatHomeService implements ChatHomeService, Runnable, ConsumerRebalanceListener
 {
+  private final ExecutorService executorService;
   private final Consumer consumer;
   private final Producer producer;
   private final String topic;
   private final ZoneId zoneId;
   // private final long[] offsets; For now, always re-read everything
-  private final ChatHomeLoader[] chatHomeLoaders;
+  private final boolean[] isShardOwned;
   private final Map<UUID, ChatRoom>[] chatRoomMaps;
+  private final ReadWriteLock lock = new ReentrantReadWriteLock();
+
+  private boolean running;
 
   public KafkaChatHomeService(
+      ExecutorService executorService,
       Consumer consumer,
       Producer producer,
       String topic,
@@ -34,6 +50,7 @@ public class KafkaChatHomeService implements ChatHomeService, ConsumerRebalanceL
       int numShards)
   {
     log.debug("Creating KafkaChatHomeService");
+    this.executorService = executorService;
    this.consumer = consumer;
    this.producer = producer;
    this.topic = topic;
@@ -43,7 +60,7 @@ public class KafkaChatHomeService implements ChatHomeService, ConsumerRebalanceL
 //    {
 //      this.offsets[i] = 0l;
 //    }
-    this.chatHomeLoaders = new ChatHomeLoader[numShards];
+    this.isShardOwned = new boolean[numShards];
     this.chatRoomMaps = new Map[numShards];
   }
 
@@ -51,47 +68,52 @@ public class KafkaChatHomeService implements ChatHomeService, ConsumerRebalanceL
   @Override
   public void onPartitionsAssigned(Collection<TopicPartition> partitions)
   {
-    consumer.endOffsets(partitions).forEach((topicPartition, currentOffset) ->
+    try
     {
-      if (!topicPartition.topic().equals(topic))
+      lock.writeLock().lock();
+
+      consumer.endOffsets(partitions).forEach((topicPartition, currentOffset) ->
       {
-        log.warn("Ignoring partition from unwanted topic: {}", topicPartition);
-        return;
-      }
+        if (!topicPartition.topic().equals(topic))
+        {
+          log.warn("Ignoring partition from unwanted topic: {}", topicPartition);
+          return;
+        }
 
-      int partition = topicPartition.partition();
-      long unseenOffset = 0; // offsets[partition];
-
-      log.info(
-          "Loading messages from partition {}: start-offset={} -> current-offset={}",
-          partition,
-          unseenOffset,
-          currentOffset);
-
-      // TODO: reuse! Do not always reload everything, only from offsets[partition] onward!
-      consumer.seek(topicPartition, unseenOffset);
-      chatHomeLoaders[partition] = new ChatHomeLoader(
-          producer,
-          currentOffset,
-          zoneId);
-    });
+        int partition = topicPartition.partition();
+        long unseenOffset = 0; // offsets[partition];
+
+        log.info(
+            "Loading messages from partition {}: start-offset={} -> current-offset={}",
+            partition,
+            unseenOffset,
+            currentOffset);
+
+        // TODO: reuse! Do not always reload everything, only from offsets[partition] onward!
+        consumer.seek(topicPartition, unseenOffset);
+      });
+
+      consumer.resume(partitions);
+    }
+    finally
+    {
+      lock.writeLock().unlock();
+    }
   }
 
   @Override
   public void onPartitionsRevoked(Collection<TopicPartition> partitions)
   {
-    partitions.forEach(tp ->
+    partitions.forEach(topicPartition ->
     {
-      if (!tp.topic().equals(topic))
+      if (!topicPartition.topic().equals(topic))
       {
-        log.warn("Ignoring partition from unwanted topic: {}", tp);
+        log.warn("Ignoring partition from unwanted topic: {}", topicPartition);
         return;
       }
 
-      int partition = tp.partition();
-      long unseenOffset = offsets[partition];
-
-      log.info("Reading partition {} from {} -> {}", partition, unseenOffset, currentOffset);
+      int partition = topicPartition.partition();
+      // long unseenOffset = offsets[partition]; TODO: remember the offset...?
     });
     log.info("Revoked partitions: {}", partitions);
   }
@@ -99,56 +121,69 @@ public class KafkaChatHomeService implements ChatHomeService, ConsumerRebalanceL
   @Override
   public void onPartitionsLost(Collection<TopicPartition> partitions)
   {
-    log.info("Revoked partitions: {}", partitions);
+    // TODO: Does the loss have to be handled differently?
+    onPartitionsRevoked(partitions);
   }
 
-  private void foo()
+  @Override
+  public void run()
   {
-    Set<Integer> owned = Arrays
-        .stream(ownedShards)
-        .collect(
-            () -> new HashSet<>(),
-            (set, i) -> set.add(i),
-            (a, b) -> a.addAll(b));
-    for (int shard = 0; shard < numShards; shard++)
+    consumer.subscribe(List.of(topic));
+
+    running = true;
+
+    try
     {
-      chatRoomMaps[shard] = owned.contains(shard)
-          ? new HashMap<>()
-          : null;
-    }
-    chatroomFlux
-      .filter(chatRoom ->
+      while (running)
       {
-        if (owned.contains(chatRoom.getShard()))
-        {
-          return true;
-        }
-        else
+        ConsumerRecords records = consumer.poll(Duration.ofMinutes(5));
+        log.info("Fetched {} messages", records.count());
+
+        for (ConsumerRecord record : records)
         {
-          log.info("Ignoring not owned chat-room {}", chatRoom);
-          return false;
-        }
-      })
-      .toStream()
-      .forEach(chatroom -> chatRoomMaps[chatroom.getShard()].put(chatroom.getId(), chatroom));
-  }
-
-  @Override
-  public Mono<ChatRoom> putChatRoom(ChatRoom chatRoom)
-  {
-    chatRoomMaps[chatRoom.getShard()].put(chatRoom.getId(), chatRoom);
-    return Mono.just(chatRoom);
+        }
+      }
+    }
   }
 
   @Override
   public Mono<ChatRoom> getChatRoom(int shard, UUID id)
   {
-    return Mono.justOrEmpty(chatRoomMaps[shard].get(id));
+    if (lock.readLock().tryLock())
+    {
+      try
+      {
+        return Mono.justOrEmpty(chatRoomMaps[shard].get(id));
+      }
+      finally
+      {
+        lock.readLock().unlock();
+      }
+    }
+    else
+    {
+      throw new ShardNotOwnedException(shard);
+    }
   }
 
   @Override
   public Flux<ChatRoom> getChatRooms(int shard)
   {
-    return Flux.fromStream(chatRoomMaps[shard].values().stream());
+    if (lock.readLock().tryLock())
+    {
+      try
+      {
+        return Flux.fromStream(chatRoomMaps[shard].values().stream());
+      }
+      finally
+      {
+        lock.readLock().unlock();
+      }
+    }
+    else
+    {
+      throw new ShardNotOwnedException(shard);
+    }
  }
 }
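
Note on the new error path: the diff imports de.juplo.kafka.chat.backend.domain.ShardNotOwnedException and throws it from getChatRoom() and getChatRooms(), but the class itself is not part of this change. A minimal sketch of the shape those call sites assume — an unchecked exception that carries the shard number — could look like the following; the actual class in the domain package may differ:

package de.juplo.kafka.chat.backend.domain;

// Hypothetical sketch only -- the real class lives in the domain module and is
// not shown in this diff. The call sites above merely require an unchecked
// exception that can be constructed from the number of the unowned shard.
public class ShardNotOwnedException extends RuntimeException
{
  private final int shard;

  public ShardNotOwnedException(int shard)
  {
    super("This instance does not own the shard " + shard);
    this.shard = shard;
  }

  public int getShard()
  {
    return shard;
  }
}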
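
For readers unfamiliar with the locking idiom introduced here: the rebalance callback takes the write lock while it (re)loads the assigned partitions, and the read path uses tryLock() instead of lock(), so requests fail fast with ShardNotOwnedException rather than block behind a potentially long reload (the TODOs show that every assignment currently re-reads its partition from offset 0). A small stand-alone illustration of that fail-fast behaviour with the plain JDK ReadWriteLock, independent of the project code:

import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Stand-alone demo of the fail-fast read pattern used above; names and output
// are illustrative only.
public class TryLockDemo
{
  public static void main(String[] args) throws InterruptedException
  {
    ReadWriteLock lock = new ReentrantReadWriteLock();

    // The "consumer thread" holds the write lock, as onPartitionsAssigned()
    // does while it reloads the newly assigned partitions.
    lock.writeLock().lock();

    // A "request thread" does not wait for the reload to finish: tryLock()
    // returns false immediately, which the service maps to ShardNotOwnedException.
    Thread duringRebalance = new Thread(() ->
        System.out.println("read allowed during rebalance? " + lock.readLock().tryLock()));
    duringRebalance.start();
    duringRebalance.join();

    lock.writeLock().unlock(); // reload finished

    // Once the write lock is released, readers succeed immediately.
    Thread afterRebalance = new Thread(() ->
    {
      if (lock.readLock().tryLock())
      {
        try
        {
          System.out.println("read allowed after rebalance? true");
        }
        finally
        {
          lock.readLock().unlock();
        }
      }
    });
    afterRebalance.start();
    afterRebalance.join();
  }
}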
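
The new run() method is still a skeleton in this revision: the loop over the fetched records is empty, and the try block has neither a catch nor a finally clause in the diff as shown. A hedged sketch of how such a consumer poll loop is commonly closed off — assuming a consumer.wakeup()-based shutdown via org.apache.kafka.common.errors.WakeupException, which this commit does not confirm — and leaving the actual record handling as the TODO it is:

  // Hypothetical completion of run() -- not part of the commit. The record
  // handling itself is left open, exactly as in the diff above.
  @Override
  public void run()
  {
    consumer.subscribe(List.of(topic));

    running = true;

    try
    {
      while (running)
      {
        ConsumerRecords records = consumer.poll(Duration.ofMinutes(5));
        log.info("Fetched {} messages", records.count());

        for (ConsumerRecord record : records)
        {
          // TODO: rebuild the chat-room state of record.partition() from the record
        }
      }
    }
    catch (WakeupException e)
    {
      // consumer.wakeup() was called from another thread to stop the loop
      log.info("Consumer woken up -- exiting the poll loop");
    }
    finally
    {
      consumer.close();
    }
  }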