X-Git-Url: https://juplo.de/gitweb/?a=blobdiff_plain;f=src%2Fmain%2Fjava%2Fde%2Fjuplo%2Fkafka%2Fchat%2Fbackend%2Fpersistence%2Fkafka%2FKafkaChatHomeService.java;h=4460432254379ca6ca0a420db8e4d795f03aa4d7;hb=394aa7848abcfc7b4510cd2688c00fce01b3c225;hp=a95df543813f3223895b631d2b40e334309c9c1c;hpb=43b2f8b3bdac4c19035fc1c39ba9e0bddaa4a4a0;p=demos%2Fkafka%2Fchat

diff --git a/src/main/java/de/juplo/kafka/chat/backend/persistence/kafka/KafkaChatHomeService.java b/src/main/java/de/juplo/kafka/chat/backend/persistence/kafka/KafkaChatHomeService.java
index a95df543..44604322 100644
--- a/src/main/java/de/juplo/kafka/chat/backend/persistence/kafka/KafkaChatHomeService.java
+++ b/src/main/java/de/juplo/kafka/chat/backend/persistence/kafka/KafkaChatHomeService.java
@@ -2,21 +2,31 @@ package de.juplo.kafka.chat.backend.persistence.kafka;
 
 import de.juplo.kafka.chat.backend.domain.ChatHomeService;
 import de.juplo.kafka.chat.backend.domain.ChatRoom;
+import de.juplo.kafka.chat.backend.domain.ShardNotOwnedException;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.kafka.clients.consumer.Consumer;
 import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
 import org.apache.kafka.clients.producer.Producer;
 import org.apache.kafka.common.TopicPartition;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;
 
+import java.time.Duration;
 import java.time.ZoneId;
 import java.util.*;
 import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 
 @Slf4j
-public class KafkaChatHomeService implements ChatHomeService, ConsumerRebalanceListener
+public class KafkaChatHomeService implements ChatHomeService, Runnable, ConsumerRebalanceListener
 {
   private final ExecutorService executorService;
   private final Consumer consumer;
@@ -24,8 +34,11 @@ public class KafkaChatHomeService implements ChatHomeService, ConsumerRebalanceL
   private final String topic;
   private final ZoneId zoneId;
   // private final long[] offsets; For now, always read everything in from scratch
-  private final ChatHomeLoader[] chatHomeLoaders;
+  private final boolean[] isShardOwned;
   private final Map[] chatRoomMaps;
+  private final ReadWriteLock lock = new ReentrantReadWriteLock();
+
+  private boolean running;
 
 
   public KafkaChatHomeService(
@@ -47,7 +60,7 @@ public class KafkaChatHomeService implements ChatHomeService, ConsumerRebalanceL
 //    {
 //      this.offsets[i] = 0l;
 //    }
-    this.chatHomeLoaders = new ChatHomeLoader[numShards];
+    this.isShardOwned = new boolean[numShards];
     this.chatRoomMaps = new Map[numShards];
   }
 
@@ -55,30 +68,37 @@ public class KafkaChatHomeService implements ChatHomeService, ConsumerRebalanceL
   @Override
   public void onPartitionsAssigned(Collection partitions)
   {
-    consumer.endOffsets(partitions).forEach((topicPartition, currentOffset) ->
+    try
     {
-      if (!topicPartition.topic().equals(topic))
-      {
-        log.warn("Ignoring partition from unwanted topic: {}", topicPartition);
-        return;
-      }
+      lock.writeLock().lock();
 
-      int partition = topicPartition.partition();
-      long unseenOffset = 0; // offsets[partition];
-
-      log.info(
-          "Loading messages from partition {}: start-offset={} -> current-offset={}",
-          partition,
-          unseenOffset,
-          currentOffset);
-
-      // TODO: reuse! Don't reload everything every time, only start from offsets[partition]!
-      consumer.seek(topicPartition, unseenOffset);
-      chatHomeLoaders[partition] = new ChatHomeLoader(
-          producer,
-          currentOffset,
-          zoneId);
-    });
+      consumer.endOffsets(partitions).forEach((topicPartition, currentOffset) ->
+      {
+        if (!topicPartition.topic().equals(topic))
+        {
+          log.warn("Ignoring partition from unwanted topic: {}", topicPartition);
+          return;
+        }
+
+        int partition = topicPartition.partition();
+        long unseenOffset = 0; // offsets[partition];
+
+        log.info(
+            "Loading messages from partition {}: start-offset={} -> current-offset={}",
+            partition,
+            unseenOffset,
+            currentOffset);
+
+        // TODO: reuse! Don't reload everything every time, only start from offsets[partition]!
+        consumer.seek(topicPartition, unseenOffset);
+      });
+
+      consumer.resume(partitions);
+    }
+    finally
+    {
+      lock.writeLock().unlock();
+    }
   }
 
   @Override
@@ -105,15 +125,65 @@ public class KafkaChatHomeService implements ChatHomeService, ConsumerRebalanceL
     onPartitionsRevoked(partitions);
   }
 
+  @Override
+  public void run()
+  {
+    consumer.subscribe(List.of(topic));
+
+    running = true;
+
+    try
+    {
+      while (running)
+      {
+        ConsumerRecords records = consumer.poll(Duration.ofMinutes(5));
+        log.info("Fetched {} messages", records.count());
+
+        for (ConsumerRecord record : records)
+        {
+
+        }
+      }
+    }
+  }
+
   @Override
   public Mono getChatRoom(int shard, UUID id)
   {
-    return Mono.justOrEmpty(chatRoomMaps[shard].get(id));
+    if (lock.readLock().tryLock())
+    {
+      try
+      {
+        return Mono.justOrEmpty(chatRoomMaps[shard].get(id));
+      }
+      finally
+      {
+        lock.readLock().unlock();
+      }
+    }
+    else
+    {
+      throw new ShardNotOwnedException(shard);
+    }
   }
 
   @Override
   public Flux getChatRooms(int shard)
   {
-    return Flux.fromStream(chatRoomMaps[shard].values().stream());
+    if (lock.readLock().tryLock())
+    {
+      try
+      {
+        return Flux.fromStream(chatRoomMaps[shard].values().stream());
+      }
+      finally
+      {
+        lock.readLock().unlock();
+      }
+    }
+    else
+    {
+      throw new ShardNotOwnedException(shard);
+    }
  }
 }
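Note on the new run() method: as committed here it is still a stub. The poll loop's try block has no catch or finally clause yet, and the body of the record loop is empty. The following self-contained sketch is not part of the commit; the class name PollLoopSketch, the String value type, and the shutdown() helper are illustrative assumptions. It shows one common way to complete such a Kafka poll loop, using consumer.wakeup() and a WakeupException handler for a clean shutdown:

// Hypothetical sketch, not part of the commit above.
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.errors.WakeupException;

import java.time.Duration;
import java.util.List;

@Slf4j
public class PollLoopSketch implements Runnable
{
  private final Consumer<String, String> consumer; // the value type is an assumption
  private final String topic;

  // volatile, so that a shutdown() call from another thread is visible to the poll loop
  private volatile boolean running;

  public PollLoopSketch(Consumer<String, String> consumer, String topic)
  {
    this.consumer = consumer;
    this.topic = topic;
  }

  @Override
  public void run()
  {
    consumer.subscribe(List.of(topic));

    running = true;

    try
    {
      while (running)
      {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMinutes(5));
        log.info("Fetched {} messages", records.count());

        for (ConsumerRecord<String, String> record : records)
        {
          // Apply the record to the in-memory state for its partition here.
        }
      }
    }
    catch (WakeupException e)
    {
      // Expected during shutdown: consumer.wakeup() aborts a blocking poll().
      log.info("Consumer woken up, exiting poll loop");
    }
    finally
    {
      consumer.close();
    }
  }

  public void shutdown()
  {
    running = false;
    consumer.wakeup(); // unblocks a pending poll()
  }
}

On the read side, getChatRoom() and getChatRooms() now only try to acquire the read lock: while onPartitionsAssigned() holds the write lock during a rebalance, callers fail fast with a ShardNotOwnedException instead of blocking until the shard state has been rebuilt.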