X-Git-Url: https://juplo.de/gitweb/?a=blobdiff_plain;f=src%2Fmain%2Fjava%2Fde%2Fjuplo%2Fkafka%2Fchat%2Fbackend%2Fpersistence%2Fkafka%2FKafkaChatHomeService.java;h=a95df543813f3223895b631d2b40e334309c9c1c;hb=220a778c91468046054fac0400ba89825c46b3f5;hp=e171bc5738e29f27e73afc655aa7fce699c930f8;hpb=f7475320b20be8ba198ba914958e9e4dddf62e11;p=demos%2Fkafka%2Fchat

diff --git a/src/main/java/de/juplo/kafka/chat/backend/persistence/kafka/KafkaChatHomeService.java b/src/main/java/de/juplo/kafka/chat/backend/persistence/kafka/KafkaChatHomeService.java
index e171bc57..a95df543 100644
--- a/src/main/java/de/juplo/kafka/chat/backend/persistence/kafka/KafkaChatHomeService.java
+++ b/src/main/java/de/juplo/kafka/chat/backend/persistence/kafka/KafkaChatHomeService.java
@@ -5,37 +5,49 @@ import de.juplo.kafka.chat.backend.domain.ChatRoom;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.kafka.clients.consumer.Consumer;
 import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
+import org.apache.kafka.clients.producer.Producer;
 import org.apache.kafka.common.TopicPartition;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;
 
+import java.time.ZoneId;
 import java.util.*;
+import java.util.concurrent.ExecutorService;
 
 
 @Slf4j
 public class KafkaChatHomeService implements ChatHomeService, ConsumerRebalanceListener
 {
+  private final ExecutorService executorService;
   private final Consumer consumer;
+  private final Producer producer;
   private final String topic;
+  private final ZoneId zoneId;
   // private final long[] offsets; For now, always re-read everything
-  private final Map[] kafkaChatRoomServiceMaps;
+  private final ChatHomeLoader[] chatHomeLoaders;
   private final Map[] chatRoomMaps;
 
 
   public KafkaChatHomeService(
+      ExecutorService executorService,
       Consumer consumer,
+      Producer producer,
       String topic,
+      ZoneId zoneId,
       int numShards)
   {
     log.debug("Creating KafkaChatHomeService");
+    this.executorService = executorService;
     this.consumer = consumer;
+    this.producer = producer;
     this.topic = topic;
+    this.zoneId = zoneId;
     // this.offsets = new long[numShards];
     // for (int i=0; i< numShards; i++)
     // {
     //   this.offsets[i] = 0l;
     // }
-    this.kafkaChatRoomServiceMaps = new Map[numShards];
+    this.chatHomeLoaders = new ChatHomeLoader[numShards];
     this.chatRoomMaps = new Map[numShards];
   }
 
@@ -43,16 +55,15 @@ public class KafkaChatHomeService implements ChatHomeService, ConsumerRebalanceL
   @Override
   public void onPartitionsAssigned(Collection<TopicPartition> partitions)
   {
-    consumer.endOffsets(partitions).forEach((tp, currentOffset) ->
+    consumer.endOffsets(partitions).forEach((topicPartition, currentOffset) ->
     {
-      if (!tp.topic().equals(topic))
+      if (!topicPartition.topic().equals(topic))
       {
-        log.warn("Ignoring partition from unwanted topic: {}", tp);
+        log.warn("Ignoring partition from unwanted topic: {}", topicPartition);
         return;
       }
 
-      int partition = tp.partition();
-      kafkaChatRoomServiceMaps[partition] = new HashMap<>(); // TODO: reuse! Don't reload everything every time
+      int partition = topicPartition.partition();
       long unseenOffset = 0; // offsets[partition];
 
       log.info(
@@ -61,29 +72,28 @@ public class KafkaChatHomeService implements ChatHomeService, ConsumerRebalanceL
           unseenOffset,
           currentOffset);
 
-      consumer.seek(tp, unseenOffset);
-      chatRoomMaps[partition]
-          .values()
-          .stream()
-      handlers[partition] = new ChatRoomLoadingMessageHandlingStrategy(tp, currentOffset, unseenOffset);
+      // TODO: reuse! Don't reload everything every time, but only from offsets[partition] onward!
+      consumer.seek(topicPartition, unseenOffset);
+      chatHomeLoaders[partition] = new ChatHomeLoader(
+          producer,
+          currentOffset,
+          zoneId);
     });
   }
 
   @Override
   public void onPartitionsRevoked(Collection<TopicPartition> partitions)
   {
-    partitions.forEach(tp ->
+    partitions.forEach(topicPartition ->
     {
-      if (!tp.topic().equals(topic))
+      if (!topicPartition.topic().equals(topic))
       {
-        log.warn("Ignoring partition from unwanted topic: {}", tp);
+        log.warn("Ignoring partition from unwanted topic: {}", topicPartition);
         return;
       }
 
-      int partition = tp.partition();
-      long unseenOffset = offsets[partition];
-
-      log.info("Reading partition {} from {} -> {}", partition, unseenOffset, currentOffset);
+      int partition = topicPartition.partition();
+      // long unseenOffset = offsets[partition]; TODO: remember the offset...?
     });
     log.info("Revoked partitions: {}", partitions);
   }
 
@@ -91,45 +101,8 @@ public class KafkaChatHomeService implements ChatHomeService, ConsumerRebalanceL
   @Override
   public void onPartitionsLost(Collection<TopicPartition> partitions)
   {
-    log.info("Revoked partitions: {}", partitions);
-  }
-
-  private void foo()
-  {
-    Set owned = Arrays
-        .stream(ownedShards)
-        .collect(
-            () -> new HashSet<>(),
-            (set, i) -> set.add(i),
-            (a, b) -> a.addAll(b));
-    for (int shard = 0; shard < numShards; shard++)
-    {
-      chatRoomMaps[shard] = owned.contains(shard)
-          ? new HashMap<>()
-          : null;
-    }
-    chatroomFlux
-        .filter(chatRoom ->
-        {
-          if (owned.contains(chatRoom.getShard()))
-          {
-            return true;
-          }
-          else
-          {
-            log.info("Ignoring not owned chat-room {}", chatRoom);
-            return false;
-          }
-        })
-        .toStream()
-        .forEach(chatroom -> chatRoomMaps[chatroom.getShard()].put(chatroom.getId(), chatroom));
-  }
-
-  @Override
-  public Mono putChatRoom(ChatRoom chatRoom)
-  {
-    chatRoomMaps[chatRoom.getShard()].put(chatRoom.getId(), chatRoom);
-    return Mono.just(chatRoom);
+    // TODO: Does the loss of partitions need to be handled differently?
+    onPartitionsRevoked(partitions);
   }
 
   @Override
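
Note on usage (not part of the diff): the ConsumerRebalanceListener callbacks changed above are only invoked if the KafkaChatHomeService instance is passed to Consumer.subscribe() and the consumer is polled continuously. The following is a minimal wiring sketch under that assumption; the generic record types, the class name ConsumerLoopSketch, and the poll loop are illustrative and not taken from this repository.

import java.time.Duration;
import java.util.List;
import java.util.concurrent.ExecutorService;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.common.errors.WakeupException;

class ConsumerLoopSketch
{
  // Hypothetical wiring (not from this repository): starts a polling loop on the
  // given ExecutorService and registers the rebalance listener (e.g. a
  // KafkaChatHomeService instance) via subscribe().
  static <K, V> void start(
      ExecutorService executorService,
      Consumer<K, V> consumer,
      String topic,
      ConsumerRebalanceListener rebalanceListener)
  {
    executorService.execute(() ->
    {
      // subscribe() with a listener: Kafka calls onPartitionsAssigned() /
      // onPartitionsRevoked() from inside poll() whenever a rebalance happens.
      consumer.subscribe(List.of(topic), rebalanceListener);
      try
      {
        while (true)
        {
          // Records returned here would be handed to the ChatHomeLoader of the
          // record's partition; omitted, since the loader API is not shown in this diff.
          consumer.poll(Duration.ofSeconds(1));
        }
      }
      catch (WakeupException e)
      {
        // consumer.wakeup() was called from another thread: exit the loop.
      }
      finally
      {
        consumer.close();
      }
    });
  }
}

Because the rebalance callbacks run on the polling thread, the seek() performed in onPartitionsAssigned() takes effect before the next poll() returns records.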