X-Git-Url: https://juplo.de/gitweb/?a=blobdiff_plain;f=src%2Fmain%2Fjava%2Fde%2Fjuplo%2Fkafka%2Fchat%2Fbackend%2Fpersistence%2Fkafka%2FKafkaChatHomeService.java;h=3ca5b7f7436f33d9476fec5be4f899a4f4244ee9;hb=ead1db4a7daa0bcf944063f3920b3974c2f62e07;hp=5133d1a68203713f970a5106a73b67de5d1f17e7;hpb=035668bee4f02c4c70f43826026b40f81e3dd672;p=demos%2Fkafka%2Fchat

diff --git a/src/main/java/de/juplo/kafka/chat/backend/persistence/kafka/KafkaChatHomeService.java b/src/main/java/de/juplo/kafka/chat/backend/persistence/kafka/KafkaChatHomeService.java
index 5133d1a6..3ca5b7f7 100644
--- a/src/main/java/de/juplo/kafka/chat/backend/persistence/kafka/KafkaChatHomeService.java
+++ b/src/main/java/de/juplo/kafka/chat/backend/persistence/kafka/KafkaChatHomeService.java
@@ -13,12 +13,15 @@ import org.apache.kafka.clients.consumer.ConsumerRecords;
 import org.apache.kafka.clients.producer.Producer;
 import org.apache.kafka.clients.producer.ProducerRecord;
 import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.errors.RecordDeserializationException;
+import org.apache.kafka.common.errors.WakeupException;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;
 
 import java.time.*;
 import java.util.*;
 import java.util.concurrent.ExecutorService;
+import java.util.stream.IntStream;
 
 
 @Slf4j
@@ -29,8 +32,10 @@ public class KafkaChatHomeService implements ChatHomeService, Runnable, Consumer
   private final Producer<String, MessageTo> producer;
   private final String topic;
   private final ZoneId zoneId;
-  // private final long[] offsets; For now, always re-read everything from scratch
+  private final int numShards;
   private final boolean[] isShardOwned;
+  private final long[] currentOffset;
+  private final long[] nextOffset;
   private final Map<UUID, ChatRoom>[] chatRoomMaps;
   private final KafkaLikeShardingStrategy shardingStrategy;
 
@@ -52,12 +57,10 @@ public class KafkaChatHomeService implements ChatHomeService, Runnable, Consumer
     this.producer = producer;
     this.topic = topic;
     this.zoneId = zoneId;
-    // this.offsets = new long[numShards];
-    // for (int i=0; i< numShards; i++)
-    // {
-    //   this.offsets[i] = 0l;
-    // }
+    this.numShards = numShards;
     this.isShardOwned = new boolean[numShards];
+    this.currentOffset = new long[numShards];
+    this.nextOffset = new long[numShards];
     this.chatRoomMaps = new Map[numShards];
     this.shardingStrategy = new KafkaLikeShardingStrategy(numShards);
   }
@@ -66,27 +69,22 @@ public class KafkaChatHomeService implements ChatHomeService, Runnable, Consumer
   @Override
   public void onPartitionsAssigned(Collection<TopicPartition> partitions)
   {
+    log.info("Newly assigned partitions! Pausing normal operations...");
     loadInProgress = true;
 
     consumer.endOffsets(partitions).forEach((topicPartition, currentOffset) ->
     {
-      if (!topicPartition.topic().equals(topic))
-      {
-        log.warn("Ignoring partition from unwanted topic: {}", topicPartition);
-        return;
-      }
-
       int partition = topicPartition.partition();
-      long unseenOffset = 0; // offsets[partition];
+      isShardOwned[partition] = true;
+      this.currentOffset[partition] = currentOffset;
 
       log.info(
-          "Loading messages from partition {}: start-offset={} -> current-offset={}",
+          "Partition assigned: {} - loading messages: next={} -> current={}",
           partition,
-          unseenOffset,
+          nextOffset[partition],
          currentOffset);
 
-      // TODO: reuse! Do not reload everything every time, but only from offsets[partition] onwards!
-      consumer.seek(topicPartition, unseenOffset);
+      consumer.seek(topicPartition, nextOffset[partition]);
     });
 
     consumer.resume(partitions);
@@ -97,21 +95,16 @@ public class KafkaChatHomeService implements ChatHomeService, Runnable, Consumer
   {
     partitions.forEach(topicPartition ->
     {
-      if (!topicPartition.topic().equals(topic))
-      {
-        log.warn("Ignoring partition from unwanted topic: {}", topicPartition);
-        return;
-      }
-
       int partition = topicPartition.partition();
-      // long unseenOffset = offsets[partition]; TODO: remember the offset...?
+      isShardOwned[partition] = false;
+      log.info("Partition revoked: {} - next={}", partition, nextOffset[partition]);
     });
-    log.info("Revoked partitions: {}", partitions);
   }
 
   @Override
   public void onPartitionsLost(Collection<TopicPartition> partitions)
   {
+    log.warn("Lost partitions: {}", partitions);
     // TODO: Does the loss need to be handled differently?
     onPartitionsRevoked(partitions);
   }
@@ -123,38 +116,86 @@ public class KafkaChatHomeService implements ChatHomeService, Runnable, Consumer
   {
     running = true;
 
-    try
+    while (running)
     {
-      while (running)
+      try
       {
         ConsumerRecords<String, MessageTo> records = consumer.poll(Duration.ofMinutes(5));
         log.info("Fetched {} messages", records.count());
 
         if (loadInProgress)
         {
-          for (ConsumerRecord<String, MessageTo> record : records)
+          loadMessages(records);
+
+          if (isLoadingCompleted())
           {
-            UUID chatRoomId = UUID.fromString(record.key());
-            MessageTo messageTo = record.value();
-            ChatRoom chatRoom = chatRoomMaps[record.partition()].get(chatRoomId);
-            Mono<Message> result = chatRoom.addMessage(
-                messageTo.getId(),
-                messageTo.getUser(),
-                messageTo.getText());
-            result.block().
+            log.info("Loading of messages completed! Pausing all owned partitions...");
+            pauseAllOwnedPartions();
+            log.info("Resuming normal operations...");
+            loadInProgress = false;
          }
         }
         else
         {
          if (!records.isEmpty())
          {
-            throw new IllegalStateException("All owned partions should be paused, when no load is in progress!");
+            throw new IllegalStateException("All owned partitions should be paused, when no load is in progress!");
          }
        }
      }
+      catch (WakeupException e)
+      {
+      }
+      catch (RecordDeserializationException e)
+      {
+      }
    }
  }
 
+  void loadMessages(ConsumerRecords<String, MessageTo> records)
+  {
+    for (ConsumerRecord<String, MessageTo> record : records)
+    {
+      nextOffset[record.partition()] = record.offset() + 1;
+      UUID chatRoomId = UUID.fromString(record.key());
+      MessageTo messageTo = record.value();
+
+      Message.MessageKey key = Message.MessageKey.of(messageTo.getUser(), messageTo.getId());
+
+      Instant instant = Instant.ofEpochSecond(record.timestamp());
+      LocalDateTime timestamp = LocalDateTime.ofInstant(instant, zoneId);
+
+      Message message = new Message(key, record.offset(), timestamp, messageTo.getText());
+
+      ChatRoom chatRoom = chatRoomMaps[record.partition()].get(chatRoomId);
+      KafkaChatRoomService kafkaChatRoomService =
+          (KafkaChatRoomService) chatRoom.getChatRoomService();
+
+      kafkaChatRoomService.persistMessage(message);
+    }
+  }
+
+  boolean isLoadingCompleted()
+  {
+    return IntStream
+        .range(0, numShards)
+        .filter(shard -> isShardOwned[shard])
+        .mapToObj(shard -> nextOffset[shard] >= currentOffset[shard])
+        .collect(
+            () -> Boolean.TRUE,
+            (acc, v) -> Boolean.valueOf(acc && v),
+            (a, b) -> Boolean.valueOf(a && b));
+  }
+
+  void pauseAllOwnedPartions()
+  {
+    consumer.pause(IntStream
+        .range(0, numShards)
+        .filter(shard -> isShardOwned[shard])
+        .mapToObj(shard -> new TopicPartition(topic, shard))
+        .toList());
+  }
+
   Mono<Message> sendMessage(
       UUID chatRoomId,
       Message.MessageKey key,
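
A note on the helper methods introduced in the last hunk: isLoadingCompleted() always evaluates to Boolean.TRUE as written, because the three-argument Stream.collect() treats the supplied Boolean as a mutable result container; Boolean is immutable, so the value computed by the accumulator (acc, v) -> Boolean.valueOf(acc && v) is discarded. A minimal sketch of the intended check, assuming the same fields of this class (numShards, isShardOwned, nextOffset, currentOffset), could be built on IntStream.allMatch() instead; the following is an editorial sketch, not part of the commit shown above:

  boolean isLoadingCompleted()
  {
    // A shard is fully caught up once the next offset to read has reached
    // the end offset captured for it in onPartitionsAssigned().
    return IntStream
        .range(0, numShards)
        .filter(shard -> isShardOwned[shard])
        .allMatch(shard -> nextOffset[shard] >= currentOffset[shard]);
  }

Two further observations on the added code: record.timestamp() returns milliseconds since the epoch, so Instant.ofEpochMilli(record.timestamp()) is presumably what loadMessages() intends (ofEpochSecond() interprets the value as seconds and yields timestamps far in the future), and the empty catch blocks in run() swallow both exceptions silently: catching WakeupException so the loop re-checks the running flag is the usual shutdown pattern, but a RecordDeserializationException will recur on the next poll() unless the consumer seeks past the failing offset.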