- this.executorService = executorService;
- this.consumer = consumer;
- this.producer = producer;
- this.topic = topic;
- this.zoneId = zoneId;
- // this.offsets = new long[numShards];
- // for (int i=0; i< numShards; i++)
- // {
- // this.offsets[i] = 0l;
- // }
- this.isShardOwned = new boolean[numShards];
- this.chatRoomMaps = new Map[numShards];
- this.shardingStrategy = new KafkaLikeShardingStrategy(numShards);
- }
-
-
- @Override
- public void onPartitionsAssigned(Collection<TopicPartition> partitions)
- {
- loadInProgress = true;
-
- consumer.endOffsets(partitions).forEach((topicPartition, currentOffset) ->
- {
- if (!topicPartition.topic().equals(topic))
- {
- log.warn("Ignoring partition from unwanted topic: {}", topicPartition);
- return;
- }
-
- int partition = topicPartition.partition();
- long unseenOffset = 0; // offsets[partition];
-
- log.info(
- "Loading messages from partition {}: start-offset={} -> current-offset={}",
- partition,
- unseenOffset,
- currentOffset);
-
- // TODO: reuse! Nicht immer alles neu laden, sondern erst ab offsets[partition]!
- consumer.seek(topicPartition, unseenOffset);
- });
-
- consumer.resume(partitions);
- }
-
- @Override
- public void onPartitionsRevoked(Collection<TopicPartition> partitions)
- {
- partitions.forEach(topicPartition ->
- {
- if (!topicPartition.topic().equals(topic))
- {
- log.warn("Ignoring partition from unwanted topic: {}", topicPartition);
- return;
- }
-
- int partition = topicPartition.partition();
- // long unseenOffset = offsets[partition]; TODO: Offset merken...?
- });
- log.info("Revoked partitions: {}", partitions);
- }
-
  /**
   * Rebalance callback: invoked when partitions are lost without a clean
   * revocation (e.g. after a session timeout). Delegates to
   * {@link #onPartitionsRevoked(Collection)}.
   */
  @Override
  public void onPartitionsLost(Collection<TopicPartition> partitions)
  {
    // TODO: does a loss (as opposed to a clean revocation) require a different reaction?
    onPartitionsRevoked(partitions);
  }
-
- @Override
- public void run()
- {
- consumer.subscribe(List.of(topic));
-
- running = true;
-
- try
- {
- while (running)
- {
- ConsumerRecords<String, MessageTo> records = consumer.poll(Duration.ofMinutes(5));
- log.info("Fetched {} messages", records.count());
-
- if (loadInProgress)
- {
- for (ConsumerRecord<String, MessageTo> record : records)
- {
- UUID chatRoomId = UUID.fromString(record.key());
- MessageTo messageTo = record.value();
- ChatRoom chatRoom = chatRoomMaps[record.partition()].get(chatRoomId);
- KafkaChatRoomService kafkaChatRoomService =
- (KafkaChatRoomService) chatRoom.getChatRoomService();
- Message.MessageKey key = Message.MessageKey.of(messageTo.getUser(), messageTo.getId());
- Instant instant = Instant.ofEpochSecond(record.timestamp());
- LocalDateTime timestamp = LocalDateTime.ofInstant(instant, zoneId);
- Message message = new Message(key, record.offset(), timestamp, messageTo.getText());
- kafkaChatRoomService.persistMessage(message);
- }
- }
- else
- {
- if (!records.isEmpty())
- {
- throw new IllegalStateException("All owned partions should be paused, when no load is in progress!");
- }
- }
- }
- }
- }
-
- Mono<Message> sendMessage(
- UUID chatRoomId,
- Message.MessageKey key,
- LocalDateTime timestamp,
- String text)
- {
- int shard = this.shardingStrategy.selectShard(chatRoomId);
- TopicPartition tp = new TopicPartition(topic, shard);
- ZonedDateTime zdt = ZonedDateTime.of(timestamp, zoneId);
- return Mono.create(sink ->
- {
- ProducerRecord<String, MessageTo> record =
- new ProducerRecord<>(
- tp.topic(),
- tp.partition(),
- zdt.toEpochSecond(),
- chatRoomId.toString(),
- MessageTo.of(key.getUsername(), key.getMessageId(), text));
-
- producer.send(record, ((metadata, exception) ->
- {
- if (metadata != null)
- {
- // On successful send
- Message message = new Message(key, metadata.offset(), timestamp, text);
- log.info("Successfully send message {}", message);
- sink.success(message);
- }
- else
- {
- // On send-failure
- log.error(
- "Could not send message for chat-room={}, key={}, timestamp={}, text={}: {}",
- chatRoomId,
- key,
- timestamp,
- text,
- exception);
- sink.error(exception);
- }
- }));
- });