import de.juplo.kafka.chat.backend.domain.ChatHomeService;
import de.juplo.kafka.chat.backend.domain.ChatRoom;
+import de.juplo.kafka.chat.backend.domain.Message;
+import de.juplo.kafka.chat.backend.domain.ShardNotOwnedException;
+import de.juplo.kafka.chat.backend.persistence.KafkaLikeShardingStrategy;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.producer.Producer;
+import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
-import java.time.ZoneId;
+import java.time.*;
import java.util.*;
import java.util.concurrent.ExecutorService;
@Slf4j
-public class KafkaChatHomeService implements ChatHomeService, ConsumerRebalanceListener
+public class KafkaChatHomeService implements ChatHomeService, Runnable, ConsumerRebalanceListener
{
private final ExecutorService executorService;
private final Consumer<String, MessageTo> consumer;
private final String topic;
private final ZoneId zoneId;
// private final long[] offsets; Erst mal immer alles neu einlesen
- private final ChatHomeLoader[] chatHomeLoaders;
+ // Per-shard ownership flag, indexed by partition number.
+ // NOTE(review): allocated in the constructor but neither written nor read
+ // anywhere in this patch — confirm it is maintained by the rebalance
+ // callbacks and consulted before serving a shard.
+ private final boolean[] isShardOwned;
private final Map<UUID, ChatRoom>[] chatRoomMaps;
+ // Maps a chat-room UUID onto a partition number (see sendMessage()).
+ private final KafkaLikeShardingStrategy shardingStrategy;
+
+ // Set by run() when the consumer loop starts; clearing it stops the loop.
+ private boolean running;
+ // True while messages are being replayed after a rebalance; read by request
+ // threads in getChatRoom()/getChatRooms(), hence volatile.
+ private volatile boolean loadInProgress;
public KafkaChatHomeService(
// {
// this.offsets[i] = 0l;
// }
- this.chatHomeLoaders = new ChatHomeLoader[numShards];
+ // One ownership flag per shard/partition; replaces the removed loaders.
+ this.isShardOwned = new boolean[numShards];
this.chatRoomMaps = new Map[numShards];
+ // The sharding strategy must agree with the partition count of the backing
+ // topic — presumably numShards equals that count; TODO confirm at the caller.
+ this.shardingStrategy = new KafkaLikeShardingStrategy(numShards);
}
@Override
public void onPartitionsAssigned(Collection<TopicPartition> partitions)
{
+ // Block request threads from serving any shard while the newly assigned
+ // partitions are replayed (getChatRoom()/getChatRooms() check this flag).
+ loadInProgress = true;
+
consumer.endOffsets(partitions).forEach((topicPartition, currentOffset) ->
{
if (!topicPartition.topic().equals(topic))
// TODO: reuse! Nicht immer alles neu laden, sondern erst ab offsets[partition]!
consumer.seek(topicPartition, unseenOffset);
- chatHomeLoaders[partition] = new ChatHomeLoader(
- producer,
- currentOffset,
- zoneId);
});
+
+ // Ensure the assigned partitions are actually polled for the replay.
+ // NOTE(review): nothing in this patch pauses them again or clears
+ // loadInProgress once the replay has caught up with the end offsets —
+ // run() even asserts that paused partitions deliver no records. Confirm
+ // where that completion handling happens.
+ consumer.resume(partitions);
}
@Override
onPartitionsRevoked(partitions);
}
+ /**
+  * Consumer loop, executed on the {@code executorService}: polls the topic
+  * and, while a post-rebalance load is in progress, replays the fetched
+  * messages into the in-memory chat rooms of the owning shard.
+  */
+ @Override
+ public void run()
+ {
+   consumer.subscribe(List.of(topic));
+
+   running = true;
+
+   try
+   {
+     while (running)
+     {
+       ConsumerRecords<String, MessageTo> records = consumer.poll(Duration.ofMinutes(5));
+       log.info("Fetched {} messages", records.count());
+
+       if (loadInProgress)
+       {
+         for (ConsumerRecord<String, MessageTo> record : records)
+         {
+           UUID chatRoomId = UUID.fromString(record.key());
+           MessageTo messageTo = record.value();
+           // NOTE(review): get() returns null if this chat room was never
+           // materialized on this instance — confirm rooms are restored
+           // before the replay, otherwise this NPEs.
+           ChatRoom chatRoom = chatRoomMaps[record.partition()].get(chatRoomId);
+           KafkaChatRoomService kafkaChatRoomService =
+               (KafkaChatRoomService) chatRoom.getChatRoomService();
+           Message.MessageKey key = Message.MessageKey.of(messageTo.getUser(), messageTo.getId());
+           // Timestamps are produced via ZonedDateTime.toEpochSecond() in
+           // sendMessage(), so they are read back as SECONDS here.  Kafka
+           // record timestamps are conventionally epoch-millis — if this is
+           // ever changed, both sides must change together. TODO confirm.
+           Instant instant = Instant.ofEpochSecond(record.timestamp());
+           LocalDateTime timestamp = LocalDateTime.ofInstant(instant, zoneId);
+           Message message = new Message(key, record.offset(), timestamp, messageTo.getText());
+           kafkaChatRoomService.persistMessage(message);
+         }
+       }
+       else
+       {
+         if (!records.isEmpty())
+         {
+           // Owned partitions are resumed only for a load-in-progress;
+           // receiving data outside of a load indicates a state-handling bug.
+           throw new IllegalStateException("All owned partitions should be paused, when no load is in progress!");
+         }
+       }
+     }
+   }
+   catch (Exception e)
+   {
+     // Thread boundary: without this catch the try-block was not even valid
+     // Java (no catch/finally), and an uncaught exception would terminate
+     // the executor task silently.  Log it explicitly before exiting.
+     log.error("Unexpected exception in the consumer loop for topic {}", topic, e);
+   }
+ }
+
+ /**
+  * Publishes a chat message to the partition that owns the given chat room
+  * and completes the returned {@link Mono} once the broker acknowledged it.
+  *
+  * @param chatRoomId id of the target chat room; selects the shard and is
+  *                   also used as the record key
+  * @param key        user / message-id pair identifying the message
+  * @param timestamp  local time of the message, interpreted in {@code zoneId}
+  * @param text       the message body
+  * @return a {@link Mono} emitting the sent {@link Message} (carrying the
+  *         record offset as its serial), or signalling the send failure
+  */
+ Mono<Message> sendMessage(
+ UUID chatRoomId,
+ Message.MessageKey key,
+ LocalDateTime timestamp,
+ String text)
+ {
+ int shard = this.shardingStrategy.selectShard(chatRoomId);
+ TopicPartition tp = new TopicPartition(topic, shard);
+ ZonedDateTime zdt = ZonedDateTime.of(timestamp, zoneId);
+ return Mono.create(sink ->
+ {
+ // The record timestamp is set explicitly from the message time.
+ // NOTE(review): toEpochSecond() yields SECONDS, while Kafka record
+ // timestamps are conventionally epoch-millis.  run() reads the value
+ // back with Instant.ofEpochSecond(), so the two sides are consistent
+ // with each other — change both together, or not at all.
+ ProducerRecord<String, MessageTo> record =
+ new ProducerRecord<>(
+ tp.topic(),
+ tp.partition(),
+ zdt.toEpochSecond(),
+ chatRoomId.toString(),
+ MessageTo.of(key.getUsername(), key.getMessageId(), text));
+
+ producer.send(record, ((metadata, exception) ->
+ {
+ if (metadata != null)
+ {
+ // On successful send
+ Message message = new Message(key, metadata.offset(), timestamp, text);
+ log.info("Successfully send message {}", message);
+ sink.success(message);
+ }
+ else
+ {
+ // On send-failure
+ log.error(
+ "Could not send message for chat-room={}, key={}, timestamp={}, text={}: {}",
+ chatRoomId,
+ key,
+ timestamp,
+ text,
+ exception);
+ sink.error(exception);
+ }
+ }));
+ });
+ }
+
+
@Override
public Mono<ChatRoom> getChatRoom(int shard, UUID id)
{
- return Mono.justOrEmpty(chatRoomMaps[shard].get(id));
+ // While a rebalance-triggered reload is running, no shard may be served.
+ // NOTE(review): this throws synchronously instead of returning
+ // Mono.error(), and it checks only the global loadInProgress flag — the
+ // isShardOwned[shard] flag declared above is never consulted.  Confirm
+ // both against the ChatHomeService contract.
+ if (loadInProgress)
+ {
+ throw new ShardNotOwnedException(shard);
+ }
+ else
+ {
+ return Mono.justOrEmpty(chatRoomMaps[shard].get(id));
+ }
}
@Override
public Flux<ChatRoom> getChatRooms(int shard)
{
- return Flux.fromStream(chatRoomMaps[shard].values().stream());
+ // Same guard as getChatRoom(): reject requests during a reload.
+ // NOTE(review): synchronous throw (not Flux.error()) and no
+ // isShardOwned[shard] check — confirm intended semantics.
+ if (loadInProgress)
+ {
+ throw new ShardNotOwnedException(shard);
+ }
+ else
+ {
+ return Flux.fromStream(chatRoomMaps[shard].values().stream());
+ }
}
}