package de.juplo.kafka.chat.backend.implementation.kafka;
-import de.juplo.kafka.chat.backend.domain.*;
-import de.juplo.kafka.chat.backend.domain.exceptions.LoadInProgressException;
+import de.juplo.kafka.chat.backend.domain.ChatRoomData;
+import de.juplo.kafka.chat.backend.domain.ChatRoomInfo;
+import de.juplo.kafka.chat.backend.domain.Message;
+import de.juplo.kafka.chat.backend.domain.ShardingPublisherStrategy;
import de.juplo.kafka.chat.backend.domain.exceptions.ShardNotOwnedException;
import de.juplo.kafka.chat.backend.implementation.kafka.messages.AbstractMessageTo;
import de.juplo.kafka.chat.backend.implementation.kafka.messages.data.EventChatMessageReceivedTo;
import lombok.Getter;
+import lombok.ToString;
import lombok.extern.slf4j.Slf4j;
-import org.apache.kafka.clients.consumer.Consumer;
-import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import reactor.core.publisher.Mono;
import java.time.*;
-import java.util.*;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.UUID;
import java.util.stream.IntStream;
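+/**
+ * Consumes the data-topic and maintains the {@link ChatRoomData} of all
+ * currently owned shards. The channel acts as the
+ * {@link ConsumerRebalanceListener} of its consumer: the messages of newly
+ * assigned partitions are replayed up to their end-offsets before the
+ * channel resumes normal operations.
+ */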
+@ToString(of = { "topic", "instanceId" })
@Slf4j
-public class DataChannel implements Runnable, ConsumerRebalanceListener
+public class DataChannel implements Channel, ConsumerRebalanceListener
{
private final String instanceId;
private final String topic;
private final Consumer<String, AbstractMessageTo> consumer;
private final ZoneId zoneId;
private final int numShards;
- private final int bufferSize;
+ private final Duration pollingInterval;
+ private final int historyLimit;
private final Clock clock;
private final boolean[] isShardOwned;
private final long[] currentOffset;
private final long[] nextOffset;
private final Map<UUID, ChatRoomData>[] chatRoomData;
- private final InfoChannel infoChannel;
+ private final ChannelMediator channelMediator;
private final ShardingPublisherStrategy shardingPublisherStrategy;
private boolean running;
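+  // The channel starts in STARTING, switches to LOAD_IN_PROGRESS while the
+  // messages of newly assigned partitions are replayed, becomes READY once
+  // the replay has caught up, and ends in SHUTTING_DOWN after a wakeup.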
@Getter
- private volatile boolean loadInProgress;
+ private volatile ChannelState channelState = ChannelState.STARTING;
public DataChannel(
      String instanceId,
      String topic,
      Producer<String, AbstractMessageTo> producer,
      Consumer<String, AbstractMessageTo> dataChannelConsumer,
ZoneId zoneId,
int numShards,
- int bufferSize,
+ Duration pollingInterval,
+ int historyLimit,
Clock clock,
- InfoChannel infoChannel,
+ ChannelMediator channelMediator,
ShardingPublisherStrategy shardingPublisherStrategy)
{
    log.debug(
        "{}: Creating DataChannel for topic {}",
        instanceId,
        topic);
    this.instanceId = instanceId;
    this.topic = topic;
    this.producer = producer;
    this.consumer = dataChannelConsumer;
this.zoneId = zoneId;
this.numShards = numShards;
- this.bufferSize = bufferSize;
+ this.pollingInterval = pollingInterval;
+ this.historyLimit = historyLimit;
this.clock = clock;
this.isShardOwned = new boolean[numShards];
    this.currentOffset = new long[numShards];
    this.nextOffset = new long[numShards];
    this.chatRoomData = new Map[numShards];
IntStream
.range(0, numShards)
.forEach(shard -> this.chatRoomData[shard] = new HashMap<>());
- this.infoChannel = infoChannel;
+ this.channelMediator = channelMediator;
this.shardingPublisherStrategy = shardingPublisherStrategy;
}
public void onPartitionsAssigned(Collection<TopicPartition> partitions)
{
log.info("Newly assigned partitions! Pausing normal operations...");
- loadInProgress = true;
+ channelState = ChannelState.LOAD_IN_PROGRESS;
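+    // The current end-offsets mark the positions up to which each newly
+    // assigned partition has to be replayed before its shard counts as
+    // loaded again.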
consumer.endOffsets(partitions).forEach((topicPartition, currentOffset) ->
    {
      int partition = topicPartition.partition();
      isShardOwned[partition] = true;
      this.currentOffset[partition] = currentOffset;

      log.info(
          "Partition assigned: {} - loading messages: next={} -> current={}",
          partition,
          nextOffset[partition],
          currentOffset);
consumer.seek(topicPartition, nextOffset[partition]);
- infoChannel.sendShardAssignedEvent(partition);
+ channelMediator.shardAssigned(partition);
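+      // Make this instance publicly known as the new owner of the shard.
+      // Failures are only logged: onErrorComplete() swallows the error, so
+      // that a failing publisher cannot stall the rebalance.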
shardingPublisherStrategy
.publishOwnership(partition)
          .doOnSuccess(instanceId -> log.info(
              "Successfully published instance {} as owner of shard {}",
              instanceId,
              partition))
          .doOnError(throwable -> log.error(
"Could not publish instance {} as owner of shard {}: {}",
instanceId,
partition,
- throwable))
+ throwable.toString()))
+ .onErrorComplete()
.block();
});
  }

  public void onPartitionsRevoked(Collection<TopicPartition> partitions)
  {
    partitions.forEach(topicPartition ->
    {
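+      // Mark the shard as lost, remember the position up to which its
+      // messages were consumed, and deactivate its chat-rooms, so that
+      // cached instances cannot serve stale data after the revocation.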
int partition = topicPartition.partition();
isShardOwned[partition] = false;
+ nextOffset[partition] = consumer.position(topicPartition);
+
log.info("Partition revoked: {} - next={}", partition, nextOffset[partition]);
- infoChannel.sendShardRevokedEvent(partition);
+
+ chatRoomData[partition]
+ .values()
+          .forEach(ChatRoomData::deactivate);
+
+ channelMediator.shardRevoked(partition);
});
}
  public void run()
  {
    running = true;

    while (running)
    {
try
{
- ConsumerRecords<String, AbstractMessageTo> records = consumer.poll(Duration.ofMinutes(1));
+ ConsumerRecords<String, AbstractMessageTo> records = consumer.poll(pollingInterval);
log.info("Fetched {} messages", records.count());
- if (loadInProgress)
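+        // Handle the fetched records according to the current state of the
+        // channel: replay them while a load is in progress, ignore them
+        // during shutdown, and treat them as an error otherwise.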
+ switch (channelState)
{
- loadChatRoomData(records);
-
- if (isLoadingCompleted())
+ case LOAD_IN_PROGRESS ->
{
- log.info("Loading of messages completed! Pausing all owned partitions...");
- pauseAllOwnedPartions();
- log.info("Resuming normal operations...");
- loadInProgress = false;
+ loadChatRoomData(records);
+
+ if (isLoadingCompleted())
+ {
+ log.info("Loading of messages completed! Pausing all owned partitions...");
+ pauseAllOwnedPartions();
+ activateAllOwnedChatRooms();
+ log.info("Resuming normal operations...");
+ channelState = ChannelState.READY;
+ }
}
- }
- else
- {
- if (!records.isEmpty())
+ case SHUTTING_DOWN -> log.info("Shutdown in progress: ignoring {} fetched messages.", records.count());
+ default ->
{
- throw new IllegalStateException("All owned partitions should be paused, when no load is in progress!");
+ if (!records.isEmpty())
+ {
+            throw new IllegalStateException("All owned partitions should be paused when in state " + channelState);
+ }
}
}
}
catch (WakeupException e)
{
log.info("Received WakeupException, exiting!");
+ channelState = ChannelState.SHUTTING_DOWN;
running = false;
}
    }
  }

  private void loadChatMessage(
      UUID chatRoomId,
      LocalDateTime timestamp,
      long offset,
      EventChatMessageReceivedTo chatMessageTo,
      int partition)
  {
Message.MessageKey key = Message.MessageKey.of(chatMessageTo.getUser(), chatMessageTo.getId());
Message message = new Message(key, offset, timestamp, chatMessageTo.getText());
- ChatRoomData chatRoomData = this
- .chatRoomData[partition]
- .computeIfAbsent(chatRoomId, this::computeChatRoomData);
+ ChatRoomData chatRoomData = computeChatRoomData(chatRoomId, partition);
KafkaChatMessageService kafkaChatRoomService =
(KafkaChatMessageService) chatRoomData.getChatRoomService();

    kafkaChatRoomService.persistMessage(message);
  }

  private void pauseAllOwnedPartions()
  {
    consumer.pause(IntStream
        .range(0, numShards)
        .filter(shard -> isShardOwned[shard])
        .mapToObj(shard -> new TopicPartition(topic, shard))
        .toList());
}
+ private void activateAllOwnedChatRooms()
+ {
+ IntStream
+ .range(0, numShards)
+ .filter(shard -> isShardOwned[shard])
+ .forEach(shard -> chatRoomData[shard]
+ .values()
+          .forEach(ChatRoomData::activate));
+ }
+
int[] getOwnedShards()
{
    return IntStream
        .range(0, numShards)
        .filter(shard -> isShardOwned[shard])
        .toArray();
}
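+  // Presumably called by the ChannelMediator when a newly created chat-room
+  // is announced, so that the owning shard holds a ChatRoomData instance
+  // before the first message for it arrives.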
+ void createChatRoomData(ChatRoomInfo chatRoomInfo)
+ {
+ computeChatRoomData(
+ chatRoomInfo.getId(),
+ chatRoomInfo.getShard());
+ }
+
Mono<ChatRoomData> getChatRoomData(int shard, UUID id)
{
- if (loadInProgress)
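+    // Read the volatile state only once, so that the check and the raised
+    // exception are guaranteed to report the same value.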
+ ChannelState capturedState = channelState;
+ if (capturedState != ChannelState.READY)
{
- return Mono.error(new LoadInProgressException());
+ return Mono.error(new ChannelNotReadyException(capturedState));
}
    if (!isShardOwned[shard])
    {
      return Mono.error(new ShardNotOwnedException(instanceId, shard));
    }
- return infoChannel
- .getChatRoomInfo(id)
- .map(chatRoomInfo ->
- chatRoomData[shard].computeIfAbsent(id, this::computeChatRoomData));
+ return Mono.justOrEmpty(chatRoomData[shard].get(id));
+ }
+
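+  // Shared by createChatRoomData() and the replay of recorded messages:
+  // whichever path runs first creates the instance, the other one finds it,
+  // so an already existing instance is not an error here.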
+ private ChatRoomData computeChatRoomData(UUID chatRoomId, int shard)
+ {
+ ChatRoomData chatRoomData = this.chatRoomData[shard].get(chatRoomId);
+
+ if (chatRoomData != null)
+ {
+ log.info(
+ "Ignoring request to create already existing ChatRoomData for {}",
+ chatRoomId);
+ }
+ else
+ {
+ log.info("Creating ChatRoomData {} with history-limit {}", chatRoomId, historyLimit);
+ KafkaChatMessageService service = new KafkaChatMessageService(this, chatRoomId);
+ chatRoomData = new ChatRoomData(clock, service, historyLimit);
+ this.chatRoomData[shard].put(chatRoomId, chatRoomData);
+ }
+
+ return chatRoomData;
}
- private ChatRoomData computeChatRoomData(UUID chatRoomId)
+ ConsumerGroupMetadata getConsumerGroupMetadata()
{
- log.info("Creating ChatRoom {} with buffer-size {}", chatRoomId, bufferSize);
- KafkaChatMessageService service = new KafkaChatMessageService(this, chatRoomId);
- return new ChatRoomData(clock, service, bufferSize);
+ return consumer.groupMetadata();
}
}
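
// Not part of this diff: minimal sketches of the collaborators the new code
// references, inferred from their usage above. The names of the constants are
// taken from the states used in DataChannel; the actual definitions in the
// repository may differ.

interface Channel extends Runnable
{
  // Assumed member: DataChannel exposes its state via Lombok's @Getter.
  ChannelState getChannelState();
}

enum ChannelState
{
  STARTING,
  LOAD_IN_PROGRESS,
  READY,
  SHUTTING_DOWN,
}

class ChannelNotReadyException extends IllegalStateException
{
  final ChannelState state;

  ChannelNotReadyException(ChannelState state)
  {
    // Assumed message format; callers only rely on the carried state.
    super("Channel not ready! Current state: " + state);
    this.state = state;
  }
}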