refactor: Added logging of loaded messages
demos/kafka/chat/src/main/java/de/juplo/kafka/chat/backend/implementation/kafka/DataChannel.java
package de.juplo.kafka.chat.backend.implementation.kafka;

import de.juplo.kafka.chat.backend.domain.*;
import de.juplo.kafka.chat.backend.domain.exceptions.LoadInProgressException;
import de.juplo.kafka.chat.backend.domain.exceptions.ShardNotOwnedException;
import de.juplo.kafka.chat.backend.implementation.kafka.messages.AbstractMessageTo;
import de.juplo.kafka.chat.backend.implementation.kafka.messages.data.EventChatMessageReceivedTo;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.WakeupException;
import reactor.core.publisher.Mono;

import java.time.*;
import java.util.*;
import java.util.stream.IntStream;


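/**
 * Produces to and consumes from the data topic that stores the chat messages.
 * Implements {@link ConsumerRebalanceListener}: on partition assignment, the
 * messages of the newly owned shards are reloaded into memory before normal
 * operations are resumed.
 */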
@Slf4j
public class DataChannel implements Runnable, ConsumerRebalanceListener
{
  private final String instanceId;
  private final String topic;
  private final Producer<String, AbstractMessageTo> producer;
  private final Consumer<String, AbstractMessageTo> consumer;
  private final ZoneId zoneId;
  private final int numShards;
  private final int bufferSize;
  private final Clock clock;
  private final boolean[] isShardOwned;
  private final long[] currentOffset;
  private final long[] nextOffset;
  private final Map<UUID, ChatRoomData>[] chatRoomData;
  private final InfoChannel infoChannel;
  private final ShardingPublisherStrategy shardingPublisherStrategy;

  private boolean running;
  @Getter
  private volatile boolean loadInProgress;


  public DataChannel(
    String instanceId,
    String topic,
    Producer<String, AbstractMessageTo> producer,
    Consumer<String, AbstractMessageTo> dataChannelConsumer,
    ZoneId zoneId,
    int numShards,
    int bufferSize,
    Clock clock,
    InfoChannel infoChannel,
    ShardingPublisherStrategy shardingPublisherStrategy)
  {
    log.debug(
        "{}: Creating DataChannel for topic {} with {} partitions",
        instanceId,
        topic,
        numShards);
    this.instanceId = instanceId;
    this.topic = topic;
    this.consumer = dataChannelConsumer;
    this.producer = producer;
    this.zoneId = zoneId;
    this.numShards = numShards;
    this.bufferSize = bufferSize;
    this.clock = clock;
    this.isShardOwned = new boolean[numShards];
    this.currentOffset = new long[numShards];
    this.nextOffset = new long[numShards];
    this.chatRoomData = new Map[numShards];
    IntStream
        .range(0, numShards)
        .forEach(shard -> this.chatRoomData[shard] = new HashMap<>());
    this.infoChannel = infoChannel;
    this.shardingPublisherStrategy = shardingPublisherStrategy;
  }


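  /**
   * Sends a chat message to the data topic. The returned {@link Mono} emits a
   * {@link Message} built from the acknowledged record's offset on success,
   * or signals the send exception on failure.
   */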
  Mono<Message> sendChatMessage(
      UUID chatRoomId,
      Message.MessageKey key,
      LocalDateTime timestamp,
      String text)
  {
    ZonedDateTime zdt = ZonedDateTime.of(timestamp, zoneId);
    return Mono.create(sink ->
    {
      ProducerRecord<String, AbstractMessageTo> record =
          new ProducerRecord<>(
              topic,
              null,
              zdt.toEpochSecond(),
              chatRoomId.toString(),
              EventChatMessageReceivedTo.of(key.getUsername(), key.getMessageId(), text));

      producer.send(record, ((metadata, exception) ->
      {
        if (exception == null)
        {
          // On successful send
          Message message = new Message(key, metadata.offset(), timestamp, text);
          log.info("Successfully sent message {}", message);
          sink.success(message);
        }
        else
        {
          // On send-failure
          log.error(
              "Could not send message for chat-room={}, key={}, timestamp={}, text={}: {}",
              chatRoomId,
              key,
              timestamp,
              text,
              exception);
          sink.error(exception);
        }
      }));
    });
  }

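  /**
   * Marks the newly assigned shards as owned and pauses normal operations:
   * the end offsets of the assigned partitions are recorded, the consumer is
   * sought back to the last known next offset, and the ownership of each
   * shard is announced via the info channel and the sharding-publisher
   * strategy before loading starts.
   */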
  @Override
  public void onPartitionsAssigned(Collection<TopicPartition> partitions)
  {
    log.info("Newly assigned partitions! Pausing normal operations...");
    loadInProgress = true;

    consumer.endOffsets(partitions).forEach((topicPartition, currentOffset) ->
    {
      int partition = topicPartition.partition();
      isShardOwned[partition] = true;
      this.currentOffset[partition] = currentOffset;

      log.info(
          "Partition assigned: {} - loading messages: next={} -> current={}",
          partition,
          nextOffset[partition],
          currentOffset);

      consumer.seek(topicPartition, nextOffset[partition]);
      infoChannel.sendShardAssignedEvent(partition);
      shardingPublisherStrategy
          .publishOwnership(partition)
          .doOnSuccess(instanceId -> log.info(
              "Successfully published instance {} as owner of shard {}",
              instanceId,
              partition))
          .doOnError(throwable -> log.error(
              "Could not publish instance {} as owner of shard {}: {}",
              instanceId,
              partition,
              throwable))
          .block();
    });

    consumer.resume(partitions);
  }

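  /**
   * Marks the revoked shards as no longer owned and notifies the info channel.
   */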
  @Override
  public void onPartitionsRevoked(Collection<TopicPartition> partitions)
  {
    partitions.forEach(topicPartition ->
    {
      int partition = topicPartition.partition();
      isShardOwned[partition] = false;
      log.info("Partition revoked: {} - next={}", partition, nextOffset[partition]);
      infoChannel.sendShardRevokedEvent(partition);
    });
  }

  @Override
  public void onPartitionsLost(Collection<TopicPartition> partitions)
  {
    log.warn("Lost partitions: {}", partitions);
    // TODO: Does the loss have to be handled differently?
    onPartitionsRevoked(partitions);
  }

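  /**
   * Poll loop of the data channel. While a load is in progress, fetched
   * records are replayed into the in-memory chat rooms; once all owned shards
   * are caught up, the owned partitions are paused and normal operations are
   * resumed. The loop exits on a {@link WakeupException}.
   */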
  @Override
  public void run()
  {
    running = true;

    while (running)
    {
      try
      {
        ConsumerRecords<String, AbstractMessageTo> records = consumer.poll(Duration.ofMinutes(1));
        log.info("Fetched {} messages", records.count());

        if (loadInProgress)
        {
          loadChatRoomData(records);

          if (isLoadingCompleted())
          {
            log.info("Loading of messages completed! Pausing all owned partitions...");
            pauseAllOwnedPartitions();
            log.info("Resuming normal operations...");
            loadInProgress = false;
          }
        }
        else
        {
          if (!records.isEmpty())
          {
            throw new IllegalStateException("All owned partitions should be paused when no load is in progress!");
          }
        }
      }
      catch (WakeupException e)
      {
        log.info("Received WakeupException, exiting!");
        running = false;
      }
    }

    log.info("Exiting normally");
  }

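  /**
   * Replays the fetched records: chat messages are restored into the
   * in-memory chat rooms, all other message types are ignored, and the next
   * expected offset is advanced for each touched partition.
   */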
  private void loadChatRoomData(ConsumerRecords<String, AbstractMessageTo> records)
  {
    for (ConsumerRecord<String, AbstractMessageTo> record : records)
    {
      UUID chatRoomId = UUID.fromString(record.key());

      switch (record.value().getType())
      {
        case EVENT_CHATMESSAGE_RECEIVED:
          Instant instant = Instant.ofEpochSecond(record.timestamp());
          LocalDateTime timestamp = LocalDateTime.ofInstant(instant, zoneId);
          loadChatMessage(
              chatRoomId,
              timestamp,
              record.offset(),
              (EventChatMessageReceivedTo) record.value(),
              record.partition());
          break;

        default:
          log.debug(
              "Ignoring message for chat-room {} with offset {}: {}",
              chatRoomId,
              record.offset(),
              record.value());
      }

      nextOffset[record.partition()] = record.offset() + 1;
    }
  }

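  /**
   * Restores a single chat message into the {@link KafkaChatMessageService}
   * of the chat room it belongs to, creating the room's in-memory
   * representation on demand.
   */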
  private void loadChatMessage(
      UUID chatRoomId,
      LocalDateTime timestamp,
      long offset,
      EventChatMessageReceivedTo chatMessageTo,
      int partition)
  {
    Message.MessageKey key = Message.MessageKey.of(chatMessageTo.getUser(), chatMessageTo.getId());
    Message message = new Message(key, offset, timestamp, chatMessageTo.getText());

    ChatRoomData chatRoomData = this
        .chatRoomData[partition]
        .computeIfAbsent(chatRoomId, this::computeChatRoomData);
    KafkaChatMessageService kafkaChatRoomService =
        (KafkaChatMessageService) chatRoomData.getChatRoomService();

    log.debug(
        "Loaded message from partition={} at offset={}: {}",
        partition,
        offset,
        message);
    kafkaChatRoomService.persistMessage(message);
  }

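  /**
   * Loading is complete when, for every owned shard, the next offset to read
   * has reached the end offset that was recorded when the shard was assigned.
   */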
  private boolean isLoadingCompleted()
  {
    return IntStream
        .range(0, numShards)
        .filter(shard -> isShardOwned[shard])
        .allMatch(shard -> nextOffset[shard] >= currentOffset[shard]);
  }

  private void pauseAllOwnedPartitions()
  {
    consumer.pause(IntStream
        .range(0, numShards)
        .filter(shard -> isShardOwned[shard])
        .mapToObj(shard -> new TopicPartition(topic, shard))
        .toList());
  }


  int[] getOwnedShards()
  {
    return IntStream
        .range(0, numShards)
        .filter(shard -> isShardOwned[shard])
        .toArray();
  }

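  /**
   * Resolves the {@link ChatRoomData} for a chat room on an owned shard.
   * Fails with {@link LoadInProgressException} while messages are being
   * loaded and with {@link ShardNotOwnedException} for foreign shards.
   */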
  Mono<ChatRoomData> getChatRoomData(int shard, UUID id)
  {
    if (loadInProgress)
    {
      return Mono.error(new LoadInProgressException());
    }

    if (!isShardOwned[shard])
    {
      return Mono.error(new ShardNotOwnedException(instanceId, shard));
    }

    return infoChannel
        .getChatRoomInfo(id)
        .map(chatRoomInfo ->
            chatRoomData[shard].computeIfAbsent(id, this::computeChatRoomData));
  }

  private ChatRoomData computeChatRoomData(UUID chatRoomId)
  {
    log.info("Creating ChatRoom {} with buffer-size {}", chatRoomId, bufferSize);
    KafkaChatMessageService service = new KafkaChatMessageService(this, chatRoomId);
    return new ChatRoomData(clock, service, bufferSize);
  }
}