refactor: Added success- and failure-callbacks for `ChatHomeService`
[demos/kafka/chat] src/main/java/de/juplo/kafka/chat/backend/implementation/kafka/DataChannel.java
package de.juplo.kafka.chat.backend.implementation.kafka;

import de.juplo.kafka.chat.backend.domain.*;
import de.juplo.kafka.chat.backend.domain.exceptions.LoadInProgressException;
import de.juplo.kafka.chat.backend.domain.exceptions.ShardNotOwnedException;
import de.juplo.kafka.chat.backend.implementation.kafka.messages.AbstractMessageTo;
import de.juplo.kafka.chat.backend.implementation.kafka.messages.data.EventChatMessageReceivedTo;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.WakeupException;
import reactor.core.publisher.Mono;

import java.time.*;
import java.util.*;
import java.util.stream.IntStream;


@Slf4j
public class DataChannel implements Runnable, ConsumerRebalanceListener
{
  private final String instanceId;
  private final String topic;
  private final Producer<String, AbstractMessageTo> producer;
  private final Consumer<String, AbstractMessageTo> consumer;
  private final ZoneId zoneId;
  private final int numShards;
  private final int bufferSize;
  private final Clock clock;
  private final boolean[] isShardOwned;
  private final long[] currentOffset;
  private final long[] nextOffset;
  private final Map<UUID, ChatRoomData>[] chatRoomData;
  private final InfoChannel infoChannel;
  private final ShardingPublisherStrategy shardingPublisherStrategy;

  private boolean running;
  @Getter
  private volatile boolean loadInProgress;


  public DataChannel(
    String instanceId,
    String topic,
    Producer<String, AbstractMessageTo> producer,
    Consumer<String, AbstractMessageTo> dataChannelConsumer,
    ZoneId zoneId,
    int numShards,
    int bufferSize,
    Clock clock,
    InfoChannel infoChannel,
    ShardingPublisherStrategy shardingPublisherStrategy)
  {
    log.debug(
        "{}: Creating DataChannel for topic {} with {} partitions",
        instanceId,
        topic,
        numShards);
    this.instanceId = instanceId;
    this.topic = topic;
    this.consumer = dataChannelConsumer;
    this.producer = producer;
    this.zoneId = zoneId;
    this.numShards = numShards;
    this.bufferSize = bufferSize;
    this.clock = clock;
    this.isShardOwned = new boolean[numShards];
    this.currentOffset = new long[numShards];
    this.nextOffset = new long[numShards];
    this.chatRoomData = new Map[numShards];
    IntStream
        .range(0, numShards)
        .forEach(shard -> this.chatRoomData[shard] = new HashMap<>());
    this.infoChannel = infoChannel;
    this.shardingPublisherStrategy = shardingPublisherStrategy;
  }


  Mono<Message> sendChatMessage(
      UUID chatRoomId,
      Message.MessageKey key,
      LocalDateTime timestamp,
      String text)
  {
    ZonedDateTime zdt = ZonedDateTime.of(timestamp, zoneId);
    return Mono.create(sink ->
    {
      ProducerRecord<String, AbstractMessageTo> record =
          new ProducerRecord<>(
              topic,
              null,
              zdt.toEpochSecond(),
              chatRoomId.toString(),
              EventChatMessageReceivedTo.of(key.getUsername(), key.getMessageId(), text));

      producer.send(record, ((metadata, exception) ->
      {
        if (exception == null)
        {
          // On successful send
          Message message = new Message(key, metadata.offset(), timestamp, text);
          log.info("Successfully sent message {}", message);
          sink.success(message);
        }
        else
        {
          // On send-failure
          log.error(
              "Could not send message for chat-room={}, key={}, timestamp={}, text={}: {}",
              chatRoomId,
              key,
              timestamp,
              text,
              exception);
          sink.error(exception);
        }
      }));
    });
  }

  @Override
  public void onPartitionsAssigned(Collection<TopicPartition> partitions)
  {
    log.info("Newly assigned partitions! Pausing normal operations...");
    loadInProgress = true;

    consumer.endOffsets(partitions).forEach((topicPartition, currentOffset) ->
    {
      int partition = topicPartition.partition();
      isShardOwned[partition] = true;
      this.currentOffset[partition] = currentOffset;

      log.info(
          "Partition assigned: {} - loading messages: next={} -> current={}",
          partition,
          nextOffset[partition],
          currentOffset);

      consumer.seek(topicPartition, nextOffset[partition]);
      infoChannel.sendShardAssignedEvent(partition);
      shardingPublisherStrategy
          .publishOwnership(partition)
          .doOnSuccess(instanceId -> log.info(
              "Successfully published instance {} as owner of shard {}",
              instanceId,
              partition))
          .doOnError(throwable -> log.error(
              "Could not publish instance {} as owner of shard {}: {}",
              instanceId,
              partition,
              throwable))
          .block();
    });

    consumer.resume(partitions);
  }

  @Override
  public void onPartitionsRevoked(Collection<TopicPartition> partitions)
  {
    partitions.forEach(topicPartition ->
    {
      int partition = topicPartition.partition();
      isShardOwned[partition] = false;
      nextOffset[partition] = consumer.position(topicPartition);
      log.info("Partition revoked: {} - next={}", partition, nextOffset[partition]);
      infoChannel.sendShardRevokedEvent(partition);
    });
  }

  @Override
  public void onPartitionsLost(Collection<TopicPartition> partitions)
  {
    log.warn("Lost partitions: {}", partitions);
    // TODO: Does the loss have to be handled differently?
    onPartitionsRevoked(partitions);
  }

  @Override
  public void run()
  {
    running = true;

    while (running)
    {
      try
      {
        ConsumerRecords<String, AbstractMessageTo> records = consumer.poll(Duration.ofMinutes(1));
        log.info("Fetched {} messages", records.count());

        if (loadInProgress)
        {
          loadChatRoomData(records);

          if (isLoadingCompleted())
          {
            log.info("Loading of messages completed! Pausing all owned partitions...");
            pauseAllOwnedPartitions();
            log.info("Resuming normal operations...");
            loadInProgress = false;
          }
        }
        else
        {
          if (!records.isEmpty())
          {
            throw new IllegalStateException("All owned partitions should be paused when no load is in progress!");
          }
        }
      }
      catch (WakeupException e)
      {
        log.info("Received WakeupException, exiting!");
        running = false;
      }
    }

    log.info("Exiting normally");
  }

  private void loadChatRoomData(ConsumerRecords<String, AbstractMessageTo> records)
  {
    for (ConsumerRecord<String, AbstractMessageTo> record : records)
    {
      UUID chatRoomId = UUID.fromString(record.key());

      switch (record.value().getType())
      {
        case EVENT_CHATMESSAGE_RECEIVED:
          // The record timestamp is written as epoch seconds in sendChatMessage(),
          // so it is read back as seconds here
          Instant instant = Instant.ofEpochSecond(record.timestamp());
          LocalDateTime timestamp = LocalDateTime.ofInstant(instant, zoneId);
          loadChatMessage(
              chatRoomId,
              timestamp,
              record.offset(),
              (EventChatMessageReceivedTo) record.value(),
              record.partition());
          break;

        default:
          log.debug(
              "Ignoring message for chat-room {} with offset {}: {}",
              chatRoomId,
              record.offset(),
              record.value());
      }

      nextOffset[record.partition()] = record.offset() + 1;
    }
  }

  private void loadChatMessage(
      UUID chatRoomId,
      LocalDateTime timestamp,
      long offset,
      EventChatMessageReceivedTo chatMessageTo,
      int partition)
  {
    Message.MessageKey key = Message.MessageKey.of(chatMessageTo.getUser(), chatMessageTo.getId());
    Message message = new Message(key, offset, timestamp, chatMessageTo.getText());

    ChatRoomData chatRoomData = this
        .chatRoomData[partition]
        .computeIfAbsent(chatRoomId, this::computeChatRoomData);
    KafkaChatMessageService kafkaChatRoomService =
        (KafkaChatMessageService) chatRoomData.getChatRoomService();

    log.debug(
        "Loaded message from partition={} at offset={}: {}",
        partition,
        offset,
        message);
    kafkaChatRoomService.persistMessage(message);
  }

  private boolean isLoadingCompleted()
  {
    return IntStream
        .range(0, numShards)
        .filter(shard -> isShardOwned[shard])
        .allMatch(shard ->
        {
          TopicPartition partition = new TopicPartition(topic, shard);
          long position = consumer.position(partition);
          return position >= currentOffset[shard];
        });
  }

  private void pauseAllOwnedPartitions()
  {
    consumer.pause(IntStream
        .range(0, numShards)
        .filter(shard -> isShardOwned[shard])
        .mapToObj(shard -> new TopicPartition(topic, shard))
        .toList());
  }


  int[] getOwnedShards()
  {
    return IntStream
        .range(0, numShards)
        .filter(shard -> isShardOwned[shard])
        .toArray();
  }

  Mono<ChatRoomData> getChatRoomData(int shard, UUID id)
  {
    if (loadInProgress)
    {
      return Mono.error(new LoadInProgressException());
    }

    if (!isShardOwned[shard])
    {
      return Mono.error(new ShardNotOwnedException(instanceId, shard));
    }

    return infoChannel
        .getChatRoomInfo(id)
        .map(chatRoomInfo ->
            chatRoomData[shard].computeIfAbsent(id, this::computeChatRoomData));
  }

  private ChatRoomData computeChatRoomData(UUID chatRoomId)
  {
    log.info("Creating ChatRoom {} with buffer-size {}", chatRoomId, bufferSize);
    KafkaChatMessageService service = new KafkaChatMessageService(this, chatRoomId);
    return new ChatRoomData(clock, service, bufferSize);
  }

  ConsumerGroupMetadata getConsumerGroupMetadata()
  {
    return consumer.groupMetadata();
  }
}
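
The success- and failure-callbacks referred to in the commit message surface to callers through the Mono returned by sendChatMessage(): the producer callback completes the sink with the persisted Message on acknowledgement and signals the exception otherwise. A minimal caller sketch under assumptions (the helper class, user name, and message id are hypothetical and not part of the commit; it assumes a caller in the same package and a MessageKey.of(username, messageId) factory as used above):

package de.juplo.kafka.chat.backend.implementation.kafka;

import de.juplo.kafka.chat.backend.domain.Message;
import lombok.extern.slf4j.Slf4j;
import reactor.core.publisher.Mono;

import java.time.LocalDateTime;
import java.util.UUID;


// Hypothetical illustration only: shows how the success-/failure-callbacks
// propagate through the Mono returned by DataChannel.sendChatMessage()
@Slf4j
class DataChannelSendSketch
{
  static Mono<Message> send(DataChannel dataChannel, UUID chatRoomId, String text)
  {
    // Hypothetical user and message-id
    Message.MessageKey key = Message.MessageKey.of("alice", 1L);
    return dataChannel
        .sendChatMessage(chatRoomId, key, LocalDateTime.now(), text)
        // Emitted via sink.success(message) in the producer callback
        .doOnSuccess(message -> log.info("Broker acknowledged: {}", message))
        // Emitted via sink.error(exception) in the producer callback
        .doOnError(throwable -> log.error("Send failed: {}", throwable.toString()));
  }
}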