risingwave_meta/barrier/complete_task.rs

// Copyright 2025 RisingWave Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;
use std::future::{Future, pending};
use std::mem::replace;
use std::sync::Arc;

use anyhow::Context;
use futures::future::try_join_all;
use prometheus::HistogramTimer;
use risingwave_common::catalog::{DatabaseId, TableId};
use risingwave_common::must_match;
use risingwave_common::util::deployment::Deployment;
use risingwave_pb::hummock::HummockVersionStats;
use tokio::task::JoinHandle;

use crate::barrier::checkpoint::CheckpointControl;
use crate::barrier::command::CommandContext;
use crate::barrier::context::GlobalBarrierWorkerContext;
use crate::barrier::notifier::Notifier;
use crate::barrier::progress::TrackingJob;
use crate::barrier::rpc::ControlStreamManager;
use crate::barrier::schedule::PeriodicBarriers;
use crate::hummock::CommitEpochInfo;
use crate::manager::MetaSrvEnv;
use crate::rpc::metrics::GLOBAL_META_METRICS;
use crate::{MetaError, MetaResult};

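/// State of the at-most-one in-flight barrier-completion task. The barrier worker
/// transitions from `None` to `Completing` when a collected barrier starts committing,
/// and back to `None` (or to `Err` on failure) once the spawned task is joined.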
pub(super) enum CompletingTask {
    None,
    Completing {
        #[expect(clippy::type_complexity)]
        /// `database_id` -> (`Some(database_graph_committed_epoch)`, [(`creating_job_id`, `creating_job_committed_epoch`)])
        epochs_to_ack: HashMap<DatabaseId, (Option<u64>, Vec<(TableId, u64)>)>,

        // The join handle of the spawned task that completes the barrier.
        // On success, the task returns the `HummockVersionStats` after the epochs
        // have been committed.
        join_handle: JoinHandle<MetaResult<HummockVersionStats>>,
    },
    #[expect(dead_code)]
    Err(MetaError),
}

/// Task for completing a checkpoint barrier. Non-checkpoint barriers do not produce a task.
#[derive(Default)]
pub(super) struct CompleteBarrierTask {
    pub(super) commit_info: CommitEpochInfo,
    pub(super) finished_jobs: Vec<TrackingJob>,
    pub(super) finished_cdc_table_backfill: Vec<TableId>,
    pub(super) notifiers: Vec<Notifier>,
    /// `database_id` -> (`Some((command_ctx, enqueue_time))`, [(`creating_job_id`, `epoch`)])
    #[expect(clippy::type_complexity)]
    pub(super) epoch_infos: HashMap<
        DatabaseId,
        (
            Option<(CommandContext, HistogramTimer)>,
            Vec<(TableId, u64)>,
        ),
    >,
    /// Source IDs that have finished loading data and need `LoadFinish` commands.
    pub(super) load_finished_source_ids: Vec<u32>,
}

impl CompleteBarrierTask {
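    /// Build the map of epochs to acknowledge once this task completes: for each database,
    /// the committed epoch of the database graph (if a command was carried) plus the
    /// committed epochs of its creating jobs.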
    #[expect(clippy::type_complexity)]
    pub(super) fn epochs_to_ack(&self) -> HashMap<DatabaseId, (Option<u64>, Vec<(TableId, u64)>)> {
        self.epoch_infos
            .iter()
            .map(|(database_id, (command_context, creating_job_epochs))| {
                (
                    *database_id,
                    (
                        command_context
                            .as_ref()
                            .map(|(command, _)| command.barrier_info.prev_epoch.value().0),
                        creating_job_epochs.clone(),
                    ),
                )
            })
            .collect()
    }
}

impl CompleteBarrierTask {
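    /// Commit the collected epochs to hummock, run post-collect hooks for the carried
    /// commands, notify waiters, and finalize finished jobs. Runs inside the task spawned
    /// by [`CompletingTask::next_completed_barrier`].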
    pub(super) async fn complete_barrier(
        self,
        context: &impl GlobalBarrierWorkerContext,
        env: MetaSrvEnv,
    ) -> MetaResult<HummockVersionStats> {
        let result: MetaResult<HummockVersionStats> = try {
            let wait_commit_timer = GLOBAL_META_METRICS
                .barrier_wait_commit_latency
                .start_timer();
            let version_stats = context.commit_epoch(self.commit_info).await?;

            // Handle load-finished source IDs for refreshable batch sources. This runs
            // in the spawned completion task to avoid deadlocking barrier collection.
            if !self.load_finished_source_ids.is_empty() {
                context
                    .handle_load_finished_source_ids(self.load_finished_source_ids.clone())
                    .await?;
            }

            for command_ctx in self
                .epoch_infos
                .values()
                .flat_map(|(command, _)| command.as_ref().map(|(command, _)| command))
            {
                context.post_collect_command(command_ctx).await?;
            }

            wait_commit_timer.observe_duration();
            version_stats
        };

        let version_stats = {
            let version_stats = match result {
                Ok(version_stats) => version_stats,
                Err(e) => {
                    for notifier in self.notifiers {
                        notifier.notify_collection_failed(e.clone());
                    }
                    return Err(e);
                }
            };
            self.notifiers.into_iter().for_each(|notifier| {
                notifier.notify_collected();
            });
            try_join_all(
                self.finished_jobs
                    .into_iter()
                    .map(|finished_job| context.finish_creating_job(finished_job)),
            )
            .await?;
            try_join_all(
                self.finished_cdc_table_backfill
                    .into_iter()
                    .map(|job_id| context.finish_cdc_table_backfill(job_id)),
            )
            .await?;
            for (database_id, (command, _)) in self.epoch_infos {
                if let Some((command_ctx, enqueue_time)) = command {
                    let duration_sec = enqueue_time.stop_and_record();
                    Self::report_complete_event(&env, duration_sec, &command_ctx);
                    GLOBAL_META_METRICS
                        .last_committed_barrier_time
                        .with_label_values(&[database_id.database_id.to_string().as_str()])
                        .set(command_ctx.barrier_info.curr_epoch.value().as_unix_secs() as i64);
                }
            }
            version_stats
        };

        Ok(version_stats)
    }
}

impl CompleteBarrierTask {
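    /// Report a `BarrierComplete` event to the event log, carrying the barrier latency
    /// in seconds.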
    fn report_complete_event(env: &MetaSrvEnv, duration_sec: f64, command_ctx: &CommandContext) {
        // Record barrier latency in the event log.
        use risingwave_pb::meta::event_log;
        let event = event_log::EventBarrierComplete {
            prev_epoch: command_ctx.barrier_info.prev_epoch(),
            cur_epoch: command_ctx.barrier_info.curr_epoch.value().0,
            duration_sec,
            command: command_ctx
                .command
                .as_ref()
                .map(|command| command.to_string())
                .unwrap_or_else(|| "barrier".to_owned()),
            barrier_kind: command_ctx.barrier_info.kind.as_str_name().to_owned(),
        };
        if cfg!(debug_assertions) || Deployment::current().is_ci() {
            // Emit a warning so that debug builds / CI can observe high latency.
            if duration_sec > 5.0 {
                tracing::warn!(event = ?event, "high barrier latency observed!");
            }
        }
        env.event_log_manager_ref()
            .add_event_logs(vec![event_log::Event::BarrierComplete(event)]);
    }
}

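/// Output returned to the barrier worker once a completion task finishes, carrying the
/// epochs to acknowledge back to the control streams and the latest hummock version stats.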
pub(super) struct BarrierCompleteOutput {
    #[expect(clippy::type_complexity)]
    /// `database_id` -> (`Some(database_graph_committed_epoch)`, [(`creating_job_id`, `creating_job_committed_epoch`)])
    pub epochs_to_ack: HashMap<DatabaseId, (Option<u64>, Vec<(TableId, u64)>)>,
    pub hummock_version_stats: HummockVersionStats,
}

impl CompletingTask {
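    /// Return a future that resolves when the current completion task finishes. If no task
    /// is in flight, first try to spawn one for the earliest collected barrier; if none can
    /// be started, the returned future stays pending.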
    pub(super) fn next_completed_barrier<'a>(
        &'a mut self,
        periodic_barriers: &mut PeriodicBarriers,
        checkpoint_control: &mut CheckpointControl,
        control_stream_manager: &mut ControlStreamManager,
        context: &Arc<impl GlobalBarrierWorkerContext>,
        env: &MetaSrvEnv,
    ) -> impl Future<Output = MetaResult<BarrierCompleteOutput>> + 'a {
        // If there is no completing barrier, try to start completing the earliest barrier
        // if it has been collected.
        if let CompletingTask::None = self
            && let Some(task) = checkpoint_control
                .next_complete_barrier_task(Some((periodic_barriers, control_stream_manager)))
        {
            let epochs_to_ack = task.epochs_to_ack();
            let context = context.clone();
            let await_tree_reg = env.await_tree_reg().clone();
            let env = env.clone();

            let fut = async move { task.complete_barrier(&*context, env).await };
            let fut = await_tree_reg
                .register_derived_root("Barrier Completion Task")
                .instrument(fut);
            let join_handle = tokio::spawn(fut);

            *self = CompletingTask::Completing {
                epochs_to_ack,
                join_handle,
            };
        }

        async move {
            if !matches!(self, CompletingTask::Completing { .. }) {
                return pending().await;
            }
            self.next_completed_barrier_inner().await
        }
    }

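    /// Wait for the in-flight completion task, if any, to finish. Returns `Ok(None)` when
    /// no task is running. Must not be called after a previous completion error.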
    #[await_tree::instrument]
    pub(super) async fn wait_completing_task(
        &mut self,
    ) -> MetaResult<Option<BarrierCompleteOutput>> {
        match self {
            CompletingTask::None => Ok(None),
            CompletingTask::Completing { .. } => {
                self.next_completed_barrier_inner().await.map(Some)
            }
            CompletingTask::Err(_) => {
                unreachable!("should not be called on previous err")
            }
        }
    }

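    /// Join the spawned completion task and transition the state machine accordingly,
    /// yielding the epochs to acknowledge and the committed version stats.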
    async fn next_completed_barrier_inner(&mut self) -> MetaResult<BarrierCompleteOutput> {
        let CompletingTask::Completing { join_handle, .. } = self else {
            unreachable!()
        };

        let join_result: MetaResult<_> = try {
            join_handle
                .await
                .context("failed to join completing command")??
        };
        // It's important to reset the completing command after the await, whether the
        // result is an error or not; otherwise the join handle would be polled again
        // after it is already ready.
        let next_completing_command_status = if let Err(e) = &join_result {
            CompletingTask::Err(e.clone())
        } else {
            CompletingTask::None
        };
        let completed_command = replace(self, next_completing_command_status);
        let hummock_version_stats = join_result?;

        must_match!(completed_command, CompletingTask::Completing {
            epochs_to_ack,
            ..
        } => {
            Ok(BarrierCompleteOutput {
                epochs_to_ack,
                hummock_version_stats,
            })
        })
    }
}