risingwave_frontend/optimizer/plan_node/convert.rs

// Copyright 2025 RisingWave Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;

use risingwave_common::catalog::FieldDisplay;
use risingwave_pb::stream_plan::StreamScanType;

use super::*;
use crate::optimizer::property::RequiredDist;

/// `ToStream` converts a logical plan node to a streaming physical node
/// with an optional required distribution.
///
/// To implement this trait you can choose one of two ways:
/// - Implement `to_stream` and use the default implementation of `to_stream_with_dist_required`.
/// - Or, if a better plan exists when the required distribution is known (for example, a hash
///   join with hash key (a,b) whose output is required to be hash-distributed by (a,b,c)),
///   implement `to_stream_with_dist_required`, and implement `to_stream` as
///   `to_stream_with_dist_required(&RequiredDist::Any, ctx)`. See [`LogicalProject`] as an
///   example, or the sketch below.
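///
/// A minimal sketch of the second approach for a hypothetical `MyHashJoin` node (the type and
/// its internals are illustrative and not part of this crate; the remaining trait methods are
/// omitted):
///
/// ```ignore
/// impl ToStream for MyHashJoin {
///     fn to_stream(&self, ctx: &mut ToStreamContext) -> Result<StreamPlanRef> {
///         // With no explicit requirement, fall back to the distribution-aware path.
///         self.to_stream_with_dist_required(&RequiredDist::Any, ctx)
///     }
///
///     fn to_stream_with_dist_required(
///         &self,
///         required_dist: &RequiredDist,
///         ctx: &mut ToStreamContext,
///     ) -> Result<StreamPlanRef> {
///         // Inspect `required_dist` to pick a join key / plan shape that already satisfies
///         // it, so no extra exchange has to be enforced on the result.
///         todo!("illustrative only")
///     }
/// }
/// ```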
pub trait ToStream {
    /// `logical_rewrite_for_stream` rewrites the logical node and returns (`new_plan_node`,
    /// `col_mapping`), where `col_mapping` records how the original columns have been moved to
    /// other positions.
    ///
    /// Currently it is used to:
    /// 1. ensure every plan node's output includes the pk columns
    /// 2. add `row_count()` to every Agg
    fn logical_rewrite_for_stream(
        &self,
        ctx: &mut RewriteStreamContext,
    ) -> Result<(LogicalPlanRef, ColIndexMapping)>;

    /// `to_stream` is equivalent to `to_stream_with_dist_required(&RequiredDist::Any, ctx)`.
    fn to_stream(&self, ctx: &mut ToStreamContext) -> Result<StreamPlanRef>;

    /// Convert the plan to a streaming physical plan and satisfy the required distribution.
    fn to_stream_with_dist_required(
        &self,
        required_dist: &RequiredDist,
        ctx: &mut ToStreamContext,
    ) -> Result<StreamPlanRef> {
        let ret = self.to_stream(ctx)?;
        required_dist.streaming_enforce_if_not_satisfies(ret)
    }

    /// Try to rewrite the plan to provide better data locality on the given columns.
    /// Returns the rewritten logical plan if one is found, or `None` to keep the original plan.
    fn try_better_locality(&self, _columns: &[usize]) -> Option<LogicalPlanRef> {
        None
    }
}

/// Try to enforce the locality requirement on the given columns.
/// If a better plan can be found, return the better plan.
/// If no better plan can be found, and locality backfill is enabled, wrap the plan
/// with `LogicalLocalityProvider`.
/// Otherwise, return the plan as is.
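///
/// A hypothetical call site (the column indices are illustrative):
///
/// ```ignore
/// // Ask for locality on output columns 0 and 1 of `plan`.
/// let plan = try_enforce_locality_requirement(plan, &[0, 1]);
/// ```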
pub fn try_enforce_locality_requirement(plan: LogicalPlanRef, columns: &[usize]) -> LogicalPlanRef {
    assert!(!columns.is_empty());
    if let Some(better_plan) = plan.try_better_locality(columns) {
        better_plan
    } else if plan.ctx().session_ctx().config().enable_locality_backfill() {
        LogicalLocalityProvider::new(plan, columns.to_owned()).into()
    } else {
        plan
    }
}

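/// Enforce the emit-on-window-close (EOWC) requirement on a streaming plan.
///
/// If `emit_on_window_close` is requested but the plan does not already emit on window close,
/// wrap it with a `StreamEowcSort` on a watermark column; if the plan has no watermark column
/// at all, return an error. Otherwise the plan is returned unchanged.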
pub fn stream_enforce_eowc_requirement(
    ctx: OptimizerContextRef,
    plan: StreamPlanRef,
    emit_on_window_close: bool,
) -> Result<StreamPlanRef> {
    if emit_on_window_close && !plan.emit_on_window_close() {
        let watermark_groups = plan.watermark_columns().grouped();
        let n_watermark_groups = watermark_groups.len();
        if n_watermark_groups == 0 {
            Err(ErrorCode::NotSupported(
                "The query cannot be executed in Emit-On-Window-Close mode.".to_owned(),
                "Try defining a watermark column in the source, or avoid aggregation without GROUP BY".to_owned(),
            )
            .into())
        } else {
            let first_watermark_group = watermark_groups.values().next().unwrap();
            let watermark_col_idx = first_watermark_group.indices().next().unwrap();
            if n_watermark_groups > 1 {
                ctx.warn_to_user(format!(
                    "There are multiple unrelated watermark columns in the query, the first one `{}` is used.",
                    FieldDisplay(&plan.schema()[watermark_col_idx])
                ));
            }
            Ok(StreamEowcSort::new(plan, watermark_col_idx).into())
        }
    } else {
        Ok(plan)
    }
}

#[derive(Debug, Clone, Default)]
pub struct RewriteStreamContext {
    share_rewrite_map: HashMap<PlanNodeId, (LogicalPlanRef, ColIndexMapping)>,
}

impl RewriteStreamContext {
    pub fn add_rewrite_result(
        &mut self,
        plan_node_id: PlanNodeId,
        plan_ref: LogicalPlanRef,
        col_change: ColIndexMapping,
    ) {
        let prev = self
            .share_rewrite_map
            .insert(plan_node_id, (plan_ref, col_change));
        assert!(prev.is_none());
    }

    pub fn get_rewrite_result(
        &self,
        plan_node_id: PlanNodeId,
    ) -> Option<&(LogicalPlanRef, ColIndexMapping)> {
        self.share_rewrite_map.get(&plan_node_id)
    }
}

/// The backfill strategy to use for stream scans when converting to a streaming plan; each
/// variant maps to a [`StreamScanType`] in the stream plan proto.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum BackfillType {
    UpstreamOnly,
    Backfill,
    ArrangementBackfill,
    SnapshotBackfill,
}

impl BackfillType {
    pub fn to_stream_scan_type(self) -> StreamScanType {
        match self {
            BackfillType::UpstreamOnly => StreamScanType::UpstreamOnly,
            BackfillType::Backfill => StreamScanType::Backfill,
            BackfillType::ArrangementBackfill => StreamScanType::ArrangementBackfill,
            BackfillType::SnapshotBackfill => StreamScanType::SnapshotBackfill,
        }
    }
}

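/// Context used by [`ToStream`] when converting a logical plan to a streaming plan. It caches
/// the conversion results of shared plan nodes and carries the emit-on-window-close flag and
/// the backfill strategy to use.
///
/// A hypothetical construction (the flag values are illustrative):
///
/// ```ignore
/// // Plain streaming conversion using the default `Backfill` strategy.
/// let ctx = ToStreamContext::new(false);
///
/// // An emit-on-window-close plan that backfills via arrangement.
/// let eowc_ctx = ToStreamContext::new_with_backfill_type(true, BackfillType::ArrangementBackfill);
/// ```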
#[derive(Debug, Clone)]
pub struct ToStreamContext {
    share_to_stream_map: HashMap<PlanNodeId, StreamPlanRef>,
    emit_on_window_close: bool,
    backfill_type: BackfillType,
}

impl ToStreamContext {
    pub fn new(emit_on_window_close: bool) -> Self {
        Self::new_with_backfill_type(emit_on_window_close, BackfillType::Backfill)
    }

    pub fn new_with_backfill_type(emit_on_window_close: bool, backfill_type: BackfillType) -> Self {
        Self {
            share_to_stream_map: HashMap::new(),
            emit_on_window_close,
            backfill_type,
        }
    }

    pub fn backfill_type(&self) -> BackfillType {
        self.backfill_type
    }

    pub fn add_to_stream_result(&mut self, plan_node_id: PlanNodeId, plan_ref: StreamPlanRef) {
        self.share_to_stream_map
            .try_insert(plan_node_id, plan_ref)
            .unwrap();
    }

    pub fn get_to_stream_result(&self, plan_node_id: PlanNodeId) -> Option<&StreamPlanRef> {
        self.share_to_stream_map.get(&plan_node_id)
    }

    pub fn emit_on_window_close(&self) -> bool {
        self.emit_on_window_close
    }
}

/// `ToBatch` converts a logical plan node to a batch physical node
/// with an optional required order.
///
/// The generated plan has single distribution and doesn't have any exchange nodes inserted.
/// Use either [`ToLocalBatch`] or [`ToDistributedBatch`] after `ToBatch` to get a distributed plan.
///
/// To implement this trait you can choose one of two ways:
/// - Implement `to_batch` and use the default implementation of `to_batch_with_order_required`.
/// - Or, if a better plan can be generated when a required order is given, implement
///   `to_batch_with_order_required`, and implement `to_batch` as
///   `to_batch_with_order_required(&Order::any())`, as sketched below.
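///
/// A minimal sketch of the second approach for a hypothetical `MySort` node whose input may
/// already satisfy the required order (the type is illustrative and not part of this crate):
///
/// ```ignore
/// impl ToBatch for MySort {
///     fn to_batch(&self) -> Result<BatchPlanRef> {
///         self.to_batch_with_order_required(&Order::any())
///     }
///
///     fn to_batch_with_order_required(&self, required_order: &Order) -> Result<BatchPlanRef> {
///         // Inspect `required_order` here: if the node can already produce that order,
///         // return a plan that needs no extra sort enforced on top of it.
///         todo!("illustrative only")
///     }
/// }
/// ```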
pub trait ToBatch {
    /// `to_batch` is equivalent to `to_batch_with_order_required(&Order::any())`.
    fn to_batch(&self) -> Result<BatchPlanRef>;

    /// Convert the plan to a batch physical plan and satisfy the required order.
    fn to_batch_with_order_required(&self, required_order: &Order) -> Result<BatchPlanRef> {
        let ret = self.to_batch()?;
        required_order.enforce_if_not_satisfies(ret)
    }
}

/// `ToLocalBatch` converts a batch physical plan to a local plan for local execution.
///
/// This is quite similar to `ToBatch`, but differs in several ways. For example, it converts
/// a scan to exchange + scan.
pub trait ToLocalBatch {
    fn to_local(&self) -> Result<BatchPlanRef>;

    /// Convert the plan to a local batch physical plan and satisfy the required order.
    fn to_local_with_order_required(&self, required_order: &Order) -> Result<BatchPlanRef> {
        let ret = self.to_local()?;
        required_order.enforce_if_not_satisfies(ret)
    }
}

/// `ToDistributedBatch` converts a batch physical plan to a distributed batch plan by inserting
/// exchange nodes, with an optional required order and distribution.
///
/// To implement this trait you can choose one of two ways:
/// - Implement `to_distributed` and use the default implementation of
///   `to_distributed_with_required`.
/// - Or, if a better plan can be generated when a required order and distribution are given,
///   implement `to_distributed_with_required`, and implement `to_distributed` as
///   `to_distributed_with_required(&Order::any(), &RequiredDist::Any)`.
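///
/// A hypothetical end-to-end use of the batch conversion traits (error handling and the
/// surrounding optimizer driver are omitted; the variable names are illustrative):
///
/// ```ignore
/// // Logical plan -> single-node batch plan -> distributed batch plan with exchanges inserted.
/// let batch_plan = logical_plan.to_batch()?;
/// let distributed_plan =
///     batch_plan.to_distributed_with_required(&Order::any(), &RequiredDist::Any)?;
/// ```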
pub trait ToDistributedBatch {
    /// `to_distributed` is equivalent to `to_distributed_with_required(&Order::any(),
    /// &RequiredDist::Any)`.
    fn to_distributed(&self) -> Result<BatchPlanRef>;

    /// Insert exchange nodes into the batch physical plan to satisfy the required distribution
    /// and order.
    fn to_distributed_with_required(
        &self,
        required_order: &Order,
        required_dist: &RequiredDist,
    ) -> Result<BatchPlanRef> {
        let ret = self.to_distributed()?;
        let ret = required_order.enforce_if_not_satisfies(ret)?;
        required_dist.batch_enforce_if_not_satisfies(ret, required_order)
    }
}