// risingwave_frontend/optimizer/plan_node/convert.rs

// Copyright 2022 RisingWave Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;

use risingwave_common::catalog::FieldDisplay;
use risingwave_pb::stream_plan::StreamScanType;

use super::*;
use crate::optimizer::property::RequiredDist;

23/// `ToStream` converts a logical plan node to streaming physical node
24/// with an optional required distribution.
25///
26/// when implement this trait you can choose the two ways
27/// - Implement `to_stream` and use the default implementation of `to_stream_with_dist_required`
28/// - Or, if the required distribution is given, there will be a better plan. For example a hash
29///   join with hash-key(a,b) and the plan is required hash-distributed by (a,b,c). you can
30///   implement `to_stream_with_dist_required`, and implement `to_stream` with
31///   `to_stream_with_dist_required(RequiredDist::Any)`. you can see [`LogicalProject`] as an
32///   example.
33pub trait ToStream {
34    /// `logical_rewrite_for_stream` will rewrite the logical node, and return (`new_plan_node`,
35    /// `col_mapping`), the `col_mapping` is for original columns have been changed into some other
36    /// position.
37    ///
38    /// Now it is used to:
39    /// 1. ensure every plan node's output having pk column
40    /// 2. add `row_count`() in every Agg
41    fn logical_rewrite_for_stream(
42        &self,
43        ctx: &mut RewriteStreamContext,
44    ) -> Result<(LogicalPlanRef, ColIndexMapping)>;
45
46    /// `to_stream` is equivalent to `to_stream_with_dist_required(RequiredDist::Any)`
47    fn to_stream(&self, ctx: &mut ToStreamContext) -> Result<StreamPlanRef>;
48
49    /// convert the plan to streaming physical plan and satisfy the required distribution
50    fn to_stream_with_dist_required(
51        &self,
52        required_dist: &RequiredDist,
53        ctx: &mut ToStreamContext,
54    ) -> Result<StreamPlanRef> {
55        let ret = self.to_stream(ctx)?;
56        required_dist.streaming_enforce_if_not_satisfies(ret)
57    }
58
59    fn try_better_locality(&self, _columns: &[usize]) -> Option<LogicalPlanRef> {
60        None
61    }
62}
63
64/// Try to enforce the locality requirement on the given columns.
65/// If a better plan can be found, return the better plan.
66/// If no better plan can be found, and locality backfill is enabled, wrap the plan
67/// with `LogicalLocalityProvider`.
68/// Otherwise, return the plan as is.
69pub fn try_enforce_locality_requirement(plan: LogicalPlanRef, columns: &[usize]) -> LogicalPlanRef {
70    assert!(!columns.is_empty());
71    if let Some(better_plan) = plan.try_better_locality(columns) {
72        better_plan
73    } else if plan.ctx().session_ctx().config().enable_locality_backfill() {
74        LogicalLocalityProvider::new(plan, columns.to_owned()).into()
75    } else {
76        // TODO: remove this when locality backfill is enabled by default
77        plan.ctx().inc_missed_locality_providers();
78        plan
79    }
80}
81
82pub fn stream_enforce_eowc_requirement(
83    ctx: OptimizerContextRef,
84    plan: StreamPlanRef,
85    emit_on_window_close: bool,
86) -> Result<StreamPlanRef> {
87    if emit_on_window_close && !plan.emit_on_window_close() {
88        let watermark_groups = plan.watermark_columns().grouped();
89        let n_watermark_groups = watermark_groups.len();
90        if n_watermark_groups == 0 {
91            Err(ErrorCode::NotSupported(
92                "The query cannot be executed in Emit-On-Window-Close mode.".to_owned(),
93                "Try define a watermark column in the source, or avoid aggregation without GROUP BY".to_owned(),
94            )
95            .into())
96        } else {
97            let first_watermark_group = watermark_groups.values().next().unwrap();
98            let watermark_col_idx = first_watermark_group.indices().next().unwrap();
99            if n_watermark_groups > 1 {
100                ctx.warn_to_user(format!(
101                    "There are multiple unrelated watermark columns in the query, the first one `{}` is used.",
102                    FieldDisplay(&plan.schema()[watermark_col_idx])
103                ));
104            }
105            Ok(StreamEowcSort::new(plan, watermark_col_idx).into())
106        }
107    } else {
108        Ok(plan)
109    }
110}
111
/// Context threaded through [`ToStream::logical_rewrite_for_stream`] calls.
#[derive(Debug, Clone, Default)]
pub struct RewriteStreamContext {
    /// Memoized rewrite results keyed by plan node id, so that a node reachable
    /// through multiple parents (presumably a `Share` node, per the field name —
    /// confirm against callers) is rewritten only once. Stores the rewritten plan
    /// together with the column index mapping from the old output to the new one.
    share_rewrite_map: HashMap<PlanNodeId, (LogicalPlanRef, ColIndexMapping)>,
}
116
117impl RewriteStreamContext {
118    pub fn add_rewrite_result(
119        &mut self,
120        plan_node_id: PlanNodeId,
121        plan_ref: LogicalPlanRef,
122        col_change: ColIndexMapping,
123    ) {
124        let prev = self
125            .share_rewrite_map
126            .insert(plan_node_id, (plan_ref, col_change));
127        assert!(prev.is_none());
128    }
129
130    pub fn get_rewrite_result(
131        &self,
132        plan_node_id: PlanNodeId,
133    ) -> Option<&(LogicalPlanRef, ColIndexMapping)> {
134        self.share_rewrite_map.get(&plan_node_id)
135    }
136}
137
/// The backfill strategy used when converting a plan to a streaming plan
/// (carried by [`ToStreamContext`]).
///
/// Each variant maps one-to-one to the same-named protobuf [`StreamScanType`]
/// via [`Self::to_stream_scan_type`].
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum BackfillType {
    /// Maps to [`StreamScanType::UpstreamOnly`].
    UpstreamOnly,
    /// Maps to [`StreamScanType::Backfill`]; the default chosen by
    /// [`ToStreamContext::new`].
    Backfill,
    /// Maps to [`StreamScanType::ArrangementBackfill`].
    ArrangementBackfill,
    /// Maps to [`StreamScanType::SnapshotBackfill`].
    SnapshotBackfill,
}
145
146impl BackfillType {
147    pub fn to_stream_scan_type(self) -> StreamScanType {
148        match self {
149            BackfillType::UpstreamOnly => StreamScanType::UpstreamOnly,
150            BackfillType::Backfill => StreamScanType::Backfill,
151            BackfillType::ArrangementBackfill => StreamScanType::ArrangementBackfill,
152            BackfillType::SnapshotBackfill => StreamScanType::SnapshotBackfill,
153        }
154    }
155}
156
/// Context threaded through [`ToStream::to_stream`] calls.
#[derive(Debug, Clone)]
pub struct ToStreamContext {
    /// Memoized `to_stream` results keyed by plan node id, so that a shared node
    /// is converted only once.
    share_to_stream_map: HashMap<PlanNodeId, StreamPlanRef>,
    /// Whether the plan is being converted in Emit-On-Window-Close mode.
    emit_on_window_close: bool,
    /// The backfill strategy to use during conversion; see
    /// [`BackfillType::to_stream_scan_type`] for how it reaches the stream scan.
    backfill_type: BackfillType,
}
163
164impl ToStreamContext {
165    pub fn new(emit_on_window_close: bool) -> Self {
166        Self::new_with_backfill_type(emit_on_window_close, BackfillType::Backfill)
167    }
168
169    pub fn new_with_backfill_type(emit_on_window_close: bool, backfill_type: BackfillType) -> Self {
170        Self {
171            share_to_stream_map: HashMap::new(),
172            emit_on_window_close,
173            backfill_type,
174        }
175    }
176
177    pub fn backfill_type(&self) -> BackfillType {
178        self.backfill_type
179    }
180
181    pub fn add_to_stream_result(&mut self, plan_node_id: PlanNodeId, plan_ref: StreamPlanRef) {
182        self.share_to_stream_map
183            .try_insert(plan_node_id, plan_ref)
184            .unwrap();
185    }
186
187    pub fn get_to_stream_result(&self, plan_node_id: PlanNodeId) -> Option<&StreamPlanRef> {
188        self.share_to_stream_map.get(&plan_node_id)
189    }
190
191    pub fn emit_on_window_close(&self) -> bool {
192        self.emit_on_window_close
193    }
194}
195
196/// `ToBatch` allows to convert a logical plan node to batch physical node
197/// with an optional required order.
198///
199/// The generated plan has single distribution and doesn't have any exchange nodes inserted.
200/// Use either [`ToLocalBatch`] or [`ToDistributedBatch`] after `ToBatch` to get a distributed plan.
201///
202/// To implement this trait you can choose one of the two ways:
203/// - Implement `to_batch` and use the default implementation of `to_batch_with_order_required`
204/// - Or, if a better plan can be generated when a required order is given, you can implement
205///   `to_batch_with_order_required`, and implement `to_batch` with
206///   `to_batch_with_order_required(&Order::any())`.
207pub trait ToBatch {
208    /// `to_batch` is equivalent to `to_batch_with_order_required(&Order::any())`
209    fn to_batch(&self) -> Result<BatchPlanRef>;
210    /// convert the plan to batch physical plan and satisfy the required Order
211    fn to_batch_with_order_required(&self, required_order: &Order) -> Result<BatchPlanRef> {
212        let ret = self.to_batch()?;
213        required_order.enforce_if_not_satisfies(ret)
214    }
215}
216
217/// Converts a batch physical plan to local plan for local execution.
218///
219/// This is quite similar to `ToBatch`, but different in several ways. For example it converts
220/// scan to exchange + scan.
221pub trait ToLocalBatch {
222    fn to_local(&self) -> Result<BatchPlanRef>;
223
224    /// Convert the plan to batch local physical plan and satisfy the required Order
225    fn to_local_with_order_required(&self, required_order: &Order) -> Result<BatchPlanRef> {
226        let ret = self.to_local()?;
227        required_order.enforce_if_not_satisfies(ret)
228    }
229}
230
231/// `ToDistributedBatch` allows to convert a batch physical plan to distributed batch plan, by
232/// insert exchange node, with an optional required order and distributed.
233///
234/// To implement this trait you can choose one of the two ways:
235/// - Implement `to_distributed` and use the default implementation of
236///   `to_distributed_with_required`
237/// - Or, if a better plan can be generated when a required order is given, you can implement
238///   `to_distributed_with_required`, and implement `to_distributed` with
239///   `to_distributed_with_required(&Order::any(), &RequiredDist::Any)`
240pub trait ToDistributedBatch {
241    /// `to_distributed` is equivalent to `to_distributed_with_required(&Order::any(),
242    /// &RequiredDist::Any)`
243    fn to_distributed(&self) -> Result<BatchPlanRef>;
244    /// insert the exchange in batch physical plan to satisfy the required Distribution and Order.
245    fn to_distributed_with_required(
246        &self,
247        required_order: &Order,
248        required_dist: &RequiredDist,
249    ) -> Result<BatchPlanRef> {
250        let ret = self.to_distributed()?;
251        let ret = required_order.enforce_if_not_satisfies(ret)?;
252        required_dist.batch_enforce_if_not_satisfies(ret, required_order)
253    }
254}