risingwave_frontend/optimizer/plan_node/mod.rs

// Copyright 2022 RisingWave Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Defines all kinds of nodes in the plan tree. Each node represents a relational expression.
//!
//! We use an immutable tree structure: every node is immutable and cannot be modified after it
//! has been created. If you want to modify a node, such as rewriting the expressions in a
//! `ProjectNode` or changing a node's input node, you need to create a new node. We use `Rc` as
//! the node's reference, and a node only stores references to its inputs, so changing a node only
//! requires creating one new node rather than rebuilding the entire sub-tree.
//!
//! So when you want to add a new node, make sure:
//! - each field in the node struct is private
//! - the construction of the node is preferably implemented in a unified `new()` function; if
//!   there are multiple constructors, make sure they behave consistently
//! - all fields are assigned during construction, i.e., the derivation of the node's properties
//!   is finished in the `new()` function
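//!
//! For example, replacing a node's input conceptually looks like the following sketch (the
//! `node` and `new_input` bindings are hypothetical; this is an illustration, not a prescribed
//! API):
//!
//! ```ignore
//! // `node` is an immutable plan node behind an `Rc`. Instead of mutating it in
//! // place, build a new root that shares the untouched children.
//! let new_node = node.clone_root_with_inputs(&[new_input]);
//! ```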

use std::collections::HashMap;
use std::fmt::Debug;
use std::hash::Hash;
use std::marker::PhantomData;
use std::ops::Deref;
use std::rc::Rc;

use downcast_rs::{Downcast, impl_downcast};
use dyn_clone::DynClone;
use itertools::Itertools;
use paste::paste;
use petgraph::dot::{Config, Dot};
use petgraph::graph::Graph;
use pretty_xmlish::{Pretty, PrettyConfig};
use risingwave_common::catalog::Schema;
use risingwave_common::util::recursive::{self, Recurse};
use risingwave_pb::batch_plan::PlanNode as PbBatchPlan;
use risingwave_pb::stream_plan::StreamNode as PbStreamPlan;
use serde::Serialize;

use self::batch::BatchPlanNodeMetadata;
use self::generic::{GenericPlanRef, PhysicalPlanRef};
use self::stream::StreamPlanNodeMetadata;
use self::utils::Distill;
use super::property::{
    Distribution, FunctionalDependencySet, MonotonicityMap, Order, WatermarkColumns,
};
use crate::error::{ErrorCode, Result};
use crate::optimizer::property::StreamKind;
use crate::optimizer::{ExpressionSimplifyRewriter, PlanVisitor};
use crate::session::current::notice_to_user;
use crate::utils::{PrettySerde, build_graph_from_pretty};

/// A marker trait for different conventions, used for enforcing type safety.
///
/// Implementors are [`Logical`], [`Batch`], and [`Stream`].
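///
/// Having the convention as a type parameter lets code be generic over all three plan flavors.
/// A minimal sketch (the `plan_id` helper is hypothetical, not part of this module):
///
/// ```ignore
/// // Works for `PlanRef<Logical>`, `PlanRef<Batch>`, and `PlanRef<Stream>` alike.
/// fn plan_id<C: ConventionMarker>(plan: &PlanRef<C>) -> PlanNodeId {
///     plan.id()
/// }
/// ```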
pub trait ConventionMarker: 'static + Sized + Clone + Debug + Eq + PartialEq + Hash {
    /// The extra fields in the [`PlanBase`] of this convention.
    type Extra: 'static + Eq + Hash + Clone + Debug;
    type ShareNode: ShareNode<Self>;
    type PlanRefDyn: PlanNodeCommon<Self> + Eq + Hash + ?Sized;
    type PlanNodeType;

    fn as_share(plan: &Self::PlanRefDyn) -> Option<&Self::ShareNode>;
}

pub trait ShareNode<C: ConventionMarker>:
    AnyPlanNodeMeta<C> + PlanTreeNodeUnary<C> + 'static
{
    fn new_share(share: generic::Share<PlanRef<C>>) -> PlanRef<C>;
    fn replace_input(&self, plan: PlanRef<C>);
}

pub struct NoShareNode<C: ConventionMarker>(!, PhantomData<C>);

impl<C: ConventionMarker> ShareNode<C> for NoShareNode<C> {
    fn new_share(_plan: generic::Share<PlanRef<C>>) -> PlanRef<C> {
        unreachable!()
    }

    fn replace_input(&self, _plan: PlanRef<C>) {
        unreachable!()
    }
}

impl<C: ConventionMarker> PlanTreeNodeUnary<C> for NoShareNode<C> {
    fn input(&self) -> PlanRef<C> {
        unreachable!()
    }

    fn clone_with_input(&self, _input: PlanRef<C>) -> Self {
        unreachable!()
    }
}

impl<C: ConventionMarker> AnyPlanNodeMeta<C> for NoShareNode<C> {
    fn node_type(&self) -> C::PlanNodeType {
        unreachable!()
    }

    fn plan_base(&self) -> &PlanBase<C> {
        unreachable!()
    }
}
/// The marker for logical convention.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct Logical;
impl ConventionMarker for Logical {
    type Extra = plan_base::NoExtra;
    type PlanNodeType = LogicalPlanNodeType;
    type PlanRefDyn = dyn LogicalPlanNode;
    type ShareNode = LogicalShare;

    fn as_share(plan: &Self::PlanRefDyn) -> Option<&Self::ShareNode> {
        plan.as_logical_share()
    }
}

/// The marker for batch convention.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct Batch;
impl ConventionMarker for Batch {
    type Extra = plan_base::BatchExtra;
    type PlanNodeType = BatchPlanNodeType;
    type PlanRefDyn = dyn BatchPlanNode;
    type ShareNode = NoShareNode<Batch>;

    fn as_share(_plan: &Self::PlanRefDyn) -> Option<&Self::ShareNode> {
        None
    }
}

/// The marker for stream convention.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct Stream;
impl ConventionMarker for Stream {
    type Extra = plan_base::StreamExtra;
    type PlanNodeType = StreamPlanNodeType;
    type PlanRefDyn = dyn StreamPlanNode;
    type ShareNode = StreamShare;

    fn as_share(plan: &Self::PlanRefDyn) -> Option<&Self::ShareNode> {
        plan.as_stream_share()
    }
}

/// The trait for accessing the metadata and [`PlanBase`] of plan nodes.
pub trait PlanNodeMeta {
    type Convention: ConventionMarker;
    const NODE_TYPE: <Self::Convention as ConventionMarker>::PlanNodeType;
    /// Get the reference to the [`PlanBase`] with the corresponding convention.
    fn plan_base(&self) -> &PlanBase<Self::Convention>;
}

// Intentionally made private.
mod plan_node_meta {
    use super::*;

    /// The object-safe version of [`PlanNodeMeta`], used as a supertrait of `PlanNode`.
    ///
    /// Check [`PlanNodeMeta`] for more details.
    pub trait AnyPlanNodeMeta<C: ConventionMarker> {
        fn node_type(&self) -> C::PlanNodeType;
        fn plan_base(&self) -> &PlanBase<C>;
    }

    /// Implement [`AnyPlanNodeMeta`] for all [`PlanNodeMeta`].
    impl<P> AnyPlanNodeMeta<P::Convention> for P
    where
        P: PlanNodeMeta,
    {
        fn node_type(&self) -> <P::Convention as ConventionMarker>::PlanNodeType {
            P::NODE_TYPE
        }

        fn plan_base(&self) -> &PlanBase<P::Convention> {
            <Self as PlanNodeMeta>::plan_base(self)
        }
    }
}
use plan_node_meta::AnyPlanNodeMeta;

pub trait PlanNodeCommon<C: ConventionMarker> = PlanTreeNode<C>
    + DynClone
    + DynEq
    + DynHash
    + Distill
    + Debug
    + Downcast
    + ExprRewritable<C>
    + ExprVisitable
    + AnyPlanNodeMeta<C>;

/// The common trait over all plan nodes. Used by the optimizer framework, which treats all nodes
/// as `dyn PlanNode`.
///
/// We split the trait into many sub-traits so that we can easily use macros to implement them.
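///
/// A type-erased node can be downcast back to a concrete node via the generated `as_*` helpers.
/// A minimal sketch (the `plan` binding is hypothetical):
///
/// ```ignore
/// // `plan` is a `&dyn StreamPlanNode`.
/// if let Some(filter) = plan.as_stream_filter() {
///     // `filter` is a `&StreamFilter` here.
/// }
/// ```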
pub trait StreamPlanNode: PlanNodeCommon<Stream> + TryToStreamPb {}
pub trait BatchPlanNode:
    PlanNodeCommon<Batch> + ToDistributedBatch + ToLocalBatch + TryToBatchPb
{
}
pub trait LogicalPlanNode:
    PlanNodeCommon<Logical> + ColPrunable + PredicatePushdown + ToBatch + ToStream
{
}

macro_rules! impl_trait {
    ($($convention:ident),+) => {
        paste! {
            $(
                impl Hash for dyn [<$convention  PlanNode>] {
                    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
                        self.dyn_hash(state);
                    }
                }

                impl PartialEq for dyn [<$convention  PlanNode>] {
                    fn eq(&self, other: &Self) -> bool {
                        self.dyn_eq(other.as_dyn_eq())
                    }
                }

                impl Eq for dyn [<$convention  PlanNode>] {}
            )+
        }
    };
}

impl_trait!(Batch, Stream, Logical);
impl_downcast!(BatchPlanNode);
impl_downcast!(LogicalPlanNode);
impl_downcast!(StreamPlanNode);

// Using a new type wrapper allows direct function implementation on `PlanRef`,
// and we currently need a manual implementation of `PartialEq` for `PlanRef`.
#[allow(clippy::derived_hash_with_manual_eq)]
#[derive(Debug, Eq, Hash)]
pub struct PlanRef<C: ConventionMarker>(Rc<C::PlanRefDyn>);

impl<C: ConventionMarker> Clone for PlanRef<C> {
    fn clone(&self) -> Self {
        Self(self.0.clone())
    }
}

pub type LogicalPlanRef = PlanRef<Logical>;
pub type StreamPlanRef = PlanRef<Stream>;
pub type BatchPlanRef = PlanRef<Batch>;

// Cannot use the derived implementation for now.
// See https://github.com/rust-lang/rust/issues/31740
#[allow(clippy::op_ref)]
impl<C: ConventionMarker> PartialEq for PlanRef<C> {
    fn eq(&self, other: &Self) -> bool {
        &self.0 == &other.0
    }
}

impl<C: ConventionMarker> Deref for PlanRef<C> {
    type Target = C::PlanRefDyn;

    fn deref(&self) -> &Self::Target {
        self.0.deref()
    }
}

impl<T: LogicalPlanNode> From<T> for PlanRef<Logical> {
    fn from(value: T) -> Self {
        PlanRef(Rc::new(value) as _)
    }
}

impl<T: StreamPlanNode> From<T> for PlanRef<Stream> {
    fn from(value: T) -> Self {
        PlanRef(Rc::new(value) as _)
    }
}

impl<T: BatchPlanNode> From<T> for PlanRef<Batch> {
    fn from(value: T) -> Self {
        PlanRef(Rc::new(value) as _)
    }
}

impl<C: ConventionMarker> Layer for PlanRef<C> {
    type Sub = Self;

    fn map<F>(self, f: F) -> Self
    where
        F: FnMut(Self::Sub) -> Self::Sub,
    {
        self.clone_root_with_inputs(&self.inputs().into_iter().map(f).collect_vec())
    }

    fn descent<F>(&self, f: F)
    where
        F: FnMut(&Self::Sub),
    {
        self.inputs().iter().for_each(f);
    }
}

#[derive(Clone, Debug, Copy, Serialize, Hash, Eq, PartialEq, PartialOrd, Ord)]
pub struct PlanNodeId(pub i32);

impl PlanNodeId {
    pub fn to_stream_node_operator_id(self) -> StreamNodeLocalOperatorId {
        StreamNodeLocalOperatorId::new(self.0 as _)
    }
}

/// A more sophisticated `Endo` that takes the DAG structure of `PlanRef` into account.
/// In addition to `Endo`, one has to specify the `cached` function
/// to persist transformed `LogicalShare` nodes and their results,
/// and the `dag_apply` function will take care of transforming each `LogicalShare` node only once.
///
/// Note: due to the way supertraits are designed in Rust,
/// one needs separate implementation blocks for `Endo<PlanRef>` and `EndoPlan`.
/// Conventionally, the real transformation `apply` lives under `Endo<PlanRef>`,
/// although one can refer to `dag_apply` in the implementation of `apply`.
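///
/// A typical `cached` implementation memoizes results keyed by the plan node id. A minimal
/// sketch (the `cache` field is a hypothetical `HashMap<PlanNodeId, LogicalPlanRef>` on the
/// implementor, not a required shape):
///
/// ```ignore
/// fn cached<F>(&mut self, plan: LogicalPlanRef, mut f: F) -> LogicalPlanRef
/// where
///     F: FnMut(&mut Self) -> LogicalPlanRef,
/// {
///     if let Some(hit) = self.cache.get(&plan.id()) {
///         return hit.clone();
///     }
///     let result = f(self);
///     self.cache.insert(plan.id(), result.clone());
///     result
/// }
/// ```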
pub trait EndoPlan: Endo<LogicalPlanRef> {
    // Return the cached result of `plan` if present,
    // otherwise store and return the value provided by `f`.
    // Notice that to allow mutable access to `self` in `f`,
    // we let `f` take `&mut Self` as its first argument.
    fn cached<F>(&mut self, plan: LogicalPlanRef, f: F) -> LogicalPlanRef
    where
        F: FnMut(&mut Self) -> LogicalPlanRef;

    fn dag_apply(&mut self, plan: LogicalPlanRef) -> LogicalPlanRef {
        match plan.as_logical_share() {
            Some(_) => self.cached(plan.clone(), |this| this.tree_apply(plan.clone())),
            None => self.tree_apply(plan),
        }
    }
}

/// A more sophisticated `Visit` that takes the DAG structure of `PlanRef` into account.
/// In addition to `Visit`, one has to specify `visited`
/// to store and report visited `LogicalShare` nodes,
/// and the `dag_visit` function will take care of visiting each `LogicalShare` node only once.
/// See also `EndoPlan`.
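///
/// A typical `visited` implementation tracks visited plan node ids in a set. A minimal sketch
/// (the `visited_ids` field is a hypothetical `HashSet<PlanNodeId>` on the implementor):
///
/// ```ignore
/// fn visited<F>(&mut self, plan: &LogicalPlanRef, mut f: F)
/// where
///     F: FnMut(&mut Self),
/// {
///     if self.visited_ids.insert(plan.id()) {
///         // First visit: run the traversal.
///         f(self);
///     }
/// }
/// ```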
pub trait VisitPlan: Visit<LogicalPlanRef> {
    // Skip visiting `plan` if it has been visited, otherwise run the traversal provided by `f`.
    // Notice that to allow mutable access to `self` in `f`,
    // we let `f` take `&mut Self` as its first argument.
    fn visited<F>(&mut self, plan: &LogicalPlanRef, f: F)
    where
        F: FnMut(&mut Self);

    fn dag_visit(&mut self, plan: &LogicalPlanRef) {
        match plan.as_logical_share() {
            Some(_) => self.visited(plan, |this| this.tree_visit(plan)),
            None => self.tree_visit(plan),
        }
    }
}

impl<C: ConventionMarker> PlanRef<C> {
    pub fn rewrite_exprs_recursive(&self, r: &mut impl ExprRewriter) -> PlanRef<C> {
        let new = self.rewrite_exprs(r);
        let inputs: Vec<PlanRef<C>> = new
            .inputs()
            .iter()
            .map(|plan_ref| plan_ref.rewrite_exprs_recursive(r))
            .collect();
        new.clone_root_with_inputs(&inputs[..])
    }
}

pub(crate) trait VisitExprsRecursive {
    fn visit_exprs_recursive(&self, r: &mut impl ExprVisitor);
}

impl<C: ConventionMarker> VisitExprsRecursive for PlanRef<C> {
    fn visit_exprs_recursive(&self, r: &mut impl ExprVisitor) {
        self.visit_exprs(r);
        self.inputs()
            .iter()
            .for_each(|plan_ref| plan_ref.visit_exprs_recursive(r));
    }
}

impl<C: ConventionMarker> PlanRef<C> {
    pub fn expect_stream_key(&self) -> &[usize] {
        self.stream_key().unwrap_or_else(|| {
            panic!(
                "a stream key is expected but does not exist, plan:\n{}",
                self.explain_to_string()
            )
        })
    }
}

impl LogicalPlanRef {
    fn prune_col_inner(
        &self,
        required_cols: &[usize],
        ctx: &mut ColumnPruningContext,
    ) -> LogicalPlanRef {
        if let Some(logical_share) = self.as_logical_share() {
            // Check the share cache first. If the cache exists, it means this is the second round
            // of column pruning.
            if let Some((new_share, merge_required_cols)) = ctx.get_share_cache(self.id()) {
                // Piggyback share removal if it has only one parent.
                if ctx.get_parent_num(logical_share) == 1 {
                    let input: LogicalPlanRef = logical_share.input();
                    return input.prune_col(required_cols, ctx);
                }

                // If it is the first visit, recursively call `prune_col` for its input and
                // replace it.
                if ctx.visit_share_at_first_round(self.id()) {
                    let new_logical_share: &LogicalShare = new_share
                        .as_logical_share()
                        .expect("must be share operator");
                    let new_share_input = new_logical_share.input().prune_col(
                        &(0..new_logical_share.base.schema().len()).collect_vec(),
                        ctx,
                    );
                    new_logical_share.replace_input(new_share_input);
                }

                // Calculate the new required columns based on the new share.
                let new_required_cols: Vec<usize> = required_cols
                    .iter()
                    .map(|col| merge_required_cols.iter().position(|x| x == col).unwrap())
                    .collect_vec();
                let mapping = ColIndexMapping::with_remaining_columns(
                    &new_required_cols,
                    new_share.schema().len(),
                );
                return LogicalProject::with_mapping(new_share, mapping).into();
            }

            // `LogicalShare` can't be cloned, so we implement column pruning for `LogicalShare`
            // here.
            // Basically, we need to wait for all parents of `LogicalShare` to prune columns before
            // we merge the required columns and prune.
            let parent_has_pushed = ctx.add_required_cols(self.id(), required_cols.into());
            if parent_has_pushed == ctx.get_parent_num(logical_share) {
                let merge_require_cols = ctx
                    .take_required_cols(self.id())
                    .expect("must have required columns")
                    .into_iter()
                    .flat_map(|x| x.into_iter())
                    .sorted()
                    .dedup()
                    .collect_vec();
                let input: LogicalPlanRef = logical_share.input();
                let input = input.prune_col(&merge_require_cols, ctx);

                // Cache the new share operator for the second round.
                let new_logical_share = LogicalShare::create(input.clone());
                ctx.add_share_cache(self.id(), new_logical_share, merge_require_cols.clone());

                let exprs = logical_share
                    .base
                    .schema()
                    .fields
                    .iter()
                    .enumerate()
                    .map(|(i, field)| {
                        if let Some(pos) = merge_require_cols.iter().position(|x| *x == i) {
                            ExprImpl::InputRef(Box::new(InputRef::new(
                                pos,
                                field.data_type.clone(),
                            )))
                        } else {
                            ExprImpl::Literal(Box::new(Literal::new(None, field.data_type.clone())))
                        }
                    })
                    .collect_vec();
                let project = LogicalProject::create(input, exprs);
                logical_share.replace_input(project);
            }
            let mapping =
                ColIndexMapping::with_remaining_columns(required_cols, self.schema().len());
            LogicalProject::with_mapping(self.clone(), mapping).into()
        } else {
            // Dispatch to `dyn PlanNode` instead of `PlanRef`.
            let dyn_t = self.deref();
            dyn_t.prune_col(required_cols, ctx)
        }
    }

    fn predicate_pushdown_inner(
        &self,
        predicate: Condition,
        ctx: &mut PredicatePushdownContext,
    ) -> LogicalPlanRef {
        if let Some(logical_share) = self.as_logical_share() {
            // Piggyback share removal if it has only one parent.
            if ctx.get_parent_num(logical_share) == 1 {
                let input: LogicalPlanRef = logical_share.input();
                return input.predicate_pushdown(predicate, ctx);
            }

            // `LogicalShare` can't be cloned, so we implement predicate pushdown for
            // `LogicalShare` here.
            // Basically, we need to wait for all parents of `LogicalShare` to push down the
            // predicate before we merge the predicates and push down.
            let parent_has_pushed = ctx.add_predicate(self.id(), predicate.clone());
            if parent_has_pushed == ctx.get_parent_num(logical_share) {
                let merge_predicate = ctx
                    .take_predicate(self.id())
                    .expect("must have predicate")
                    .into_iter()
                    .map(|mut c| Condition {
                        conjunctions: c
                            .conjunctions
                            .extract_if(.., |e| {
                                // If a predicate contains `now()`, an impure function, or a
                                // correlated input ref, don't push it through the share operator.
                                // A predicate with the `now()` function is regarded as a temporal
                                // filter predicate, which will be transformed into a temporal
                                // filter operator and cannot be OR-ed with other predicates.
                                let mut finder = ExprCorrelatedIdFinder::default();
                                finder.visit_expr(e);
                                e.count_nows() == 0
                                    && e.is_pure()
                                    && !finder.has_correlated_input_ref()
                            })
                            .collect(),
                    })
                    .reduce(|a, b| a.or(b))
                    .unwrap();

                // Rewrite the *entire* predicate for `LogicalShare`
                // before pushing it down to whatever plan node(s) below.
                // The reason this contains a "special" optimization,
                // rather than directly applying an explicit rule during stream or
                // batch plan optimization, is that predicate pushdown
                // *instantly* pushes down all predicates, and rules
                // cannot be applied in the middle.
                // Thus we need some on-the-fly (in-the-middle) rewrite
                // technique to help with this kind of optimization.
                let mut expr_rewriter = ExpressionSimplifyRewriter {};
                let mut new_predicate = Condition::true_cond();

                for c in merge_predicate.conjunctions {
                    let c = Condition::with_expr(expr_rewriter.rewrite_cond(c));
                    // Rebuild the conjunctions.
                    new_predicate = new_predicate.and(c);
                }

                let input: LogicalPlanRef = logical_share.input();
                let input = input.predicate_pushdown(new_predicate, ctx);
                logical_share.replace_input(input);
            }
            LogicalFilter::create(self.clone(), predicate)
        } else {
            // Dispatch to `dyn PlanNode` instead of `PlanRef`.
            let dyn_t = self.deref();
            dyn_t.predicate_pushdown(predicate, ctx)
        }
    }

    pub fn forbid_snapshot_backfill(&self) -> Option<String> {
        struct ForbidSnapshotBackfill {
            warning_msg: Option<String>,
        }
        impl LogicalPlanVisitor for ForbidSnapshotBackfill {
            type Result = ();

            type DefaultBehavior = impl DefaultBehavior<Self::Result>;

            fn default_behavior() -> Self::DefaultBehavior {
                DefaultValue
            }

            fn visit_logical_join(&mut self, plan: &LogicalJoin) -> Self::Result {
                self.visit(plan.left());
                self.visit(plan.right());
                if self.warning_msg.is_none() && plan.should_be_temporal_join() {
                    self.warning_msg =
                        Some("snapshot backfill disabled due to temporal join".to_owned());
                }
            }

            fn visit_logical_source(&mut self, plan: &LogicalSource) -> Self::Result {
                if self.warning_msg.is_none() && plan.is_shared_source() {
                    self.warning_msg = Some(format!(
                        "snapshot backfill disabled due to using shared source {:?}",
                        plan.core.catalog.as_ref().map(|c| &c.name)
                    ));
                }
            }
        }
        let mut forbid_snapshot = ForbidSnapshotBackfill { warning_msg: None };
        forbid_snapshot.visit(self.clone());
        forbid_snapshot.warning_msg
    }
}

impl ColPrunable for LogicalPlanRef {
    #[allow(clippy::let_and_return)]
    fn prune_col(&self, required_cols: &[usize], ctx: &mut ColumnPruningContext) -> LogicalPlanRef {
        let res = self.prune_col_inner(required_cols, ctx);
        #[cfg(debug_assertions)]
        super::heuristic_optimizer::HeuristicOptimizer::check_equivalent_plan(
            "column pruning",
            &LogicalProject::with_out_col_idx(self.clone(), required_cols.iter().cloned()).into(),
            &res,
        );
        res
    }
}

impl PredicatePushdown for LogicalPlanRef {
    #[allow(clippy::let_and_return)]
    fn predicate_pushdown(
        &self,
        predicate: Condition,
        ctx: &mut PredicatePushdownContext,
    ) -> LogicalPlanRef {
        #[cfg(debug_assertions)]
        let predicate_clone = predicate.clone();

        let res = self.predicate_pushdown_inner(predicate, ctx);

        #[cfg(debug_assertions)]
        super::heuristic_optimizer::HeuristicOptimizer::check_equivalent_plan(
            "predicate push down",
            &LogicalFilter::new(self.clone(), predicate_clone).into(),
            &res,
        );

        res
    }
}

impl<C: ConventionMarker> PlanRef<C> {
    pub fn clone_root_with_inputs(&self, inputs: &[PlanRef<C>]) -> PlanRef<C> {
        if let Some(share) = self.as_share_node() {
            assert_eq!(inputs.len(), 1);
            // We can't clone `LogicalShare`; we can only replace its input instead.
            share.replace_input(inputs[0].clone());
            self.clone()
        } else {
            // Dispatch to `dyn PlanNode` instead of `PlanRef`.
            let dyn_t = self.deref();
            dyn_t.clone_with_inputs(inputs)
        }
    }
}

/// Implement again for the `dyn` newtype wrapper.
impl<C: ConventionMarker> PlanRef<C> {
    pub fn node_type(&self) -> C::PlanNodeType {
        self.0.node_type()
    }

    pub fn plan_base(&self) -> &PlanBase<C> {
        self.0.plan_base()
    }
}

/// Allow access to all fields defined in [`GenericPlanRef`] for the type-erased plan node.
// TODO: may also implement on `dyn PlanNode` directly.
impl<C: ConventionMarker> GenericPlanRef for PlanRef<C> {
    fn id(&self) -> PlanNodeId {
        self.plan_base().id()
    }

    fn schema(&self) -> &Schema {
        self.plan_base().schema()
    }

    fn stream_key(&self) -> Option<&[usize]> {
        self.plan_base().stream_key()
    }

    fn ctx(&self) -> OptimizerContextRef {
        self.plan_base().ctx()
    }

    fn functional_dependency(&self) -> &FunctionalDependencySet {
        self.plan_base().functional_dependency()
    }
}

/// Allow access to all fields defined in [`PhysicalPlanRef`] for the type-erased plan node.
// TODO: may also implement on `dyn PlanNode` directly.
impl PhysicalPlanRef for BatchPlanRef {
    fn distribution(&self) -> &Distribution {
        self.plan_base().distribution()
    }
}

impl PhysicalPlanRef for StreamPlanRef {
    fn distribution(&self) -> &Distribution {
        self.plan_base().distribution()
    }
}

/// Allow access to all fields defined in [`StreamPlanNodeMetadata`] for the type-erased plan node.
// TODO: may also implement on `dyn PlanNode` directly.
impl StreamPlanNodeMetadata for StreamPlanRef {
    fn stream_kind(&self) -> StreamKind {
        self.plan_base().stream_kind()
    }

    fn emit_on_window_close(&self) -> bool {
        self.plan_base().emit_on_window_close()
    }

    fn watermark_columns(&self) -> &WatermarkColumns {
        self.plan_base().watermark_columns()
    }

    fn columns_monotonicity(&self) -> &MonotonicityMap {
        self.plan_base().columns_monotonicity()
    }
}

/// Allow access to all fields defined in [`BatchPlanNodeMetadata`] for the type-erased plan node.
// TODO: may also implement on `dyn PlanNode` directly.
impl BatchPlanNodeMetadata for BatchPlanRef {
    fn order(&self) -> &Order {
        self.plan_base().order()
    }
}

/// To make expression display ids start from 1 in explain output, hidden column names, and other
/// places, we reset the expression display id to 0 and clone the whole plan to reset the schema.
pub fn reorganize_elements_id<C: ConventionMarker>(plan: PlanRef<C>) -> PlanRef<C> {
    let backup = plan.ctx().backup_elem_ids();
    plan.ctx().reset_elem_ids();
    let plan = PlanCloner::clone_whole_plan(plan);
    plan.ctx().restore_elem_ids(backup);
    plan
}

pub trait Explain {
    /// Explain the whole plan tree.
    fn explain<'a>(&self) -> Pretty<'a>;

    /// Explain the whole plan tree with node ids.
    fn explain_with_id<'a>(&self) -> Pretty<'a>;

    /// Explain the plan node and return a string.
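    ///
    /// For a quick look at a plan during debugging, one might write (a sketch; the `plan`
    /// binding is hypothetical):
    ///
    /// ```ignore
    /// let plan: LogicalPlanRef = /* ... */;
    /// println!("{}", plan.explain_to_string());
    /// ```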
    fn explain_to_string(&self) -> String;

    /// Explain the plan node and return a JSON string.
    fn explain_to_json(&self) -> String;

    /// Explain the plan node and return an XML string.
    fn explain_to_xml(&self) -> String;

    /// Explain the plan node and return a YAML string.
    fn explain_to_yaml(&self) -> String;

    /// Explain the plan node and return a DOT format string.
    fn explain_to_dot(&self) -> String;
}

impl<C: ConventionMarker> Explain for PlanRef<C> {
    /// Explain the whole plan tree.
    fn explain<'a>(&self) -> Pretty<'a> {
        let mut node = self.distill();
        let inputs = self.inputs();
        for input in inputs.iter().peekable() {
            node.children.push(input.explain());
        }
        Pretty::Record(node)
    }

    /// Explain the whole plan tree with node ids.
    fn explain_with_id<'a>(&self) -> Pretty<'a> {
        let node_id = self.id();
        let mut node = self.distill();
        // NOTE(kwannoel): Can lead to poor performance if the plan is very large,
        // but we want to show the id first.
        node.fields
            .insert(0, ("id".into(), Pretty::display(&node_id.0)));
        let inputs = self.inputs();
        for input in inputs.iter().peekable() {
            node.children.push(input.explain_with_id());
        }
        Pretty::Record(node)
    }

    /// Explain the plan node and return a string.
    fn explain_to_string(&self) -> String {
        let plan = reorganize_elements_id(self.clone());

        let mut output = String::with_capacity(2048);
        let mut config = pretty_config();
        config.unicode(&mut output, &plan.explain());
        output
    }

    /// Explain the plan node and return a JSON string.
    fn explain_to_json(&self) -> String {
        let plan = reorganize_elements_id(self.clone());
        let explain_ir = plan.explain();
        serde_json::to_string_pretty(&PrettySerde(explain_ir, true))
            .expect("failed to serialize plan to json")
    }

    /// Explain the plan node and return an XML string.
    fn explain_to_xml(&self) -> String {
        let plan = reorganize_elements_id(self.clone());
        let explain_ir = plan.explain();
        quick_xml::se::to_string(&PrettySerde(explain_ir, true))
            .expect("failed to serialize plan to xml")
    }

    /// Explain the plan node and return a YAML string.
    fn explain_to_yaml(&self) -> String {
        let plan = reorganize_elements_id(self.clone());
        let explain_ir = plan.explain();
        serde_yaml::to_string(&PrettySerde(explain_ir, true))
            .expect("failed to serialize plan to yaml")
    }

    /// Explain the plan node and return a DOT format string.
    fn explain_to_dot(&self) -> String {
        let plan = reorganize_elements_id(self.clone());
        let explain_ir = plan.explain_with_id();
        let mut graph = Graph::<String, String>::new();
        let mut nodes = HashMap::new();
        build_graph_from_pretty(&explain_ir, &mut graph, &mut nodes, None);
        let dot = Dot::with_config(&graph, &[Config::EdgeNoLabel]);
        dot.to_string()
    }
}

impl<C: ConventionMarker> PlanRef<C> {
    pub fn as_share_node(&self) -> Option<&C::ShareNode> {
        C::as_share(self)
    }
}

pub(crate) fn pretty_config() -> PrettyConfig {
    PrettyConfig {
        indent: 3,
        need_boundaries: false,
        width: 2048,
        reduced_spaces: true,
    }
}

macro_rules! impl_generic_plan_ref_method {
    ($($convention:ident),+) => {
        paste! {
            $(
                /// Directly implement methods for `PlanNode` to access the fields defined in [`GenericPlanRef`].
                impl dyn [<$convention PlanNode>] {
                    pub fn id(&self) -> PlanNodeId {
                        self.plan_base().id()
                    }

                    pub fn ctx(&self) -> OptimizerContextRef {
                        self.plan_base().ctx().clone()
                    }

                    pub fn schema(&self) -> &Schema {
                        self.plan_base().schema()
                    }

                    pub fn stream_key(&self) -> Option<&[usize]> {
                        self.plan_base().stream_key()
                    }

                    pub fn functional_dependency(&self) -> &FunctionalDependencySet {
                        self.plan_base().functional_dependency()
                    }

                    pub fn explain_myself_to_string(&self) -> String {
                        self.distill_to_string()
                    }
                }
            )+
        }
    };
}

impl_generic_plan_ref_method!(Batch, Stream, Logical);

/// Recursion depth threshold at which the plan node visitor sends a notice to the user.
pub const PLAN_DEPTH_THRESHOLD: usize = 30;
/// Notice message for the plan node visitor to send to the user when the depth threshold is reached.
pub const PLAN_TOO_DEEP_NOTICE: &str = "The plan is too deep. \
Consider simplifying or splitting the query if you encounter any issues.";

impl dyn StreamPlanNode {
    /// Serialize the plan node and its children to a stream plan proto.
    ///
    /// Note that some operators have their own implementation of `to_stream_prost`. We have a
    /// hook inside to do some ad-hoc things.
    pub fn to_stream_prost(
        &self,
        state: &mut BuildFragmentGraphState,
    ) -> SchedulerResult<PbStreamPlan> {
        recursive::tracker!().recurse(|t| {
            if t.depth_reaches(PLAN_DEPTH_THRESHOLD) {
                notice_to_user(PLAN_TOO_DEEP_NOTICE);
            }

            use stream::prelude::*;

            if let Some(stream_table_scan) = self.as_stream_table_scan() {
                return stream_table_scan.adhoc_to_stream_prost(state);
            }
            if let Some(stream_cdc_table_scan) = self.as_stream_cdc_table_scan() {
                return stream_cdc_table_scan.adhoc_to_stream_prost(state);
            }
            if let Some(stream_source_scan) = self.as_stream_source_scan() {
                return stream_source_scan.adhoc_to_stream_prost(state);
            }
            if let Some(stream_share) = self.as_stream_share() {
                return stream_share.adhoc_to_stream_prost(state);
            }

            let node = Some(self.try_to_stream_prost_body(state)?);
            let input = self
                .inputs()
                .into_iter()
                .map(|plan| plan.to_stream_prost(state))
                .try_collect()?;
            // TODO: support pk_indices and operator_id
            Ok(PbStreamPlan {
                input,
                identity: self.explain_myself_to_string(),
                node_body: node,
                operator_id: self.id().to_stream_node_operator_id(),
                stream_key: self
                    .stream_key()
                    .unwrap_or_default()
                    .iter()
                    .map(|x| *x as u32)
                    .collect(),
                fields: self.schema().to_prost(),
                stream_kind: self.plan_base().stream_kind().to_protobuf() as i32,
            })
        })
    }
}

impl dyn BatchPlanNode {
    /// Serialize the plan node and its children to a batch plan proto.
    pub fn to_batch_prost(&self) -> SchedulerResult<PbBatchPlan> {
        self.to_batch_prost_identity(true)
    }

    /// Serialize the plan node and its children to a batch plan proto without the identity field
    /// (for testing).
    pub fn to_batch_prost_identity(&self, identity: bool) -> SchedulerResult<PbBatchPlan> {
        recursive::tracker!().recurse(|t| {
            if t.depth_reaches(PLAN_DEPTH_THRESHOLD) {
                notice_to_user(PLAN_TOO_DEEP_NOTICE);
            }

            let node_body = Some(self.try_to_batch_prost_body()?);
            let children = self
                .inputs()
                .into_iter()
                .map(|plan| plan.to_batch_prost_identity(identity))
                .try_collect()?;
            Ok(PbBatchPlan {
                children,
                identity: if identity {
                    self.explain_myself_to_string()
                } else {
                    "".into()
                },
                node_body,
            })
        })
    }
}

mod plan_base;
pub use plan_base::*;
#[macro_use]
mod plan_tree_node;
pub use plan_tree_node::*;
mod col_pruning;
pub use col_pruning::*;
mod expr_rewritable;
pub use expr_rewritable::*;
mod expr_visitable;

mod convert;
pub use convert::*;
mod eq_join_predicate;
pub use eq_join_predicate::*;
mod to_prost;
pub use to_prost::*;
mod predicate_pushdown;
pub use predicate_pushdown::*;
mod merge_eq_nodes;
pub use merge_eq_nodes::*;

pub mod batch;
pub mod generic;
pub mod stream;

pub use generic::{PlanAggCall, PlanAggCallDisplay};

mod batch_delete;
mod batch_exchange;
mod batch_expand;
mod batch_filter;
mod batch_get_channel_delta_stats;
mod batch_group_topn;
mod batch_hash_agg;
mod batch_hash_join;
mod batch_hop_window;
mod batch_insert;
mod batch_limit;
mod batch_log_seq_scan;
mod batch_lookup_join;
mod batch_max_one_row;
mod batch_nested_loop_join;
mod batch_over_window;
mod batch_project;
mod batch_project_set;
mod batch_seq_scan;
mod batch_simple_agg;
mod batch_sort;
mod batch_sort_agg;
mod batch_source;
mod batch_sys_seq_scan;
mod batch_table_function;
mod batch_topn;
mod batch_union;
mod batch_update;
mod batch_values;
mod logical_agg;
mod logical_apply;
mod logical_cdc_scan;
mod logical_changelog;
mod logical_cte_ref;
mod logical_dedup;
mod logical_delete;
mod logical_except;
mod logical_expand;
mod logical_filter;
mod logical_gap_fill;
mod logical_get_channel_delta_stats;
mod logical_hop_window;
mod logical_insert;
mod logical_intersect;
mod logical_join;
mod logical_kafka_scan;
mod logical_limit;
mod logical_locality_provider;
mod logical_max_one_row;
mod logical_multi_join;
mod logical_now;
mod logical_over_window;
mod logical_project;
mod logical_project_set;
mod logical_recursive_union;
mod logical_scan;
mod logical_share;
mod logical_source;
mod logical_sys_scan;
mod logical_table_function;
mod logical_topn;
mod logical_union;
mod logical_update;
mod logical_values;
mod stream_asof_join;
mod stream_changelog;
mod stream_dedup;
mod stream_delta_join;
mod stream_dml;
mod stream_dynamic_filter;
mod stream_eowc_gap_fill;
mod stream_eowc_over_window;
mod stream_exchange;
mod stream_expand;
mod stream_filter;
mod stream_fs_fetch;
mod stream_gap_fill;
mod stream_global_approx_percentile;
mod stream_group_topn;
mod stream_hash_agg;
mod stream_hash_join;
mod stream_hop_window;
mod stream_join_common;
mod stream_local_approx_percentile;
mod stream_locality_provider;
mod stream_materialize;
mod stream_materialized_exprs;
mod stream_now;
mod stream_over_window;
mod stream_project;
mod stream_project_set;
mod stream_row_id_gen;
mod stream_row_merge;
mod stream_simple_agg;
mod stream_sink;
mod stream_sort;
mod stream_source;
mod stream_source_scan;
mod stream_stateless_simple_agg;
mod stream_sync_log_store;
mod stream_table_scan;
mod stream_topn;
mod stream_union;
mod stream_values;
mod stream_watermark_filter;

mod batch_file_scan;
mod batch_iceberg_scan;
mod batch_kafka_scan;
mod batch_postgres_query;

mod batch_mysql_query;
mod derive;
mod logical_file_scan;
mod logical_iceberg_scan;
mod logical_postgres_query;

mod batch_vector_search;
mod logical_mysql_query;
mod logical_vector_search;
mod logical_vector_search_lookup_join;
mod stream_cdc_table_scan;
mod stream_share;
mod stream_temporal_join;
mod stream_upstream_sink_union;
mod stream_vector_index_lookup_join;
mod stream_vector_index_write;
pub mod utils;

pub use batch_delete::BatchDelete;
pub use batch_exchange::BatchExchange;
pub use batch_expand::BatchExpand;
pub use batch_file_scan::BatchFileScan;
pub use batch_filter::BatchFilter;
pub use batch_get_channel_delta_stats::BatchGetChannelDeltaStats;
pub use batch_group_topn::BatchGroupTopN;
pub use batch_hash_agg::BatchHashAgg;
pub use batch_hash_join::BatchHashJoin;
pub use batch_hop_window::BatchHopWindow;
pub use batch_iceberg_scan::BatchIcebergScan;
pub use batch_insert::BatchInsert;
pub use batch_kafka_scan::BatchKafkaScan;
pub use batch_limit::BatchLimit;
pub use batch_log_seq_scan::BatchLogSeqScan;
pub use batch_lookup_join::BatchLookupJoin;
pub use batch_max_one_row::BatchMaxOneRow;
pub use batch_mysql_query::BatchMySqlQuery;
pub use batch_nested_loop_join::BatchNestedLoopJoin;
pub use batch_over_window::BatchOverWindow;
pub use batch_postgres_query::BatchPostgresQuery;
pub use batch_project::BatchProject;
pub use batch_project_set::BatchProjectSet;
pub use batch_seq_scan::BatchSeqScan;
pub use batch_simple_agg::BatchSimpleAgg;
pub use batch_sort::BatchSort;
pub use batch_sort_agg::BatchSortAgg;
pub use batch_source::BatchSource;
pub use batch_sys_seq_scan::BatchSysSeqScan;
pub use batch_table_function::BatchTableFunction;
pub use batch_topn::BatchTopN;
pub use batch_union::BatchUnion;
pub use batch_update::BatchUpdate;
pub use batch_values::BatchValues;
pub use batch_vector_search::BatchVectorSearch;
pub use logical_agg::LogicalAgg;
pub use logical_apply::LogicalApply;
pub use logical_cdc_scan::LogicalCdcScan;
pub use logical_changelog::LogicalChangeLog;
pub use logical_cte_ref::LogicalCteRef;
pub use logical_dedup::LogicalDedup;
pub use logical_delete::LogicalDelete;
pub use logical_except::LogicalExcept;
pub use logical_expand::LogicalExpand;
pub use logical_file_scan::LogicalFileScan;
pub use logical_filter::LogicalFilter;
pub use logical_gap_fill::LogicalGapFill;
pub use logical_get_channel_delta_stats::LogicalGetChannelDeltaStats;
pub use logical_hop_window::LogicalHopWindow;
pub use logical_iceberg_scan::LogicalIcebergScan;
pub use logical_insert::LogicalInsert;
pub use logical_intersect::LogicalIntersect;
pub use logical_join::LogicalJoin;
pub use logical_kafka_scan::LogicalKafkaScan;
pub use logical_limit::LogicalLimit;
pub use logical_locality_provider::LogicalLocalityProvider;
pub use logical_max_one_row::LogicalMaxOneRow;
pub use logical_multi_join::{LogicalMultiJoin, LogicalMultiJoinBuilder};
pub use logical_mysql_query::LogicalMySqlQuery;
pub use logical_now::LogicalNow;
pub use logical_over_window::LogicalOverWindow;
pub use logical_postgres_query::LogicalPostgresQuery;
pub use logical_project::LogicalProject;
pub use logical_project_set::LogicalProjectSet;
pub use logical_recursive_union::LogicalRecursiveUnion;
pub use logical_scan::LogicalScan;
pub use logical_share::LogicalShare;
pub use logical_source::LogicalSource;
pub use logical_sys_scan::LogicalSysScan;
pub use logical_table_function::LogicalTableFunction;
pub use logical_topn::LogicalTopN;
pub use logical_union::LogicalUnion;
pub use logical_update::LogicalUpdate;
pub use logical_values::LogicalValues;
pub use logical_vector_search::LogicalVectorSearch;
pub use logical_vector_search_lookup_join::LogicalVectorSearchLookupJoin;
use risingwave_pb::id::StreamNodeLocalOperatorId;
pub use stream_asof_join::StreamAsOfJoin;
pub use stream_cdc_table_scan::StreamCdcTableScan;
pub use stream_changelog::StreamChangeLog;
pub use stream_dedup::StreamDedup;
pub use stream_delta_join::StreamDeltaJoin;
pub use stream_dml::StreamDml;
pub use stream_dynamic_filter::StreamDynamicFilter;
pub use stream_eowc_gap_fill::StreamEowcGapFill;
pub use stream_eowc_over_window::StreamEowcOverWindow;
pub use stream_exchange::StreamExchange;
pub use stream_expand::StreamExpand;
pub use stream_filter::StreamFilter;
pub use stream_fs_fetch::StreamFsFetch;
pub use stream_gap_fill::StreamGapFill;
pub use stream_global_approx_percentile::StreamGlobalApproxPercentile;
pub use stream_group_topn::StreamGroupTopN;
pub use stream_hash_agg::StreamHashAgg;
pub use stream_hash_join::StreamHashJoin;
pub use stream_hop_window::StreamHopWindow;
use stream_join_common::StreamJoinCommon;
pub use stream_local_approx_percentile::StreamLocalApproxPercentile;
pub use stream_locality_provider::StreamLocalityProvider;
pub use stream_materialize::StreamMaterialize;
pub use stream_materialized_exprs::StreamMaterializedExprs;
pub use stream_now::StreamNow;
pub use stream_over_window::StreamOverWindow;
pub use stream_project::StreamProject;
pub use stream_project_set::StreamProjectSet;
pub use stream_row_id_gen::StreamRowIdGen;
pub use stream_row_merge::StreamRowMerge;
pub use stream_share::StreamShare;
pub use stream_simple_agg::StreamSimpleAgg;
pub use stream_sink::{IcebergPartitionInfo, PartitionComputeInfo, StreamSink};
pub use stream_sort::StreamEowcSort;
pub use stream_source::StreamSource;
pub use stream_source_scan::StreamSourceScan;
pub use stream_stateless_simple_agg::StreamStatelessSimpleAgg;
pub use stream_sync_log_store::StreamSyncLogStore;
pub use stream_table_scan::StreamTableScan;
pub use stream_temporal_join::StreamTemporalJoin;
pub use stream_topn::StreamTopN;
pub use stream_union::StreamUnion;
pub use stream_upstream_sink_union::StreamUpstreamSinkUnion;
pub use stream_values::StreamValues;
pub use stream_vector_index_lookup_join::StreamVectorIndexLookupJoin;
pub use stream_vector_index_write::StreamVectorIndexWrite;
pub use stream_watermark_filter::StreamWatermarkFilter;

use crate::expr::{ExprImpl, ExprRewriter, ExprVisitor, InputRef, Literal};
use crate::optimizer::optimizer_context::OptimizerContextRef;
use crate::optimizer::plan_node::expr_visitable::ExprVisitable;
use crate::optimizer::plan_rewriter::PlanCloner;
use crate::optimizer::plan_visitor::{
    DefaultBehavior, DefaultValue, ExprCorrelatedIdFinder, LogicalPlanVisitor,
};
use crate::scheduler::SchedulerResult;
use crate::stream_fragmenter::BuildFragmentGraphState;
use crate::utils::{ColIndexMapping, Condition, DynEq, DynHash, Endo, Layer, Visit};

/// `for_all_plan_nodes` includes all plan nodes. If you add a new plan node to the project, be
/// sure to add it here and to its convention's list, such as `for_logical_plan_nodes`.
///
/// Every tuple has two elements, in the form `{ convention, name }`.
/// You can use it as follows:
/// ```rust
/// macro_rules! use_plan {
///     ($({ $convention:ident, $name:ident }),*) => {};
/// }
/// risingwave_frontend::for_all_plan_nodes! { use_plan }
/// ```
/// See the following implementations for examples.
#[macro_export]
macro_rules! for_all_plan_nodes {
    ($macro:path $(,$rest:tt)*) => {
        $macro! {
              { Logical, Agg }
            , { Logical, Apply }
            , { Logical, Filter }
            , { Logical, Project }
            , { Logical, Scan }
            , { Logical, CdcScan }
            , { Logical, SysScan }
            , { Logical, Source }
            , { Logical, Insert }
            , { Logical, Delete }
            , { Logical, Update }
            , { Logical, Join }
            , { Logical, Values }
            , { Logical, Limit }
            , { Logical, TopN }
            , { Logical, HopWindow }
            , { Logical, TableFunction }
            , { Logical, MultiJoin }
            , { Logical, Expand }
            , { Logical, ProjectSet }
            , { Logical, Union }
            , { Logical, OverWindow }
            , { Logical, Share }
            , { Logical, Now }
            , { Logical, Dedup }
            , { Logical, Intersect }
            , { Logical, Except }
            , { Logical, MaxOneRow }
            , { Logical, KafkaScan }
            , { Logical, IcebergScan }
            , { Logical, RecursiveUnion }
            , { Logical, CteRef }
            , { Logical, ChangeLog }
            , { Logical, FileScan }
            , { Logical, PostgresQuery }
            , { Logical, MySqlQuery }
            , { Logical, GapFill }
            , { Logical, VectorSearch }
            , { Logical, GetChannelDeltaStats }
            , { Logical, LocalityProvider }
            , { Logical, VectorSearchLookupJoin }
            , { Batch, SimpleAgg }
            , { Batch, HashAgg }
            , { Batch, SortAgg }
            , { Batch, Project }
            , { Batch, Filter }
            , { Batch, Insert }
            , { Batch, Delete }
            , { Batch, Update }
            , { Batch, SeqScan }
            , { Batch, SysSeqScan }
            , { Batch, LogSeqScan }
            , { Batch, HashJoin }
            , { Batch, NestedLoopJoin }
            , { Batch, Values }
            , { Batch, Sort }
            , { Batch, Exchange }
            , { Batch, Limit }
            , { Batch, TopN }
            , { Batch, HopWindow }
            , { Batch, TableFunction }
            , { Batch, Expand }
            , { Batch, LookupJoin }
            , { Batch, ProjectSet }
            , { Batch, Union }
            , { Batch, GroupTopN }
            , { Batch, Source }
            , { Batch, OverWindow }
            , { Batch, MaxOneRow }
            , { Batch, KafkaScan }
            , { Batch, IcebergScan }
            , { Batch, FileScan }
            , { Batch, PostgresQuery }
            , { Batch, MySqlQuery }
            , { Batch, GetChannelDeltaStats }
            , { Batch, VectorSearch }
            , { Stream, Project }
            , { Stream, Filter }
            , { Stream, TableScan }
            , { Stream, CdcTableScan }
            , { Stream, Sink }
            , { Stream, Source }
            , { Stream, SourceScan }
            , { Stream, HashJoin }
            , { Stream, Exchange }
            , { Stream, HashAgg }
            , { Stream, SimpleAgg }
            , { Stream, StatelessSimpleAgg }
            , { Stream, Materialize }
            , { Stream, TopN }
            , { Stream, HopWindow }
            , { Stream, DeltaJoin }
            , { Stream, Expand }
            , { Stream, DynamicFilter }
            , { Stream, ProjectSet }
            , { Stream, GroupTopN }
            , { Stream, Union }
            , { Stream, RowIdGen }
            , { Stream, Dml }
            , { Stream, Now }
            , { Stream, Share }
            , { Stream, WatermarkFilter }
            , { Stream, TemporalJoin }
            , { Stream, Values }
            , { Stream, Dedup }
            , { Stream, EowcOverWindow }
            , { Stream, EowcSort }
            , { Stream, OverWindow }
            , { Stream, FsFetch }
            , { Stream, ChangeLog }
            , { Stream, GlobalApproxPercentile }
            , { Stream, LocalApproxPercentile }
            , { Stream, RowMerge }
            , { Stream, AsOfJoin }
            , { Stream, SyncLogStore }
            , { Stream, MaterializedExprs }
            , { Stream, VectorIndexWrite }
            , { Stream, VectorIndexLookupJoin }
            , { Stream, UpstreamSinkUnion }
            , { Stream, LocalityProvider }
            , { Stream, EowcGapFill }
            , { Stream, GapFill }
            $(,$rest)*
        }
    };
}

#[macro_export]
macro_rules! for_each_convention_all_plan_nodes {
    ($macro:path $(,$rest:tt)*) => {
        $crate::for_all_plan_nodes! {
            $crate::for_each_convention_all_plan_nodes
            , $macro
            $(,$rest)*
        }
    };
    (
        $( { Logical, $logical_name:ident } ),*
        , $( { Batch, $batch_name:ident } ),*
        , $( { Stream, $stream_name:ident } ),*
        , $macro:path $(,$rest:tt)*
    ) => {
        $macro! {
            {
                Logical, { $( $logical_name ),* },
                Batch, { $( $batch_name ),* },
                Stream, { $( $stream_name ),* }
            }
            $(,$rest)*
        }
    }
}

/// Implement the `PlanNodeType` enum and `PlanNodeMeta` for each node.
macro_rules! impl_plan_node_meta {
    ({
        $( $convention:ident, { $( $name:ident ),* }),*
    }) => {
        paste!{
            $(
                /// Each enum value represents a `PlanNode` struct type, helping us to dispatch and downcast.
                #[derive(Copy, Clone, PartialEq, Debug, Hash, Eq, Serialize)]
                pub enum [<$convention PlanNodeType>] {
                    $( [<$convention $name>] ),*
                }
            )*
            $(
                $(impl PlanNodeMeta for [<$convention $name>] {
                    type Convention = $convention;
                    const NODE_TYPE: [<$convention PlanNodeType>] = [<$convention PlanNodeType>]::[<$convention $name>];

                    fn plan_base(&self) -> &PlanBase<$convention> {
                        &self.base
                    }
                }

                impl Deref for [<$convention $name>] {
                    type Target = PlanBase<$convention>;

                    fn deref(&self) -> &Self::Target {
                        &self.base
                    }
                })*
            )*
        }
    }
}

for_each_convention_all_plan_nodes! { impl_plan_node_meta }

macro_rules! impl_plan_node {
    ($({ $convention:ident, $name:ident }),*) => {
        paste!{
            $(impl [<$convention PlanNode>] for [<$convention $name>] { })*
        }
    }
}

for_all_plan_nodes! { impl_plan_node }

/// Implement the plan node downcast fn for each node.
macro_rules! impl_down_cast_fn {
    ({
        $( $convention:ident, { $( $name:ident ),* }),*
    }) => {
        paste!{
            $(
                impl dyn [<$convention PlanNode>] {
                    $( pub fn [< as_ $convention:snake _ $name:snake>](&self) -> Option<&[<$convention $name>]> {
                        self.downcast_ref::<[<$convention $name>]>()
                    } )*
                }
            )*
        }
    }
}

for_each_convention_all_plan_nodes! { impl_down_cast_fn }