risingwave_frontend/optimizer/plan_node/
mod.rs

// Copyright 2025 RisingWave Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Defines all kinds of nodes in the plan tree; each node represents a relational expression.
//!
//! We use an immutable tree structure: every node is immutable and cannot be modified after it
//! has been created. If you want to modify a node, such as rewriting the expressions in a
//! `ProjectNode` or changing a node's input, you need to create a new node. We use `Rc` as the
//! node's reference, and a node only stores references to its inputs, so changing a node only
//! requires creating one new node rather than cloning the entire sub-tree.
//!
//! So when you add a new node, make sure that:
//! - each field in the node struct is private;
//! - the node is preferably constructed through a unified `new()` function; if there are multiple
//!   ways to construct it, they should behave consistently;
//! - all fields are assigned during construction, so that the derivation of properties is
//!   finished in the `new()` function.
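//!
//! As an illustrative sketch of these conventions (the `LogicalExample` node and its
//! `generic::Example` core are hypothetical, not nodes in this module):
//!
//! ```ignore
//! pub struct LogicalExample {
//!     pub base: PlanBase<Logical>, // properties derived once, at construction
//!     core: generic::Example,      // private: callers go through accessors
//! }
//!
//! impl LogicalExample {
//!     /// The unified construction entry point: all properties (schema, functional
//!     /// dependencies, ...) are derived here and never mutated afterwards.
//!     pub fn new(core: generic::Example) -> Self {
//!         let base = PlanBase::new_logical_with_core(&core);
//!         Self { base, core }
//!     }
//! }
//! ```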

use std::collections::HashMap;
use std::fmt::Debug;
use std::hash::Hash;
use std::marker::PhantomData;
use std::ops::Deref;
use std::rc::Rc;

use downcast_rs::{Downcast, impl_downcast};
use dyn_clone::DynClone;
use itertools::Itertools;
use paste::paste;
use petgraph::dot::{Config, Dot};
use petgraph::graph::Graph;
use pretty_xmlish::{Pretty, PrettyConfig};
use risingwave_common::catalog::Schema;
use risingwave_common::util::recursive::{self, Recurse};
use risingwave_pb::batch_plan::PlanNode as PbBatchPlan;
use risingwave_pb::stream_plan::StreamNode as PbStreamPlan;
use serde::Serialize;

use self::batch::BatchPlanNodeMetadata;
use self::generic::{GenericPlanRef, PhysicalPlanRef};
use self::stream::StreamPlanNodeMetadata;
use self::utils::Distill;
use super::property::{
    Distribution, FunctionalDependencySet, MonotonicityMap, Order, WatermarkColumns,
};
use crate::error::{ErrorCode, Result};
use crate::optimizer::ExpressionSimplifyRewriter;
use crate::optimizer::property::StreamKind;
use crate::session::current::notice_to_user;
use crate::utils::{PrettySerde, build_graph_from_pretty};

/// A marker trait for different conventions, used for enforcing type safety.
///
/// Implementors are [`Logical`], [`Batch`], and [`Stream`].
pub trait ConventionMarker: 'static + Sized + Clone + Debug + Eq + PartialEq + Hash {
    /// The extra fields in the [`PlanBase`] of this convention.
    type Extra: 'static + Eq + Hash + Clone + Debug;
    /// The `Share` node type of this convention, or [`NoShareNode`] if sharing is unsupported.
    type ShareNode: ShareNode<Self>;
    /// The type-erased (object-safe) plan node type of this convention.
    type PlanRefDyn: PlanNodeCommon<Self> + Eq + Hash + ?Sized;
    /// The enum listing all plan node types of this convention.
    type PlanNodeType;

    /// Downcasts the plan node to the `Share` node of this convention, if it is one.
    fn as_share(plan: &Self::PlanRefDyn) -> Option<&Self::ShareNode>;
}

/// A `Share` node of the convention, supporting creation from a [`generic::Share`] core and
/// in-place replacement of its input.
pub trait ShareNode<C: ConventionMarker>:
    AnyPlanNodeMeta<C> + PlanTreeNodeUnary<C> + 'static
{
    fn new_share(share: generic::Share<PlanRef<C>>) -> PlanRef<C>;
    fn replace_input(&self, plan: PlanRef<C>);
}

/// An uninhabited placeholder for conventions that do not support `Share` nodes. Since it
/// contains the never type `!`, none of its methods can ever be called.
pub struct NoShareNode<C: ConventionMarker>(!, PhantomData<C>);

impl<C: ConventionMarker> ShareNode<C> for NoShareNode<C> {
    fn new_share(_plan: generic::Share<PlanRef<C>>) -> PlanRef<C> {
        unreachable!()
    }

    fn replace_input(&self, _plan: PlanRef<C>) {
        unreachable!()
    }
}

impl<C: ConventionMarker> PlanTreeNodeUnary<C> for NoShareNode<C> {
    fn input(&self) -> PlanRef<C> {
        unreachable!()
    }

    fn clone_with_input(&self, _input: PlanRef<C>) -> Self {
        unreachable!()
    }
}

impl<C: ConventionMarker> AnyPlanNodeMeta<C> for NoShareNode<C> {
    fn node_type(&self) -> C::PlanNodeType {
        unreachable!()
    }

    fn plan_base(&self) -> &PlanBase<C> {
        unreachable!()
    }
}

/// The marker for logical convention.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct Logical;
impl ConventionMarker for Logical {
    type Extra = plan_base::NoExtra;
    type PlanNodeType = LogicalPlanNodeType;
    type PlanRefDyn = dyn LogicalPlanNode;
    type ShareNode = LogicalShare;

    fn as_share(plan: &Self::PlanRefDyn) -> Option<&Self::ShareNode> {
        plan.as_logical_share()
    }
}

/// The marker for batch convention.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct Batch;
impl ConventionMarker for Batch {
    type Extra = plan_base::BatchExtra;
    type PlanNodeType = BatchPlanNodeType;
    type PlanRefDyn = dyn BatchPlanNode;
    type ShareNode = NoShareNode<Batch>;

    fn as_share(_plan: &Self::PlanRefDyn) -> Option<&Self::ShareNode> {
        None
    }
}

/// The marker for stream convention.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct Stream;
impl ConventionMarker for Stream {
    type Extra = plan_base::StreamExtra;
    type PlanNodeType = StreamPlanNodeType;
    type PlanRefDyn = dyn StreamPlanNode;
    type ShareNode = StreamShare;

    fn as_share(plan: &Self::PlanRefDyn) -> Option<&Self::ShareNode> {
        plan.as_stream_share()
    }
}

/// The trait for accessing the metadata and [`PlanBase`] of plan nodes.
pub trait PlanNodeMeta {
    type Convention: ConventionMarker;
    const NODE_TYPE: <Self::Convention as ConventionMarker>::PlanNodeType;
    /// Get the reference to the [`PlanBase`] with corresponding convention.
    fn plan_base(&self) -> &PlanBase<Self::Convention>;
}

// Intentionally made private.
mod plan_node_meta {
    use super::*;

    /// The object-safe version of [`PlanNodeMeta`], used as a super trait of `PlanNode`.
    ///
    /// Check [`PlanNodeMeta`] for more details.
    pub trait AnyPlanNodeMeta<C: ConventionMarker> {
        fn node_type(&self) -> C::PlanNodeType;
        fn plan_base(&self) -> &PlanBase<C>;
    }

    /// Implement [`AnyPlanNodeMeta`] for all [`PlanNodeMeta`].
    impl<P> AnyPlanNodeMeta<P::Convention> for P
    where
        P: PlanNodeMeta,
    {
        fn node_type(&self) -> <P::Convention as ConventionMarker>::PlanNodeType {
            P::NODE_TYPE
        }

        fn plan_base(&self) -> &PlanBase<P::Convention> {
            <Self as PlanNodeMeta>::plan_base(self)
        }
    }
}
use plan_node_meta::AnyPlanNodeMeta;

pub trait PlanNodeCommon<C: ConventionMarker> = PlanTreeNode<C>
    + DynClone
    + DynEq
    + DynHash
    + Distill
    + Debug
    + Downcast
    + ExprRewritable<C>
    + ExprVisitable
    + AnyPlanNodeMeta<C>;

/// The common trait over all plan nodes. Used by the optimizer framework, which treats all nodes
/// as `dyn PlanNode`.
///
/// We split the trait into many sub-traits so that we can easily use macros to implement them.
pub trait StreamPlanNode: PlanNodeCommon<Stream> + TryToStreamPb {}
pub trait BatchPlanNode:
    PlanNodeCommon<Batch> + ToDistributedBatch + ToLocalBatch + TryToBatchPb
{
}
pub trait LogicalPlanNode:
    PlanNodeCommon<Logical> + ColPrunable + PredicatePushdown + ToBatch + ToStream
{
}

macro_rules! impl_trait {
    ($($convention:ident),+) => {
        paste! {
            $(
                impl Hash for dyn [<$convention  PlanNode>] {
                    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
                        self.dyn_hash(state);
                    }
                }

                impl PartialEq for dyn [<$convention  PlanNode>] {
                    fn eq(&self, other: &Self) -> bool {
                        self.dyn_eq(other.as_dyn_eq())
                    }
                }

                impl Eq for dyn [<$convention  PlanNode>] {}
            )+
        }
    };
}

impl_trait!(Batch, Stream, Logical);
impl_downcast!(BatchPlanNode);
impl_downcast!(LogicalPlanNode);
impl_downcast!(StreamPlanNode);

// Using a new type wrapper allows direct function implementation on `PlanRef`,
// and we currently need a manual implementation of `PartialEq` for `PlanRef`.
#[allow(clippy::derived_hash_with_manual_eq)]
#[derive(Debug, Eq, Hash)]
pub struct PlanRef<C: ConventionMarker>(Rc<C::PlanRefDyn>);

impl<C: ConventionMarker> Clone for PlanRef<C> {
    fn clone(&self) -> Self {
        Self(self.0.clone())
    }
}

pub type LogicalPlanRef = PlanRef<Logical>;
pub type StreamPlanRef = PlanRef<Stream>;
pub type BatchPlanRef = PlanRef<Batch>;

// Cannot use the derived implementation for now.
// See https://github.com/rust-lang/rust/issues/31740
#[allow(clippy::op_ref)]
impl<C: ConventionMarker> PartialEq for PlanRef<C> {
    fn eq(&self, other: &Self) -> bool {
        &self.0 == &other.0
    }
}

impl<C: ConventionMarker> Deref for PlanRef<C> {
    type Target = C::PlanRefDyn;

    fn deref(&self) -> &Self::Target {
        self.0.deref()
    }
}

impl<T: LogicalPlanNode> From<T> for PlanRef<Logical> {
    fn from(value: T) -> Self {
        PlanRef(Rc::new(value) as _)
    }
}

impl<T: StreamPlanNode> From<T> for PlanRef<Stream> {
    fn from(value: T) -> Self {
        PlanRef(Rc::new(value) as _)
    }
}

impl<T: BatchPlanNode> From<T> for PlanRef<Batch> {
    fn from(value: T) -> Self {
        PlanRef(Rc::new(value) as _)
    }
}

impl<C: ConventionMarker> Layer for PlanRef<C> {
    type Sub = Self;

    fn map<F>(self, f: F) -> Self
    where
        F: FnMut(Self::Sub) -> Self::Sub,
    {
        self.clone_root_with_inputs(&self.inputs().into_iter().map(f).collect_vec())
    }

    fn descent<F>(&self, f: F)
    where
        F: FnMut(&Self::Sub),
    {
        self.inputs().iter().for_each(f);
    }
}

#[derive(Clone, Debug, Copy, Serialize, Hash, Eq, PartialEq, PartialOrd, Ord)]
pub struct PlanNodeId(pub i32);

impl PlanNodeId {
    pub fn to_stream_node_operator_id(self) -> StreamNodeLocalOperatorId {
        StreamNodeLocalOperatorId::new(self.0 as _)
    }
}

/// A more sophisticated `Endo` that takes the DAG structure of `PlanRef` into account.
/// In addition to `Endo`, one has to specify the `cached` function
/// to persist transformed `LogicalShare` nodes and their results,
/// and the `dag_apply` function will take care of transforming each `LogicalShare` node only once.
///
/// Note: Due to the way super traits are designed in Rust,
/// one needs to have separate implementation blocks for `Endo<PlanRef>` and `EndoPlan`.
/// Conventionally, the real transformation `apply` goes under `Endo<PlanRef>`,
/// although one can refer to `dag_apply` in the implementation of `apply`.
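///
/// A minimal sketch of an implementor (`MyRewriter` is hypothetical, not part of this module),
/// backing `cached` with a map keyed by the plan node id:
///
/// ```ignore
/// struct MyRewriter {
///     cache: HashMap<PlanNodeId, LogicalPlanRef>,
/// }
///
/// impl EndoPlan for MyRewriter {
///     fn cached<F>(&mut self, plan: LogicalPlanRef, mut f: F) -> LogicalPlanRef
///     where
///         F: FnMut(&mut Self) -> LogicalPlanRef,
///     {
///         if let Some(done) = self.cache.get(&plan.id()) {
///             return done.clone();
///         }
///         let done = f(self);
///         self.cache.insert(plan.id(), done.clone());
///         done
///     }
/// }
/// ```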
pub trait EndoPlan: Endo<LogicalPlanRef> {
    // Return the cached result of `plan` if present,
    // otherwise store and return the value provided by `f`.
    // Notice that to allow mutable access of `self` in `f`,
    // we let `f` take `&mut Self` as its first argument.
    fn cached<F>(&mut self, plan: LogicalPlanRef, f: F) -> LogicalPlanRef
    where
        F: FnMut(&mut Self) -> LogicalPlanRef;

    fn dag_apply(&mut self, plan: LogicalPlanRef) -> LogicalPlanRef {
        match plan.as_logical_share() {
            Some(_) => self.cached(plan.clone(), |this| this.tree_apply(plan.clone())),
            None => self.tree_apply(plan),
        }
    }
}

/// A more sophisticated `Visit` that takes the DAG structure of `PlanRef` into account.
/// In addition to `Visit`, one has to specify `visited`
/// to store and report visited `LogicalShare` nodes,
/// and the `dag_visit` function will take care of visiting each `LogicalShare` node only once.
/// See also `EndoPlan`.
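///
/// A minimal sketch of an implementor (`MyVisitor` is hypothetical, not part of this module),
/// tracking visited share nodes by their plan node ids:
///
/// ```ignore
/// struct MyVisitor {
///     visited: HashSet<PlanNodeId>,
/// }
///
/// impl VisitPlan for MyVisitor {
///     fn visited<F>(&mut self, plan: &LogicalPlanRef, mut f: F)
///     where
///         F: FnMut(&mut Self),
///     {
///         // `insert` returns `true` only on first insertion, i.e. the first visit.
///         if self.visited.insert(plan.id()) {
///             f(self);
///         }
///     }
/// }
/// ```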
pub trait VisitPlan: Visit<LogicalPlanRef> {
    // Skip visiting `plan` if visited, otherwise run the traversal provided by `f`.
    // Notice that to allow mutable access of `self` in `f`,
    // we let `f` take `&mut Self` as its first argument.
    fn visited<F>(&mut self, plan: &LogicalPlanRef, f: F)
    where
        F: FnMut(&mut Self);

    fn dag_visit(&mut self, plan: &LogicalPlanRef) {
        match plan.as_logical_share() {
            Some(_) => self.visited(plan, |this| this.tree_visit(plan)),
            None => self.tree_visit(plan),
        }
    }
}

impl<C: ConventionMarker> PlanRef<C> {
    pub fn rewrite_exprs_recursive(&self, r: &mut impl ExprRewriter) -> PlanRef<C> {
        let new = self.rewrite_exprs(r);
        let inputs: Vec<PlanRef<C>> = new
            .inputs()
            .iter()
            .map(|plan_ref| plan_ref.rewrite_exprs_recursive(r))
            .collect();
        new.clone_root_with_inputs(&inputs[..])
    }
}

pub(crate) trait VisitExprsRecursive {
    fn visit_exprs_recursive(&self, r: &mut impl ExprVisitor);
}

impl<C: ConventionMarker> VisitExprsRecursive for PlanRef<C> {
    fn visit_exprs_recursive(&self, r: &mut impl ExprVisitor) {
        self.visit_exprs(r);
        self.inputs()
            .iter()
            .for_each(|plan_ref| plan_ref.visit_exprs_recursive(r));
    }
}

impl<C: ConventionMarker> PlanRef<C> {
    pub fn expect_stream_key(&self) -> &[usize] {
        self.stream_key().unwrap_or_else(|| {
            panic!(
                "a stream key is expected but does not exist, plan:\n{}",
                self.explain_to_string()
            )
        })
    }
}

impl LogicalPlanRef {
    fn prune_col_inner(
        &self,
        required_cols: &[usize],
        ctx: &mut ColumnPruningContext,
    ) -> LogicalPlanRef {
        if let Some(logical_share) = self.as_logical_share() {
            // Check the share cache first. If the cache exists, it means this is the second
            // round of column pruning.
            if let Some((new_share, merge_required_cols)) = ctx.get_share_cache(self.id()) {
                // Piggyback share removal if it has only one parent.
                if ctx.get_parent_num(logical_share) == 1 {
                    let input: LogicalPlanRef = logical_share.input();
                    return input.prune_col(required_cols, ctx);
                }

                // If it is the first visit, recursively call `prune_col` for its input and
                // replace it.
                if ctx.visit_share_at_first_round(self.id()) {
                    let new_logical_share: &LogicalShare = new_share
                        .as_logical_share()
                        .expect("must be share operator");
                    let new_share_input = new_logical_share.input().prune_col(
                        &(0..new_logical_share.base.schema().len()).collect_vec(),
                        ctx,
                    );
                    new_logical_share.replace_input(new_share_input);
                }

                // Calculate the new required columns based on the new share.
                let new_required_cols: Vec<usize> = required_cols
                    .iter()
                    .map(|col| merge_required_cols.iter().position(|x| x == col).unwrap())
                    .collect_vec();
                let mapping = ColIndexMapping::with_remaining_columns(
                    &new_required_cols,
                    new_share.schema().len(),
                );
                return LogicalProject::with_mapping(new_share, mapping).into();
            }

            // `LogicalShare` can't be cloned, so we implement column pruning for `LogicalShare`
            // here.
            // Basically, we need to wait for all parents of `LogicalShare` to prune columns before
            // we merge the required columns and prune.
            let parent_has_pushed = ctx.add_required_cols(self.id(), required_cols.into());
            if parent_has_pushed == ctx.get_parent_num(logical_share) {
                let merge_require_cols = ctx
                    .take_required_cols(self.id())
                    .expect("must have required columns")
                    .into_iter()
                    .flat_map(|x| x.into_iter())
                    .sorted()
                    .dedup()
                    .collect_vec();
                let input: LogicalPlanRef = logical_share.input();
                let input = input.prune_col(&merge_require_cols, ctx);

                // Cache the new share operator for the second round.
                let new_logical_share = LogicalShare::create(input.clone());
                ctx.add_share_cache(self.id(), new_logical_share, merge_require_cols.clone());

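                // Keep the share's schema unchanged: required columns become input refs into the
                // pruned input, while pruned-away columns are filled with NULL literals of the
                // original types.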
                let exprs = logical_share
                    .base
                    .schema()
                    .fields
                    .iter()
                    .enumerate()
                    .map(|(i, field)| {
                        if let Some(pos) = merge_require_cols.iter().position(|x| *x == i) {
                            ExprImpl::InputRef(Box::new(InputRef::new(
                                pos,
                                field.data_type.clone(),
                            )))
                        } else {
                            ExprImpl::Literal(Box::new(Literal::new(None, field.data_type.clone())))
                        }
                    })
                    .collect_vec();
                let project = LogicalProject::create(input, exprs);
                logical_share.replace_input(project);
            }
            let mapping =
                ColIndexMapping::with_remaining_columns(required_cols, self.schema().len());
            LogicalProject::with_mapping(self.clone(), mapping).into()
        } else {
            // Dispatch to dyn PlanNode instead of PlanRef.
            let dyn_t = self.deref();
            dyn_t.prune_col(required_cols, ctx)
        }
    }

    fn predicate_pushdown_inner(
        &self,
        predicate: Condition,
        ctx: &mut PredicatePushdownContext,
    ) -> LogicalPlanRef {
        if let Some(logical_share) = self.as_logical_share() {
            // Piggyback share removal if it has only one parent.
            if ctx.get_parent_num(logical_share) == 1 {
                let input: LogicalPlanRef = logical_share.input();
                return input.predicate_pushdown(predicate, ctx);
            }

            // `LogicalShare` can't be cloned, so we implement predicate pushdown for
            // `LogicalShare` here.
            // Basically, we need to wait for all parents of `LogicalShare` to push down the
            // predicate before we merge the predicates and push down.
            let parent_has_pushed = ctx.add_predicate(self.id(), predicate.clone());
            if parent_has_pushed == ctx.get_parent_num(logical_share) {
                let merge_predicate = ctx
                    .take_predicate(self.id())
                    .expect("must have predicate")
                    .into_iter()
                    .map(|mut c| Condition {
                        conjunctions: c
                            .conjunctions
                            .extract_if(.., |e| {
                                // If a predicate contains `now()`, is impure, or contains a correlated input ref, don't push it through the share operator.
                                // A predicate with the `now()` function is regarded as a temporal filter predicate, which will be transformed into a temporal filter operator and cannot be OR-ed with other predicates.
                                let mut finder = ExprCorrelatedIdFinder::default();
                                finder.visit_expr(e);
                                e.count_nows() == 0
                                    && e.is_pure()
                                    && !finder.has_correlated_input_ref()
                            })
                            .collect(),
                    })
                    .reduce(|a, b| a.or(b))
                    .unwrap();

                // Rewrite the *entire* predicate for `LogicalShare`
                // before pushing it down to whatever plan node(s) below.
                // P.S. the reason this contains a "special" optimization,
                // rather than directly applying an explicit rule during stream or
                // batch plan optimization, is that predicate pushdown
                // *instantly* pushes down all predicates, and rules
                // cannot be applied in the middle.
                // Thus we need some on-the-fly (in-the-middle) rewrite
                // technique to help with this kind of optimization.
                let mut expr_rewriter = ExpressionSimplifyRewriter {};
                let mut new_predicate = Condition::true_cond();

                for c in merge_predicate.conjunctions {
                    let c = Condition::with_expr(expr_rewriter.rewrite_cond(c));
                    // rebuild the conjunctions
                    new_predicate = new_predicate.and(c);
                }

                let input: LogicalPlanRef = logical_share.input();
                let input = input.predicate_pushdown(new_predicate, ctx);
                logical_share.replace_input(input);
            }
            LogicalFilter::create(self.clone(), predicate)
        } else {
            // Dispatch to dyn PlanNode instead of PlanRef.
            let dyn_t = self.deref();
            dyn_t.predicate_pushdown(predicate, ctx)
        }
    }
}

impl ColPrunable for LogicalPlanRef {
    #[allow(clippy::let_and_return)]
    fn prune_col(&self, required_cols: &[usize], ctx: &mut ColumnPruningContext) -> LogicalPlanRef {
        let res = self.prune_col_inner(required_cols, ctx);
        #[cfg(debug_assertions)]
        super::heuristic_optimizer::HeuristicOptimizer::check_equivalent_plan(
            "column pruning",
            &LogicalProject::with_out_col_idx(self.clone(), required_cols.iter().cloned()).into(),
            &res,
        );
        res
    }
}

impl PredicatePushdown for LogicalPlanRef {
    #[allow(clippy::let_and_return)]
    fn predicate_pushdown(
        &self,
        predicate: Condition,
        ctx: &mut PredicatePushdownContext,
    ) -> LogicalPlanRef {
        #[cfg(debug_assertions)]
        let predicate_clone = predicate.clone();

        let res = self.predicate_pushdown_inner(predicate, ctx);

        #[cfg(debug_assertions)]
        super::heuristic_optimizer::HeuristicOptimizer::check_equivalent_plan(
            "predicate push down",
            &LogicalFilter::new(self.clone(), predicate_clone).into(),
            &res,
        );

        res
    }
}

impl<C: ConventionMarker> PlanRef<C> {
    pub fn clone_root_with_inputs(&self, inputs: &[PlanRef<C>]) -> PlanRef<C> {
        if let Some(share) = self.as_share_node() {
            assert_eq!(inputs.len(), 1);
            // We can't clone a share node; we can only replace its input instead.
            share.replace_input(inputs[0].clone());
            self.clone()
        } else {
            // Dispatch to dyn PlanNode instead of PlanRef.
            let dyn_t = self.deref();
            dyn_t.clone_with_inputs(inputs)
        }
    }
}

/// Implement again for the `dyn` newtype wrapper.
impl<C: ConventionMarker> PlanRef<C> {
    pub fn node_type(&self) -> C::PlanNodeType {
        self.0.node_type()
    }

    pub fn plan_base(&self) -> &PlanBase<C> {
        self.0.plan_base()
    }
}

/// Allow access to all fields defined in [`GenericPlanRef`] for the type-erased plan node.
// TODO: may also implement on `dyn PlanNode` directly.
impl<C: ConventionMarker> GenericPlanRef for PlanRef<C> {
    fn id(&self) -> PlanNodeId {
        self.plan_base().id()
    }

    fn schema(&self) -> &Schema {
        self.plan_base().schema()
    }

    fn stream_key(&self) -> Option<&[usize]> {
        self.plan_base().stream_key()
    }

    fn ctx(&self) -> OptimizerContextRef {
        self.plan_base().ctx()
    }

    fn functional_dependency(&self) -> &FunctionalDependencySet {
        self.plan_base().functional_dependency()
    }
}

/// Allow access to all fields defined in [`PhysicalPlanRef`] for the type-erased plan node.
// TODO: may also implement on `dyn PlanNode` directly.
impl PhysicalPlanRef for BatchPlanRef {
    fn distribution(&self) -> &Distribution {
        self.plan_base().distribution()
    }
}

impl PhysicalPlanRef for StreamPlanRef {
    fn distribution(&self) -> &Distribution {
        self.plan_base().distribution()
    }
}

/// Allow access to all fields defined in [`StreamPlanNodeMetadata`] for the type-erased plan node.
// TODO: may also implement on `dyn PlanNode` directly.
impl StreamPlanNodeMetadata for StreamPlanRef {
    fn stream_kind(&self) -> StreamKind {
        self.plan_base().stream_kind()
    }

    fn emit_on_window_close(&self) -> bool {
        self.plan_base().emit_on_window_close()
    }

    fn watermark_columns(&self) -> &WatermarkColumns {
        self.plan_base().watermark_columns()
    }

    fn columns_monotonicity(&self) -> &MonotonicityMap {
        self.plan_base().columns_monotonicity()
    }
}

/// Allow access to all fields defined in [`BatchPlanNodeMetadata`] for the type-erased plan node.
// TODO: may also implement on `dyn PlanNode` directly.
impl BatchPlanNodeMetadata for BatchPlanRef {
    fn order(&self) -> &Order {
        self.plan_base().order()
    }
}

/// To make expression display ids start from 1 in `EXPLAIN` output, hidden column names, and
/// other places, we reset the expression display id to 0 and clone the whole plan to reset the
/// schema.
pub fn reorganize_elements_id<C: ConventionMarker>(plan: PlanRef<C>) -> PlanRef<C> {
    let backup = plan.ctx().backup_elem_ids();
    plan.ctx().reset_elem_ids();
    let plan = PlanCloner::clone_whole_plan(plan);
    plan.ctx().restore_elem_ids(backup);
    plan
}

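/// Pretty-printing of plan trees for `EXPLAIN` statements and debugging.
///
/// A minimal usage sketch (assuming an already-built `plan: LogicalPlanRef`):
///
/// ```ignore
/// let text = plan.explain_to_string(); // human-readable tree, as rendered by `EXPLAIN`
/// let dot = plan.explain_to_dot();     // GraphViz DOT, for visualization tools
/// ```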
pub trait Explain {
    /// Render the explain output of the whole plan tree.
    fn explain<'a>(&self) -> Pretty<'a>;

    /// Render the explain output of the whole plan tree, with node ids.
    fn explain_with_id<'a>(&self) -> Pretty<'a>;

    /// Explain the plan node and return a string.
    fn explain_to_string(&self) -> String;

    /// Explain the plan node and return a JSON string.
    fn explain_to_json(&self) -> String;

    /// Explain the plan node and return an XML string.
    fn explain_to_xml(&self) -> String;

    /// Explain the plan node and return a YAML string.
    fn explain_to_yaml(&self) -> String;

    /// Explain the plan node and return a DOT format string.
    fn explain_to_dot(&self) -> String;
}

impl<C: ConventionMarker> Explain for PlanRef<C> {
    /// Render the explain output of the whole plan tree.
    fn explain<'a>(&self) -> Pretty<'a> {
        let mut node = self.distill();
        let inputs = self.inputs();
        for input in inputs.iter().peekable() {
            node.children.push(input.explain());
        }
        Pretty::Record(node)
    }

    /// Render the explain output of the whole plan tree, with node ids.
    fn explain_with_id<'a>(&self) -> Pretty<'a> {
        let node_id = self.id();
        let mut node = self.distill();
        // NOTE(kwannoel): Can lead to poor performance if the plan is very large,
        // but we want to show the id first.
        node.fields
            .insert(0, ("id".into(), Pretty::display(&node_id.0)));
        let inputs = self.inputs();
        for input in inputs.iter().peekable() {
            node.children.push(input.explain_with_id());
        }
        Pretty::Record(node)
    }

    /// Explain the plan node and return a string.
    fn explain_to_string(&self) -> String {
        let plan = reorganize_elements_id(self.clone());

        let mut output = String::with_capacity(2048);
        let mut config = pretty_config();
        config.unicode(&mut output, &plan.explain());
        output
    }

    /// Explain the plan node and return a JSON string.
    fn explain_to_json(&self) -> String {
        let plan = reorganize_elements_id(self.clone());
        let explain_ir = plan.explain();
        serde_json::to_string_pretty(&PrettySerde(explain_ir, true))
            .expect("failed to serialize plan to json")
    }

    /// Explain the plan node and return an XML string.
    fn explain_to_xml(&self) -> String {
        let plan = reorganize_elements_id(self.clone());
        let explain_ir = plan.explain();
        quick_xml::se::to_string(&PrettySerde(explain_ir, true))
            .expect("failed to serialize plan to xml")
    }

    /// Explain the plan node and return a YAML string.
    fn explain_to_yaml(&self) -> String {
        let plan = reorganize_elements_id(self.clone());
        let explain_ir = plan.explain();
        serde_yaml::to_string(&PrettySerde(explain_ir, true))
            .expect("failed to serialize plan to yaml")
    }

    /// Explain the plan node and return a DOT format string.
    fn explain_to_dot(&self) -> String {
        let plan = reorganize_elements_id(self.clone());
        let explain_ir = plan.explain_with_id();
        let mut graph = Graph::<String, String>::new();
        let mut nodes = HashMap::new();
        build_graph_from_pretty(&explain_ir, &mut graph, &mut nodes, None);
        let dot = Dot::with_config(&graph, &[Config::EdgeNoLabel]);
        dot.to_string()
    }
}

impl<C: ConventionMarker> PlanRef<C> {
    pub fn as_share_node(&self) -> Option<&C::ShareNode> {
        C::as_share(self)
    }
}

pub(crate) fn pretty_config() -> PrettyConfig {
    PrettyConfig {
        indent: 3,
        need_boundaries: false,
        width: 2048,
        reduced_spaces: true,
    }
}

macro_rules! impl_generic_plan_ref_method {
    ($($convention:ident),+) => {
        paste! {
            $(
                /// Directly implement methods for `PlanNode` to access the fields defined in [`GenericPlanRef`].
                impl dyn [<$convention PlanNode>] {
                    pub fn id(&self) -> PlanNodeId {
                        self.plan_base().id()
                    }

                    pub fn ctx(&self) -> OptimizerContextRef {
                        self.plan_base().ctx().clone()
                    }

                    pub fn schema(&self) -> &Schema {
                        self.plan_base().schema()
                    }

                    pub fn stream_key(&self) -> Option<&[usize]> {
                        self.plan_base().stream_key()
                    }

                    pub fn functional_dependency(&self) -> &FunctionalDependencySet {
                        self.plan_base().functional_dependency()
                    }

                    pub fn explain_myself_to_string(&self) -> String {
                        self.distill_to_string()
                    }
                }
            )+
        }
    };
}

impl_generic_plan_ref_method!(Batch, Stream, Logical);

/// Recursion depth threshold for plan node visitor to send notice to user.
pub const PLAN_DEPTH_THRESHOLD: usize = 30;
/// Notice message for plan node visitor to send to user when the depth threshold is reached.
pub const PLAN_TOO_DEEP_NOTICE: &str = "The plan is too deep. \
Consider simplifying or splitting the query if you encounter any issues.";

impl dyn StreamPlanNode {
    /// Serialize the plan node and its children to a stream plan proto.
    ///
    /// Note that some operators have their own implementations of `to_stream_prost`. We have a
    /// hook inside to do some ad-hoc things.
    pub fn to_stream_prost(
        &self,
        state: &mut BuildFragmentGraphState,
    ) -> SchedulerResult<PbStreamPlan> {
        recursive::tracker!().recurse(|t| {
            if t.depth_reaches(PLAN_DEPTH_THRESHOLD) {
                notice_to_user(PLAN_TOO_DEEP_NOTICE);
            }

            use stream::prelude::*;

            if let Some(stream_table_scan) = self.as_stream_table_scan() {
                return stream_table_scan.adhoc_to_stream_prost(state);
            }
            if let Some(stream_cdc_table_scan) = self.as_stream_cdc_table_scan() {
                return stream_cdc_table_scan.adhoc_to_stream_prost(state);
            }
            if let Some(stream_source_scan) = self.as_stream_source_scan() {
                return stream_source_scan.adhoc_to_stream_prost(state);
            }
            if let Some(stream_share) = self.as_stream_share() {
                return stream_share.adhoc_to_stream_prost(state);
            }

            let node = Some(self.try_to_stream_prost_body(state)?);
            let input = self
                .inputs()
                .into_iter()
                .map(|plan| plan.to_stream_prost(state))
                .try_collect()?;
            // TODO: support pk_indices and operator_id
            Ok(PbStreamPlan {
                input,
                identity: self.explain_myself_to_string(),
                node_body: node,
                operator_id: self.id().to_stream_node_operator_id(),
                stream_key: self
                    .stream_key()
                    .unwrap_or_default()
                    .iter()
                    .map(|x| *x as u32)
                    .collect(),
                fields: self.schema().to_prost(),
                stream_kind: self.plan_base().stream_kind().to_protobuf() as i32,
            })
        })
    }
}

impl dyn BatchPlanNode {
    /// Serialize the plan node and its children to a batch plan proto.
    pub fn to_batch_prost(&self) -> SchedulerResult<PbBatchPlan> {
        self.to_batch_prost_identity(true)
    }

    /// Serialize the plan node and its children to a batch plan proto without the identity field
    /// (for testing).
    pub fn to_batch_prost_identity(&self, identity: bool) -> SchedulerResult<PbBatchPlan> {
        recursive::tracker!().recurse(|t| {
            if t.depth_reaches(PLAN_DEPTH_THRESHOLD) {
                notice_to_user(PLAN_TOO_DEEP_NOTICE);
            }

            let node_body = Some(self.try_to_batch_prost_body()?);
            let children = self
                .inputs()
                .into_iter()
                .map(|plan| plan.to_batch_prost_identity(identity))
                .try_collect()?;
            Ok(PbBatchPlan {
                children,
                identity: if identity {
                    self.explain_myself_to_string()
                } else {
                    "".into()
                },
                node_body,
            })
        })
    }
}

mod plan_base;
pub use plan_base::*;
#[macro_use]
mod plan_tree_node;
pub use plan_tree_node::*;
mod col_pruning;
pub use col_pruning::*;
mod expr_rewritable;
pub use expr_rewritable::*;
mod expr_visitable;

mod convert;
pub use convert::*;
mod eq_join_predicate;
pub use eq_join_predicate::*;
mod to_prost;
pub use to_prost::*;
mod predicate_pushdown;
pub use predicate_pushdown::*;
mod merge_eq_nodes;
pub use merge_eq_nodes::*;

pub mod batch;
pub mod generic;
pub mod stream;

pub use generic::{PlanAggCall, PlanAggCallDisplay};

mod batch_delete;
mod batch_exchange;
mod batch_expand;
mod batch_filter;
mod batch_get_channel_delta_stats;
mod batch_group_topn;
mod batch_hash_agg;
mod batch_hash_join;
mod batch_hop_window;
mod batch_insert;
mod batch_limit;
mod batch_log_seq_scan;
mod batch_lookup_join;
mod batch_max_one_row;
mod batch_nested_loop_join;
mod batch_over_window;
mod batch_project;
mod batch_project_set;
mod batch_seq_scan;
mod batch_simple_agg;
mod batch_sort;
mod batch_sort_agg;
mod batch_source;
mod batch_sys_seq_scan;
mod batch_table_function;
mod batch_topn;
mod batch_union;
mod batch_update;
mod batch_values;
mod logical_agg;
mod logical_apply;
mod logical_cdc_scan;
mod logical_changelog;
mod logical_cte_ref;
mod logical_dedup;
mod logical_delete;
mod logical_except;
mod logical_expand;
mod logical_filter;
mod logical_gap_fill;
mod logical_get_channel_delta_stats;
mod logical_hop_window;
mod logical_insert;
mod logical_intersect;
mod logical_join;
mod logical_kafka_scan;
mod logical_limit;
mod logical_locality_provider;
mod logical_max_one_row;
mod logical_multi_join;
mod logical_now;
mod logical_over_window;
mod logical_project;
mod logical_project_set;
mod logical_recursive_union;
mod logical_scan;
mod logical_share;
mod logical_source;
mod logical_sys_scan;
mod logical_table_function;
mod logical_topn;
mod logical_union;
mod logical_update;
mod logical_values;
mod stream_asof_join;
mod stream_changelog;
mod stream_dedup;
mod stream_delta_join;
mod stream_dml;
mod stream_dynamic_filter;
mod stream_eowc_gap_fill;
mod stream_eowc_over_window;
mod stream_exchange;
mod stream_expand;
mod stream_filter;
mod stream_fs_fetch;
mod stream_gap_fill;
mod stream_global_approx_percentile;
mod stream_group_topn;
mod stream_hash_agg;
mod stream_hash_join;
mod stream_hop_window;
mod stream_join_common;
mod stream_local_approx_percentile;
mod stream_locality_provider;
mod stream_materialize;
mod stream_materialized_exprs;
mod stream_now;
mod stream_over_window;
mod stream_project;
mod stream_project_set;
mod stream_row_id_gen;
mod stream_row_merge;
mod stream_simple_agg;
mod stream_sink;
mod stream_sort;
mod stream_source;
mod stream_source_scan;
mod stream_stateless_simple_agg;
mod stream_sync_log_store;
mod stream_table_scan;
mod stream_topn;
mod stream_union;
mod stream_values;
mod stream_watermark_filter;

mod batch_file_scan;
mod batch_iceberg_scan;
mod batch_kafka_scan;
mod batch_postgres_query;

mod batch_mysql_query;
mod derive;
mod logical_file_scan;
mod logical_iceberg_scan;
mod logical_postgres_query;

mod batch_vector_search;
mod logical_mysql_query;
mod logical_vector_search;
mod logical_vector_search_lookup_join;
mod stream_cdc_table_scan;
mod stream_share;
mod stream_temporal_join;
mod stream_upstream_sink_union;
mod stream_vector_index_lookup_join;
mod stream_vector_index_write;
pub mod utils;

pub use batch_delete::BatchDelete;
pub use batch_exchange::BatchExchange;
pub use batch_expand::BatchExpand;
pub use batch_file_scan::BatchFileScan;
pub use batch_filter::BatchFilter;
pub use batch_get_channel_delta_stats::BatchGetChannelDeltaStats;
pub use batch_group_topn::BatchGroupTopN;
pub use batch_hash_agg::BatchHashAgg;
pub use batch_hash_join::BatchHashJoin;
pub use batch_hop_window::BatchHopWindow;
pub use batch_iceberg_scan::BatchIcebergScan;
pub use batch_insert::BatchInsert;
pub use batch_kafka_scan::BatchKafkaScan;
pub use batch_limit::BatchLimit;
pub use batch_log_seq_scan::BatchLogSeqScan;
pub use batch_lookup_join::BatchLookupJoin;
pub use batch_max_one_row::BatchMaxOneRow;
pub use batch_mysql_query::BatchMySqlQuery;
pub use batch_nested_loop_join::BatchNestedLoopJoin;
pub use batch_over_window::BatchOverWindow;
pub use batch_postgres_query::BatchPostgresQuery;
pub use batch_project::BatchProject;
pub use batch_project_set::BatchProjectSet;
pub use batch_seq_scan::BatchSeqScan;
pub use batch_simple_agg::BatchSimpleAgg;
pub use batch_sort::BatchSort;
pub use batch_sort_agg::BatchSortAgg;
pub use batch_source::BatchSource;
pub use batch_sys_seq_scan::BatchSysSeqScan;
pub use batch_table_function::BatchTableFunction;
pub use batch_topn::BatchTopN;
pub use batch_union::BatchUnion;
pub use batch_update::BatchUpdate;
pub use batch_values::BatchValues;
pub use batch_vector_search::BatchVectorSearch;
pub use logical_agg::LogicalAgg;
pub use logical_apply::LogicalApply;
pub use logical_cdc_scan::LogicalCdcScan;
pub use logical_changelog::LogicalChangeLog;
pub use logical_cte_ref::LogicalCteRef;
pub use logical_dedup::LogicalDedup;
pub use logical_delete::LogicalDelete;
pub use logical_except::LogicalExcept;
pub use logical_expand::LogicalExpand;
pub use logical_file_scan::LogicalFileScan;
pub use logical_filter::LogicalFilter;
pub use logical_gap_fill::LogicalGapFill;
pub use logical_get_channel_delta_stats::LogicalGetChannelDeltaStats;
pub use logical_hop_window::LogicalHopWindow;
pub use logical_iceberg_scan::LogicalIcebergScan;
pub use logical_insert::LogicalInsert;
pub use logical_intersect::LogicalIntersect;
pub use logical_join::LogicalJoin;
pub use logical_kafka_scan::LogicalKafkaScan;
pub use logical_limit::LogicalLimit;
pub use logical_locality_provider::LogicalLocalityProvider;
pub use logical_max_one_row::LogicalMaxOneRow;
pub use logical_multi_join::{LogicalMultiJoin, LogicalMultiJoinBuilder};
pub use logical_mysql_query::LogicalMySqlQuery;
pub use logical_now::LogicalNow;
pub use logical_over_window::LogicalOverWindow;
pub use logical_postgres_query::LogicalPostgresQuery;
pub use logical_project::LogicalProject;
pub use logical_project_set::LogicalProjectSet;
pub use logical_recursive_union::LogicalRecursiveUnion;
pub use logical_scan::LogicalScan;
pub use logical_share::LogicalShare;
pub use logical_source::LogicalSource;
pub use logical_sys_scan::LogicalSysScan;
pub use logical_table_function::LogicalTableFunction;
pub use logical_topn::LogicalTopN;
pub use logical_union::LogicalUnion;
pub use logical_update::LogicalUpdate;
pub use logical_values::LogicalValues;
pub use logical_vector_search::LogicalVectorSearch;
pub use logical_vector_search_lookup_join::LogicalVectorSearchLookupJoin;
use risingwave_pb::id::StreamNodeLocalOperatorId;
pub use stream_asof_join::StreamAsOfJoin;
pub use stream_cdc_table_scan::StreamCdcTableScan;
pub use stream_changelog::StreamChangeLog;
pub use stream_dedup::StreamDedup;
pub use stream_delta_join::StreamDeltaJoin;
pub use stream_dml::StreamDml;
pub use stream_dynamic_filter::StreamDynamicFilter;
pub use stream_eowc_gap_fill::StreamEowcGapFill;
pub use stream_eowc_over_window::StreamEowcOverWindow;
pub use stream_exchange::StreamExchange;
pub use stream_expand::StreamExpand;
pub use stream_filter::StreamFilter;
pub use stream_fs_fetch::StreamFsFetch;
pub use stream_gap_fill::StreamGapFill;
pub use stream_global_approx_percentile::StreamGlobalApproxPercentile;
pub use stream_group_topn::StreamGroupTopN;
pub use stream_hash_agg::StreamHashAgg;
pub use stream_hash_join::StreamHashJoin;
pub use stream_hop_window::StreamHopWindow;
use stream_join_common::StreamJoinCommon;
pub use stream_local_approx_percentile::StreamLocalApproxPercentile;
pub use stream_locality_provider::StreamLocalityProvider;
pub use stream_materialize::StreamMaterialize;
pub use stream_materialized_exprs::StreamMaterializedExprs;
pub use stream_now::StreamNow;
pub use stream_over_window::StreamOverWindow;
pub use stream_project::StreamProject;
pub use stream_project_set::StreamProjectSet;
pub use stream_row_id_gen::StreamRowIdGen;
pub use stream_row_merge::StreamRowMerge;
pub use stream_share::StreamShare;
pub use stream_simple_agg::StreamSimpleAgg;
pub use stream_sink::{IcebergPartitionInfo, PartitionComputeInfo, StreamSink};
pub use stream_sort::StreamEowcSort;
pub use stream_source::StreamSource;
pub use stream_source_scan::StreamSourceScan;
pub use stream_stateless_simple_agg::StreamStatelessSimpleAgg;
pub use stream_sync_log_store::StreamSyncLogStore;
pub use stream_table_scan::StreamTableScan;
pub use stream_temporal_join::StreamTemporalJoin;
pub use stream_topn::StreamTopN;
pub use stream_union::StreamUnion;
pub use stream_upstream_sink_union::StreamUpstreamSinkUnion;
pub use stream_values::StreamValues;
pub use stream_vector_index_lookup_join::StreamVectorIndexLookupJoin;
pub use stream_vector_index_write::StreamVectorIndexWrite;
pub use stream_watermark_filter::StreamWatermarkFilter;

use crate::expr::{ExprImpl, ExprRewriter, ExprVisitor, InputRef, Literal};
use crate::optimizer::optimizer_context::OptimizerContextRef;
use crate::optimizer::plan_node::expr_visitable::ExprVisitable;
use crate::optimizer::plan_rewriter::PlanCloner;
use crate::optimizer::plan_visitor::ExprCorrelatedIdFinder;
use crate::scheduler::SchedulerResult;
use crate::stream_fragmenter::BuildFragmentGraphState;
use crate::utils::{ColIndexMapping, Condition, DynEq, DynHash, Endo, Layer, Visit};

/// `for_all_plan_nodes` includes all plan nodes. If you add a new plan node
/// to the project, be sure to add it here and in its convention macros like `for_logical_plan_nodes`.
///
/// Every tuple has two elements: `{ convention, name }`.
/// You can use it as follows:
/// ```rust
/// macro_rules! use_plan {
///     ($({ $convention:ident, $name:ident }),*) => {};
/// }
/// risingwave_frontend::for_all_plan_nodes! { use_plan }
/// ```
/// See the following implementations for examples.
#[macro_export]
macro_rules! for_all_plan_nodes {
    ($macro:path $(,$rest:tt)*) => {
        $macro! {
              { Logical, Agg }
            , { Logical, Apply }
            , { Logical, Filter }
            , { Logical, Project }
            , { Logical, Scan }
            , { Logical, CdcScan }
            , { Logical, SysScan }
            , { Logical, Source }
            , { Logical, Insert }
            , { Logical, Delete }
            , { Logical, Update }
            , { Logical, Join }
            , { Logical, Values }
            , { Logical, Limit }
            , { Logical, TopN }
            , { Logical, HopWindow }
            , { Logical, TableFunction }
            , { Logical, MultiJoin }
            , { Logical, Expand }
            , { Logical, ProjectSet }
            , { Logical, Union }
            , { Logical, OverWindow }
            , { Logical, Share }
            , { Logical, Now }
            , { Logical, Dedup }
            , { Logical, Intersect }
            , { Logical, Except }
            , { Logical, MaxOneRow }
            , { Logical, KafkaScan }
            , { Logical, IcebergScan }
            , { Logical, RecursiveUnion }
            , { Logical, CteRef }
            , { Logical, ChangeLog }
            , { Logical, FileScan }
            , { Logical, PostgresQuery }
            , { Logical, MySqlQuery }
            , { Logical, GapFill }
            , { Logical, VectorSearch }
            , { Logical, GetChannelDeltaStats }
            , { Logical, LocalityProvider }
            , { Logical, VectorSearchLookupJoin }
            , { Batch, SimpleAgg }
            , { Batch, HashAgg }
            , { Batch, SortAgg }
            , { Batch, Project }
            , { Batch, Filter }
            , { Batch, Insert }
            , { Batch, Delete }
            , { Batch, Update }
            , { Batch, SeqScan }
            , { Batch, SysSeqScan }
            , { Batch, LogSeqScan }
            , { Batch, HashJoin }
            , { Batch, NestedLoopJoin }
            , { Batch, Values }
            , { Batch, Sort }
            , { Batch, Exchange }
            , { Batch, Limit }
            , { Batch, TopN }
            , { Batch, HopWindow }
            , { Batch, TableFunction }
            , { Batch, Expand }
            , { Batch, LookupJoin }
            , { Batch, ProjectSet }
            , { Batch, Union }
            , { Batch, GroupTopN }
            , { Batch, Source }
            , { Batch, OverWindow }
            , { Batch, MaxOneRow }
            , { Batch, KafkaScan }
            , { Batch, IcebergScan }
            , { Batch, FileScan }
            , { Batch, PostgresQuery }
            , { Batch, MySqlQuery }
            , { Batch, GetChannelDeltaStats }
            , { Batch, VectorSearch }
            , { Stream, Project }
            , { Stream, Filter }
            , { Stream, TableScan }
            , { Stream, CdcTableScan }
            , { Stream, Sink }
            , { Stream, Source }
            , { Stream, SourceScan }
            , { Stream, HashJoin }
            , { Stream, Exchange }
            , { Stream, HashAgg }
            , { Stream, SimpleAgg }
            , { Stream, StatelessSimpleAgg }
            , { Stream, Materialize }
            , { Stream, TopN }
            , { Stream, HopWindow }
            , { Stream, DeltaJoin }
            , { Stream, Expand }
            , { Stream, DynamicFilter }
            , { Stream, ProjectSet }
            , { Stream, GroupTopN }
            , { Stream, Union }
            , { Stream, RowIdGen }
            , { Stream, Dml }
            , { Stream, Now }
            , { Stream, Share }
            , { Stream, WatermarkFilter }
            , { Stream, TemporalJoin }
            , { Stream, Values }
            , { Stream, Dedup }
            , { Stream, EowcOverWindow }
            , { Stream, EowcSort }
            , { Stream, OverWindow }
            , { Stream, FsFetch }
            , { Stream, ChangeLog }
            , { Stream, GlobalApproxPercentile }
            , { Stream, LocalApproxPercentile }
            , { Stream, RowMerge }
            , { Stream, AsOfJoin }
            , { Stream, SyncLogStore }
            , { Stream, MaterializedExprs }
            , { Stream, VectorIndexWrite }
            , { Stream, VectorIndexLookupJoin }
            , { Stream, UpstreamSinkUnion }
            , { Stream, LocalityProvider }
            , { Stream, EowcGapFill }
            , { Stream, GapFill }
            $(,$rest)*
        }
    };
}

#[macro_export]
macro_rules! for_each_convention_all_plan_nodes {
    ($macro:path $(,$rest:tt)*) => {
        $crate::for_all_plan_nodes! {
            $crate::for_each_convention_all_plan_nodes
            , $macro
            $(,$rest)*
        }
    };
    (
        $( { Logical, $logical_name:ident } ),*
        , $( { Batch, $batch_name:ident } ),*
        , $( { Stream, $stream_name:ident } ),*
        , $macro:path $(,$rest:tt)*
    ) => {
        $macro! {
            {
                Logical, { $( $logical_name ),* },
                Batch, { $( $batch_name ),* },
                Stream, { $( $stream_name ),* }
            }
            $(,$rest)*
        }
    }
}

/// Implement the `PlanNodeType` enum and [`PlanNodeMeta`] for each plan node.
macro_rules! impl_plan_node_meta {
    ({
        $( $convention:ident, { $( $name:ident ),* }),*
    }) => {
        paste!{
            $(
                /// Each enum value represents a `PlanNode` struct type, helping us to dispatch and downcast.
                #[derive(Copy, Clone, PartialEq, Debug, Hash, Eq, Serialize)]
                pub enum [<$convention PlanNodeType>] {
                    $( [<$convention $name>] ),*
                }
            )*
            $(
                $(impl PlanNodeMeta for [<$convention $name>] {
                    type Convention = $convention;
                    const NODE_TYPE: [<$convention PlanNodeType>] = [<$convention PlanNodeType>]::[<$convention $name>];

                    fn plan_base(&self) -> &PlanBase<$convention> {
                        &self.base
                    }
                }

                impl Deref for [<$convention $name>] {
                    type Target = PlanBase<$convention>;

                    fn deref(&self) -> &Self::Target {
                        &self.base
                    }
                })*
            )*
        }
    }
}

for_each_convention_all_plan_nodes! { impl_plan_node_meta }

macro_rules! impl_plan_node {
    ($({ $convention:ident, $name:ident }),*) => {
        paste!{
            $(impl [<$convention PlanNode>] for [<$convention $name>] { })*
        }
    }
}

for_all_plan_nodes! { impl_plan_node }

/// Implement plan node downcast fns for each node.
macro_rules! impl_down_cast_fn {
    ({
        $( $convention:ident, { $( $name:ident ),* }),*
    }) => {
        paste!{
            $(
                impl dyn [<$convention PlanNode>] {
                    $( pub fn [< as_ $convention:snake _ $name:snake>](&self) -> Option<&[<$convention $name>]> {
                        self.downcast_ref::<[<$convention $name>]>()
                    } )*
                }
            )*
        }
    }
}

for_each_convention_all_plan_nodes! { impl_down_cast_fn }