risingwave_frontend/optimizer/plan_node/stream_cdc_table_scan.rs

// Copyright 2025 RisingWave Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use itertools::Itertools;
use pretty_xmlish::{Pretty, XmlNode};
use risingwave_common::catalog::{ColumnCatalog, Field};
use risingwave_common::types::DataType;
use risingwave_common::util::sort_util::OrderType;
use risingwave_pb::stream_plan::PbStreamNode;
use risingwave_pb::stream_plan::stream_node::PbNodeBody;

use super::stream::prelude::*;
use super::utils::{Distill, childless_record};
use super::{ExprRewritable, PlanBase, PlanRef, StreamNode, generic};
use crate::catalog::ColumnId;
use crate::expr::{Expr, ExprImpl, ExprRewriter, ExprType, ExprVisitor, FunctionCall, InputRef};
use crate::optimizer::plan_node::expr_visitable::ExprVisitable;
use crate::optimizer::plan_node::utils::{IndicesDisplay, TableCatalogBuilder};
use crate::optimizer::property::{Distribution, DistributionDisplay};
use crate::scheduler::SchedulerResult;
use crate::stream_fragmenter::BuildFragmentGraphState;
use crate::{Explain, TableCatalog};

/// `StreamCdcTableScan` is a virtual plan node representing a stream cdc table scan.
/// It will be converted to a cdc backfill node plus a merge node (for the upstream source).
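/// The converted plan has the shape `merge -> cdc filter -> exchange(simple) -> cdc backfill`;
/// see [`StreamCdcTableScan::adhoc_to_stream_prost`] for the construction.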
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct StreamCdcTableScan {
    pub base: PlanBase<Stream>,
    core: generic::CdcScan,
}

impl StreamCdcTableScan {
    pub fn new(core: generic::CdcScan) -> Self {
        let distribution = Distribution::SomeShard;
        let base = PlanBase::new_stream_with_core(
            &core,
            distribution,
            core.append_only(),
            false,
            core.watermark_columns(),
            core.columns_monotonicity(),
        );
        Self { base, core }
    }

    pub fn table_name(&self) -> &str {
        &self.core.table_name
    }

    pub fn core(&self) -> &generic::CdcScan {
        &self.core
    }

    /// Build the catalog for the cdc backfill state table.
    /// Right now we only persist whether the backfill is finished and the corresponding cdc offset.
    /// schema: | `split_id` | `pk...` | `backfill_finished` | `row_count` | `cdc_offset` |
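    ///
    /// For illustration, a state row after the backfill finishes might look like
    /// (all values hypothetical; the offset format mirrors the MySQL-style offsets
    /// used in the tests below):
    /// | 'split-1' | <pk values> | true | 4096 | {"file": "1.binlog", "pos": 100} |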
    pub fn build_backfill_state_catalog(
        &self,
        state: &mut BuildFragmentGraphState,
    ) -> TableCatalog {
        let mut catalog_builder = TableCatalogBuilder::default();
        let upstream_schema = &self.core.get_table_columns();

        // Use `split_id` as primary key in state table.
        // Currently we only support a single split for cdc backfill.
        catalog_builder.add_column(&Field::with_name(DataType::Varchar, "split_id"));
        catalog_builder.add_order_column(0, OrderType::ascending());

        // pk columns
        for col_order in self.core.primary_key() {
            let col = &upstream_schema[col_order.column_index];
            catalog_builder.add_column(&Field::from(col));
        }

        catalog_builder.add_column(&Field::with_name(DataType::Boolean, "backfill_finished"));

        // `row_count` column, the number of rows read from the snapshot
        catalog_builder.add_column(&Field::with_name(DataType::Int64, "row_count"));

        // The offset is only for observability, not for recovery right now.
        catalog_builder.add_column(&Field::with_name(DataType::Jsonb, "cdc_offset"));

        // Leave the dist key empty, since the cdc backfill executor is a singleton.
        catalog_builder
            .build(vec![], 1)
            .with_id(state.gen_table_id_wrapped())
    }
}

impl_plan_tree_node_for_leaf! { StreamCdcTableScan }

impl Distill for StreamCdcTableScan {
    fn distill<'a>(&self) -> XmlNode<'a> {
        let verbose = self.base.ctx().is_explain_verbose();
        let mut vec = Vec::with_capacity(4);
        vec.push(("table", Pretty::from(self.core.table_name.clone())));
        vec.push(("columns", self.core.columns_pretty(verbose)));

        if verbose {
            let pk = IndicesDisplay {
                indices: self.stream_key().unwrap_or_default(),
                schema: self.base.schema(),
            };
            vec.push(("pk", pk.distill()));
            let dist = Pretty::display(&DistributionDisplay {
                distribution: self.distribution(),
                input_schema: self.base.schema(),
            });
            vec.push(("dist", dist));
        }

        childless_record("StreamCdcTableScan", vec)
    }
}

impl StreamNode for StreamCdcTableScan {
    fn to_stream_prost_body(&self, _state: &mut BuildFragmentGraphState) -> PbNodeBody {
        unreachable!(
            "stream scan cannot be converted into a prost body -- call `adhoc_to_stream_prost` instead."
        )
    }
}

impl StreamCdcTableScan {
    /// plan: merge -> filter -> exchange(simple) -> `stream_scan`
    pub fn adhoc_to_stream_prost(
        &self,
        state: &mut BuildFragmentGraphState,
    ) -> SchedulerResult<PbStreamNode> {
        use risingwave_pb::stream_plan::*;

        let stream_key = self
            .stream_key()
            .unwrap_or_else(|| {
                panic!(
                    "stream plan should always have a stream key, but it is missing; sub plan: {}",
                    PlanRef::from(self.clone()).explain_to_string()
                )
            })
            .iter()
            .map(|x| *x as u32)
            .collect_vec();

        // The schema of the shared cdc source upstream differs from the snapshot schema.
        let cdc_source_schema = ColumnCatalog::debezium_cdc_source_cols()
            .into_iter()
            .map(|c| Field::from(c.column_desc).to_prost())
            .collect_vec();

        let catalog = self
            .build_backfill_state_catalog(state)
            .to_internal_table_prost();

        // We need to pass the id of the upstream source job here.
        let upstream_source_id = self.core.cdc_table_desc.source_id.table_id;

        // Filter upstream source chunks by the value of the `_rw_table_name` column.
        let filter_expr =
            Self::build_cdc_filter_expr(self.core.cdc_table_desc.external_table_name.as_str());

        let filter_operator_id = self.core.ctx.next_plan_node_id();
        // The filter node receives chunks in `(payload, _rw_offset, _rw_table_name)` schema.
        let filter_stream_node = StreamNode {
            operator_id: filter_operator_id.0 as _,
            input: vec![
                // The merge node body will be filled by the `ActorBuilder` on the meta service.
                PbStreamNode {
                    node_body: Some(PbNodeBody::Merge(Default::default())),
                    identity: "Upstream".into(),
                    fields: cdc_source_schema.clone(),
                    stream_key: vec![], // not used
                    ..Default::default()
                },
            ],
            stream_key: vec![], // not used
            append_only: true,
            identity: "StreamCdcFilter".to_owned(),
            fields: cdc_source_schema.clone(),
            node_body: Some(PbNodeBody::CdcFilter(Box::new(CdcFilterNode {
                search_condition: Some(filter_expr.to_expr_proto()),
                upstream_source_id,
            }))),
        };

        let exchange_operator_id = self.core.ctx.next_plan_node_id();
        // Add a simple exchange node between filter and stream scan
        let exchange_stream_node = StreamNode {
            operator_id: exchange_operator_id.0 as _,
            input: vec![filter_stream_node],
            stream_key: vec![], // not used
            append_only: true,
            identity: "Exchange".to_owned(),
            fields: cdc_source_schema.clone(),
            node_body: Some(PbNodeBody::Exchange(Box::new(ExchangeNode {
                strategy: Some(DispatchStrategy {
                    r#type: DispatcherType::Simple as _,
                    dist_key_indices: vec![], // simple exchange doesn't need dist key
                    output_indices: (0..cdc_source_schema.len() as u32).collect(),
                }),
            }))),
        };
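        // A simple dispatcher delivers every chunk to the single downstream actor,
        // which matches the cdc backfill executor's singleton distribution noted in
        // `build_backfill_state_catalog`.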

        // The required columns from the external table
        let upstream_column_ids = self
            .core
            .output_and_pk_column_ids()
            .iter()
            .map(ColumnId::get_id)
            .collect_vec();

        let output_indices = self
            .core
            .output_column_ids()
            .iter()
            .map(|i| {
                upstream_column_ids
                    .iter()
                    .position(|&x| x == i.get_id())
                    .unwrap() as u32
            })
            .collect_vec();
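        // For illustration: assuming `output_and_pk_column_ids` lists the output
        // columns followed by the pk columns, hypothetical output column ids [2, 3]
        // with pk column id [1] give `upstream_column_ids = [2, 3, 1]` and
        // `output_indices = [0, 1]`, i.e. each output column id is mapped to its
        // position in `upstream_column_ids`.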

        tracing::debug!(
            output_column_ids=?self.core.output_column_ids(),
            ?upstream_column_ids,
            ?output_indices,
            "stream cdc table scan output indices"
        );

        let options = self.core.options.to_proto();
        let stream_scan_body = PbNodeBody::StreamCdcScan(Box::new(StreamCdcScanNode {
            table_id: upstream_source_id,
            upstream_column_ids,
            output_indices,
            // The table desc used by the backfill executor
            state_table: Some(catalog),
            cdc_table_desc: Some(self.core.cdc_table_desc.to_protobuf()),
            rate_limit: self.base.ctx().overwrite_options().backfill_rate_limit,
            disable_backfill: options.disable_backfill,
            options: Some(options),
        }));

        // plan: merge -> filter -> exchange(simple) -> stream_scan
        Ok(PbStreamNode {
            fields: self.schema().to_prost(),
            input: vec![exchange_stream_node],
            node_body: Some(stream_scan_body),
            stream_key,
            operator_id: self.base.id().0 as u64,
            identity: self.distill_to_string(),
            append_only: self.append_only(),
        })
    }

    // The filter node receives input chunks in `(payload, _rw_offset, _rw_table_name)` schema.
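    /// Builds the predicate `_rw_table_name = '<cdc_table_name>'`, where
    /// `_rw_table_name` is the third input column (index 2). A minimal usage
    /// sketch (the table name is hypothetical):
    ///
    /// ```ignore
    /// let expr = StreamCdcTableScan::build_cdc_filter_expr("public.t2");
    /// // `expr` is true only for rows whose `_rw_table_name` is "public.t2";
    /// // see the test module below for an end-to-end check via `eval_row`.
    /// ```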
    pub fn build_cdc_filter_expr(cdc_table_name: &str) -> ExprImpl {
        // filter by the `_rw_table_name` column
        FunctionCall::new(
            ExprType::Equal,
            vec![
                InputRef::new(2, DataType::Varchar).into(),
                ExprImpl::literal_varchar(cdc_table_name.into()),
            ],
        )
        .unwrap()
        .into()
    }
}

impl ExprRewritable for StreamCdcTableScan {
    fn has_rewritable_expr(&self) -> bool {
        true
    }

    fn rewrite_exprs(&self, r: &mut dyn ExprRewriter) -> PlanRef {
        let mut core = self.core.clone();
        core.rewrite_exprs(r);
        Self::new(core).into()
    }
}

impl ExprVisitable for StreamCdcTableScan {
    fn visit_exprs(&self, v: &mut dyn ExprVisitor) {
        self.core.visit_exprs(v);
    }
}

#[cfg(test)]
mod tests {
    use std::str::FromStr;

    use risingwave_common::row::OwnedRow;
    use risingwave_common::types::{JsonbVal, ScalarImpl};

    use super::*;

    #[tokio::test]
    async fn test_cdc_filter_expr() {
        let t1_json = JsonbVal::from_str(r#"{ "before": null, "after": { "v": 111, "v2": 222.2 }, "source": { "version": "2.2.0.Alpha3", "connector": "mysql", "name": "dbserver1", "ts_ms": 1678428689000, "snapshot": "false", "db": "inventory", "sequence": null, "table": "t1", "server_id": 223344, "gtid": null, "file": "mysql-bin.000003", "pos": 774, "row": 0, "thread": 8, "query": null }, "op": "c", "ts_ms": 1678428689389, "transaction": null }"#).unwrap();
        let t2_json = JsonbVal::from_str(r#"{ "before": null, "after": { "v": 333, "v2": 666.6 }, "source": { "version": "2.2.0.Alpha3", "connector": "mysql", "name": "dbserver1", "ts_ms": 1678428689000, "snapshot": "false", "db": "inventory", "sequence": null, "table": "t2", "server_id": 223344, "gtid": null, "file": "mysql-bin.000003", "pos": 884, "row": 0, "thread": 8, "query": null }, "op": "c", "ts_ms": 1678428689389, "transaction": null }"#).unwrap();

        // NOTE: transaction metadata records are expected to be filtered out before reaching the cdc filter.
        let trx_json = JsonbVal::from_str(r#"{"data_collections": null, "event_count": null, "id": "35319:3962662584", "status": "BEGIN", "ts_ms": 1704263537068}"#).unwrap();
        let row1 = OwnedRow::new(vec![
            Some(t1_json.into()),
            Some(r#"{"file": "1.binlog", "pos": 100}"#.into()),
            Some("public.t2".into()),
        ]);
        let row2 = OwnedRow::new(vec![
            Some(t2_json.into()),
            Some(r#"{"file": "2.binlog", "pos": 100}"#.into()),
            Some("abs.t2".into()),
        ]);

        let row3 = OwnedRow::new(vec![
            Some(trx_json.into()),
            Some(r#"{"file": "3.binlog", "pos": 100}"#.into()),
            Some("public.t2".into()),
        ]);

        let filter_expr = StreamCdcTableScan::build_cdc_filter_expr("public.t2");
        assert_eq!(
            filter_expr.eval_row(&row1).await.unwrap(),
            Some(ScalarImpl::Bool(true))
        );
        assert_eq!(
            filter_expr.eval_row(&row2).await.unwrap(),
            Some(ScalarImpl::Bool(false))
        );
        assert_eq!(
            filter_expr.eval_row(&row3).await.unwrap(),
            Some(ScalarImpl::Bool(true))
        )
    }
}