risingwave_stream/executor/source/reader_stream.rs

// Copyright 2025 RisingWave Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;
use std::time::Duration;

use itertools::Itertools;
use risingwave_common::catalog::{ColumnId, TableId};
use risingwave_common::metrics::GLOBAL_ERROR_METRICS;
use risingwave_connector::parser::schema_change::SchemaChangeEnvelope;
use risingwave_connector::source::reader::desc::SourceDesc;
use risingwave_connector::source::{
    BoxSourceChunkStream, CdcAutoSchemaChangeFailCallback, ConnectorState, CreateSplitReaderResult,
    SourceContext, SourceCtrlOpts, SplitMetaData, StreamChunkWithState,
};
use thiserror_ext::AsReport;
use tokio::sync::{mpsc, oneshot};

use super::{apply_rate_limit, get_split_offset_col_idx};
use crate::common::rate_limit::limited_chunk_size;
use crate::executor::prelude::*;

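/// Channel sender for schema change events and the failure-report callback handed to the
/// source context; both are `None` when auto schema change is disabled.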
type AutoSchemaChangeSetup = (
    Option<mpsc::Sender<(SchemaChangeEnvelope, oneshot::Sender<()>)>>,
    Option<CdcAutoSchemaChangeFailCallback>,
);

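/// Builds the underlying source reader stream for the source executor, handling
/// (re)building on errors, rate limiting, and CDC auto schema change wiring.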
pub(crate) struct StreamReaderBuilder {
    pub source_desc: SourceDesc,
    pub rate_limit: Option<u32>,
    pub source_id: TableId,
    pub source_name: String,
    pub reader_stream: Option<BoxSourceChunkStream>,

    // cdc related
    pub is_auto_schema_change_enable: bool,
    pub actor_ctx: ActorContextRef,
}

impl StreamReaderBuilder {
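    /// When auto schema change is enabled, spawn a background task that forwards schema
    /// change events to the meta service and build a callback for reporting failed CDC
    /// schema changes; otherwise return `(None, None)`.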
    fn setup_auto_schema_change(&self) -> AutoSchemaChangeSetup {
        if self.is_auto_schema_change_enable {
            let (schema_change_tx, mut schema_change_rx) =
                mpsc::channel::<(SchemaChangeEnvelope, oneshot::Sender<()>)>(16);
            let meta_client = self.actor_ctx.meta_client.clone();
            // Spawn a task to handle schema change events from the source parser.
            let _join_handle = tokio::task::spawn(async move {
                while let Some((schema_change, finish_tx)) = schema_change_rx.recv().await {
                    let table_ids = schema_change.table_ids();
                    tracing::info!(
                        target: "auto_schema_change",
                        "recv a schema change event for tables: {:?}", table_ids);
                    // TODO: retry on RPC error
                    if let Some(ref meta_client) = meta_client {
                        match meta_client
                            .auto_schema_change(schema_change.to_protobuf())
                            .await
                        {
                            Ok(_) => {
                                tracing::info!(
                                    target: "auto_schema_change",
                                    "schema change success for tables: {:?}", table_ids);
                                finish_tx.send(()).unwrap();
                            }
                            Err(e) => {
                                tracing::error!(
                                    target: "auto_schema_change",
                                    error = %e.as_report(), "schema change error");

                                finish_tx.send(()).unwrap();
                            }
                        }
                    }
                }
            });

            // Create a callback for reporting CDC auto schema change failure events.
            let on_cdc_auto_schema_change_failure = if let Some(ref meta_client) =
                self.actor_ctx.meta_client
            {
                let meta_client = meta_client.clone();
                let source_id = self.source_id;
                Some(CdcAutoSchemaChangeFailCallback::new(
                    move |table_id: u32,
                          table_name: String,
                          cdc_table_id: String,
                          upstream_ddl: String,
                          fail_info: String| {
                        let meta_client = meta_client.clone();
                        let source_id = source_id;
                        tokio::spawn(async move {
                            if let Err(e) = meta_client
                                .add_cdc_auto_schema_change_fail_event(
                                    table_id,
                                    table_name,
                                    cdc_table_id,
                                    upstream_ddl,
                                    fail_info,
                                )
                                .await
                            {
                                tracing::warn!(
                                    error = %e.as_report(),
                                    source_id = source_id.table_id,
                                    "Failed to add CDC auto schema change fail event to event log."
                                );
                            }
                        });
                    },
                ))
            } else {
                None
            };

            (Some(schema_change_tx), on_cdc_auto_schema_change_failure)
        } else {
            info!("auto schema change is disabled in config");
            (None, None)
        }
    }

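    /// Collect the column ids of the source and assemble the [`SourceContext`] used to
    /// build the connector stream, including rate-limit options and the auto schema
    /// change hooks.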
    fn prepare_source_stream_build(&self) -> (Vec<ColumnId>, SourceContext) {
        let column_ids = self
            .source_desc
            .columns
            .iter()
            .map(|column_desc| column_desc.column_id)
            .collect_vec();

        let (schema_change_tx, on_cdc_auto_schema_change_failure) = self.setup_auto_schema_change();

        let source_ctx = SourceContext::new_with_auto_schema_change_callback(
            self.actor_ctx.id,
            self.source_id,
            self.actor_ctx.fragment_id,
            self.source_name.clone(),
            self.source_desc.metrics.clone(),
            SourceCtrlOpts {
                chunk_size: limited_chunk_size(self.rate_limit),
                split_txn: self.rate_limit.is_some(), // when rate limiting, we may split txn
            },
            self.source_desc.source.config.clone(),
            schema_change_tx,
            on_cdc_auto_schema_change_failure,
        );

        (column_ids, source_ctx)
    }

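    /// Build the split reader eagerly and return its [`CreateSplitReaderResult`], caching
    /// the created stream in `reader_stream` so a later `into_retry_stream` can reuse it.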
    pub(crate) async fn fetch_latest_splits(
        &mut self,
        state: ConnectorState,
        seek_to_latest: bool,
    ) -> StreamExecutorResult<CreateSplitReaderResult> {
        let (column_ids, source_ctx) = self.prepare_source_stream_build();
        let source_ctx_ref = Arc::new(source_ctx);
        let (stream, res) = self
            .source_desc
            .source
            .build_stream(
                state.clone(),
                column_ids.clone(),
                source_ctx_ref.clone(),
                seek_to_latest,
            )
            .await
            .map_err(StreamExecutorError::connector_error)?;
        self.reader_stream = Some(stream);
        Ok(res)
    }

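    /// Turn the builder into a self-healing chunk stream: (re)build the source reader,
    /// track the latest offset of each split from the emitted rows, and retry with the
    /// tracked state after a 1s delay on connector errors. Errors during the initial
    /// build are propagated instead of retried.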
    #[try_stream(ok = StreamChunkWithState, error = StreamExecutorError)]
    pub(crate) async fn into_retry_stream(mut self, state: ConnectorState, is_initial_build: bool) {
        let (column_ids, source_ctx) = self.prepare_source_stream_build();
        let source_ctx_ref = Arc::new(source_ctx);

        let mut latest_splits_info = {
            if let Some(splits) = state.as_ref() {
                splits
                    .iter()
                    .map(|split| (split.id(), split.clone()))
                    .collect::<HashMap<_, _>>()
            } else {
                HashMap::new()
            }
        };

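        // Locate the split and offset columns used to track per-split progress.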
        let (Some(split_idx), Some(offset_idx)) =
            get_split_offset_col_idx(&self.source_desc.columns)
        else {
            unreachable!("Partition and offset columns must be set.");
        };

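        // Rebuild and consume the source stream until a pass finishes without error.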
        'build_consume_loop: loop {
            let bootstrap_state = if latest_splits_info.is_empty() {
                None
            } else {
                Some(latest_splits_info.values().cloned().collect_vec())
            };
            tracing::debug!(
                "build stream source reader with state: {:?}",
                bootstrap_state
            );
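            // Reuse the stream created by `fetch_latest_splits` if present; otherwise
            // build a new one from the tracked split states.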
            let build_stream_result = if let Some(exist_stream) = self.reader_stream.take() {
                Ok((exist_stream, CreateSplitReaderResult::default()))
            } else {
                self.source_desc
                    .source
                    .build_stream(
                        bootstrap_state,
                        column_ids.clone(),
                        source_ctx_ref.clone(),
                        // only `seek_to_latest` on the initial build
                        is_initial_build,
                    )
                    .await
            };
            if let Err(e) = build_stream_result {
                if is_initial_build {
                    return Err(StreamExecutorError::connector_error(e));
                } else {
                    tracing::error!(
                        error = %e.as_report(),
                        source_name = self.source_name,
                        source_id = self.source_id.table_id,
                        actor_id = self.actor_ctx.id,
                        "build stream source reader error, retry in 1s"
                    );
                    GLOBAL_ERROR_METRICS.user_source_error.report([
                        e.variant_name().to_owned(),
                        self.source_id.table_id.to_string(),
                        self.source_name.to_owned(),
                        self.actor_ctx.fragment_id.to_string(),
                    ]);
                    tokio::time::sleep(Duration::from_secs(1)).await;
                    continue 'build_consume_loop;
                }
            }

            let (stream, _) = build_stream_result.unwrap();
            let stream = apply_rate_limit(stream, self.rate_limit).boxed();
            let mut is_error = false;
            #[for_await]
            'consume: for msg in stream {
                match msg {
                    Ok(msg) => {
                        for (_, row) in msg.rows() {
                            let split = row.datum_at(split_idx).unwrap().into_utf8();
                            let offset = row.datum_at(offset_idx).unwrap().into_utf8();
                            latest_splits_info
                                .get_mut(&Arc::from(split.to_owned()))
                                .map(|split_impl| split_impl.update_in_place(offset.to_owned()));
                        }
                        yield (msg, latest_splits_info.clone());
                    }
                    Err(e) => {
                        tracing::error!(
                            error = %e.as_report(),
                            source_name = self.source_name,
                            source_id = self.source_id.table_id,
                            actor_id = self.actor_ctx.id,
                            "stream source reader error"
                        );
                        GLOBAL_ERROR_METRICS.user_source_error.report([
                            e.variant_name().to_owned(),
                            self.source_id.table_id.to_string(),
                            self.source_name.to_owned(),
                            self.actor_ctx.fragment_id.to_string(),
                        ]);
                        is_error = true;
                        break 'consume;
                    }
                }
            }
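            // Clean finish: mark batch splits as finished and emit an empty chunk
            // carrying the final split states before leaving the loop.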
            if !is_error {
                tracing::info!("stream source reader consume finished");
                latest_splits_info.values_mut().for_each(|split_impl| {
                    if let Some(mut batch_split) = split_impl.clone().into_batch_split() {
                        batch_split.finish();
                        *split_impl = batch_split.into();
                    }
                });
                yield (
                    StreamChunk::empty(
                        self.source_desc
                            .columns
                            .iter()
                            .map(|c| c.data_type.clone())
                            .collect_vec()
                            .as_slice(),
                    ),
                    latest_splits_info.clone(),
                );
                break 'build_consume_loop;
            }
            tracing::info!("stream source reader error, retry in 1s");
            tokio::time::sleep(Duration::from_secs(1)).await;
        }
    }
}