risingwave_stream/executor/backfill/cdc/upstream_table/snapshot.rs

use std::future::Future;

use futures::{Stream, pin_mut};
use futures_async_stream::try_stream;
use itertools::Itertools;
use risingwave_common::array::StreamChunk;
use risingwave_common::catalog::{ColumnDesc, Field};
use risingwave_common::row::OwnedRow;
use risingwave_common::types::{Scalar, ScalarImpl, Timestamptz};
use risingwave_common::util::chunk_coalesce::DataChunkBuilder;
use risingwave_common_rate_limit::RateLimiter;
use risingwave_connector::source::cdc::external::{
    CdcOffset, ExternalTableReader, ExternalTableReaderImpl, SchemaTableName,
};
use risingwave_pb::plan_common::additional_column::ColumnType;

use super::external::ExternalStorageTable;
use crate::common::rate_limit::limited_chunk_size;
use crate::executor::backfill::utils::{get_new_pos, iter_chunks};
use crate::executor::{StreamExecutorError, StreamExecutorResult};

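/// Read access to an upstream external table during CDC backfill: paginated
/// full-table snapshot reads, per-split snapshot reads, and the current CDC
/// offset of the upstream database.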
pub trait UpstreamTableRead {
    fn snapshot_read_full_table(
        &self,
        args: SnapshotReadArgs,
        batch_size: u32,
    ) -> impl Stream<Item = StreamExecutorResult<Option<StreamChunk>>> + Send + '_;

    fn current_cdc_offset(
        &self,
    ) -> impl Future<Output = StreamExecutorResult<Option<CdcOffset>>> + Send + '_;

    async fn disconnect(self) -> StreamExecutorResult<()>;

    fn snapshot_read_table_split(
        &self,
        args: SplitSnapshotReadArgs,
    ) -> impl Stream<Item = StreamExecutorResult<Option<StreamChunk>>> + Send + '_;
}

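/// Arguments for a full-table snapshot read. `current_pos` is the primary-key
/// position to resume from (`None` reads from the start of the table), and
/// `rate_limit_rps` caps the number of rows emitted per second.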
#[derive(Debug, Clone)]
pub struct SnapshotReadArgs {
    pub current_pos: Option<OwnedRow>,
    pub rate_limit_rps: Option<u32>,
    pub pk_indices: Vec<usize>,
    pub additional_columns: Vec<ColumnDesc>,
    pub schema_table_name: SchemaTableName,
    pub database_name: String,
}

impl SnapshotReadArgs {
    pub fn new(
        current_pos: Option<OwnedRow>,
        rate_limit_rps: Option<u32>,
        pk_indices: Vec<usize>,
        additional_columns: Vec<ColumnDesc>,
        schema_table_name: SchemaTableName,
        database_name: String,
    ) -> Self {
        Self {
            current_pos,
            rate_limit_rps,
            pk_indices,
            additional_columns,
            schema_table_name,
            database_name,
        }
    }
}

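/// Arguments for reading a single snapshot split, bounded by
/// `[left_bound_inclusive, right_bound_exclusive)` on `split_columns`.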
#[derive(Debug, Clone)]
pub struct SplitSnapshotReadArgs {
    pub left_bound_inclusive: OwnedRow,
    pub right_bound_exclusive: OwnedRow,
    pub split_columns: Vec<Field>,
    pub rate_limit_rps: Option<u32>,
    pub additional_columns: Vec<ColumnDesc>,
    pub schema_table_name: SchemaTableName,
    pub database_name: String,
}

impl SplitSnapshotReadArgs {
    pub fn new(
        left_bound_inclusive: OwnedRow,
        right_bound_exclusive: OwnedRow,
        split_columns: Vec<Field>,
        rate_limit_rps: Option<u32>,
        additional_columns: Vec<ColumnDesc>,
        schema_table_name: SchemaTableName,
        database_name: String,
    ) -> Self {
        Self {
            left_bound_inclusive,
            right_bound_exclusive,
            split_columns,
            rate_limit_rps,
            additional_columns,
            schema_table_name,
            database_name,
        }
    }
}

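/// Pairs an upstream table description with the external reader used to
/// access it.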
pub struct UpstreamTableReader<T> {
    table: T,
    pub(crate) reader: ExternalTableReaderImpl,
}

impl<T> UpstreamTableReader<T> {
    pub fn new(table: T, reader: ExternalTableReaderImpl) -> Self {
        Self { table, reader }
    }
}

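/// Appends the requested additional columns (commit timestamp, database name,
/// schema name, table name) to a snapshot chunk.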
fn with_additional_columns(
    snapshot_chunk: StreamChunk,
    additional_columns: &[ColumnDesc],
    schema_table_name: SchemaTableName,
    database_name: String,
) -> StreamChunk {
    let (ops, mut columns, visibility) = snapshot_chunk.into_inner();
    for desc in additional_columns {
        let mut builder = desc.data_type.create_array_builder(visibility.len());
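        // Fill each additional column with a constant value for every row in
        // the chunk. Snapshot rows carry no per-row commit timestamp, so the
        // timestamp column gets the default (zero) timestamptz.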
        match *desc.additional_column.column_type.as_ref().unwrap() {
            ColumnType::Timestamp(_) => builder.append_n(
                visibility.len(),
                Some(Timestamptz::default().to_scalar_value()),
            ),
            ColumnType::DatabaseName(_) => {
                builder.append_n(
                    visibility.len(),
                    Some(ScalarImpl::from(database_name.clone())),
                );
            }
            ColumnType::SchemaName(_) => {
                builder.append_n(
                    visibility.len(),
                    Some(ScalarImpl::from(schema_table_name.schema_name.clone())),
                );
            }
            ColumnType::TableName(_) => {
                builder.append_n(
                    visibility.len(),
                    Some(ScalarImpl::from(schema_table_name.table_name.clone())),
                );
            }
            _ => {
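                // Other additional column types are not populated during
                // snapshot read; fill them with NULL.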
                builder.append_n_null(visibility.len());
            }
        }
        columns.push(builder.finish().into());
    }
    StreamChunk::with_visibility(ops, columns, visibility)
}

impl UpstreamTableRead for UpstreamTableReader<ExternalStorageTable> {
    #[try_stream(ok = Option<StreamChunk>, error = StreamExecutorError)]
    async fn snapshot_read_full_table(&self, args: SnapshotReadArgs, batch_size: u32) {
        let primary_keys = self
            .table
            .pk_indices()
            .iter()
            .map(|idx| {
                let f = &self.table.schema().fields[*idx];
                f.name.clone()
            })
            .collect_vec();

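        // A rate limit of 0 pauses the snapshot read entirely: park on a
        // pending future and never yield from this stream.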
        if args.rate_limit_rps == Some(0) {
            let future = futures::future::pending::<()>();
            future.await;
            unreachable!();
        }

        let rate_limiter = RateLimiter::new(
            args.rate_limit_rps
                .inspect(|limit| tracing::info!(rate_limit = limit, "rate limit applied"))
                .into(),
        );

        let mut read_args = args;
        let schema_table_name = read_args.schema_table_name.clone();
        let database_name = read_args.database_name.clone();
        loop {
            tracing::debug!(
                "snapshot_read primary keys: {:?}, current_pos: {:?}",
                primary_keys,
                read_args.current_pos
            );

            let mut read_count: usize = 0;
            let row_stream = self.reader.snapshot_read(
                self.table.schema_table_name(),
                read_args.current_pos.clone(),
                primary_keys.clone(),
                batch_size,
            );

            pin_mut!(row_stream);
            let mut builder = DataChunkBuilder::new(
                self.table.schema().data_types(),
                limited_chunk_size(read_args.rate_limit_rps),
            );
            let chunk_stream = iter_chunks(row_stream, &mut builder);
            let mut current_pk_pos = read_args.current_pos.clone().unwrap_or_default();

            #[for_await]
            for chunk in chunk_stream {
                let chunk = chunk?;
                let chunk_size = chunk.capacity();
                read_count += chunk.cardinality();
                current_pk_pos = get_new_pos(&chunk, &read_args.pk_indices);

                if read_args.rate_limit_rps.is_none() || chunk_size == 0 {
                    yield Some(with_additional_columns(
                        chunk,
                        &read_args.additional_columns,
                        schema_table_name.clone(),
                        database_name.clone(),
                    ));
                    continue;
                } else {
                    let limit = read_args.rate_limit_rps.unwrap() as usize;

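                    // Chunks are built with `limited_chunk_size`, so a single
                    // chunk never exceeds the per-second budget.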
                    assert!(chunk_size <= limit);

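                    // Wait for the rate limiter to grant capacity for this
                    // chunk's rows before emitting it.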
                    rate_limiter.wait(chunk_size as _).await;
                    yield Some(with_additional_columns(
                        chunk,
                        &read_args.additional_columns,
                        schema_table_name.clone(),
                        database_name.clone(),
                    ));
                }
            }

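            // A batch smaller than `batch_size` means the upstream table is
            // exhausted. `None` marks the end of the snapshot; the caller
            // stops polling after receiving it, so the statement following
            // the yield is never reached.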
            if read_count < batch_size as _ {
                tracing::debug!("finished loading of full table snapshot");
                yield None;
                unreachable!()
            } else {
                read_args.current_pos = Some(current_pk_pos);
            }
        }
    }

    #[try_stream(ok = Option<StreamChunk>, error = StreamExecutorError)]
    async fn snapshot_read_table_split(&self, args: SplitSnapshotReadArgs) {
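        // As in the full-table read, a rate limit of 0 pauses this split's
        // snapshot read indefinitely.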
        if args.rate_limit_rps == Some(0) {
            let future = futures::future::pending::<()>();
            future.await;
            unreachable!();
        }

        let rate_limiter = RateLimiter::new(
            args.rate_limit_rps
                .inspect(|limit| tracing::info!(rate_limit = limit, "rate limit applied"))
                .into(),
        );

        let read_args = args;
        let schema_table_name = read_args.schema_table_name.clone();
        let database_name = read_args.database_name.clone();
        let row_stream = self.reader.split_snapshot_read(
            self.table.schema_table_name(),
            read_args.left_bound_inclusive.clone(),
            read_args.right_bound_exclusive.clone(),
            read_args.split_columns.clone(),
        );

        pin_mut!(row_stream);
        let mut builder = DataChunkBuilder::new(
            self.table.schema().data_types(),
            limited_chunk_size(read_args.rate_limit_rps),
        );
        let chunk_stream = iter_chunks(row_stream, &mut builder);

        #[for_await]
        for chunk in chunk_stream {
            let chunk = chunk?;
            let chunk_size = chunk.capacity();

            if read_args.rate_limit_rps.is_none() || chunk_size == 0 {
                yield Some(with_additional_columns(
                    chunk,
                    &read_args.additional_columns,
                    schema_table_name.clone(),
                    database_name.clone(),
                ));
                continue;
            } else {
                let limit = read_args.rate_limit_rps.unwrap() as usize;

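                // Same rate limiting as the full-table read: chunk size is
                // capped at the limit, and the limiter is awaited before each
                // chunk is yielded.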
                assert!(chunk_size <= limit);

                rate_limiter.wait(chunk_size as _).await;
                yield Some(with_additional_columns(
                    chunk,
                    &read_args.additional_columns,
                    schema_table_name.clone(),
                    database_name.clone(),
                ));
            }
        }
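        // The split is bounded, so a single pass over the range suffices;
        // `None` marks its completion.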
        yield None;
    }

    async fn current_cdc_offset(&self) -> StreamExecutorResult<Option<CdcOffset>> {
        let binlog = self.reader.current_cdc_offset();
        let binlog = binlog.await?;
        Ok(Some(binlog))
    }

    async fn disconnect(self) -> StreamExecutorResult<()> {
        self.reader.disconnect().await?;
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use std::collections::HashMap;

    use futures::pin_mut;
    use futures_async_stream::for_await;
    use maplit::{convert_args, hashmap};
    use risingwave_common::catalog::{ColumnDesc, ColumnId, Field, Schema};
    use risingwave_common::row::OwnedRow;
    use risingwave_common::types::{DataType, ScalarImpl};
    use risingwave_common::util::chunk_coalesce::DataChunkBuilder;
    use risingwave_connector::source::cdc::external::mysql::MySqlExternalTableReader;
    use risingwave_connector::source::cdc::external::{
        ExternalTableConfig, ExternalTableReader, SchemaTableName,
    };

    use crate::executor::backfill::utils::{get_new_pos, iter_chunks};

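    // Manual integration test: requires a MySQL instance at localhost:8306
    // with a populated `mydb.orders_rw` table. Run with `cargo test -- --ignored`.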
    #[ignore]
    #[tokio::test]
    async fn test_mysql_table_reader() {
        let columns = vec![
            ColumnDesc::named("o_orderkey", ColumnId::new(1), DataType::Int64),
            ColumnDesc::named("o_custkey", ColumnId::new(2), DataType::Int64),
            ColumnDesc::named("o_orderstatus", ColumnId::new(3), DataType::Varchar),
        ];
        let rw_schema = Schema {
            fields: columns.iter().map(Field::from).collect(),
        };
        let props: HashMap<String, String> = convert_args!(hashmap!(
            "hostname" => "localhost",
            "port" => "8306",
            "username" => "root",
            "password" => "123456",
            "database.name" => "mydb",
            "table.name" => "orders_rw"));

        let config =
            serde_json::from_value::<ExternalTableConfig>(serde_json::to_value(props).unwrap())
                .unwrap();
        let reader = MySqlExternalTableReader::new(config, rw_schema.clone()).unwrap();

        let mut cnt: usize = 0;
        let mut start_pk = Some(OwnedRow::new(vec![Some(ScalarImpl::Int64(0))]));
        loop {
            let row_stream = reader.snapshot_read(
                SchemaTableName {
                    schema_name: "mydb".to_owned(),
                    table_name: "orders_rw".to_owned(),
                },
                start_pk.clone(),
                vec!["o_orderkey".to_owned()],
                1000,
            );
            let mut builder = DataChunkBuilder::new(rw_schema.clone().data_types(), 256);
            let chunk_stream = iter_chunks(row_stream, &mut builder);
            let pk_indices = vec![0];
            pin_mut!(chunk_stream);
            #[for_await]
            for chunk in chunk_stream {
                let chunk = chunk.expect("data");
                start_pk = Some(get_new_pos(&chunk, &pk_indices));
                cnt += chunk.capacity();
                println!("cnt: {}", cnt);
            }
            if cnt >= 1499900 {
                println!("bye!");
                break;
            }
        }
    }
}