use std::borrow::Cow;
use std::cmp::{max, min};
use std::collections::HashMap;
use std::ops::Bound;

use await_tree::InstrumentAwait;
use futures::Stream;
use futures::future::try_join_all;
use futures_async_stream::try_stream;
use risingwave_common::array::stream_record::Record;
use risingwave_common::array::{Op, StreamChunk};
use risingwave_common::bail;
use risingwave_common::bitmap::BitmapBuilder;
use risingwave_common::hash::{VirtualNode, VnodeBitmapExt};
use risingwave_common::row::{OwnedRow, Row, RowExt};
use risingwave_common::types::{DataType, Datum};
use risingwave_common::util::chunk_coalesce::DataChunkBuilder;
use risingwave_common::util::epoch::EpochPair;
use risingwave_common::util::iter_util::ZipEqDebug;
use risingwave_common::util::sort_util::{OrderType, cmp_datum_iter};
use risingwave_common::util::value_encoding::BasicSerde;
use risingwave_common_rate_limit::RateLimit;
use risingwave_connector::error::ConnectorError;
use risingwave_connector::source::cdc::external::{CdcOffset, CdcOffsetParseFunc};
use risingwave_storage::StateStore;
use risingwave_storage::row_serde::value_serde::ValueRowSerde;
use risingwave_storage::table::collect_data_chunk_with_builder;

use crate::common::table::state_table::{ReplicatedStateTable, StateTableInner};
use crate::executor::{Message, StreamExecutorError, StreamExecutorResult, Watermark};

/// Number of metadata columns in the backfill state row:
/// `vnode`, `backfill_finished` and `row_count` each occupy one column.
pub const METADATA_STATE_LEN: usize = 3;

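/// In-memory backfill progress for all vnodes owned by this executor.
/// For each vnode we keep both the last committed state and the current
/// (possibly not yet persisted) state.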
#[derive(Clone, Debug)]
pub struct BackfillState {
    inner: HashMap<VirtualNode, BackfillStatePerVnode>,
}

impl BackfillState {
    pub(crate) fn has_progress(&self) -> bool {
        self.inner.values().any(|p| {
            matches!(
                p.current_state(),
                &BackfillProgressPerVnode::InProgress { .. }
            )
        })
    }

    pub(crate) fn get_current_state(
        &mut self,
        vnode: &VirtualNode,
    ) -> &mut BackfillProgressPerVnode {
        &mut self.inner.get_mut(vnode).unwrap().current_state
    }

    pub(crate) fn get_progress(
        &self,
        vnode: &VirtualNode,
    ) -> StreamExecutorResult<&BackfillProgressPerVnode> {
        match self.inner.get(vnode) {
            Some(p) => Ok(p.current_state()),
            None => bail!(
                "Backfill progress for vnode {:#?} not found, backfill_state not initialized properly",
                vnode,
            ),
        }
    }

    pub(crate) fn update_progress(
        &mut self,
        vnode: VirtualNode,
        new_pos: OwnedRow,
        snapshot_row_count_delta: u64,
    ) -> StreamExecutorResult<()> {
        let state = self.get_current_state(&vnode);
        match state {
            BackfillProgressPerVnode::NotStarted => {
                *state = BackfillProgressPerVnode::InProgress {
                    current_pos: new_pos,
                    snapshot_row_count: snapshot_row_count_delta,
                };
            }
            BackfillProgressPerVnode::InProgress {
                snapshot_row_count, ..
            } => {
                *state = BackfillProgressPerVnode::InProgress {
                    current_pos: new_pos,
                    snapshot_row_count: *snapshot_row_count + snapshot_row_count_delta,
                };
            }
            BackfillProgressPerVnode::Completed { .. } => unreachable!(),
        }
        Ok(())
    }

    pub(crate) fn finish_progress(&mut self, vnode: VirtualNode, pos_len: usize) {
        let finished_placeholder_position = construct_initial_finished_state(pos_len);
        let current_state = self.get_current_state(&vnode);
        let (new_pos, snapshot_row_count) = match current_state {
            BackfillProgressPerVnode::NotStarted => (finished_placeholder_position, 0),
            BackfillProgressPerVnode::InProgress {
                current_pos,
                snapshot_row_count,
            } => (current_pos.clone(), *snapshot_row_count),
            BackfillProgressPerVnode::Completed { .. } => {
                return;
            }
        };
        *current_state = BackfillProgressPerVnode::Completed {
            current_pos: new_pos,
            snapshot_row_count,
        };
    }

    /// Encodes the current and previously committed progress for `vnode` as state rows:
    /// `| vnode | pk ... | backfill_finished | row_count |`.
    fn get_commit_state(&self, vnode: &VirtualNode) -> Option<(Option<Vec<Datum>>, Vec<Datum>)> {
        let new_state = self.inner.get(vnode).unwrap().current_state().clone();
        let new_encoded_state = match new_state {
            BackfillProgressPerVnode::NotStarted => unreachable!(),
            BackfillProgressPerVnode::InProgress {
                current_pos,
                snapshot_row_count,
            } => {
                let mut encoded_state = vec![None; current_pos.len() + METADATA_STATE_LEN];
                encoded_state[0] = Some(vnode.to_scalar().into());
                encoded_state[1..current_pos.len() + 1].clone_from_slice(current_pos.as_inner());
                encoded_state[current_pos.len() + 1] = Some(false.into());
                encoded_state[current_pos.len() + 2] = Some((snapshot_row_count as i64).into());
                encoded_state
            }
            BackfillProgressPerVnode::Completed {
                current_pos,
                snapshot_row_count,
            } => {
                let mut encoded_state = vec![None; current_pos.len() + METADATA_STATE_LEN];
                encoded_state[0] = Some(vnode.to_scalar().into());
                encoded_state[1..current_pos.len() + 1].clone_from_slice(current_pos.as_inner());
                encoded_state[current_pos.len() + 1] = Some(true.into());
                encoded_state[current_pos.len() + 2] = Some((snapshot_row_count as i64).into());
                encoded_state
            }
        };
        let old_state = self.inner.get(vnode).unwrap().committed_state().clone();
        let old_encoded_state = match old_state {
            BackfillProgressPerVnode::NotStarted => None,
            BackfillProgressPerVnode::InProgress {
                current_pos,
                snapshot_row_count,
            } => {
                let committed_pos = current_pos;
                let mut encoded_state = vec![None; committed_pos.len() + METADATA_STATE_LEN];
                encoded_state[0] = Some(vnode.to_scalar().into());
                encoded_state[1..committed_pos.len() + 1]
                    .clone_from_slice(committed_pos.as_inner());
                encoded_state[committed_pos.len() + 1] = Some(false.into());
                encoded_state[committed_pos.len() + 2] = Some((snapshot_row_count as i64).into());
                Some(encoded_state)
            }
            BackfillProgressPerVnode::Completed {
                current_pos,
                snapshot_row_count,
            } => {
                let committed_pos = current_pos;
                let mut encoded_state = vec![None; committed_pos.len() + METADATA_STATE_LEN];
                encoded_state[0] = Some(vnode.to_scalar().into());
                encoded_state[1..committed_pos.len() + 1]
                    .clone_from_slice(committed_pos.as_inner());
                encoded_state[committed_pos.len() + 1] = Some(true.into());
                encoded_state[committed_pos.len() + 2] = Some((snapshot_row_count as i64).into());
                Some(encoded_state)
            }
        };
        Some((old_encoded_state, new_encoded_state))
    }

    /// Returns whether the progress for `vnode` has changed since it was last committed.
    fn need_commit(&self, vnode: &VirtualNode) -> bool {
        let state = self.inner.get(vnode).unwrap();
        match state.current_state() {
            s @ BackfillProgressPerVnode::InProgress { .. }
            | s @ BackfillProgressPerVnode::Completed { .. } => s != state.committed_state(),
            BackfillProgressPerVnode::NotStarted => false,
        }
    }

    fn mark_committed(&mut self, vnode: VirtualNode) {
        let BackfillStatePerVnode {
            committed_state,
            current_state,
        } = self.inner.get_mut(&vnode).unwrap();

        assert!(matches!(
            current_state,
            BackfillProgressPerVnode::InProgress { .. }
                | BackfillProgressPerVnode::Completed { .. }
        ));
        *committed_state = current_state.clone();
    }

    pub(crate) fn get_snapshot_row_count(&self) -> u64 {
        self.inner
            .values()
            .map(|p| p.get_snapshot_row_count())
            .sum()
    }
}

#[derive(Clone, Debug, PartialEq, Eq)]
pub struct BackfillStatePerVnode {
    committed_state: BackfillProgressPerVnode,
    current_state: BackfillProgressPerVnode,
}

impl BackfillStatePerVnode {
    pub(crate) fn new(
        committed_state: BackfillProgressPerVnode,
        current_state: BackfillProgressPerVnode,
    ) -> Self {
        Self {
            committed_state,
            current_state,
        }
    }

    pub(crate) fn committed_state(&self) -> &BackfillProgressPerVnode {
        &self.committed_state
    }

    pub(crate) fn current_state(&self) -> &BackfillProgressPerVnode {
        &self.current_state
    }

    pub(crate) fn get_snapshot_row_count(&self) -> u64 {
        self.current_state().get_snapshot_row_count()
    }
}

impl From<Vec<(VirtualNode, BackfillStatePerVnode)>> for BackfillState {
    fn from(v: Vec<(VirtualNode, BackfillStatePerVnode)>) -> Self {
        Self {
            inner: v.into_iter().collect(),
        }
    }
}

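/// Backfill progress of a single vnode.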
#[derive(Clone, Eq, PartialEq, Debug)]
pub enum BackfillProgressPerVnode {
    /// Backfill has not started for this vnode.
    NotStarted,
    /// Backfill is in progress for this vnode.
    InProgress {
        /// The last snapshot position that has been processed.
        current_pos: OwnedRow,
        /// Number of snapshot rows read so far for this vnode.
        snapshot_row_count: u64,
    },
    /// Backfill has finished for this vnode.
    Completed {
        /// The position at which backfill finished.
        current_pos: OwnedRow,
        /// Total number of snapshot rows read for this vnode.
        snapshot_row_count: u64,
    },
}

impl BackfillProgressPerVnode {
    fn get_snapshot_row_count(&self) -> u64 {
        match self {
            BackfillProgressPerVnode::NotStarted => 0,
            BackfillProgressPerVnode::InProgress {
                snapshot_row_count, ..
            }
            | BackfillProgressPerVnode::Completed {
                snapshot_row_count, ..
            } => *snapshot_row_count,
        }
    }
}

/// Marks rows of `chunk` as visible only if their pk is `<= current_pos`,
/// so rows not yet reached by the snapshot scan are filtered out.
pub(crate) fn mark_chunk(
    chunk: StreamChunk,
    current_pos: &OwnedRow,
    pk_in_output_indices: &[usize],
    pk_order: &[OrderType],
) -> StreamChunk {
    let chunk = chunk.compact_vis();
    mark_chunk_inner(chunk, current_pos, pk_in_output_indices, pk_order)
}

/// Like [`mark_chunk`], but for CDC backfill: a row is kept only if its event offset is
/// not below `last_cdc_offset` (when one is given) and its pk is `<= current_pos`.
pub(crate) fn mark_cdc_chunk(
    offset_parse_func: &CdcOffsetParseFunc,
    chunk: StreamChunk,
    current_pos: &OwnedRow,
    pk_in_output_indices: &[usize],
    pk_order: &[OrderType],
    last_cdc_offset: Option<CdcOffset>,
) -> StreamExecutorResult<StreamChunk> {
    let chunk = chunk.compact_vis();
    mark_cdc_chunk_inner(
        offset_parse_func,
        chunk,
        current_pos,
        last_cdc_offset,
        pk_in_output_indices,
        pk_order,
    )
}

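/// Per-vnode variant of [`mark_chunk`]: for each row, look up the backfill progress of its
/// vnode and keep the row only if that vnode has already backfilled past the row's pk
/// (always for `Completed`, never for `NotStarted`, pk `<= current_pos` for `InProgress`).
/// Visibility is applied via the chunk's visibility bitmap, and update pairs whose halves
/// end up with different visibility are rewritten by [`normalize_unmatched_updates`].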
pub(crate) fn mark_chunk_ref_by_vnode<S: StateStore, SD: ValueRowSerde>(
    chunk: &StreamChunk,
    backfill_state: &BackfillState,
    pk_in_output_indices: &[usize],
    upstream_table: &ReplicatedStateTable<S, SD>,
    pk_order: &[OrderType],
) -> StreamExecutorResult<StreamChunk> {
    let chunk = chunk.clone();
    let (data, ops) = chunk.into_parts();
    let mut new_visibility = BitmapBuilder::with_capacity(ops.len());

    let mut new_ops: Cow<'_, [Op]> = Cow::Borrowed(ops.as_ref());
    let mut unmatched_update_delete = false;
    let mut visible_update_delete = false;
    for (i, (op, row)) in ops.iter().zip_eq_debug(data.rows()).enumerate() {
        let pk = row.project(pk_in_output_indices);
        let vnode = upstream_table.compute_vnode_by_pk(pk);
        let visible = match backfill_state.get_progress(&vnode)? {
            // The vnode has finished backfill, forward the row.
            BackfillProgressPerVnode::Completed { .. } => true,
            // Backfill hasn't started for the vnode, the row will be covered by the snapshot.
            BackfillProgressPerVnode::NotStarted => false,
            // In progress: only forward rows already covered by the snapshot scan.
            BackfillProgressPerVnode::InProgress { current_pos, .. } => {
                cmp_datum_iter(pk.iter(), current_pos.iter(), pk_order.iter().copied()).is_le()
            }
        };
        if !visible {
            tracing::trace!(
                source = "upstream",
                state = "process_barrier",
                action = "mark_chunk",
                ?vnode,
                ?op,
                ?pk,
                ?row,
                "update_filtered",
            );
        }
        new_visibility.append(visible);

        normalize_unmatched_updates(
            &mut new_ops,
            &mut unmatched_update_delete,
            &mut visible_update_delete,
            visible,
            i,
            op,
        );
    }
    let (columns, _) = data.into_parts();
    let chunk = StreamChunk::with_visibility(new_ops, columns, new_visibility.finish());
    Ok(chunk)
}

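/// Applies the `pk <= current_pos` filter to `chunk` by rewriting its visibility bitmap,
/// normalizing update pairs whose two halves end up with different visibility.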
fn mark_chunk_inner(
    chunk: StreamChunk,
    current_pos: &OwnedRow,
    pk_in_output_indices: &[usize],
    pk_order: &[OrderType],
) -> StreamChunk {
    let (data, ops) = chunk.into_parts();
    let mut new_visibility = BitmapBuilder::with_capacity(ops.len());
    let mut new_ops: Cow<'_, [Op]> = Cow::Borrowed(ops.as_ref());
    let mut unmatched_update_delete = false;
    let mut visible_update_delete = false;
    for (i, (op, row)) in ops.iter().zip_eq_debug(data.rows()).enumerate() {
        let lhs = row.project(pk_in_output_indices);
        let rhs = current_pos;
        let visible = cmp_datum_iter(lhs.iter(), rhs.iter(), pk_order.iter().copied()).is_le();
        new_visibility.append(visible);

        normalize_unmatched_updates(
            &mut new_ops,
            &mut unmatched_update_delete,
            &mut visible_update_delete,
            visible,
            i,
            op,
        );
    }
    let (columns, _) = data.into_parts();
    StreamChunk::with_visibility(new_ops, columns, new_visibility.finish())
}

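/// An `UpdateDelete` and its following `UpdateInsert` always share the same stream key,
/// but marking may give the two halves different visibility (the old row falls before
/// `current_pos` while the new row falls after it, or vice versa). Such half-visible
/// update pairs are rewritten into a plain `Delete` or `Insert` so downstream still sees
/// a consistent chunk. The ops slice is only cloned (copy-on-write) when a rewrite is needed.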
fn normalize_unmatched_updates(
    normalized_ops: &mut Cow<'_, [Op]>,
    unmatched_update_delete: &mut bool,
    visible_update_delete: &mut bool,
    current_visibility: bool,
    current_op_index: usize,
    current_op: &Op,
) {
    if *unmatched_update_delete {
        assert_eq!(*current_op, Op::UpdateInsert);
        let visible_update_insert = current_visibility;
        match (visible_update_delete, visible_update_insert) {
            (true, false) => {
                // Only the delete half is visible: downgrade it to a plain `Delete`.
                let ops = normalized_ops.to_mut();
                ops[current_op_index - 1] = Op::Delete;
            }
            (false, true) => {
                // Only the insert half is visible: downgrade it to a plain `Insert`.
                let ops = normalized_ops.to_mut();
                ops[current_op_index] = Op::Insert;
            }
            (true, true) | (false, false) => {}
        }
        *unmatched_update_delete = false;
    } else {
        match current_op {
            Op::UpdateDelete => {
                *unmatched_update_delete = true;
                *visible_update_delete = current_visibility;
            }
            Op::UpdateInsert => {
                unreachable!("UpdateInsert should not be present without UpdateDelete")
            }
            _ => {}
        }
    }
}

fn mark_cdc_chunk_inner(
    offset_parse_func: &CdcOffsetParseFunc,
    chunk: StreamChunk,
    current_pos: &OwnedRow,
    last_cdc_offset: Option<CdcOffset>,
    pk_in_output_indices: &[usize],
    pk_order: &[OrderType],
) -> StreamExecutorResult<StreamChunk> {
    let (data, ops) = chunk.into_parts();
    let mut new_visibility = BitmapBuilder::with_capacity(ops.len());

    // The CDC offset is stored in the last column of the chunk.
    let offset_col_idx = data.dimension() - 1;
    for v in data.rows().map(|row| {
        let offset_datum = row.datum_at(offset_col_idx).unwrap();
        let event_offset = (*offset_parse_func)(offset_datum.into_utf8())?;
        let visible = {
            // Only events at or beyond `last_cdc_offset` are candidates; older events are dropped.
            let in_binlog_range = if let Some(binlog_low) = &last_cdc_offset {
                binlog_low <= &event_offset
            } else {
                true
            };

            if in_binlog_range {
                let lhs = row.project(pk_in_output_indices);
                let rhs = current_pos;
                cmp_datum_iter(lhs.iter(), rhs.iter(), pk_order.iter().copied()).is_le()
            } else {
                false
            }
        };
        Ok::<_, ConnectorError>(visible)
    }) {
        new_visibility.append(v?);
    }

    let (columns, _) = data.into_parts();
    Ok(StreamChunk::with_visibility(
        ops,
        columns,
        new_visibility.finish(),
    ))
}

/// Projects the chunk's columns according to `output_indices`, keeping ops and visibility.
pub(crate) fn mapping_chunk(chunk: StreamChunk, output_indices: &[usize]) -> StreamChunk {
    let (ops, columns, visibility) = chunk.into_inner();
    let mapped_columns = output_indices.iter().map(|&i| columns[i].clone()).collect();
    StreamChunk::with_visibility(ops, mapped_columns, visibility)
}

fn mapping_watermark(watermark: Watermark, upstream_indices: &[usize]) -> Option<Watermark> {
    watermark.transform_with_indices(upstream_indices)
}

/// Maps a message from the upstream schema to the output schema.
/// Watermarks on columns that are not in `upstream_indices` are dropped.
pub(crate) fn mapping_message(msg: Message, upstream_indices: &[usize]) -> Option<Message> {
    match msg {
        Message::Barrier(_) => Some(msg),
        Message::Watermark(watermark) => {
            mapping_watermark(watermark, upstream_indices).map(Message::Watermark)
        }
        Message::Chunk(chunk) => Some(Message::Chunk(mapping_chunk(chunk, upstream_indices))),
    }
}

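/// Loads the persisted backfill progress for every vnode owned by this executor from the
/// state table. Vnodes without a state row are reported as `NotStarted`.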
pub(crate) async fn get_progress_per_vnode<S: StateStore, const IS_REPLICATED: bool>(
    state_table: &StateTableInner<S, BasicSerde, IS_REPLICATED>,
) -> StreamExecutorResult<Vec<(VirtualNode, BackfillStatePerVnode)>> {
    debug_assert!(!state_table.vnodes().is_empty());
    let vnodes = state_table.vnodes().iter_vnodes();
    let mut result = Vec::with_capacity(state_table.vnodes().len());
    let vnode_keys = vnodes.map(|vnode| {
        let datum: [Datum; 1] = [Some(vnode.to_scalar().into())];
        datum
    });
    let tasks = vnode_keys.map(|vnode_key| state_table.get_row(vnode_key));
    let state_for_vnodes = try_join_all(tasks).await?;
    for (vnode, state_for_vnode) in state_table
        .vnodes()
        .iter_vnodes()
        .zip_eq_debug(state_for_vnodes)
    {
        let backfill_progress = match state_for_vnode {
            Some(row) => {
                // The persisted layout is: | vnode | pk ... | backfill_finished | row_count |,
                // and the row returned here excludes the `vnode` key column (see the debug
                // assertions in `persist_state_per_vnode`).
                let snapshot_row_count = row.as_inner().get(row.len() - 1).unwrap();
                let snapshot_row_count = (*snapshot_row_count.as_ref().unwrap().as_int64()) as u64;

                let vnode_is_finished = row.as_inner().get(row.len() - 2).unwrap();
                let vnode_is_finished = vnode_is_finished.as_ref().unwrap();

                // Everything before the two metadata columns is the backfill position.
                let current_pos = row.as_inner().get(..row.len() - 2).unwrap();
                let current_pos = current_pos.into_owned_row();

                if *vnode_is_finished.as_bool() {
                    BackfillStatePerVnode::new(
                        BackfillProgressPerVnode::Completed {
                            current_pos: current_pos.clone(),
                            snapshot_row_count,
                        },
                        BackfillProgressPerVnode::Completed {
                            current_pos,
                            snapshot_row_count,
                        },
                    )
                } else {
                    BackfillStatePerVnode::new(
                        BackfillProgressPerVnode::InProgress {
                            current_pos: current_pos.clone(),
                            snapshot_row_count,
                        },
                        BackfillProgressPerVnode::InProgress {
                            current_pos,
                            snapshot_row_count,
                        },
                    )
                }
            }
            // No state row for this vnode: backfill has not started yet.
            None => BackfillStatePerVnode::new(
                BackfillProgressPerVnode::NotStarted,
                BackfillProgressPerVnode::NotStarted,
            ),
        };
        result.push((vnode, backfill_progress));
    }
    assert_eq!(result.len(), state_table.vnodes().count_ones());
    Ok(result)
}

/// Flushes the single-row backfill state to the state table: if a committed `old_state`
/// exists and the state changed, write an update, otherwise write an insert, replicated
/// once per owned vnode (the vnode is the key column), then commit the epoch.
pub(crate) async fn flush_data<S: StateStore, const IS_REPLICATED: bool>(
    table: &mut StateTableInner<S, BasicSerde, IS_REPLICATED>,
    epoch: EpochPair,
    old_state: &mut Option<Vec<Datum>>,
    current_partial_state: &mut [Datum],
) -> StreamExecutorResult<()> {
    let vnodes = table.vnodes().clone();
    if let Some(old_state) = old_state {
        // Only write if the state (ignoring the vnode column) actually changed.
        if old_state[1..] != current_partial_state[1..] {
            vnodes.iter_vnodes_scalar().for_each(|vnode| {
                let datum = Some(vnode.into());
                current_partial_state[0].clone_from(&datum);
                old_state[0] = datum;
                table.write_record(Record::Update {
                    old_row: &old_state[..],
                    new_row: &(*current_partial_state),
                })
            });
        }
    } else {
        vnodes.iter_vnodes_scalar().for_each(|vnode| {
            let datum = Some(vnode.into());
            current_partial_state[0] = datum;
            table.write_record(Record::Insert {
                new_row: &(*current_partial_state),
            })
        });
    }
    table.commit_assert_no_update_vnode_bitmap(epoch).await
}

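/// Builds the single-row backfill state in place. Layout (after the `vnode` column at
/// index 0, which is filled in by `flush_data`):
/// `| current_pos ... | backfill_finished | row_count |`.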
pub(crate) fn build_temporary_state(
    row_state: &mut [Datum],
    is_finished: bool,
    current_pos: &OwnedRow,
    row_count: u64,
) {
    row_state[1..current_pos.len() + 1].clone_from_slice(current_pos.as_inner());
    row_state[current_pos.len() + 1] = Some(is_finished.into());
    row_state[current_pos.len() + 2] = Some((row_count as i64).into());
}

/// Updates the backfill position of `vnode` to the last row of `chunk`.
pub(crate) fn update_pos_by_vnode(
    vnode: VirtualNode,
    chunk: &StreamChunk,
    pk_in_output_indices: &[usize],
    backfill_state: &mut BackfillState,
    snapshot_row_count_delta: u64,
) -> StreamExecutorResult<()> {
    let new_pos = get_new_pos(chunk, pk_in_output_indices);
    assert_eq!(new_pos.len(), pk_in_output_indices.len());
    backfill_state.update_progress(vnode, new_pos, snapshot_row_count_delta)?;
    Ok(())
}

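/// Extracts the new backfill position from `chunk`: since snapshot chunks are ordered by
/// pk, the pk of the last row is the furthest position read so far.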
pub(crate) fn get_new_pos(chunk: &StreamChunk, pk_in_output_indices: &[usize]) -> OwnedRow {
    chunk
        .rows()
        .last()
        .unwrap()
        .1
        .project(pk_in_output_indices)
        .into_owned_row()
}

/// Parses the CDC offset of the last row in `chunk`; the offset is stored in the last column.
pub(crate) fn get_cdc_chunk_last_offset(
    offset_parse_func: &CdcOffsetParseFunc,
    chunk: &StreamChunk,
) -> StreamExecutorResult<Option<CdcOffset>> {
    let row = chunk.rows().last().unwrap().1;
    let offset_col = row.iter().last().unwrap();
    let output =
        offset_col.map(|scalar| Ok::<_, ConnectorError>((*offset_parse_func)(scalar.into_utf8()))?);
    output.transpose().map_err(|e| e.into())
}

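/// Constructs the all-NULL placeholder position used by `finish_progress` when a vnode
/// completes without ever having recorded a position (e.g. an empty snapshot).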
pub(crate) fn construct_initial_finished_state(pos_len: usize) -> OwnedRow {
    OwnedRow::new(vec![None; pos_len])
}

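/// Computes the scan range to resume the snapshot read from `current_pos`:
/// `None` means there is nothing left to scan, otherwise the range is
/// `(Excluded(current_pos), Unbounded)`, or fully unbounded when no position exists yet.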
pub(crate) fn compute_bounds(
    pk_indices: &[usize],
    current_pos: Option<OwnedRow>,
) -> Option<(Bound<OwnedRow>, Bound<OwnedRow>)> {
    if let Some(current_pos) = current_pos {
        // An empty position can only occur when the upstream pk is empty, i.e. the upstream
        // has at most one row and it has already been consumed, so there is nothing left to scan.
        if current_pos.is_empty() {
            assert!(pk_indices.is_empty());
            return None;
        }

        Some((Bound::Excluded(current_pos), Bound::Unbounded))
    } else {
        Some((Bound::Unbounded, Bound::Unbounded))
    }
}

/// Collects rows from `iter` into `StreamChunk`s of `Op::Insert` rows, using `builder`
/// to coalesce them to the configured chunk size.
#[try_stream(ok = StreamChunk, error = StreamExecutorError)]
pub(crate) async fn iter_chunks<'a, S, E, R>(mut iter: S, builder: &'a mut DataChunkBuilder)
where
    StreamExecutorError: From<E>,
    R: Row,
    S: Stream<Item = Result<R, E>> + Unpin + 'a,
{
    while let Some(data_chunk) = collect_data_chunk_with_builder(&mut iter, builder)
        .instrument_await("backfill_snapshot_read")
        .await?
    {
        debug_assert!(data_chunk.cardinality() > 0);
        let ops = vec![Op::Insert; data_chunk.capacity()];
        let stream_chunk = StreamChunk::from_parts(ops, data_chunk);
        yield stream_chunk;
    }
}

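/// Persists the per-vnode backfill progress that has changed since the last commit:
/// `NotStarted` is never persisted, while `InProgress` / `Completed` are written as
/// `| vnode | pk ... | backfill_finished | row_count |`, as an insert when no prior state
/// exists and as an update otherwise. Finally the epoch is committed on the state table.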
pub(crate) async fn persist_state_per_vnode<S: StateStore, const IS_REPLICATED: bool>(
    epoch: EpochPair,
    table: &mut StateTableInner<S, BasicSerde, IS_REPLICATED>,
    backfill_state: &mut BackfillState,
    #[cfg(debug_assertions)] state_len: usize,
    vnodes: impl Iterator<Item = VirtualNode>,
) -> StreamExecutorResult<()> {
    for vnode in vnodes {
        if !backfill_state.need_commit(&vnode) {
            continue;
        }
        let (encoded_prev_state, encoded_current_state) =
            match backfill_state.get_commit_state(&vnode) {
                Some((old_state, new_state)) => (old_state, new_state),
                None => continue,
            };
        if let Some(encoded_prev_state) = encoded_prev_state {
            // There is a committed state for this vnode: verify it matches the state table,
            // then overwrite it.
            #[cfg(debug_assertions)]
            {
                let pk: &[Datum; 1] = &[Some(vnode.to_scalar().into())];
                let old_row = table.get_row(pk).await?;
                match old_row {
                    Some(old_row) => {
                        let inner = old_row.as_inner();
                        // The stored row excludes the `vnode` key column.
                        assert_eq!(inner, &encoded_prev_state[1..]);
                        assert_ne!(inner, &encoded_current_state[1..]);
                        assert_eq!(old_row.len(), state_len - 1);
                        assert_eq!(encoded_current_state.len(), state_len);
                    }
                    None => {
                        bail!("row {:#?} not found", pk);
                    }
                }
            }
            table.write_record(Record::Update {
                old_row: &encoded_prev_state[..],
                new_row: &encoded_current_state[..],
            });
        } else {
            // No committed state yet: this must be the first write for the vnode.
            #[cfg(debug_assertions)]
            {
                let pk: &[Datum; 1] = &[Some(vnode.to_scalar().into())];
                let row = table.get_row(pk).await?;
                assert!(row.is_none(), "row {:#?}", row);
                assert_eq!(encoded_current_state.len(), state_len);
            }
            table.write_record(Record::Insert {
                new_row: &encoded_current_state[..],
            });
        }
        backfill_state.mark_committed(vnode);
    }

    table.commit_assert_no_update_vnode_bitmap(epoch).await?;
    Ok(())
}

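/// Persists the backfill state that tracks a single position for the whole executor:
/// if there is a current position, encode it together with `is_finished` and `row_count`
/// and flush it; otherwise just commit the epoch so the state table keeps up.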
pub(crate) async fn persist_state<S: StateStore, const IS_REPLICATED: bool>(
    epoch: EpochPair,
    table: &mut StateTableInner<S, BasicSerde, IS_REPLICATED>,
    is_finished: bool,
    current_pos: &Option<OwnedRow>,
    row_count: u64,
    old_state: &mut Option<Vec<Datum>>,
    current_state: &mut [Datum],
) -> StreamExecutorResult<()> {
    if let Some(current_pos_inner) = current_pos {
        build_temporary_state(current_state, is_finished, current_pos_inner, row_count);
        flush_data(table, epoch, old_state, current_state).await?;
        *old_state = Some(current_state.into());
    } else {
        table.commit_assert_no_update_vnode_bitmap(epoch).await?;
    }
    Ok(())
}

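/// Creates the `DataChunkBuilder` used for snapshot reads. A fixed rate limit smaller than
/// `chunk_size` caps the batch size, and the batch size never goes below 2.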
pub fn create_builder(
    rate_limit: RateLimit,
    chunk_size: usize,
    data_types: Vec<DataType>,
) -> DataChunkBuilder {
    let batch_size = match rate_limit {
        RateLimit::Disabled | RateLimit::Pause => chunk_size,
        RateLimit::Fixed(limit) => min(limit.get() as usize, chunk_size),
    };
    let batch_size = max(2, batch_size);
    DataChunkBuilder::new(data_types, batch_size)
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use super::*;

    #[test]
    fn test_normalizing_unmatched_updates() {
        let ops = vec![
            Op::UpdateDelete,
            Op::UpdateInsert,
            Op::UpdateDelete,
            Op::UpdateInsert,
        ];
        let ops: Arc<[Op]> = ops.into();

        {
            // Both halves visible: keep the update pair as-is.
            let mut new_ops: Cow<'_, [Op]> = Cow::Borrowed(ops.as_ref());
            let mut unmatched_update_delete = true;
            let mut visible_update_delete = true;
            let current_visibility = true;
            normalize_unmatched_updates(
                &mut new_ops,
                &mut unmatched_update_delete,
                &mut visible_update_delete,
                current_visibility,
                1,
                &Op::UpdateInsert,
            );
            assert_eq!(
                &new_ops[..],
                vec![
                    Op::UpdateDelete,
                    Op::UpdateInsert,
                    Op::UpdateDelete,
                    Op::UpdateInsert
                ]
            );
        }
        {
            // Both halves invisible: keep the update pair as-is.
            let mut new_ops: Cow<'_, [Op]> = Cow::Borrowed(ops.as_ref());
            let mut unmatched_update_delete = true;
            let mut visible_update_delete = false;
            let current_visibility = false;
            normalize_unmatched_updates(
                &mut new_ops,
                &mut unmatched_update_delete,
                &mut visible_update_delete,
                current_visibility,
                1,
                &Op::UpdateInsert,
            );
            assert_eq!(
                &new_ops[..],
                vec![
                    Op::UpdateDelete,
                    Op::UpdateInsert,
                    Op::UpdateDelete,
                    Op::UpdateInsert
                ]
            );
        }
        {
            // Only the delete half visible: it is downgraded to a plain `Delete`.
            let mut new_ops: Cow<'_, [Op]> = Cow::Borrowed(ops.as_ref());
            let mut unmatched_update_delete = true;
            let mut visible_update_delete = true;
            let current_visibility = false;
            normalize_unmatched_updates(
                &mut new_ops,
                &mut unmatched_update_delete,
                &mut visible_update_delete,
                current_visibility,
                1,
                &Op::UpdateInsert,
            );
            assert_eq!(
                &new_ops[..],
                vec![
                    Op::Delete,
                    Op::UpdateInsert,
                    Op::UpdateDelete,
                    Op::UpdateInsert
                ]
            );
        }
        {
            // Only the insert half visible: it is downgraded to a plain `Insert`.
            let mut new_ops: Cow<'_, [Op]> = Cow::Borrowed(ops.as_ref());
            let mut unmatched_update_delete = true;
            let mut visible_update_delete = false;
            let current_visibility = true;
            normalize_unmatched_updates(
                &mut new_ops,
                &mut unmatched_update_delete,
                &mut visible_update_delete,
                current_visibility,
                1,
                &Op::UpdateInsert,
            );
            assert_eq!(
                &new_ops[..],
                vec![
                    Op::UpdateDelete,
                    Op::Insert,
                    Op::UpdateDelete,
                    Op::UpdateInsert
                ]
            );
        }
    }
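
    // A small sketch test for the pure range/position helpers in this module. It assumes
    // only the standard `ScalarImpl: From<i64>` conversion for building test rows; every
    // function it exercises is defined above.
    #[test]
    fn test_compute_bounds_and_placeholder_state() {
        // No current position yet: scan the whole snapshot range.
        assert_eq!(
            compute_bounds(&[0], None),
            Some((Bound::Unbounded, Bound::Unbounded))
        );

        // Resume after the last processed position: exclusive lower bound.
        let pos = OwnedRow::new(vec![Some(1i64.into())]);
        assert_eq!(
            compute_bounds(&[0], Some(pos.clone())),
            Some((Bound::Excluded(pos), Bound::Unbounded))
        );

        // An empty position (empty pk) means the snapshot is exhausted: nothing to scan.
        assert_eq!(compute_bounds(&[], Some(OwnedRow::new(vec![]))), None);

        // The finished placeholder position is all NULLs.
        let placeholder = construct_initial_finished_state(2);
        assert_eq!(placeholder.len(), 2);
        assert!(placeholder.iter().all(|d| d.is_none()));
    }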
}