risingwave_storage/table/batch_table/mod.rs

// Copyright 2025 RisingWave Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::future::Future;
use std::ops::Bound::{self, Excluded, Included, Unbounded};
use std::ops::RangeBounds;
use std::sync::Arc;

use await_tree::{InstrumentAwait, SpanExt};
use bytes::{Bytes, BytesMut};
use foyer::Hint;
use futures::future::try_join_all;
use futures::{Stream, StreamExt, TryStreamExt};
use futures_async_stream::try_stream;
use itertools::Itertools;
use more_asserts::assert_gt;
use risingwave_common::array::{ArrayBuilderImpl, ArrayRef, DataChunk};
use risingwave_common::bitmap::Bitmap;
use risingwave_common::catalog::{ColumnDesc, ColumnId, Schema, TableId, TableOption};
use risingwave_common::hash::{VirtualNode, VnodeBitmapExt};
use risingwave_common::row::{self, OwnedRow, Row, RowExt};
use risingwave_common::types::ToOwnedDatum;
use risingwave_common::util::epoch::Epoch;
use risingwave_common::util::row_serde::*;
use risingwave_common::util::sort_util::OrderType;
use risingwave_common::util::value_encoding::column_aware_row_encoding::ColumnAwareSerde;
use risingwave_common::util::value_encoding::{BasicSerde, EitherSerde};
use risingwave_hummock_sdk::HummockReadEpoch;
use risingwave_hummock_sdk::key::{
    CopyFromSlice, TableKeyRange, end_bound_of_prefix, next_key, prefixed_range_with_vnode,
};
use risingwave_pb::plan_common::StorageTableDesc;
use tracing::trace;
mod vector_index_reader;
pub use vector_index_reader::VectorIndexReader;

use crate::StateStore;
use crate::error::{StorageError, StorageResult};
use crate::hummock::CachePolicy;
use crate::row_serde::row_serde_util::{serialize_pk, serialize_pk_with_vnode};
use crate::row_serde::value_serde::{ValueRowSerde, ValueRowSerdeNew};
use crate::row_serde::{ColumnMapping, find_columns_by_ids};
use crate::store::{
    NewReadSnapshotOptions, NextEpochOptions, PrefetchOptions, ReadLogOptions, ReadOptions,
    StateStoreGet, StateStoreIter, StateStoreIterExt, StateStoreRead, TryWaitEpochOptions,
};
use crate::table::merge_sort::NodePeek;
use crate::table::{ChangeLogRow, KeyedRow, TableDistribution, TableIter};
/// [`BatchTableInner`] is the interface for accessing relational data in KV (`StateStore`) with
/// a row-based encoding format, and is used in batch mode.
#[derive(Clone)]
pub struct BatchTableInner<S: StateStore, SD: ValueRowSerde> {
    /// Id for this table.
    table_id: TableId,

    /// State store backend.
    store: S,

    /// The schema of the output columns, i.e., this table VIEWED BY some executor like
    /// `RowSeqScanExecutor`.
    schema: Schema,

    /// Used for serializing and deserializing the primary key.
    pk_serializer: OrderedRowSerde,

    output_indices: Vec<usize>,

    /// the key part of `output_indices`.
    key_output_indices: Option<Vec<usize>>,

    /// the value part of `output_indices`.
    value_output_indices: Vec<usize>,

    /// used for deserializing key part of output row from pk.
    output_row_in_key_indices: Vec<usize>,

    /// Mapping from column id to column index for deserializing the row.
    mapping: Arc<ColumnMapping>,

    /// The index of system column `_rw_timestamp` in the output columns.
    epoch_idx: Option<usize>,

    /// Row deserializer to deserialize the value in storage to a row.
    /// The row can be either complete or partial, depending on whether the row encoding is versioned.
    row_serde: Arc<SD>,

    /// Indices of primary key.
    /// Note that the index is based on all the columns of the table, instead of the output ones.
    // FIXME: revisit constructions and usages.
    pk_indices: Vec<usize>,

    distribution: TableDistribution,

    /// Used for catalog `table_properties`
    table_option: TableOption,

    read_prefix_len_hint: usize,
}

/// `BatchTable` will use [`EitherSerde`] as default so that we can support both versioned and
/// non-versioned tables with the same type.
pub type BatchTable<S> = BatchTableInner<S, EitherSerde>;

impl<S: StateStore, SD: ValueRowSerde> std::fmt::Debug for BatchTableInner<S, SD> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("BatchTableInner").finish_non_exhaustive()
    }
}

// init
impl<S: StateStore> BatchTableInner<S, EitherSerde> {
    /// Create a [`BatchTableInner`] given a complete set of `columns` and a partial
    /// set of `output_column_ids`.
    /// When reading from the storage table,
    /// the chunks or rows will only contain columns with the given ids (`output_column_ids`),
    /// and they will be in the same order as the given `output_column_ids`.
    ///
    /// NOTE(kwannoel): The `output_column_ids` here may be slightly different
    /// from those supplied to the associated executors.
    /// These `output_column_ids` may have `pk` appended, since they will be needed to scan from
    /// storage. The associated executors may not have these `pk` fields.
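    ///
    /// A minimal construction sketch, not compiled as a doc-test; `state_store`,
    /// `output_column_ids`, `vnodes` and `table_desc` are assumed to come from the
    /// caller's context (e.g. a [`StorageTableDesc`] from the plan):
    ///
    /// ```ignore
    /// let table: BatchTable<_> = BatchTable::new_partial(
    ///     state_store.clone(),
    ///     output_column_ids,
    ///     Some(vnodes),
    ///     &table_desc,
    /// );
    /// ```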
    pub fn new_partial(
        store: S,
        output_column_ids: Vec<ColumnId>,
        vnodes: Option<Arc<Bitmap>>,
        table_desc: &StorageTableDesc,
    ) -> Self {
        let table_id = TableId {
            table_id: table_desc.table_id,
        };
        let column_descs = table_desc
            .columns
            .iter()
            .map(ColumnDesc::from)
            .collect_vec();
        let order_types: Vec<OrderType> = table_desc
            .pk
            .iter()
            .map(|order| OrderType::from_protobuf(order.get_order_type().unwrap()))
            .collect();

        let pk_indices = table_desc
            .pk
            .iter()
            .map(|k| k.column_index as usize)
            .collect_vec();

        let table_option = TableOption {
            retention_seconds: table_desc.retention_seconds,
        };
        let value_indices = table_desc
            .get_value_indices()
            .iter()
            .map(|&k| k as usize)
            .collect_vec();
        let prefix_hint_len = table_desc.get_read_prefix_len_hint() as usize;
        let versioned = table_desc.versioned;
        let distribution = TableDistribution::new_from_storage_table_desc(vnodes, table_desc);

        Self::new_inner(
            store,
            table_id,
            column_descs,
            output_column_ids,
            order_types,
            pk_indices,
            distribution,
            table_option,
            value_indices,
            prefix_hint_len,
            versioned,
        )
    }

    pub fn for_test_with_partial_columns(
        store: S,
        table_id: TableId,
        columns: Vec<ColumnDesc>,
        output_column_ids: Vec<ColumnId>,
        order_types: Vec<OrderType>,
        pk_indices: Vec<usize>,
        value_indices: Vec<usize>,
    ) -> Self {
        Self::new_inner(
            store,
            table_id,
            columns,
            output_column_ids,
            order_types,
            pk_indices,
            TableDistribution::singleton(),
            Default::default(),
            value_indices,
            0,
            false,
        )
    }

    pub fn for_test(
        store: S,
        table_id: TableId,
        columns: Vec<ColumnDesc>,
        order_types: Vec<OrderType>,
        pk_indices: Vec<usize>,
        value_indices: Vec<usize>,
    ) -> Self {
        let output_column_ids = columns.iter().map(|c| c.column_id).collect();
        Self::for_test_with_partial_columns(
            store,
            table_id,
            columns,
            output_column_ids,
            order_types,
            pk_indices,
            value_indices,
        )
    }

    #[allow(clippy::too_many_arguments)]
    fn new_inner(
        store: S,
        table_id: TableId,
        table_columns: Vec<ColumnDesc>,
        output_column_ids: Vec<ColumnId>,
        order_types: Vec<OrderType>,
        pk_indices: Vec<usize>,
        distribution: TableDistribution,
        table_option: TableOption,
        value_indices: Vec<usize>,
        read_prefix_len_hint: usize,
        versioned: bool,
    ) -> Self {
        assert_eq!(order_types.len(), pk_indices.len());

        let (output_columns, output_indices) =
            find_columns_by_ids(&table_columns, &output_column_ids);

        let mut value_output_indices = vec![];
        let mut key_output_indices = vec![];
        // system column currently only contains `_rw_timestamp`
        let mut epoch_idx = None;

        for idx in &output_indices {
            if value_indices.contains(idx) {
                value_output_indices.push(*idx);
            } else if pk_indices.contains(idx) {
                key_output_indices.push(*idx);
            } else {
                assert!(epoch_idx.is_none());
                epoch_idx = Some(*idx);
            }
        }

        let output_row_in_key_indices = key_output_indices
            .iter()
            .map(|&di| pk_indices.iter().position(|&pi| di == pi).unwrap())
            .collect_vec();
        let schema = Schema::new(output_columns.iter().map(Into::into).collect());

        let pk_data_types = pk_indices
            .iter()
            .map(|i| table_columns[*i].data_type.clone())
            .collect();
        let pk_serializer = OrderedRowSerde::new(pk_data_types, order_types);
        let (row_serde, mapping) = {
            if versioned {
                let value_output_indices_dedup = value_output_indices
                    .iter()
                    .unique()
                    .copied()
                    .collect::<Vec<_>>();
                let output_row_in_value_output_indices_dedup = value_output_indices
                    .iter()
                    .map(|&di| {
                        value_output_indices_dedup
                            .iter()
                            .position(|&pi| di == pi)
                            .unwrap()
                    })
                    .collect_vec();
                let mapping = ColumnMapping::new(output_row_in_value_output_indices_dedup);
                let serde =
                    ColumnAwareSerde::new(value_output_indices_dedup.into(), table_columns.into());
                (serde.into(), mapping)
            } else {
                let output_row_in_value_indices = value_output_indices
                    .iter()
                    .map(|&di| value_indices.iter().position(|&pi| di == pi).unwrap())
                    .collect_vec();
                let mapping = ColumnMapping::new(output_row_in_value_indices);
                let serde = BasicSerde::new(value_indices.into(), table_columns.into());
                (serde.into(), mapping)
            }
        };

        let key_output_indices = match key_output_indices.is_empty() {
            true => None,
            false => Some(key_output_indices),
        };
        Self {
            table_id,
            store,
            schema,
            pk_serializer,
            output_indices,
            key_output_indices,
            value_output_indices,
            output_row_in_key_indices,
            mapping: Arc::new(mapping),
            epoch_idx,
            row_serde: Arc::new(row_serde),
            pk_indices,
            distribution,
            table_option,
            read_prefix_len_hint,
        }
    }
}

impl<S: StateStore, SD: ValueRowSerde> BatchTableInner<S, SD> {
    pub fn pk_serializer(&self) -> &OrderedRowSerde {
        &self.pk_serializer
    }

    pub fn schema(&self) -> &Schema {
        &self.schema
    }

    pub fn pk_indices(&self) -> &[usize] {
        &self.pk_indices
    }

    pub fn output_indices(&self) -> &[usize] {
        &self.output_indices
    }

    /// Get the indices of the primary key columns in the output columns.
    ///
    /// Returns `None` if any of the primary key columns is not in the output columns.
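    ///
    /// A hedged, illustrative example (values are not from any real table): with
    /// `pk_indices = [0, 1]` and `output_indices = [1, 3]`, this returns `None` because
    /// pk column `0` is not in the output; with `output_indices = [1, 0, 3]` it returns
    /// `Some(vec![1, 0])`.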
    pub fn pk_in_output_indices(&self) -> Option<Vec<usize>> {
        self.pk_indices
            .iter()
            .map(|&i| self.output_indices.iter().position(|&j| i == j))
            .collect()
    }

    pub fn table_id(&self) -> TableId {
        self.table_id
    }

    pub fn vnodes(&self) -> &Arc<Bitmap> {
        self.distribution.vnodes()
    }
}
/// Point get
impl<S: StateStore, SD: ValueRowSerde> BatchTableInner<S, SD> {
    /// Get a single row by point get
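    ///
    /// A minimal usage sketch, not compiled as a doc-test; `table`, `pk` and `epoch`
    /// below are assumed to be provided by the caller:
    ///
    /// ```ignore
    /// let maybe_row: Option<OwnedRow> = table
    ///     .get_row(&pk, HummockReadEpoch::Committed(epoch))
    ///     .await?;
    /// ```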
    pub async fn get_row(
        &self,
        pk: impl Row,
        wait_epoch: HummockReadEpoch,
    ) -> StorageResult<Option<OwnedRow>> {
        self.store
            .try_wait_epoch(
                wait_epoch,
                TryWaitEpochOptions {
                    table_id: self.table_id,
                },
            )
            .await?;
        let serialized_pk = serialize_pk_with_vnode(
            &pk,
            &self.pk_serializer,
            self.distribution.compute_vnode_by_pk(&pk),
        );
        assert!(pk.len() <= self.pk_indices.len());

        let prefix_hint = if self.read_prefix_len_hint != 0 && self.read_prefix_len_hint == pk.len()
        {
            Some(serialized_pk.slice(VirtualNode::SIZE..))
        } else {
            None
        };

        let read_options = ReadOptions {
            prefix_hint,
            retention_seconds: self.table_option.retention_seconds,
            cache_policy: CachePolicy::Fill(Hint::Normal),
            ..Default::default()
        };
        let read_snapshot = self
            .store
            .new_read_snapshot(
                wait_epoch,
                NewReadSnapshotOptions {
                    table_id: self.table_id,
                },
            )
            .await?;
        match read_snapshot
            .on_key_value(serialized_pk, read_options, move |key, value| {
                let row = self.row_serde.deserialize(value)?;
                Ok((key.epoch_with_gap.pure_epoch(), row))
            })
            .await?
        {
            Some((epoch, row)) => {
                let result_row_in_value = self.mapping.project(OwnedRow::new(row));

                match &self.key_output_indices {
                    Some(key_output_indices) => {
                        let result_row_in_key =
                            pk.project(&self.output_row_in_key_indices).into_owned_row();
                        let mut result_row_vec = vec![];
                        for idx in &self.output_indices {
                            if let Some(epoch_idx) = self.epoch_idx
                                && *idx == epoch_idx
                            {
                                let epoch = Epoch::from(epoch);
                                result_row_vec
                                    .push(risingwave_common::types::Datum::from(epoch.as_scalar()));
                            } else if self.value_output_indices.contains(idx) {
                                let item_position_in_value_indices = &self
                                    .value_output_indices
                                    .iter()
                                    .position(|p| idx == p)
                                    .unwrap();
                                result_row_vec.push(
                                    result_row_in_value
                                        .datum_at(*item_position_in_value_indices)
                                        .to_owned_datum(),
                                );
                            } else {
                                let item_position_in_pk_indices =
                                    key_output_indices.iter().position(|p| idx == p).unwrap();
                                result_row_vec.push(
                                    result_row_in_key
                                        .datum_at(item_position_in_pk_indices)
                                        .to_owned_datum(),
                                );
                            }
                        }
                        let result_row = OwnedRow::new(result_row_vec);
                        Ok(Some(result_row))
                    }
                    None => match &self.epoch_idx {
                        Some(epoch_idx) => {
                            let mut result_row_vec = vec![];
                            for idx in &self.output_indices {
                                if idx == epoch_idx {
                                    let epoch = Epoch::from(epoch);
                                    result_row_vec.push(risingwave_common::types::Datum::from(
                                        epoch.as_scalar(),
                                    ));
                                } else {
                                    let item_position_in_value_indices = &self
                                        .value_output_indices
                                        .iter()
                                        .position(|p| idx == p)
                                        .unwrap();
                                    result_row_vec.push(
                                        result_row_in_value
                                            .datum_at(*item_position_in_value_indices)
                                            .to_owned_datum(),
                                    );
                                }
                            }
                            let result_row = OwnedRow::new(result_row_vec);
                            Ok(Some(result_row))
                        }
                        None => Ok(Some(result_row_in_value.into_owned_row())),
                    },
                }
            }
            _ => Ok(None),
        }
    }

    /// Update the vnode bitmap of the storage table, returning the previous vnode bitmap.
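    ///
    /// A hedged usage sketch, not compiled as a doc-test; `table` and `new_vnodes` are
    /// assumed from the caller's context:
    ///
    /// ```ignore
    /// let prev_vnodes = table.update_vnode_bitmap(new_vnodes);
    /// if &prev_vnodes != table.vnodes() {
    ///     // e.g. decide whether executor-local caches built on the old
    ///     // distribution can still be reused
    /// }
    /// ```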
    #[must_use = "the executor should decide whether to manipulate the cache based on the previous vnode bitmap"]
    pub fn update_vnode_bitmap(&mut self, new_vnodes: Arc<Bitmap>) -> Arc<Bitmap> {
        self.distribution.update_vnode_bitmap(new_vnodes)
    }
}

/// The row iterator of the storage table.
/// A wrapper over a stream of `StorageResult<OwnedRow>` items, used when the pk is not persisted.
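///
/// A hedged consumption sketch, not compiled as a doc-test; `stream` is assumed to be a
/// pinned row stream obtained from one of the iterator methods below:
///
/// ```ignore
/// while let Some(row) = stream.next_row().await? {
///     // handle one `OwnedRow`
/// }
/// ```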
impl<S: Stream<Item = StorageResult<OwnedRow>> + Send + Unpin> TableIter for S {
    async fn next_row(&mut self) -> StorageResult<Option<OwnedRow>> {
        self.next().await.transpose()
    }
}

mod merge_vnode_stream {

    use bytes::Bytes;
    use futures::{Stream, StreamExt, TryStreamExt};
    use risingwave_hummock_sdk::key::TableKey;

    use crate::error::StorageResult;
    use crate::table::KeyedRow;
    use crate::table::merge_sort::{NodePeek, merge_sort};

    pub(super) enum VnodeStreamType<RowSt, KeyedRowSt> {
        Single(RowSt),
        Unordered(Vec<RowSt>),
        Ordered(Vec<KeyedRowSt>),
    }

    pub(super) type MergedVnodeStream<
        R: Send,
        RowSt: Stream<Item = StorageResult<((), R)>> + Send,
        KeyedRowSt: Stream<Item = StorageResult<(SortKeyType, R)>> + Send,
    >
    where
        KeyedRow<SortKeyType, R>: NodePeek + Send + Sync,
    = impl Stream<Item = StorageResult<R>> + Send;

    pub(super) type SortKeyType = Bytes; // TODO: may use Vec

    #[define_opaque(MergedVnodeStream)]
    pub(super) fn merge_stream<
        R: Send,
        RowSt: Stream<Item = StorageResult<((), R)>> + Send,
        KeyedRowSt: Stream<Item = StorageResult<(SortKeyType, R)>> + Send,
    >(
        stream: VnodeStreamType<RowSt, KeyedRowSt>,
    ) -> MergedVnodeStream<R, RowSt, KeyedRowSt>
    where
        KeyedRow<SortKeyType, R>: NodePeek + Send + Sync,
    {
        #[auto_enums::auto_enum(futures03::Stream)]
        match stream {
            VnodeStreamType::Single(stream) => stream.map_ok(|(_, row)| row),
            VnodeStreamType::Unordered(streams) => futures::stream::iter(
                streams
                    .into_iter()
                    .map(|stream| Box::pin(stream.map_ok(|(_, row)| row))),
            )
            .flatten_unordered(1024),
            VnodeStreamType::Ordered(streams) => merge_sort(streams.into_iter().map(|stream| {
                Box::pin(stream.map_ok(|(key, row)| KeyedRow {
                    vnode_prefixed_key: TableKey(key),
                    row,
                }))
            }))
            .map_ok(|keyed_row| keyed_row.row),
        }
    }
}

use merge_vnode_stream::*;

async fn build_vnode_stream<
    R: Send,
    RowSt: Stream<Item = StorageResult<((), R)>> + Send,
    KeyedRowSt: Stream<Item = StorageResult<(SortKeyType, R)>> + Send,
    RowStFut: Future<Output = StorageResult<RowSt>>,
    KeyedRowStFut: Future<Output = StorageResult<KeyedRowSt>>,
>(
    row_stream_fn: impl Fn(VirtualNode) -> RowStFut,
    keyed_row_stream_fn: impl Fn(VirtualNode) -> KeyedRowStFut,
    vnodes: &[VirtualNode],
    ordered: bool,
) -> StorageResult<MergedVnodeStream<R, RowSt, KeyedRowSt>>
where
    KeyedRow<SortKeyType, R>: NodePeek + Send + Sync,
{
    let stream = match vnodes {
        [] => unreachable!(),
        [vnode] => VnodeStreamType::Single(row_stream_fn(*vnode).await?),
        // Concatenate all iterators if order does not need to be preserved.
        vnodes if !ordered => VnodeStreamType::Unordered(
            try_join_all(vnodes.iter().map(|vnode| row_stream_fn(*vnode))).await?,
        ),
        // Merge all iterators if order needs to be preserved.
        vnodes => VnodeStreamType::Ordered(
            try_join_all(vnodes.iter().map(|vnode| keyed_row_stream_fn(*vnode))).await?,
        ),
    };
    Ok(merge_stream(stream))
}

/// Iterators
impl<S: StateStore, SD: ValueRowSerde> BatchTableInner<S, SD> {
    /// Get multiple streams of `StorageResult<OwnedRow>` items over the specified vnodes of this table
    /// (restricted by `vnode_hint`), and merge or concatenate them according to `ordered`.
    async fn iter_with_encoded_key_range(
        &self,
        prefix_hint: Option<Bytes>,
        (start_bound, end_bound): (Bound<Bytes>, Bound<Bytes>),
        wait_epoch: HummockReadEpoch,
        vnode_hint: Option<VirtualNode>,
        ordered: bool,
        prefetch_options: PrefetchOptions,
    ) -> StorageResult<impl Stream<Item = StorageResult<OwnedRow>> + Send + 'static + use<S, SD>>
    {
        let vnodes = match vnode_hint {
            // If `vnode_hint` is set, we can only access this single vnode.
            Some(vnode) => {
                assert!(
                    self.distribution.vnodes().is_set(vnode.to_index()),
                    "vnode unset: {:?}, distribution: {:?}",
                    vnode,
                    self.distribution
                );
                vec![vnode]
            }
            // Otherwise, we need to access all vnodes of this table.
            None => self.distribution.vnodes().iter_vnodes().collect_vec(),
        };

        let read_snapshot = self
            .store
            .new_read_snapshot(
                wait_epoch,
                NewReadSnapshotOptions {
                    table_id: self.table_id,
                },
            )
            .await?;

        build_vnode_stream(
            |vnode| {
                self.iter_vnode_with_encoded_key_range(
                    &read_snapshot,
                    prefix_hint.clone(),
                    (start_bound.as_ref(), end_bound.as_ref()),
                    vnode,
                    prefetch_options,
                )
            },
            |vnode| {
                self.iter_vnode_with_encoded_key_range(
                    &read_snapshot,
                    prefix_hint.clone(),
                    (start_bound.as_ref(), end_bound.as_ref()),
                    vnode,
                    prefetch_options,
                )
            },
            &vnodes,
            ordered,
        )
        .await
    }

    async fn iter_vnode_with_encoded_key_range<K: CopyFromSlice>(
        &self,
        read_snapshot: &S::ReadSnapshot,
        prefix_hint: Option<Bytes>,
        encoded_key_range: (Bound<&Bytes>, Bound<&Bytes>),
        vnode: VirtualNode,
        prefetch_options: PrefetchOptions,
    ) -> StorageResult<impl Stream<Item = StorageResult<(K, OwnedRow)>> + Send + use<K, S, SD>>
    {
        let cache_policy = match &encoded_key_range {
            // To prevent unbounded range scan queries from polluting the block cache, use the
            // low priority fill policy.
            (Unbounded, _) | (_, Unbounded) => CachePolicy::Fill(Hint::Low),
            _ => CachePolicy::Fill(Hint::Normal),
        };

        let table_key_range = prefixed_range_with_vnode::<&Bytes>(encoded_key_range, vnode);

        {
            let prefix_hint = prefix_hint.clone();
            {
                let read_options = ReadOptions {
                    prefix_hint,
                    retention_seconds: self.table_option.retention_seconds,
                    prefetch_options,
                    cache_policy,
                };
                let pk_serializer = match self.output_row_in_key_indices.is_empty() {
                    true => None,
                    false => Some(Arc::new(self.pk_serializer.clone())),
                };
                let iter = BatchTableInnerIterInner::new(
                    read_snapshot,
                    self.mapping.clone(),
                    self.epoch_idx,
                    pk_serializer,
                    self.output_indices.clone(),
                    self.key_output_indices.clone(),
                    self.value_output_indices.clone(),
                    self.output_row_in_key_indices.clone(),
                    self.row_serde.clone(),
                    table_key_range,
                    read_options,
                )
                .await?
                .into_stream::<K>();
                Ok(iter)
            }
        }
    }

    // TODO: directly use `prefixed_range`.
    fn serialize_pk_bound(
        &self,
        pk_prefix: impl Row,
        range_bound: Bound<&OwnedRow>,
        is_start_bound: bool,
    ) -> Bound<Bytes> {
        match range_bound {
            Included(k) => {
                let pk_prefix_serializer = self.pk_serializer.prefix(pk_prefix.len() + k.len());
                let key = pk_prefix.chain(k);
                let serialized_key = serialize_pk(&key, &pk_prefix_serializer);
                if is_start_bound {
                    Included(serialized_key)
                } else {
                    // Should use excluded next key for end bound.
                    // Otherwise, keys starting with the bound are not included.
                    end_bound_of_prefix(&serialized_key)
                }
            }
            Excluded(k) => {
                let pk_prefix_serializer = self.pk_serializer.prefix(pk_prefix.len() + k.len());
                let key = pk_prefix.chain(k);
                let serialized_key = serialize_pk(&key, &pk_prefix_serializer);
                if is_start_bound {
                    // Storage doesn't support excluded begin key yet, so transform it to
                    // included.
                    // A null datum is always serialized as a u8 that is not equal to '\xff',
                    // so we can assert that the next_key will never be empty.
                    let next_serialized_key = next_key(&serialized_key);
                    assert!(!next_serialized_key.is_empty());
                    Included(Bytes::from(next_serialized_key))
                } else {
                    Excluded(serialized_key)
                }
            }
            Unbounded => {
                let pk_prefix_serializer = self.pk_serializer.prefix(pk_prefix.len());
                let serialized_pk_prefix = serialize_pk(&pk_prefix, &pk_prefix_serializer);
                if pk_prefix.is_empty() {
                    Unbounded
                } else if is_start_bound {
                    Included(serialized_pk_prefix)
                } else {
                    end_bound_of_prefix(&serialized_pk_prefix)
                }
            }
        }
    }

    /// Iterates on the table with the given prefix of the pk in `pk_prefix` and the range bounds.
    async fn iter_with_pk_bounds(
        &self,
        epoch: HummockReadEpoch,
        pk_prefix: impl Row,
        range_bounds: impl RangeBounds<OwnedRow>,
        ordered: bool,
        prefetch_options: PrefetchOptions,
    ) -> StorageResult<impl Stream<Item = StorageResult<OwnedRow>> + Send> {
        let start_key = self.serialize_pk_bound(&pk_prefix, range_bounds.start_bound(), true);
        let end_key = self.serialize_pk_bound(&pk_prefix, range_bounds.end_bound(), false);
        assert!(pk_prefix.len() <= self.pk_indices.len());
        let pk_prefix_indices = (0..pk_prefix.len())
            .map(|index| self.pk_indices[index])
            .collect_vec();

        let prefix_hint = if self.read_prefix_len_hint != 0
            && self.read_prefix_len_hint <= pk_prefix.len()
        {
            let encoded_prefix = if let Bound::Included(start_key) = start_key.as_ref() {
                start_key
            } else {
                unreachable!()
            };
            let prefix_len = self
                .pk_serializer
                .deserialize_prefix_len(encoded_prefix, self.read_prefix_len_hint)?;
            Some(Bytes::from(encoded_prefix[..prefix_len].to_vec()))
        } else {
            trace!(
                "iter_with_pk_bounds dist_key_indices table_id {} not match prefix pk_prefix {:?}  pk_prefix_indices {:?}",
                self.table_id, pk_prefix, pk_prefix_indices
            );
            None
        };

        trace!(
            "iter_with_pk_bounds table_id {} prefix_hint {:?} start_key: {:?}, end_key: {:?} pk_prefix {:?}  pk_prefix_indices {:?}",
            self.table_id, prefix_hint, start_key, end_key, pk_prefix, pk_prefix_indices
        );

        self.iter_with_encoded_key_range(
            prefix_hint,
            (start_key, end_key),
            epoch,
            self.distribution.try_compute_vnode_by_pk_prefix(pk_prefix),
            ordered,
            prefetch_options,
        )
        .await
    }

    // Construct a stream of (columns, row_count) from a row stream
    #[try_stream(ok = (Vec<ArrayRef>, usize), error = StorageError)]
    async fn convert_row_stream_to_array_vec_stream(
        iter: impl Stream<Item = StorageResult<OwnedRow>>,
        schema: Schema,
        chunk_size: usize,
    ) {
        use futures::{TryStreamExt, pin_mut};
        use risingwave_common::util::iter_util::ZipEqFast;

        pin_mut!(iter);

        let mut builders: Option<Vec<ArrayBuilderImpl>> = None;
        let mut row_count = 0;

        while let Some(row) = iter.try_next().await? {
            row_count += 1;
            // Uses `ArrayBuilderImpl` instead of `DataChunkBuilder` here to demonstrate how to build a chunk in a columnar manner.
            let builders_ref =
                builders.get_or_insert_with(|| schema.create_array_builders(chunk_size));
            for (datum, builder) in row.iter().zip_eq_fast(builders_ref.iter_mut()) {
                builder.append(datum);
            }
            if row_count == chunk_size {
                let columns: Vec<_> = builders
                    .take()
                    .unwrap()
                    .into_iter()
                    .map(|builder| builder.finish().into())
                    .collect();
                yield (columns, row_count);
                assert!(builders.is_none());
                row_count = 0;
            }
        }

        if let Some(builders) = builders {
            assert_gt!(row_count, 0);
            // yield the last chunk if any
            let columns: Vec<_> = builders
                .into_iter()
                .map(|builder| builder.finish().into())
                .collect();
            yield (columns, row_count);
        }
    }

    /// Iterates on the table with the given prefix of the pk in `pk_prefix` and the range bounds.
    /// Returns a stream of chunks of columns with the provided `chunk_size`
    async fn chunk_iter_with_pk_bounds(
        &self,
        epoch: HummockReadEpoch,
        pk_prefix: impl Row,
        range_bounds: impl RangeBounds<OwnedRow>,
        ordered: bool,
        chunk_size: usize,
        prefetch_options: PrefetchOptions,
    ) -> StorageResult<impl Stream<Item = StorageResult<(Vec<ArrayRef>, usize)>> + Send> {
        let iter = self
            .iter_with_pk_bounds(epoch, pk_prefix, range_bounds, ordered, prefetch_options)
            .await?;

        Ok(Self::convert_row_stream_to_array_vec_stream(
            iter,
            self.schema.clone(),
            chunk_size,
        ))
    }

    /// Construct a stream of `StorageResult<OwnedRow>` items for batch executors.
    /// Unlike the streaming one, this iterator will wait for the epoch before iterating.
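    ///
    /// A hedged usage sketch, not compiled as a doc-test; `table`, `epoch` and the
    /// full-range scan below are assumptions for illustration only:
    ///
    /// ```ignore
    /// use futures::TryStreamExt;
    /// let stream = table
    ///     .batch_iter_with_pk_bounds(
    ///         HummockReadEpoch::Committed(epoch),
    ///         row::empty(),                 // no pk prefix
    ///         ..,                           // full pk range
    ///         true,                         // ordered
    ///         PrefetchOptions::default(),
    ///     )
    ///     .await?;
    /// futures::pin_mut!(stream);
    /// while let Some(row) = stream.try_next().await? {
    ///     // process one `OwnedRow`
    /// }
    /// ```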
    pub async fn batch_iter_with_pk_bounds(
        &self,
        epoch: HummockReadEpoch,
        pk_prefix: impl Row,
        range_bounds: impl RangeBounds<OwnedRow>,
        ordered: bool,
        prefetch_options: PrefetchOptions,
    ) -> StorageResult<impl Stream<Item = StorageResult<OwnedRow>> + Send> {
        self.iter_with_pk_bounds(epoch, pk_prefix, range_bounds, ordered, prefetch_options)
            .await
    }

    // The returned iterator will iterate data from a snapshot corresponding to the given `epoch`.
    pub async fn batch_iter(
        &self,
        epoch: HummockReadEpoch,
        ordered: bool,
        prefetch_options: PrefetchOptions,
    ) -> StorageResult<impl Stream<Item = StorageResult<OwnedRow>> + Send> {
        self.batch_iter_with_pk_bounds(epoch, row::empty(), .., ordered, prefetch_options)
            .await
    }

    pub async fn batch_iter_vnode(
        &self,
        epoch: HummockReadEpoch,
        start_pk: Option<&OwnedRow>,
        vnode: VirtualNode,
        prefetch_options: PrefetchOptions,
    ) -> StorageResult<impl Stream<Item = StorageResult<OwnedRow>> + Send + 'static + use<S, SD>>
    {
        let start_bound = if let Some(start_pk) = start_pk {
            let mut bytes = BytesMut::new();
            self.pk_serializer.serialize(start_pk, &mut bytes);
            let bytes = bytes.freeze();
            Included(bytes)
        } else {
            Unbounded
        };
        let read_snapshot = self
            .store
            .new_read_snapshot(
                epoch,
                NewReadSnapshotOptions {
                    table_id: self.table_id,
                },
            )
            .await?;
        Ok(self
            .iter_vnode_with_encoded_key_range::<()>(
                &read_snapshot,
                None,
                (start_bound.as_ref(), Unbounded),
                vnode,
                prefetch_options,
            )
            .await?
            .map_ok(|(_, row)| row))
    }

    pub async fn next_epoch(&self, epoch: u64) -> StorageResult<u64> {
        self.store
            .next_epoch(
                epoch,
                NextEpochOptions {
                    table_id: self.table_id,
                },
            )
            .await
    }

    pub async fn batch_iter_vnode_log(
        &self,
        start_epoch: u64,
        end_epoch: HummockReadEpoch,
        start_pk: Option<&OwnedRow>,
        vnode: VirtualNode,
    ) -> StorageResult<impl Stream<Item = StorageResult<ChangeLogRow>> + Send + 'static + use<S, SD>>
    {
        let start_bound = if let Some(start_pk) = start_pk {
            let mut bytes = BytesMut::new();
            self.pk_serializer.serialize(start_pk, &mut bytes);
            let bytes = bytes.freeze();
            Included(bytes)
        } else {
            Unbounded
        };
        let stream = self
            .batch_iter_log_inner::<()>(
                start_epoch,
                end_epoch,
                (start_bound.as_ref(), Unbounded),
                vnode,
            )
            .await?;
        Ok(stream.map_ok(|(_, row)| row))
    }

    pub async fn batch_iter_log_with_pk_bounds(
        &self,
        start_epoch: u64,
        end_epoch: HummockReadEpoch,
        ordered: bool,
        range_bounds: impl RangeBounds<OwnedRow>,
        pk_prefix: impl Row,
    ) -> StorageResult<impl Stream<Item = StorageResult<ChangeLogRow>> + Send> {
        let start_key = self.serialize_pk_bound(&pk_prefix, range_bounds.start_bound(), true);
        let end_key = self.serialize_pk_bound(&pk_prefix, range_bounds.end_bound(), false);
        let vnodes = self.distribution.vnodes().iter_vnodes().collect_vec();
        build_vnode_stream(
            |vnode| {
                self.batch_iter_log_inner(
                    start_epoch,
                    end_epoch,
                    (start_key.as_ref(), end_key.as_ref()),
                    vnode,
                )
            },
            |vnode| {
                self.batch_iter_log_inner(
                    start_epoch,
                    end_epoch,
                    (start_key.as_ref(), end_key.as_ref()),
                    vnode,
                )
            },
            &vnodes,
            ordered,
        )
        .await
    }

    async fn batch_iter_log_inner<K: CopyFromSlice>(
        &self,
        start_epoch: u64,
        end_epoch: HummockReadEpoch,
        encoded_key_range: (Bound<&Bytes>, Bound<&Bytes>),
        vnode: VirtualNode,
    ) -> StorageResult<impl Stream<Item = StorageResult<(K, ChangeLogRow)>> + Send + use<K, S, SD>>
    {
        let table_key_range = prefixed_range_with_vnode::<&Bytes>(encoded_key_range, vnode);
        let read_options = ReadLogOptions {
            table_id: self.table_id,
        };
        let iter = BatchTableInnerIterLogInner::<S, SD>::new(
            &self.store,
            self.mapping.clone(),
            self.row_serde.clone(),
            table_key_range,
            read_options,
            start_epoch,
            end_epoch,
        )
        .await?
        .into_stream::<K>();

        Ok(iter)
    }

    /// Iterates on the table with the given prefix of the pk in `pk_prefix` and the range bounds.
    /// Returns a stream of `DataChunk` with the provided `chunk_size`
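    ///
    /// A hedged usage sketch, not compiled as a doc-test; `table` and `epoch` are assumed
    /// from the caller's context, and `1024` is only an illustrative chunk size:
    ///
    /// ```ignore
    /// use futures::TryStreamExt;
    /// let chunks = table
    ///     .batch_chunk_iter_with_pk_bounds(
    ///         HummockReadEpoch::Committed(epoch),
    ///         row::empty(),
    ///         ..,
    ///         false,
    ///         1024,
    ///         PrefetchOptions::default(),
    ///     )
    ///     .await?;
    /// futures::pin_mut!(chunks);
    /// while let Some(chunk) = chunks.try_next().await? {
    ///     // each item is a `DataChunk` with at most 1024 rows
    /// }
    /// ```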
    pub async fn batch_chunk_iter_with_pk_bounds(
        &self,
        epoch: HummockReadEpoch,
        pk_prefix: impl Row,
        range_bounds: impl RangeBounds<OwnedRow>,
        ordered: bool,
        chunk_size: usize,
        prefetch_options: PrefetchOptions,
    ) -> StorageResult<impl Stream<Item = StorageResult<DataChunk>> + Send> {
        let iter = self
            .chunk_iter_with_pk_bounds(
                epoch,
                pk_prefix,
                range_bounds,
                ordered,
                chunk_size,
                prefetch_options,
            )
            .await?;

        Ok(iter.map(|item| {
            let (columns, row_count) = item?;
            Ok(DataChunk::new(columns, row_count))
        }))
    }
}

/// [`BatchTableInnerIterInner`] iterates on the storage table.
struct BatchTableInnerIterInner<SI: StateStoreIter, SD: ValueRowSerde> {
    /// An iterator that returns raw bytes from storage.
    iter: SI,

    mapping: Arc<ColumnMapping>,

    /// The index of system column `_rw_timestamp` in the output columns.
    epoch_idx: Option<usize>,

    row_deserializer: Arc<SD>,

    /// Used for serializing and deserializing the primary key.
    pk_serializer: Option<Arc<OrderedRowSerde>>,

    output_indices: Vec<usize>,

    /// the key part of `output_indices`.
    key_output_indices: Option<Vec<usize>>,

    /// the value part of `output_indices`.
    value_output_indices: Vec<usize>,

    /// used for deserializing key part of output row from pk.
    output_row_in_key_indices: Vec<usize>,
}

impl<SI: StateStoreIter, SD: ValueRowSerde> BatchTableInnerIterInner<SI, SD> {
    /// Create a new iterator by issuing an iter request to the given store read over the table key range.
    #[allow(clippy::too_many_arguments)]
    async fn new<S>(
        store: &S,
        mapping: Arc<ColumnMapping>,
        epoch_idx: Option<usize>,
        pk_serializer: Option<Arc<OrderedRowSerde>>,
        output_indices: Vec<usize>,
        key_output_indices: Option<Vec<usize>>,
        value_output_indices: Vec<usize>,
        output_row_in_key_indices: Vec<usize>,
        row_deserializer: Arc<SD>,
        table_key_range: TableKeyRange,
        read_options: ReadOptions,
    ) -> StorageResult<Self>
    where
        S: StateStoreRead<Iter = SI>,
    {
        let iter = store.iter(table_key_range, read_options).await?;
        let iter = Self {
            iter,
            mapping,
            epoch_idx,
            row_deserializer,
            pk_serializer,
            output_indices,
            key_output_indices,
            value_output_indices,
            output_row_in_key_indices,
        };
        Ok(iter)
    }

    /// Yield a row with its primary key.
    #[try_stream(ok = (K, OwnedRow), error = StorageError)]
    async fn into_stream<K: CopyFromSlice>(mut self) {
        while let Some((k, v)) = self
            .iter
            .try_next()
            .instrument_await("storage_table_iter_next".verbose())
            .await?
        {
            let (table_key, value, epoch_with_gap) = (k.user_key.table_key, v, k.epoch_with_gap);
            let row = self.row_deserializer.deserialize(value)?;
            let result_row_in_value = self.mapping.project(OwnedRow::new(row));
            let row = match &self.key_output_indices {
                Some(key_output_indices) => {
                    let result_row_in_key = match self.pk_serializer.clone() {
                        Some(pk_serializer) => {
                            let pk = pk_serializer.deserialize(table_key.key_part().as_ref())?;

                            pk.project(&self.output_row_in_key_indices).into_owned_row()
                        }
                        None => OwnedRow::empty(),
                    };

                    let mut result_row_vec = vec![];
                    for idx in &self.output_indices {
                        if let Some(epoch_idx) = self.epoch_idx
                            && *idx == epoch_idx
                        {
                            let epoch = Epoch::from(epoch_with_gap.pure_epoch());
                            result_row_vec
                                .push(risingwave_common::types::Datum::from(epoch.as_scalar()));
                        } else if self.value_output_indices.contains(idx) {
                            let item_position_in_value_indices = &self
                                .value_output_indices
                                .iter()
                                .position(|p| idx == p)
                                .unwrap();
                            result_row_vec.push(
                                result_row_in_value
                                    .datum_at(*item_position_in_value_indices)
                                    .to_owned_datum(),
                            );
                        } else {
                            let item_position_in_pk_indices =
                                key_output_indices.iter().position(|p| idx == p).unwrap();
                            result_row_vec.push(
                                result_row_in_key
                                    .datum_at(item_position_in_pk_indices)
                                    .to_owned_datum(),
                            );
                        }
                    }
                    OwnedRow::new(result_row_vec)
                }
                None => match &self.epoch_idx {
                    Some(epoch_idx) => {
                        let mut result_row_vec = vec![];
                        for idx in &self.output_indices {
                            if idx == epoch_idx {
                                let epoch = Epoch::from(epoch_with_gap.pure_epoch());
                                result_row_vec
                                    .push(risingwave_common::types::Datum::from(epoch.as_scalar()));
                            } else {
                                let item_position_in_value_indices = &self
                                    .value_output_indices
                                    .iter()
                                    .position(|p| idx == p)
                                    .unwrap();
                                result_row_vec.push(
                                    result_row_in_value
                                        .datum_at(*item_position_in_value_indices)
                                        .to_owned_datum(),
                                );
                            }
                        }
                        OwnedRow::new(result_row_vec)
                    }
                    None => result_row_in_value.into_owned_row(),
                },
            };
            yield (K::copy_from_slice(table_key.as_ref()), row);
        }
    }
}

/// [`BatchTableInnerIterLogInner`] iterates on the storage table.
struct BatchTableInnerIterLogInner<S: StateStore, SD: ValueRowSerde> {
    /// An iterator that returns raw bytes from storage.
    iter: S::ChangeLogIter,

    mapping: Arc<ColumnMapping>,

    row_deserializer: Arc<SD>,
}

impl<S: StateStore, SD: ValueRowSerde> BatchTableInnerIterLogInner<S, SD> {
    /// Create a new change-log iterator. It waits for `end_epoch` to be committed before issuing the log read.
    #[allow(clippy::too_many_arguments)]
    async fn new(
        store: &S,
        mapping: Arc<ColumnMapping>,
        row_deserializer: Arc<SD>,
        table_key_range: TableKeyRange,
        read_options: ReadLogOptions,
        start_epoch: u64,
        end_epoch: HummockReadEpoch,
    ) -> StorageResult<Self> {
        store
            .try_wait_epoch(
                end_epoch,
                TryWaitEpochOptions {
                    table_id: read_options.table_id,
                },
            )
            .await?;
        let iter = store
            .iter_log(
                (start_epoch, end_epoch.get_epoch()),
                table_key_range,
                read_options,
            )
            .await?;
        let iter = Self {
            iter,
            mapping,
            row_deserializer,
        };
        Ok(iter)
    }

    /// Yield a row with its primary key.
    fn into_stream<K: CopyFromSlice>(self) -> impl Stream<Item = StorageResult<(K, ChangeLogRow)>> {
        self.iter.into_stream(move |(table_key, value)| {
            value
                .try_map(|value| {
                    let full_row = self.row_deserializer.deserialize(value)?;
                    let row = self
                        .mapping
                        .project(OwnedRow::new(full_row))
                        .into_owned_row();
                    Ok(row)
                })
                .map(|row| (K::copy_from_slice(table_key.as_ref()), row))
        })
    }
}