risingwave_connector/parser/parquet_parser.rs

// Copyright 2025 RisingWave Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use futures_async_stream::try_stream;
use prometheus::core::GenericCounter;
use risingwave_common::array::arrow::arrow_array_iceberg::RecordBatch;
use risingwave_common::array::arrow::{IcebergArrowConvert, is_parquet_schema_match_source_schema};
use risingwave_common::array::{ArrayBuilderImpl, DataChunk, StreamChunk};
use risingwave_common::metrics::LabelGuardedMetric;
use risingwave_common::types::{Datum, ScalarImpl};

use crate::parser::ConnectorResult;
use crate::source::SourceColumnDesc;

/// `ParquetParser` is responsible for converting the incoming `record_batch_stream`
/// into a `StreamChunk`.
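///
/// # Example
///
/// A minimal usage sketch. It is not a doctest: `columns` and `batch_stream`
/// are hypothetical placeholders, and constructing the
/// `ParquetRecordBatchStream` (via `opendal` and the `parquet` crate) is
/// assumed to happen elsewhere.
///
/// ```ignore
/// use futures::StreamExt;
///
/// let parser = ParquetParser::new(columns, "dir/a.parquet".to_owned(), 0)?;
/// let mut chunks = parser.into_stream(batch_stream, None, None);
/// while let Some(chunk) = chunks.next().await {
///     let chunk: StreamChunk = chunk?;
///     // Forward the chunk downstream.
/// }
/// ```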
#[derive(Debug)]
pub struct ParquetParser {
    rw_columns: Vec<SourceColumnDesc>,
    file_name: String,
    offset: usize,
}

impl ParquetParser {
    pub fn new(
        rw_columns: Vec<SourceColumnDesc>,
        file_name: String,
        offset: usize,
    ) -> ConnectorResult<Self> {
        Ok(Self {
            rw_columns,
            file_name,
            offset,
        })
    }

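    /// Converts the incoming `record_batch_stream` into a stream of `StreamChunk`s,
    /// yielding one chunk per `RecordBatch`. When provided, the metrics count the
    /// input rows and the rows filled with null because a source column is missing
    /// from the Parquet file, respectively.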
    #[try_stream(boxed, ok = StreamChunk, error = crate::error::ConnectorError)]
    pub async fn into_stream(
        mut self,
        record_batch_stream: parquet::arrow::async_reader::ParquetRecordBatchStream<
            tokio_util::compat::Compat<opendal::FuturesAsyncReader>,
        >,
        file_source_input_row_count_metrics: Option<
            LabelGuardedMetric<GenericCounter<prometheus::core::AtomicU64>>,
        >,
        parquet_source_skip_row_count_metrics: Option<
            LabelGuardedMetric<GenericCounter<prometheus::core::AtomicU64>>,
        >,
    ) {
        #[for_await]
        for record_batch in record_batch_stream {
            let record_batch: RecordBatch = record_batch?;
            // Convert each record batch into a stream chunk according to the user-defined schema.
            let chunk: StreamChunk = self.convert_record_batch_to_stream_chunk(
                record_batch,
                file_source_input_row_count_metrics.clone(),
                parquet_source_skip_row_count_metrics.clone(),
            )?;

            yield chunk;
        }
    }

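    /// Advances the per-file row offset used to populate the offset hidden column.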
    fn inc_offset(&mut self) {
        self.offset += 1;
    }

    /// Transforms the given `RecordBatch` into a `StreamChunk`.
    ///
    /// For each column in the source schema:
    /// - If the column matches a column in the `RecordBatch` (both the data type and the column name are the same),
    ///   the corresponding records are converted into a column of the `StreamChunk`.
    /// - If the column does not match any column in the `RecordBatch`, null values are inserted.
    /// - Hidden columns are handled separately by filling in the appropriate fields to ensure the data chunk maintains the correct format.
    /// - If a column in the Parquet file does not exist in the source schema, it is skipped.
    ///
    /// # Arguments
    ///
    /// * `record_batch` - The `RecordBatch` to be converted into a `StreamChunk`.
    /// * `file_source_input_row_count_metrics` - Optional counter incremented by the batch's row count.
    /// * `parquet_source_skip_row_count_metrics` - Optional counter incremented by the batch's row count for each source column missing from the Parquet file.
    ///
    /// # Returns
    ///
    /// A `StreamChunk` containing the converted data from the `RecordBatch`.
    ///
    /// The hidden columns that must be included here are `_rw_file` and `_rw_offset`.
    /// Depending on whether the user specifies a primary key (pk), there may be an additional hidden column `_row_id`.
    /// Therefore, the maximum number of hidden columns is three.
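    ///
    /// # Example
    ///
    /// A hypothetical layout (the column names below are illustrative only):
    ///
    /// ```text
    /// source schema : id | name | _rw_file  | _rw_offset | _row_id
    /// parquet file  : id | extra_col
    /// result chunk  : id | NULL | file name | offset     | NULL
    /// ```
    ///
    /// `extra_col` exists only in the Parquet file, so it is skipped.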
    fn convert_record_batch_to_stream_chunk(
        &mut self,
        record_batch: RecordBatch,
        file_source_input_row_count_metrics: Option<
            LabelGuardedMetric<GenericCounter<prometheus::core::AtomicU64>>,
        >,
        parquet_source_skip_row_count_metrics: Option<
            LabelGuardedMetric<GenericCounter<prometheus::core::AtomicU64>>,
        >,
    ) -> Result<StreamChunk, crate::error::ConnectorError> {
        const MAX_HIDDEN_COLUMN_NUMS: usize = 3;
        let column_size = self.rw_columns.len();
        let mut chunk_columns = Vec::with_capacity(self.rw_columns.len() + MAX_HIDDEN_COLUMN_NUMS);

        for source_column in self.rw_columns.clone() {
            match source_column.column_type {
                crate::source::SourceColumnType::Normal => {
                    let rw_data_type: &risingwave_common::types::DataType =
                        &source_column.data_type;
                    let rw_column_name = &source_column.name;
                    if let Some(parquet_column) = record_batch.column_by_name(rw_column_name)
                        && is_parquet_schema_match_source_schema(
                            parquet_column.data_type(),
                            rw_data_type,
                        )
                    {
                        let arrow_field =
                            IcebergArrowConvert.to_arrow_field(rw_column_name, rw_data_type)?;
                        let array_impl = IcebergArrowConvert
                            .array_from_arrow_array(&arrow_field, parquet_column)?;
                        chunk_columns.push(Arc::new(array_impl));
                    } else {
                        // Handle additional columns: for the file source these are the offset and the file name.
                        // Columns defined in the user schema but not present in the Parquet file are filled with null.
                        let column = if let Some(additional_column_type) =
                            &source_column.additional_column.column_type
                        {
                            match additional_column_type {
                                risingwave_pb::plan_common::additional_column::ColumnType::Offset(_) => {
                                    let mut array_builder = ArrayBuilderImpl::with_type(column_size, source_column.data_type.clone());
                                    for _ in 0..record_batch.num_rows() {
                                        let datum: Datum = Some(ScalarImpl::Utf8(self.offset.to_string().into()));
                                        self.inc_offset();
                                        array_builder.append(datum);
                                    }
                                    Arc::new(array_builder.finish())
                                }
                                risingwave_pb::plan_common::additional_column::ColumnType::Filename(_) => {
                                    let mut array_builder = ArrayBuilderImpl::with_type(column_size, source_column.data_type.clone());
                                    let datum: Datum = Some(ScalarImpl::Utf8(self.file_name.clone().into()));
                                    array_builder.append_n(record_batch.num_rows(), datum);
                                    Arc::new(array_builder.finish())
                                }
                                _ => unreachable!(),
                            }
                        } else {
                            // For columns defined in the source schema but not present in the Parquet file, null values are filled in.
                            let mut array_builder =
                                ArrayBuilderImpl::with_type(column_size, rw_data_type.clone());
                            array_builder.append_n_null(record_batch.num_rows());
                            if let Some(metrics) = parquet_source_skip_row_count_metrics.clone() {
                                metrics.inc_by(record_batch.num_rows() as u64);
                            }
                            Arc::new(array_builder.finish())
                        };
                        chunk_columns.push(column);
                    }
                }
                crate::source::SourceColumnType::RowId => {
                    let mut array_builder =
                        ArrayBuilderImpl::with_type(column_size, source_column.data_type.clone());
                    let datum: Datum = None;
                    array_builder.append_n(record_batch.num_rows(), datum);
                    chunk_columns.push(Arc::new(array_builder.finish()));
                }
                // The following column types are only used in CDC sources.
                crate::source::SourceColumnType::Offset | crate::source::SourceColumnType::Meta => {
                    unreachable!()
                }
            }
        }
        if let Some(metrics) = file_source_input_row_count_metrics {
            metrics.inc_by(record_batch.num_rows() as u64);
        }

        let data_chunk = DataChunk::new(chunk_columns, record_batch.num_rows());
        Ok(data_chunk.into())
    }
}