use std::collections::HashMap;
use std::default::Default;
use std::ops::Bound;
use std::vec;

use anyhow::anyhow;
use chrono::{MappedLocalTime, TimeZone};
use fixedbitset::FixedBitSet;
use itertools::Itertools;
use pretty_xmlish::{Pretty, Str, StrAssocArr, XmlNode};
use risingwave_common::catalog::{
    ColumnCatalog, ColumnDesc, ConflictBehavior, CreateType, Engine, Field, FieldDisplay,
    OBJECT_ID_PLACEHOLDER, Schema, StreamJobStatus,
};
use risingwave_common::constants::log_store::v2::{
    KV_LOG_STORE_PREDEFINED_COLUMNS, PK_ORDERING, VNODE_COLUMN_INDEX,
};
use risingwave_common::hash::VnodeCount;
use risingwave_common::license::Feature;
use risingwave_common::types::{DataType, Interval, ScalarImpl, Timestamptz};
use risingwave_common::util::iter_util::ZipEqFast;
use risingwave_common::util::scan_range::{ScanRange, is_full_range};
use risingwave_common::util::sort_util::{ColumnOrder, OrderType};
use risingwave_connector::source::iceberg::IcebergTimeTravelInfo;
use risingwave_expr::aggregate::PbAggKind;
use risingwave_expr::bail;
use risingwave_pb::plan_common::as_of::AsOfType;
use risingwave_pb::plan_common::{PbAsOf, as_of};
use risingwave_sqlparser::ast::AsOf;

use super::generic::{self, GenericPlanRef, PhysicalPlanRef};
use super::pretty_config;
use crate::PlanRef;
use crate::catalog::table_catalog::TableType;
use crate::catalog::{ColumnId, TableCatalog, TableId};
use crate::error::{ErrorCode, Result};
use crate::expr::InputRef;
use crate::optimizer::StreamScanType;
use crate::optimizer::plan_node::generic::Agg;
use crate::optimizer::plan_node::{BatchSimpleAgg, PlanAggCall};
use crate::optimizer::property::{Cardinality, Order, RequiredDist, WatermarkColumns};
use crate::utils::{Condition, IndexSet};

/// A builder for assembling the catalog of an internal (state) table.
#[derive(Default)]
pub struct TableCatalogBuilder {
    columns: Vec<ColumnCatalog>,
    pk: Vec<ColumnOrder>,
    value_indices: Option<Vec<usize>>,
    vnode_col_idx: Option<usize>,
    /// Maps a column name to the next numeric suffix used to de-duplicate it.
    column_names: HashMap<String, i32>,
    watermark_columns: Option<FixedBitSet>,
    dist_key_in_pk: Option<Vec<usize>>,
}

impl TableCatalogBuilder {
    /// Add a column built from the given `Field` and return its index in the table.
    pub fn add_column(&mut self, field: &Field) -> usize {
        let column_idx = self.columns.len();
        let column_id = column_idx as i32;
        let mut column_desc = ColumnDesc::from_field_with_column_id(field, column_id);

        // Replace dots in the column name so it stays a single identifier.
        column_desc.name = column_desc.name.replace('.', "_");
        // Rename the column if the name is already taken.
        self.avoid_duplicate_col_name(&mut column_desc);

        self.columns.push(ColumnCatalog {
            column_desc,
            is_hidden: false,
        });
        column_idx
    }
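
    // Illustrative usage of `add_column` (hypothetical fields, not from the original source):
    // each call returns the index of the newly appended column, and dots in field names are
    // rewritten to underscores.
    //
    //     let mut builder = TableCatalogBuilder::default();
    //     let idx = builder.add_column(&Field::with_name(DataType::Int64, "agg.result"));
    //     assert_eq!(idx, 0);
    //     assert_eq!(builder.columns()[0].name(), "agg_result");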

    /// Extend the builder with the given columns, resetting their column ids to stay
    /// consecutive. The column names must not collide with ones already added.
    /// Returns the indices of the newly added columns.
    pub fn extend_columns(&mut self, columns: &[ColumnCatalog]) -> Vec<usize> {
        let base_idx = self.columns.len();
        columns.iter().enumerate().for_each(|(i, col)| {
            assert!(!self.column_names.contains_key(col.name()));
            self.column_names.insert(col.name().to_owned(), 0);

            // Reset the column id so ids remain consecutive within this table.
            let mut new_col = col.clone();
            new_col.column_desc.column_id = ColumnId::new((base_idx + i) as _);
            self.columns.push(new_col);
        });
        Vec::from_iter(base_idx..(base_idx + columns.len()))
    }

    /// Register a column as part of the storage primary key with the given order type.
    pub fn add_order_column(&mut self, column_index: usize, order_type: OrderType) {
        self.pk.push(ColumnOrder::new(column_index, order_type));
    }

    /// Return the number of order (primary-key) columns added so far.
    pub fn get_current_pk_len(&self) -> usize {
        self.pk.len()
    }

    pub fn set_vnode_col_idx(&mut self, vnode_col_idx: usize) {
        self.vnode_col_idx = Some(vnode_col_idx);
    }

    pub fn set_value_indices(&mut self, value_indices: Vec<usize>) {
        self.value_indices = Some(value_indices);
    }

    pub fn set_dist_key_in_pk(&mut self, dist_key_in_pk: Vec<usize>) {
        self.dist_key_in_pk = Some(dist_key_in_pk);
    }

    /// Check whether the column name is already used; if so, append an increasing numeric
    /// suffix until the name is unique, and record the next suffix for later collisions.
    fn avoid_duplicate_col_name(&mut self, column_desc: &mut ColumnDesc) {
        if let Some(old_identity) = self.column_names.get(&column_desc.name) {
            let column_name = column_desc.name.clone();
            let mut identity = *old_identity;
            loop {
                column_desc.name = format!("{}_{}", column_name, identity);
                identity += 1;
                if !self.column_names.contains_key(&column_desc.name) {
                    break;
                }
            }
            *self.column_names.get_mut(&column_name).unwrap() = identity;
        }
        self.column_names.insert(column_desc.name.clone(), 0);
    }
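
    // Sketch of the de-duplication above (hypothetical input, not from the original source):
    // repeatedly adding fields named "col" yields "col", "col_0", "col_1", ..., since every
    // collision appends the recorded suffix and then advances it.
    //
    //     let mut builder = TableCatalogBuilder::default();
    //     builder.add_column(&Field::with_name(DataType::Int32, "col")); // -> "col"
    //     builder.add_column(&Field::with_name(DataType::Int32, "col")); // -> "col_0"
    //     builder.add_column(&Field::with_name(DataType::Int32, "col")); // -> "col_1"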

    /// Consume the builder and build a `TableCatalog` for an internal table, with the given
    /// distribution key and a read-prefix-length hint that must not exceed the pk length.
    pub fn build(self, distribution_key: Vec<usize>, read_prefix_len_hint: usize) -> TableCatalog {
        assert!(read_prefix_len_hint <= self.pk.len());
        let watermark_columns = match self.watermark_columns {
            Some(w) => w,
            None => FixedBitSet::with_capacity(self.columns.len()),
        };

        if let Some(dist_key_in_pk) = &self.dist_key_in_pk {
            let derived_dist_key = dist_key_in_pk
                .iter()
                .map(|idx| self.pk[*idx].column_index)
                .collect_vec();
            assert_eq!(
                derived_dist_key, distribution_key,
                "dist_key mismatch with dist_key_in_pk"
            );
        }

        TableCatalog {
            id: TableId::placeholder(),
            schema_id: 0,
            database_id: 0,
            associated_source_id: None,
            name: String::new(),
            columns: self.columns.clone(),
            pk: self.pk,
            stream_key: vec![],
            distribution_key,
            table_type: TableType::Internal,
            append_only: false,
            owner: risingwave_common::catalog::DEFAULT_SUPER_USER_ID,
            fragment_id: OBJECT_ID_PLACEHOLDER,
            dml_fragment_id: None,
            vnode_col_index: self.vnode_col_idx,
            row_id_index: None,
            value_indices: self
                .value_indices
                .unwrap_or_else(|| (0..self.columns.len()).collect_vec()),
            definition: "".into(),
            conflict_behavior: ConflictBehavior::NoCheck,
            version_column_index: None,
            read_prefix_len_hint,
            version: None,
            watermark_columns,
            dist_key_in_pk: self.dist_key_in_pk.unwrap_or_default(),
            cardinality: Cardinality::unknown(),
            created_at_epoch: None,
            initialized_at_epoch: None,
            cleaned_by_watermark: false,
            create_type: CreateType::Foreground,
            stream_job_status: StreamJobStatus::Creating,
            description: None,
            incoming_sinks: vec![],
            initialized_at_cluster_version: None,
            created_at_cluster_version: None,
            retention_seconds: None,
            cdc_table_id: None,
            vnode_count: VnodeCount::Placeholder,
            webhook_info: None,
            job_id: None,
            engine: Engine::Hummock,
            clean_watermark_index_in_pk: None,
        }
    }

    pub fn columns(&self) -> &[ColumnCatalog] {
        &self.columns
    }
}
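
// Minimal end-to-end sketch of the builder (hypothetical columns, not from the original
// source): add columns, declare the order/primary-key columns, then build with a distribution
// key and a read-prefix-length hint no longer than the pk.
//
//     let mut builder = TableCatalogBuilder::default();
//     let k = builder.add_column(&Field::with_name(DataType::Int64, "k"));
//     let _v = builder.add_column(&Field::with_name(DataType::Varchar, "v"));
//     builder.add_order_column(k, OrderType::ascending());
//     let hint = builder.get_current_pk_len();
//     let catalog: TableCatalog = builder.build(vec![k], hint);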

/// Renders a plan node into a pretty-printable record tree for `EXPLAIN`-style output.
pub trait Distill {
    fn distill<'a>(&self) -> XmlNode<'a>;

    fn distill_to_string(&self) -> String {
        let mut config = pretty_config();
        let mut output = String::with_capacity(2048);
        config.unicode(&mut output, &Pretty::Record(self.distill()));
        output
    }
}

pub(super) fn childless_record<'a>(
    name: impl Into<Str<'a>>,
    fields: StrAssocArr<'a>,
) -> XmlNode<'a> {
    XmlNode::simple_record(name, fields, Default::default())
}

/// Implement `Distill` for a plan node by delegating to its generic core's `DistillUnit`.
macro_rules! impl_distill_by_unit {
    ($ty:ty, $core:ident, $name:expr) => {
        use pretty_xmlish::XmlNode;
        use $crate::optimizer::plan_node::generic::DistillUnit;
        use $crate::optimizer::plan_node::utils::Distill;
        impl Distill for $ty {
            fn distill<'a>(&self) -> XmlNode<'a> {
                self.$core.distill_with_name($name)
            }
        }
    };
}
pub(crate) use impl_distill_by_unit;
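
// Illustrative expansion (hypothetical plan node `StreamFoo` with a generic `core` field, not
// from the original source):
//
//     impl_distill_by_unit!(StreamFoo, core, "StreamFoo");
//
// generates an `impl Distill for StreamFoo` whose `distill` simply returns
// `self.core.distill_with_name("StreamFoo")`.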

pub(crate) fn column_names_pretty<'a>(schema: &Schema) -> Pretty<'a> {
    let columns = (schema.fields.iter())
        .map(|f| f.name.clone())
        .map(Pretty::from)
        .collect();
    Pretty::Array(columns)
}

pub(crate) fn watermark_pretty<'a>(
    watermark_columns: &WatermarkColumns,
    schema: &Schema,
) -> Option<Pretty<'a>> {
    if watermark_columns.is_empty() {
        None
    } else {
        let groups = watermark_columns.grouped();
        let pretty_groups = groups
            .values()
            .map(|cols| {
                Pretty::Array(
                    cols.indices()
                        .map(|idx| FieldDisplay(schema.fields.get(idx).unwrap()))
                        .map(|d| Pretty::display(&d))
                        .collect::<Vec<_>>(),
                )
            })
            .collect();
        Some(Pretty::Array(pretty_groups))
    }
}

#[derive(Clone, Copy)]
pub struct IndicesDisplay<'a> {
    pub indices: &'a [usize],
    pub schema: &'a Schema,
}

impl<'a> IndicesDisplay<'a> {
    /// Pretty-print a join's output indices, or `"all"` if they are exactly the identity.
    pub fn from_join<'b, PlanRef: GenericPlanRef>(
        join: &'a generic::Join<PlanRef>,
        input_schema: &'a Schema,
    ) -> Pretty<'b> {
        let col_num = join.internal_column_num();
        let id = Self::from(&join.output_indices, col_num, input_schema);
        id.map_or_else(|| Pretty::from("all"), Self::distill)
    }

    /// Returns `None` when the indices are exactly `0..col_num`, i.e. all columns are kept.
    fn from(indices: &'a [usize], col_num: usize, schema: &'a Schema) -> Option<Self> {
        if indices.iter().copied().eq(0..col_num) {
            return None;
        }
        Some(Self { indices, schema })
    }

    pub fn distill<'b>(self) -> Pretty<'b> {
        let vec = self.indices.iter().map(|&i| {
            let name = self.schema.fields.get(i).unwrap().name.clone();
            Pretty::from(name)
        });
        Pretty::Array(vec.collect())
    }
}

/// Sum the affected-row counts emitted by a DML plan into a single `Int64` value.
pub(crate) fn sum_affected_row(dml: PlanRef) -> Result<PlanRef> {
    let dml = RequiredDist::single().enforce_if_not_satisfies(dml, &Order::any())?;
    // Sum the `Int64` row-count column (column 0) of the DML output.
    let sum_agg = PlanAggCall {
        agg_type: PbAggKind::Sum.into(),
        return_type: DataType::Int64,
        inputs: vec![InputRef::new(0, DataType::Int64)],
        distinct: false,
        order_by: vec![],
        filter: Condition::true_cond(),
        direct_args: vec![],
    };
    let agg = Agg::new(vec![sum_agg], IndexSet::empty(), dml);
    let batch_agg = BatchSimpleAgg::new(agg);
    Ok(batch_agg.into())
}
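
// Shape of the plan produced above, roughly: the DML plan is first forced to a single
// distribution (inserting an exchange if needed) and then wrapped in a simple, ungrouped
// batch aggregation, i.e. `BatchSimpleAgg { sum($0) } <- Exchange(single) <- <dml>`.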

/// Build a plan node's display name from a base name plus optional bracketed properties,
/// each of which is included only when its condition evaluates to true.
macro_rules! plan_node_name {
    ($name:literal $(, { $prop:literal, $cond:expr } )* $(,)?) => {
        {
            #[allow(unused_mut)]
            let mut properties: Vec<&str> = vec![];
            $( if $cond { properties.push($prop); } )*
            let mut name = $name.to_string();
            if !properties.is_empty() {
                name += " [";
                name += &properties.join(", ");
                name += "]";
            }
            name
        }
    };
}
pub(crate) use plan_node_name;
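
// Illustrative expansion (not from the original source): properties are rendered in brackets
// only when their condition holds.
//
//     let name = plan_node_name!("StreamHashJoin",
//         { "append_only", true },
//         { "window", false },
//     );
//     assert_eq!(name, "StreamHashJoin [append_only]");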

/// Infer the catalog of the internal table backing a kv log store, given the input plan and
/// the sink's column catalogs.
pub fn infer_kv_log_store_table_catalog_inner(
    input: &PlanRef,
    columns: &[ColumnCatalog],
) -> TableCatalog {
    let mut table_catalog_builder = TableCatalogBuilder::default();

    let mut value_indices =
        Vec::with_capacity(KV_LOG_STORE_PREDEFINED_COLUMNS.len() + input.schema().fields().len());

    for (name, data_type) in KV_LOG_STORE_PREDEFINED_COLUMNS {
        let idx = table_catalog_builder.add_column(&Field::with_name(data_type, name));
        value_indices.push(idx);
    }

    table_catalog_builder.set_vnode_col_idx(VNODE_COLUMN_INDEX);

    for (i, ordering) in PK_ORDERING.iter().enumerate() {
        table_catalog_builder.add_order_column(i, *ordering);
    }

    let read_prefix_len_hint = table_catalog_builder.get_current_pk_len();

    if columns.len() != input.schema().fields().len()
        || columns
            .iter()
            .zip_eq_fast(input.schema().fields())
            .any(|(c, f)| *c.data_type() != f.data_type())
    {
        tracing::warn!(
            "sink schema differs from upstream schema: sink columns: {:?}, input schema: {:?}.",
            columns,
            input.schema()
        );
    }
    for field in input.schema().fields() {
        let idx = table_catalog_builder.add_column(field);
        value_indices.push(idx);
    }
    table_catalog_builder.set_value_indices(value_indices);

    // Shift the distribution key indices past the predefined log store columns.
    let dist_key = input
        .distribution()
        .dist_column_indices()
        .iter()
        .map(|idx| idx + KV_LOG_STORE_PREDEFINED_COLUMNS.len())
        .collect_vec();

    table_catalog_builder.build(dist_key, read_prefix_len_hint)
}
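
// Resulting layout, as a sketch: the predefined kv log store columns come first (the vnode
// column among them, at `VNODE_COLUMN_INDEX`), followed by one payload column per field of
// the input schema; the pk is `PK_ORDERING` over the leading columns, and the input's
// distribution key indices are shifted past the predefined columns.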

/// Same as [`infer_kv_log_store_table_catalog_inner`], but for the synced kv log store, where
/// the payload columns are given as `Field`s instead of column catalogs.
pub fn infer_synced_kv_log_store_table_catalog_inner(
    input: &PlanRef,
    columns: &[Field],
) -> TableCatalog {
    let mut table_catalog_builder = TableCatalogBuilder::default();

    let mut value_indices =
        Vec::with_capacity(KV_LOG_STORE_PREDEFINED_COLUMNS.len() + columns.len());

    for (name, data_type) in KV_LOG_STORE_PREDEFINED_COLUMNS {
        let idx = table_catalog_builder.add_column(&Field::with_name(data_type, name));
        value_indices.push(idx);
    }

    table_catalog_builder.set_vnode_col_idx(VNODE_COLUMN_INDEX);

    for (i, ordering) in PK_ORDERING.iter().enumerate() {
        table_catalog_builder.add_order_column(i, *ordering);
    }

    let read_prefix_len_hint = table_catalog_builder.get_current_pk_len();

    let payload_indices = {
        let mut payload_indices = Vec::with_capacity(columns.len());
        for column in columns {
            let payload_index = table_catalog_builder.add_column(column);
            payload_indices.push(payload_index);
        }
        payload_indices
    };

    value_indices.extend(payload_indices);
    table_catalog_builder.set_value_indices(value_indices);

    // Shift the distribution key indices past the predefined log store columns.
    let dist_key = input
        .distribution()
        .dist_column_indices()
        .iter()
        .map(|idx| idx + KV_LOG_STORE_PREDEFINED_COLUMNS.len())
        .collect_vec();

    table_catalog_builder.build(dist_key, read_prefix_len_hint)
}

/// Recursively check whether a stream plan can be created as a background DDL job: every leaf
/// node must be a stream source, a source scan, `Now`, or a table scan using one of the
/// backfill scan types.
pub(crate) fn plan_can_use_background_ddl(plan: &PlanRef) -> bool {
    if plan.inputs().is_empty() {
        if plan.as_stream_source_scan().is_some()
            || plan.as_stream_now().is_some()
            || plan.as_stream_source().is_some()
        {
            true
        } else if let Some(scan) = plan.as_stream_table_scan() {
            scan.stream_scan_type() == StreamScanType::Backfill
                || scan.stream_scan_type() == StreamScanType::ArrangementBackfill
                || scan.stream_scan_type() == StreamScanType::CrossDbSnapshotBackfill
                || scan.stream_scan_type() == StreamScanType::SnapshotBackfill
        } else {
            false
        }
    } else {
        assert!(!plan.inputs().is_empty());
        plan.inputs().iter().all(plan_can_use_background_ddl)
    }
}

/// Convert a parsed `AS OF` clause into a protobuf [`PbAsOf`] for time-travel queries,
/// rejecting forms that cannot be mapped to a timestamp.
pub fn to_pb_time_travel_as_of(a: &Option<AsOf>) -> Result<Option<PbAsOf>> {
    let Some(a) = a else {
        return Ok(None);
    };
    Feature::TimeTravel
        .check_available()
        .map_err(|e| anyhow::anyhow!(e))?;
    let as_of_type = match a {
        AsOf::ProcessTime => {
            return Err(ErrorCode::NotSupported(
                "do not support as of proctime".to_owned(),
                "please use as of timestamp".to_owned(),
            )
            .into());
        }
        AsOf::TimestampNum(ts) => AsOfType::Timestamp(as_of::Timestamp { timestamp: *ts }),
        AsOf::TimestampString(ts) => {
            let date_time = speedate::DateTime::parse_str_rfc3339(ts)
                .map_err(|_e| anyhow!("fail to parse timestamp"))?;
            let timestamp = if date_time.time.tz_offset.is_none() {
                // No explicit offset in the literal: interpret it in the session time zone.
                risingwave_expr::expr_context::TIME_ZONE::try_with(|set_time_zone| {
                    let tz =
                        Timestamptz::lookup_time_zone(set_time_zone).map_err(|e| anyhow!(e))?;
                    match tz.with_ymd_and_hms(
                        date_time.date.year.into(),
                        date_time.date.month.into(),
                        date_time.date.day.into(),
                        date_time.time.hour.into(),
                        date_time.time.minute.into(),
                        date_time.time.second.into(),
                    ) {
                        MappedLocalTime::Single(d) => Ok(d.timestamp()),
                        MappedLocalTime::Ambiguous(_, _) | MappedLocalTime::None => {
                            Err(anyhow!(format!(
                                "failed to parse the timestamp {ts} with the specified time zone {tz}"
                            )))
                        }
                    }
                })??
            } else {
                date_time.timestamp_tz()
            };
            AsOfType::Timestamp(as_of::Timestamp { timestamp })
        }
        AsOf::VersionNum(_) | AsOf::VersionString(_) => {
            return Err(ErrorCode::NotSupported(
                "do not support as of version".to_owned(),
                "please use as of timestamp".to_owned(),
            )
            .into());
        }
        AsOf::ProcessTimeWithInterval((value, leading_field)) => {
            // Resolve a relative `AS OF` interval against the current wall clock.
            let interval = Interval::parse_with_fields(
                value,
                Some(crate::Binder::bind_date_time_field(leading_field.clone())),
            )
            .map_err(|_| anyhow!("fail to parse interval"))?;
            let interval_sec = (interval.epoch_in_micros() / 1_000_000) as i64;
            let timestamp = chrono::Utc::now()
                .timestamp()
                .checked_sub(interval_sec)
                .ok_or_else(|| anyhow!("invalid timestamp"))?;
            AsOfType::Timestamp(as_of::Timestamp { timestamp })
        }
    };
    Ok(Some(PbAsOf {
        as_of_type: Some(as_of_type),
    }))
}
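
// Illustrative inputs for the conversion above (hypothetical SQL, not from the original
// source), assuming the `FOR SYSTEM_TIME AS OF` syntax:
//
//     FOR SYSTEM_TIME AS OF 1700000000             -- AsOf::TimestampNum, used verbatim
//     FOR SYSTEM_TIME AS OF '2023-11-14T22:13:20Z' -- AsOf::TimestampString, parsed as RFC 3339
//     FOR SYSTEM_TIME AS OF PROCTIME()             -- AsOf::ProcessTime, rejected as NotSupported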

pub fn to_iceberg_time_travel_as_of(
    a: &Option<AsOf>,
    timezone: &String,
) -> Result<Option<IcebergTimeTravelInfo>> {
    Ok(match a {
        Some(AsOf::VersionNum(v)) => Some(IcebergTimeTravelInfo::Version(*v)),
        Some(AsOf::TimestampNum(ts)) => Some(IcebergTimeTravelInfo::TimestampMs(ts * 1000)),
        Some(AsOf::VersionString(_)) => {
            bail!("Unsupported version string in iceberg time travel")
        }
        Some(AsOf::TimestampString(ts)) => {
            let date_time = speedate::DateTime::parse_str_rfc3339(ts)
                .map_err(|_e| anyhow!("fail to parse timestamp"))?;
            let timestamp = if date_time.time.tz_offset.is_none() {
                let tz = Timestamptz::lookup_time_zone(timezone).map_err(|e| anyhow!(e))?;
                match tz.with_ymd_and_hms(
                    date_time.date.year.into(),
                    date_time.date.month.into(),
                    date_time.date.day.into(),
                    date_time.time.hour.into(),
                    date_time.time.minute.into(),
                    date_time.time.second.into(),
                ) {
                    MappedLocalTime::Single(d) => Ok(d.timestamp()),
                    MappedLocalTime::Ambiguous(_, _) | MappedLocalTime::None => {
                        Err(anyhow!(format!(
                            "failed to parse the timestamp {ts} with the specified time zone {tz}"
                        )))
                    }
                }?
            } else {
                date_time.timestamp_tz()
            };

            Some(IcebergTimeTravelInfo::TimestampMs(
                timestamp * 1000 + date_time.time.microsecond as i64 / 1000,
            ))
        }
        Some(AsOf::ProcessTime) | Some(AsOf::ProcessTimeWithInterval(_)) => {
            unreachable!()
        }
        None => None,
    })
}

pub fn scan_ranges_as_strs(order_names: Vec<String>, scan_ranges: &Vec<ScanRange>) -> Vec<String> {
    let mut range_strs = vec![];

    let explain_max_range = 20;
    for scan_range in scan_ranges.iter().take(explain_max_range) {
        #[expect(clippy::disallowed_methods)]
        let mut range_str = scan_range
            .eq_conds
            .iter()
            .zip(order_names.iter())
            .map(|(v, name)| match v {
                Some(v) => format!("{} = {:?}", name, v),
                None => format!("{} IS NULL", name),
            })
            .collect_vec();

        let len = scan_range.eq_conds.len();
        if !is_full_range(&scan_range.range) {
            let bound_range_str = match (&scan_range.range.0, &scan_range.range.1) {
                (Bound::Unbounded, Bound::Unbounded) => unreachable!(),
                (Bound::Unbounded, ub) => ub_to_string(&order_names[len..], ub),
                (lb, Bound::Unbounded) => lb_to_string(&order_names[len..], lb),
                (lb, ub) => format!(
                    "{} AND {}",
                    lb_to_string(&order_names[len..], lb),
                    ub_to_string(&order_names[len..], ub)
                ),
            };
            range_str.push(bound_range_str);
        }
        range_strs.push(range_str.join(" AND "));
    }
    if scan_ranges.len() > explain_max_range {
        range_strs.push("...".to_owned());
    }
    range_strs
}
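
// Example rendering (hypothetical scan range, not from the original source): with order
// columns ["v1", "v2"], an equality condition on `v1` and an inclusive lower bound on `v2`
// are rendered as "v1 = Int32(1) AND v2 >= Int32(10)"; at most 20 ranges are printed before
// a trailing "..." entry.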

pub fn ub_to_string(order_names: &[String], ub: &Bound<Vec<Option<ScalarImpl>>>) -> String {
    match ub {
        Bound::Included(v) => {
            let (name, value) = row_to_string(order_names, v);
            format!("{} <= {}", name, value)
        }
        Bound::Excluded(v) => {
            let (name, value) = row_to_string(order_names, v);
            format!("{} < {}", name, value)
        }
        Bound::Unbounded => unreachable!(),
    }
}

pub fn lb_to_string(order_names: &[String], lb: &Bound<Vec<Option<ScalarImpl>>>) -> String {
    match lb {
        Bound::Included(v) => {
            let (name, value) = row_to_string(order_names, v);
            format!("{} >= {}", name, value)
        }
        Bound::Excluded(v) => {
            let (name, value) = row_to_string(order_names, v);
            format!("{} > {}", name, value)
        }
        Bound::Unbounded => unreachable!(),
    }
}

pub fn row_to_string(
    order_names: &[String],
    struct_values: &Vec<Option<ScalarImpl>>,
) -> (String, String) {
    let mut names = vec![];
    let mut values = vec![];
    #[expect(clippy::disallowed_methods)]
    for (name, value) in order_names.iter().zip(struct_values.iter()) {
        names.push(name);
        match value {
            Some(v) => values.push(format!("{:?}", v)),
            None => values.push("null".to_owned()),
        }
    }
    if names.len() == 1 {
        (names[0].clone(), values[0].clone())
    } else {
        (
            format!("({})", names.iter().join(", ")),
            format!("({})", values.iter().join(", ")),
        )
    }
}
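
// Example (hypothetical values, not from the original source): for a multi-column bound the
// names and values are parenthesized tuples, e.g. order names ["a", "b"] with values
// [Some(Int32(1)), None] yield ("(a, b)", "(Int32(1), null)"), which the callers above join
// into strings such as "(a, b) >= (Int32(1), null)".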