risingwave_frontend/handler/create_table.rs

// Copyright 2025 RisingWave Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::{BTreeMap, HashMap, HashSet};
use std::rc::Rc;
use std::sync::Arc;

use anyhow::{Context, anyhow};
use clap::ValueEnum;
use either::Either;
use fixedbitset::FixedBitSet;
use itertools::Itertools;
use percent_encoding::percent_decode_str;
use pgwire::pg_response::{PgResponse, StatementType};
use prost::Message as _;
use risingwave_common::catalog::{
    CdcTableDesc, ColumnCatalog, ColumnDesc, ConflictBehavior, DEFAULT_SCHEMA_NAME, Engine,
    ICEBERG_SINK_PREFIX, ICEBERG_SOURCE_PREFIX, RISINGWAVE_ICEBERG_ROW_ID, ROW_ID_COLUMN_NAME,
    TableId,
};
use risingwave_common::config::MetaBackend;
use risingwave_common::global_jvm::Jvm;
use risingwave_common::session_config::sink_decouple::SinkDecouple;
use risingwave_common::util::sort_util::{ColumnOrder, OrderType};
use risingwave_common::util::value_encoding::DatumToProtoExt;
use risingwave_common::{bail, bail_not_implemented};
use risingwave_connector::sink::decouple_checkpoint_log_sink::COMMIT_CHECKPOINT_INTERVAL;
use risingwave_connector::source::cdc::build_cdc_table_id;
use risingwave_connector::source::cdc::external::{
    DATABASE_NAME_KEY, ExternalCdcTableType, ExternalTableConfig, ExternalTableImpl,
    SCHEMA_NAME_KEY, TABLE_NAME_KEY,
};
use risingwave_connector::{WithOptionsSecResolved, WithPropertiesExt, source};
use risingwave_pb::catalog::connection::Info as ConnectionInfo;
use risingwave_pb::catalog::connection_params::ConnectionType;
use risingwave_pb::catalog::{PbSource, PbWebhookSourceInfo, WatermarkDesc};
use risingwave_pb::ddl_service::{PbTableJobType, TableJobType};
use risingwave_pb::plan_common::column_desc::GeneratedOrDefaultColumn;
use risingwave_pb::plan_common::{
    AdditionalColumn, ColumnDescVersion, DefaultColumnDesc, GeneratedColumnDesc,
};
use risingwave_pb::secret::PbSecretRef;
use risingwave_pb::secret::secret_ref::PbRefAsType;
use risingwave_pb::stream_plan::StreamFragmentGraph;
use risingwave_sqlparser::ast::{
    CdcTableInfo, ColumnDef, ColumnOption, CompatibleFormatEncode, ConnectionRefValue, CreateSink,
    CreateSinkStatement, CreateSourceStatement, DataType as AstDataType, ExplainOptions, Format,
    FormatEncodeOptions, Ident, ObjectName, OnConflict, SecretRefAsType, SourceWatermark,
    Statement, TableConstraint, WebhookSourceInfo, WithProperties,
};
use risingwave_sqlparser::parser::{IncludeOption, Parser};
use thiserror_ext::AsReport;

use super::RwPgResponse;
use super::create_source::{CreateSourceType, SqlColumnStrategy, bind_columns_from_source};
use crate::binder::{Clause, SecureCompareContext, bind_data_type};
use crate::catalog::root_catalog::SchemaPath;
use crate::catalog::source_catalog::SourceCatalog;
use crate::catalog::table_catalog::TableVersion;
use crate::catalog::{ColumnId, DatabaseId, SchemaId, SourceId, check_column_name_not_reserved};
use crate::error::{ErrorCode, Result, RwError, bail_bind_error};
use crate::expr::{Expr, ExprImpl, ExprRewriter};
use crate::handler::HandlerArgs;
use crate::handler::create_source::{
    UPSTREAM_SOURCE_KEY, bind_connector_props, bind_create_source_or_table_with_connector,
    bind_source_watermark, handle_addition_columns,
};
use crate::handler::util::{
    LongRunningNotificationAction, SourceSchemaCompatExt, execute_with_long_running_notification,
};
use crate::optimizer::plan_node::generic::{SourceNodeKind, build_cdc_scan_options_with_options};
use crate::optimizer::plan_node::{
    LogicalCdcScan, LogicalPlanRef, LogicalSource, StreamPlanRef as PlanRef,
};
use crate::optimizer::property::{Order, RequiredDist};
use crate::optimizer::{OptimizerContext, OptimizerContextRef, PlanRoot};
use crate::session::SessionImpl;
use crate::session::current::notice_to_user;
use crate::stream_fragmenter::{GraphJobType, build_graph};
use crate::utils::OverwriteOptions;
use crate::{Binder, Explain, TableCatalog, WithOptions};

mod col_id_gen;
pub use col_id_gen::*;
use risingwave_connector::sink::SinkParam;
use risingwave_connector::sink::iceberg::{
    COMPACTION_DELETE_FILES_COUNT_THRESHOLD, COMPACTION_INTERVAL_SEC, COMPACTION_MAX_SNAPSHOTS_NUM,
    COMPACTION_SMALL_FILES_THRESHOLD_MB, COMPACTION_TARGET_FILE_SIZE_MB,
    COMPACTION_TRIGGER_SNAPSHOT_COUNT, COMPACTION_TYPE, CompactionType, ENABLE_COMPACTION,
    ENABLE_SNAPSHOT_EXPIRATION, ICEBERG_WRITE_MODE_COPY_ON_WRITE, ICEBERG_WRITE_MODE_MERGE_ON_READ,
    IcebergSink, IcebergWriteMode, SNAPSHOT_EXPIRATION_CLEAR_EXPIRED_FILES,
    SNAPSHOT_EXPIRATION_CLEAR_EXPIRED_META_DATA, SNAPSHOT_EXPIRATION_MAX_AGE_MILLIS,
    SNAPSHOT_EXPIRATION_RETAIN_LAST, WRITE_MODE, parse_partition_by_exprs,
};
use risingwave_pb::ddl_service::create_iceberg_table_request::{PbSinkJobInfo, PbTableJobInfo};

use crate::handler::create_sink::{SinkPlanContext, gen_sink_plan};

fn ensure_column_options_supported(c: &ColumnDef) -> Result<()> {
    for option_def in &c.options {
        match option_def.option {
            ColumnOption::GeneratedColumns(_) => {}
            ColumnOption::DefaultValue(_) => {}
            ColumnOption::DefaultValueInternal { .. } => {}
            ColumnOption::Unique { is_primary: true } => {}
            ColumnOption::Null => {}
            ColumnOption::NotNull => {}
            _ => bail_not_implemented!("column constraints \"{}\"", option_def),
        }
    }
    Ok(())
}
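
// e.g., a plain `UNIQUE` column constraint (`Unique { is_primary: false }`) falls through
// to the catch-all arm above and is rejected as not implemented.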

/// Binds the column schemas declared in the CREATE statement into `ColumnCatalog`s.
/// Primary key markers in column options are not extracted here; they are collected
/// separately (see [`bind_sql_pk_names`]) and not yet combined with table-level
/// constraints.
pub fn bind_sql_columns(
    column_defs: &[ColumnDef],
    is_for_drop_table_connector: bool,
) -> Result<Vec<ColumnCatalog>> {
    let mut columns = Vec::with_capacity(column_defs.len());

    for column in column_defs {
        ensure_column_options_supported(column)?;
        // Destruct to make sure all fields are properly handled rather than ignored.
        // Do NOT use `..` to ignore fields you do not want to deal with.
        // Reject them with a clear NotImplemented error.
        let ColumnDef {
            name,
            data_type,
            collation,
            options,
            ..
        } = column;

        let data_type = data_type
            .clone()
            .ok_or_else(|| ErrorCode::InvalidInputSyntax("data type is not specified".into()))?;
        if let Some(collation) = collation {
            // PostgreSQL limits the data types that a collation can work on.
            // https://www.postgresql.org/docs/16/collation.html#COLLATION-CONCEPTS
            //   > The built-in collatable data types are `text`, `varchar`, and `char`.
            //
            // We don't support real collation; we simply ignore it here.
            if !["C", "POSIX"].contains(&collation.real_value().as_str()) {
                bail_not_implemented!(
                    "Collate collation other than `C` or `POSIX` is not implemented"
                );
            }

            match data_type {
                AstDataType::Text | AstDataType::Varchar | AstDataType::Char(_) => {}
                _ => {
                    return Err(ErrorCode::NotSupported(
                        format!("{} is not a collatable data type", data_type),
                        "The built-in collatable data types are `text`, `varchar`, and `char`; please check your type".into(),
                    ).into());
                }
            }
        }

        if !is_for_drop_table_connector {
            // An additional column name may have the prefix `_rw`.
            // When dropping the connector from a table, the additional columns are converted
            // to normal columns and keep their original names. In that case, we loosen the
            // check for the `_rw` prefix.
            check_column_name_not_reserved(&name.real_value())?;
        }

        let nullable: bool = !options
            .iter()
            .any(|def| matches!(def.option, ColumnOption::NotNull));

        columns.push(ColumnCatalog {
            column_desc: ColumnDesc {
                data_type: bind_data_type(&data_type)?,
                column_id: ColumnId::placeholder(),
                name: name.real_value(),
                generated_or_default_column: None,
                description: None,
                additional_column: AdditionalColumn { column_type: None },
                version: ColumnDescVersion::LATEST,
                system_column: None,
                nullable,
            },
            is_hidden: false,
        });
    }

    Ok(columns)
}
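
// For illustration, a sketch of column definitions that pass this binder
// (hypothetical table and column names):
//
//     CREATE TABLE t (
//         id BIGINT PRIMARY KEY,     -- the pk marker itself is collected by `bind_sql_pk_names`
//         name VARCHAR COLLATE "C",  -- only `C`/`POSIX` collations are accepted above
//         v INT NOT NULL             -- recorded as `nullable: false` in the `ColumnDesc`
//     );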

fn check_generated_column_constraints(
    column_name: &String,
    column_id: ColumnId,
    expr: &ExprImpl,
    column_catalogs: &[ColumnCatalog],
    generated_column_names: &[String],
    pk_column_ids: &[ColumnId],
) -> Result<()> {
    let input_refs = expr.collect_input_refs(column_catalogs.len());
    for idx in input_refs.ones() {
        let referred_generated_column = &column_catalogs[idx].column_desc.name;
        if generated_column_names
            .iter()
            .any(|c| c == referred_generated_column)
        {
            return Err(ErrorCode::BindError(format!(
                "Generated columns cannot reference another generated column. \
                But here generated column \"{}\" referenced another generated column \"{}\"",
                column_name, referred_generated_column
            ))
            .into());
        }
    }

    if pk_column_ids.contains(&column_id) && expr.is_impure() {
        return Err(ErrorCode::BindError(format!(
            "Generated columns with impure expressions should not be part of the primary key. \
            Here column \"{}\" is defined as part of the primary key.",
            column_name
        ))
        .into());
    }

    Ok(())
}
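
// e.g. (illustrative definition): `CREATE TABLE t (v INT, a INT AS v + 1, b INT AS a + 1)`
// is rejected by the check above, since generated column `b` references generated column `a`.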

/// Binds constraints that can only be specified in column definitions,
/// currently generated columns and default columns.
pub fn bind_sql_column_constraints(
    session: &SessionImpl,
    table_name: String,
    column_catalogs: &mut [ColumnCatalog],
    columns: &[ColumnDef],
    pk_column_ids: &[ColumnId],
) -> Result<()> {
    let generated_column_names = {
        let mut names = vec![];
        for column in columns {
            for option_def in &column.options {
                if let ColumnOption::GeneratedColumns(_) = option_def.option {
                    names.push(column.name.real_value());
                    break;
                }
            }
        }
        names
    };

    let mut binder = Binder::new_for_ddl(session);
    binder.bind_columns_to_context(table_name, column_catalogs)?;

    for column in columns {
        let Some(idx) = column_catalogs
            .iter()
            .position(|c| c.name() == column.name.real_value())
        else {
            // It's possible that we don't follow the user-defined columns in SQL but take the
            // ones resolved from the source, thus missing some columns. Simply ignore them.
            continue;
        };

        for option_def in &column.options {
            match &option_def.option {
                ColumnOption::GeneratedColumns(expr) => {
                    binder.set_clause(Some(Clause::GeneratedColumn));

                    let expr_impl = binder.bind_expr(expr).with_context(|| {
                        format!(
                            "fail to bind expression in generated column \"{}\"",
                            column.name.real_value()
                        )
                    })?;

                    check_generated_column_constraints(
                        &column.name.real_value(),
                        column_catalogs[idx].column_id(),
                        &expr_impl,
                        column_catalogs,
                        &generated_column_names,
                        pk_column_ids,
                    )?;

                    column_catalogs[idx].column_desc.generated_or_default_column = Some(
                        GeneratedOrDefaultColumn::GeneratedColumn(GeneratedColumnDesc {
                            expr: Some(expr_impl.to_expr_proto()),
                        }),
                    );
                    binder.set_clause(None);
                }
                ColumnOption::DefaultValue(expr) => {
                    let expr_impl = binder
                        .bind_expr(expr)?
                        .cast_assign(column_catalogs[idx].data_type())?;

                    // Rewrite expressions to evaluate a snapshot value, used for missing values in the case of
                    // schema change.
                    //
                    // TODO: Currently we don't support impure expressions other than `now()` (like `random()`),
                    // so the rewritten expression should almost always be pure and we directly call `fold_const`
                    // here. Actually we do not require purity of the expression here since we're only to get a
                    // snapshot value.
                    let rewritten_expr_impl = session
                        .pinned_snapshot()
                        .inline_now_proc_time()
                        .rewrite_expr(expr_impl.clone());

                    if let Some(snapshot_value) = rewritten_expr_impl.try_fold_const() {
                        let snapshot_value = snapshot_value?;

                        column_catalogs[idx].column_desc.generated_or_default_column =
                            Some(GeneratedOrDefaultColumn::DefaultColumn(DefaultColumnDesc {
                                snapshot_value: Some(snapshot_value.to_protobuf()),
                                expr: Some(expr_impl.to_expr_proto()),
                                // persist the original expression
                            }));
                    } else {
                        return Err(ErrorCode::BindError(format!(
                            "Default expression used in column `{}` cannot be evaluated. \
                            Use generated columns instead if you mean to reference other columns.",
                            column.name
                        ))
                        .into());
                    }
                }
                ColumnOption::DefaultValueInternal { persisted, expr: _ } => {
                    // When a `DEFAULT INTERNAL` is used internally for schema change, the persisted value
                    // should already be set during purification. So if we encounter an empty value here, it
                    // means the user has specified it explicitly in the SQL statement, typically by
                    // directly copying the result of `SHOW CREATE TABLE` and executing it.
                    if persisted.is_empty() {
                        bail_bind_error!(
                            "DEFAULT INTERNAL is only used for internal purposes, \
                             please specify a concrete default value"
                        );
                    }

                    let desc = DefaultColumnDesc::decode(&**persisted)
                        .expect("failed to decode persisted `DefaultColumnDesc`");

                    column_catalogs[idx].column_desc.generated_or_default_column =
                        Some(GeneratedOrDefaultColumn::DefaultColumn(desc));
                }
                _ => {}
            }
        }
    }
    Ok(())
}
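
// For example (a sketch): with a column declared as `ts TIMESTAMPTZ DEFAULT now()`,
// the original `now()` expression is persisted in `expr`, while the value evaluated
// against the pinned snapshot at DDL time is folded into `snapshot_value`, to be used
// for rows that miss the column after a schema change.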

/// Currently we only support the PRIMARY KEY table constraint, so just return the pk names if they exist.
pub fn bind_table_constraints(table_constraints: &[TableConstraint]) -> Result<Vec<String>> {
    let mut pk_column_names = vec![];

    for constraint in table_constraints {
        match constraint {
            TableConstraint::Unique {
                name: _,
                columns,
                is_primary: true,
            } => {
                if !pk_column_names.is_empty() {
                    return Err(multiple_pk_definition_err());
                }
                pk_column_names = columns.iter().map(|c| c.real_value()).collect_vec();
            }
            _ => bail_not_implemented!("table constraint \"{}\"", constraint),
        }
    }
    Ok(pk_column_names)
}

pub fn bind_sql_pk_names(
    columns_defs: &[ColumnDef],
    pk_names_from_table_constraints: Vec<String>,
) -> Result<Vec<String>> {
    let mut pk_column_names = pk_names_from_table_constraints;

    for column in columns_defs {
        for option_def in &column.options {
            if let ColumnOption::Unique { is_primary: true } = option_def.option {
                if !pk_column_names.is_empty() {
                    return Err(multiple_pk_definition_err());
                }
                pk_column_names.push(column.name.real_value());
            };
        }
    }

    Ok(pk_column_names)
}
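
// e.g. (illustrative): `CREATE TABLE t (a INT PRIMARY KEY, PRIMARY KEY (a))` is rejected
// with "multiple primary keys are not allowed", even though both definitions name the
// same column.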

fn multiple_pk_definition_err() -> RwError {
    ErrorCode::BindError("multiple primary keys are not allowed".into()).into()
}

/// Binds primary keys defined in SQL.
///
/// It returns the columns together with `pk_column_ids`, and an optional row id column index if
/// added.
pub fn bind_pk_and_row_id_on_relation(
    mut columns: Vec<ColumnCatalog>,
    pk_names: Vec<String>,
    must_need_pk: bool,
) -> Result<(Vec<ColumnCatalog>, Vec<ColumnId>, Option<usize>)> {
    for c in &columns {
        assert!(c.column_id() != ColumnId::placeholder());
    }

    // Mapping from column name to column id.
    let name_to_id = columns
        .iter()
        .map(|c| (c.name(), c.column_id()))
        .collect::<HashMap<_, _>>();

    let mut pk_column_ids: Vec<_> = pk_names
        .iter()
        .map(|name| {
            name_to_id.get(name.as_str()).copied().ok_or_else(|| {
                ErrorCode::BindError(format!("column \"{name}\" named in key does not exist"))
            })
        })
        .try_collect()?;

    // Add the `_row_id` column if `pk_column_ids` is empty and `must_need_pk` is set.
    let need_row_id = pk_column_ids.is_empty() && must_need_pk;

    let row_id_index = need_row_id.then(|| {
        let column = ColumnCatalog::row_id_column();
        let index = columns.len();
        pk_column_ids = vec![column.column_id()];
        columns.push(column);
        index
    });

    if let Some(col) = columns.iter().map(|c| c.name()).duplicates().next() {
        Err(ErrorCode::InvalidInputSyntax(format!(
            "column \"{col}\" specified more than once"
        )))?;
    }

    Ok((columns, pk_column_ids, row_id_index))
}
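
// e.g. (illustrative): for `CREATE TABLE t (a INT, b INT)` with no pk declared, a hidden
// `_row_id` column is appended at index 2 and becomes the sole primary key, so the function
// returns the three columns, `pk_column_ids` holding only `_row_id`, and `row_id_index = Some(2)`.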

/// `gen_create_table_plan_with_source` generates the plan for creating a table with an external
/// stream source.
#[allow(clippy::too_many_arguments)]
pub(crate) async fn gen_create_table_plan_with_source(
    mut handler_args: HandlerArgs,
    explain_options: ExplainOptions,
    table_name: ObjectName,
    column_defs: Vec<ColumnDef>,
    wildcard_idx: Option<usize>,
    constraints: Vec<TableConstraint>,
    format_encode: FormatEncodeOptions,
    source_watermarks: Vec<SourceWatermark>,
    mut col_id_gen: ColumnIdGenerator,
    include_column_options: IncludeOption,
    props: CreateTableProps,
    sql_column_strategy: SqlColumnStrategy,
) -> Result<(PlanRef, Option<SourceCatalog>, TableCatalog)> {
    if props.append_only
        && format_encode.format != Format::Plain
        && format_encode.format != Format::Native
    {
        return Err(ErrorCode::BindError(format!(
            "Append only table does not support format {}.",
            format_encode.format
        ))
        .into());
    }

    let session = &handler_args.session;
    let (with_properties, refresh_mode) =
        bind_connector_props(&handler_args, &format_encode, false)?;
    if with_properties.is_shareable_cdc_connector() {
        generated_columns_check_for_cdc_table(&column_defs)?;
        not_null_check_for_cdc_table(&wildcard_idx, &column_defs)?;
    } else if column_defs.iter().any(|col| {
        col.options
            .iter()
            .any(|def| matches!(def.option, ColumnOption::NotNull))
    }) {
        // if non-cdc source
        notice_to_user(
            "The table contains columns with NOT NULL constraints. Any rows from upstream violating the constraints will be ignored silently.",
        );
    }

    let db_name: &str = &session.database();
    let (schema_name, _) = Binder::resolve_schema_qualified_name(db_name, &table_name)?;

    // TODO: omit this step if `sql_column_strategy` is `Follow`.
    let (columns_from_resolve_source, source_info) = bind_columns_from_source(
        session,
        &format_encode,
        Either::Left(&with_properties),
        CreateSourceType::Table,
    )
    .await?;

    let overwrite_options = OverwriteOptions::new(&mut handler_args);
    let rate_limit = overwrite_options.source_rate_limit;
    let source = bind_create_source_or_table_with_connector(
        handler_args.clone(),
        table_name,
        format_encode,
        with_properties,
        &column_defs,
        constraints,
        wildcard_idx,
        source_watermarks,
        columns_from_resolve_source,
        source_info,
        include_column_options,
        &mut col_id_gen,
        CreateSourceType::Table,
        rate_limit,
        sql_column_strategy,
        refresh_mode,
    )
    .await?;

    let context = OptimizerContext::new(handler_args, explain_options);

    let (plan, table) = gen_table_plan_with_source(
        context.into(),
        schema_name,
        source.clone(),
        col_id_gen.into_version(),
        props,
    )?;

    Ok((plan, Some(source), table))
}

/// `gen_create_table_plan` generates the plan for creating a table without an external stream
/// source.
#[allow(clippy::too_many_arguments)]
pub(crate) fn gen_create_table_plan(
    context: OptimizerContext,
    table_name: ObjectName,
    column_defs: Vec<ColumnDef>,
    constraints: Vec<TableConstraint>,
    mut col_id_gen: ColumnIdGenerator,
    source_watermarks: Vec<SourceWatermark>,
    props: CreateTableProps,
    is_for_replace_plan: bool,
) -> Result<(PlanRef, TableCatalog)> {
    let mut columns = bind_sql_columns(&column_defs, is_for_replace_plan)?;
    for c in &mut columns {
        col_id_gen.generate(c)?;
    }

    let (_, secret_refs, connection_refs) = context.with_options().clone().into_parts();
    if !secret_refs.is_empty() || !connection_refs.is_empty() {
        return Err(crate::error::ErrorCode::InvalidParameterValue(
            "Secret reference and Connection reference are not allowed in options when creating table without external source".to_owned(),
        )
        .into());
    }

    gen_create_table_plan_without_source(
        context,
        table_name,
        columns,
        column_defs,
        constraints,
        source_watermarks,
        col_id_gen.into_version(),
        props,
    )
}

#[allow(clippy::too_many_arguments)]
pub(crate) fn gen_create_table_plan_without_source(
    context: OptimizerContext,
    table_name: ObjectName,
    columns: Vec<ColumnCatalog>,
    column_defs: Vec<ColumnDef>,
    constraints: Vec<TableConstraint>,
    source_watermarks: Vec<SourceWatermark>,
    version: TableVersion,
    props: CreateTableProps,
) -> Result<(PlanRef, TableCatalog)> {
    // XXX: Why not bind outside?
    let pk_names = bind_sql_pk_names(&column_defs, bind_table_constraints(&constraints)?)?;
    let (mut columns, pk_column_ids, row_id_index) =
        bind_pk_and_row_id_on_relation(columns, pk_names, true)?;

    let watermark_descs = bind_source_watermark(
        context.session_ctx(),
        table_name.real_value(),
        source_watermarks,
        &columns,
    )?;

    bind_sql_column_constraints(
        context.session_ctx(),
        table_name.real_value(),
        &mut columns,
        &column_defs,
        &pk_column_ids,
    )?;
    let session = context.session_ctx().clone();

    let db_name = &session.database();
    let (schema_name, table_name) = Binder::resolve_schema_qualified_name(db_name, &table_name)?;

    let info = CreateTableInfo {
        columns,
        pk_column_ids,
        row_id_index,
        watermark_descs,
        source_catalog: None,
        version,
    };

    gen_table_plan_inner(context.into(), schema_name, table_name, info, props)
}

fn gen_table_plan_with_source(
    context: OptimizerContextRef,
    schema_name: Option<String>,
    source_catalog: SourceCatalog,
    version: TableVersion,
    props: CreateTableProps,
) -> Result<(PlanRef, TableCatalog)> {
    let table_name = source_catalog.name.clone();

    let info = CreateTableInfo {
        columns: source_catalog.columns.clone(),
        pk_column_ids: source_catalog.pk_col_ids.clone(),
        row_id_index: source_catalog.row_id_index,
        watermark_descs: source_catalog.watermark_descs.clone(),
        source_catalog: Some(source_catalog),
        version,
    };

    gen_table_plan_inner(context, schema_name, table_name, info, props)
}

/// On-conflict behavior either from user input or an existing table catalog.
#[derive(Clone, Copy)]
pub enum EitherOnConflict {
    Ast(Option<OnConflict>),
    Resolved(ConflictBehavior),
}

impl From<Option<OnConflict>> for EitherOnConflict {
    fn from(v: Option<OnConflict>) -> Self {
        Self::Ast(v)
    }
}

impl From<ConflictBehavior> for EitherOnConflict {
    fn from(v: ConflictBehavior) -> Self {
        Self::Resolved(v)
    }
}

impl EitherOnConflict {
    /// Resolves the conflict behavior based on the given information.
    pub fn to_behavior(self, append_only: bool, row_id_as_pk: bool) -> Result<ConflictBehavior> {
        let conflict_behavior = match self {
            EitherOnConflict::Ast(on_conflict) => {
                if append_only {
                    if row_id_as_pk {
                        // Primary key will be generated, no conflict check needed.
                        ConflictBehavior::NoCheck
                    } else {
                        // User-defined PK on an append-only table, enforce `DO NOTHING`.
                        if let Some(on_conflict) = on_conflict
                            && on_conflict != OnConflict::Nothing
                        {
                            return Err(ErrorCode::InvalidInputSyntax(
                                "When PRIMARY KEY constraint applied to an APPEND ONLY table, \
                                     the ON CONFLICT behavior must be DO NOTHING."
                                    .to_owned(),
                            )
                            .into());
                        }
                        ConflictBehavior::IgnoreConflict
                    }
                } else {
                    // Default to `UPDATE FULL` for non-append-only tables.
                    match on_conflict.unwrap_or(OnConflict::UpdateFull) {
                        OnConflict::UpdateFull => ConflictBehavior::Overwrite,
                        OnConflict::Nothing => ConflictBehavior::IgnoreConflict,
                        OnConflict::UpdateIfNotNull => ConflictBehavior::DoUpdateIfNotNull,
                    }
                }
            }
            EitherOnConflict::Resolved(b) => b,
        };

        Ok(conflict_behavior)
    }
}
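
// Resolution at a glance (summarizing `to_behavior` above):
//   Ast(_),                 append-only, row id as pk => NoCheck
//   Ast(None | Nothing),    append-only, user pk      => IgnoreConflict
//   Ast(other),             append-only, user pk      => error (must be DO NOTHING)
//   Ast(None | UpdateFull), regular table             => Overwrite (the default)
//   Ast(Nothing),           regular table             => IgnoreConflict
//   Ast(UpdateIfNotNull),   regular table             => DoUpdateIfNotNull
//   Resolved(b)                                       => b (from an existing catalog)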

/// Arguments of the functions that generate a table plan, part 1.
///
/// Compared to [`CreateTableProps`], this struct contains fields that need some work of binding
/// or resolving based on the user input.
pub struct CreateTableInfo {
    pub columns: Vec<ColumnCatalog>,
    pub pk_column_ids: Vec<ColumnId>,
    pub row_id_index: Option<usize>,
    pub watermark_descs: Vec<WatermarkDesc>,
    pub source_catalog: Option<SourceCatalog>,
    pub version: TableVersion,
}

/// Arguments of the functions that generate a table plan, part 2.
///
/// Compared to [`CreateTableInfo`], this struct contains fields that can be (relatively) simply
/// obtained from the input or the context.
pub struct CreateTableProps {
    pub definition: String,
    pub append_only: bool,
    pub on_conflict: EitherOnConflict,
    pub with_version_columns: Vec<String>,
    pub webhook_info: Option<PbWebhookSourceInfo>,
    pub engine: Engine,
}

#[allow(clippy::too_many_arguments)]
fn gen_table_plan_inner(
    context: OptimizerContextRef,
    schema_name: Option<String>,
    table_name: String,
    info: CreateTableInfo,
    props: CreateTableProps,
) -> Result<(PlanRef, TableCatalog)> {
    let CreateTableInfo {
        ref columns,
        row_id_index,
        ref watermark_descs,
        ref source_catalog,
        ..
    } = info;
    let CreateTableProps { append_only, .. } = props;

    let (database_id, schema_id) = context
        .session_ctx()
        .get_database_and_schema_id_for_create(schema_name)?;

    let session = context.session_ctx().clone();
    let retention_seconds = context.with_options().retention_seconds();

    let source_node: LogicalPlanRef = LogicalSource::new(
        source_catalog.clone().map(Rc::new),
        columns.clone(),
        row_id_index,
        SourceNodeKind::CreateTable,
        context.clone(),
        None,
    )?
    .into();

    let required_cols = FixedBitSet::with_capacity(columns.len());
    let plan_root = PlanRoot::new_with_logical_plan(
        source_node,
        RequiredDist::Any,
        Order::any(),
        required_cols,
        vec![],
    );

    let has_non_ttl_watermark = watermark_descs.iter().any(|d| !d.with_ttl);

    if !append_only && has_non_ttl_watermark {
        return Err(ErrorCode::NotSupported(
            "Defining watermarks on a table requires the table to be append only.".to_owned(),
            "Use the key words `APPEND ONLY`".to_owned(),
        )
        .into());
    }

    if !append_only && retention_seconds.is_some() {
        if session
            .config()
            .unsafe_enable_storage_retention_for_non_append_only_tables()
        {
            tracing::warn!(
                "Storage retention is enabled for non-append-only table {}. This may lead to stream inconsistency.",
                table_name
            );
            const NOTICE: &str = "Storage retention is enabled for non-append-only table. \
                                  This may lead to stream inconsistency and unrecoverable \
                                  node failure if there is any row INSERT/UPDATE/DELETE operation \
                                  corresponding to the TTLed primary keys";
            session.notice_to_user(NOTICE);
        } else {
            return Err(ErrorCode::NotSupported(
                "Defining retention seconds on a table requires the table to be append only."
                    .to_owned(),
                "Use the key words `APPEND ONLY`".to_owned(),
            )
            .into());
        }
    }

    let materialize =
        plan_root.gen_table_plan(context, table_name, database_id, schema_id, info, props)?;

    let mut table = materialize.table().clone();
    table.owner = session.user_id();

    Ok((materialize.into(), table))
}

/// Generates the stream plan for a cdc table based on a shared source.
///
/// In the replace workflow, `table_id` is the id of the table to be replaced;
/// in the create table workflow, `table_id` is a placeholder that will be filled in
/// by the meta service.
#[allow(clippy::too_many_arguments)]
pub(crate) fn gen_create_table_plan_for_cdc_table(
    context: OptimizerContextRef,
    source: Arc<SourceCatalog>,
    external_table_name: String,
    column_defs: Vec<ColumnDef>,
    mut columns: Vec<ColumnCatalog>,
    pk_names: Vec<String>,
    cdc_with_options: WithOptionsSecResolved,
    mut col_id_gen: ColumnIdGenerator,
    on_conflict: Option<OnConflict>,
    with_version_columns: Vec<String>,
    include_column_options: IncludeOption,
    table_name: ObjectName,
    resolved_table_name: String, // table name without schema prefix
    database_id: DatabaseId,
    schema_id: SchemaId,
    table_id: TableId,
    engine: Engine,
) -> Result<(PlanRef, TableCatalog)> {
    let session = context.session_ctx().clone();

    // append additional columns to the end
    handle_addition_columns(
        None,
        &cdc_with_options,
        include_column_options,
        &mut columns,
        true,
    )?;

    for c in &mut columns {
        col_id_gen.generate(c)?;
    }

    let (mut columns, pk_column_ids, _row_id_index) =
        bind_pk_and_row_id_on_relation(columns, pk_names, true)?;

    // NOTE: In auto schema change, the default value is not provided in the column definition.
    bind_sql_column_constraints(
        context.session_ctx(),
        table_name.real_value(),
        &mut columns,
        &column_defs,
        &pk_column_ids,
    )?;

    let definition = context.normalized_sql().to_owned();

    let pk_column_indices = {
        let mut id_to_idx = HashMap::new();
        columns.iter().enumerate().for_each(|(idx, c)| {
            id_to_idx.insert(c.column_id(), idx);
        });
        // pk column id must exist in table columns.
        pk_column_ids
            .iter()
            .map(|c| id_to_idx.get(c).copied().unwrap())
            .collect_vec()
    };
    let table_pk = pk_column_indices
        .iter()
        .map(|idx| ColumnOrder::new(*idx, OrderType::ascending()))
        .collect();

    let (options, secret_refs) = cdc_with_options.into_parts();

    let non_generated_column_descs = columns
        .iter()
        .filter(|&c| !c.is_generated())
        .map(|c| c.column_desc.clone())
        .collect_vec();
    let non_generated_column_num = non_generated_column_descs.len();
    let cdc_table_type = ExternalCdcTableType::from_properties(&options);
    let cdc_table_desc = CdcTableDesc {
        table_id,
        source_id: source.id, // id of the cdc source streaming job
        external_table_name: external_table_name.clone(),
        pk: table_pk,
        columns: non_generated_column_descs,
        stream_key: pk_column_indices,
        connect_properties: options,
        secret_refs,
    };

    tracing::debug!(?cdc_table_desc, "create cdc table");
    let options = build_cdc_scan_options_with_options(context.with_options(), &cdc_table_type)?;

    let logical_scan = LogicalCdcScan::create(
        external_table_name.clone(),
        Rc::new(cdc_table_desc),
        context.clone(),
        options,
    );

    let scan_node: LogicalPlanRef = logical_scan.into();
    let required_cols = FixedBitSet::with_capacity(non_generated_column_num);
    let plan_root = PlanRoot::new_with_logical_plan(
        scan_node,
        RequiredDist::Any,
        Order::any(),
        required_cols,
        vec![],
    );

    let cdc_table_id = build_cdc_table_id(source.id, &external_table_name);
    let materialize = plan_root.gen_table_plan(
        context,
        resolved_table_name,
        database_id,
        schema_id,
        CreateTableInfo {
            columns,
            pk_column_ids,
            row_id_index: None,
            watermark_descs: vec![],
            source_catalog: Some((*source).clone()),
            version: col_id_gen.into_version(),
        },
        CreateTableProps {
            definition,
            append_only: false,
            on_conflict: on_conflict.into(),
            with_version_columns,
            webhook_info: None,
            engine,
        },
    )?;

    let mut table = materialize.table().clone();
    table.owner = session.user_id();
    table.cdc_table_id = Some(cdc_table_id);
    table.cdc_table_type = Some(cdc_table_type);
    Ok((materialize.into(), table))
}

fn derive_with_options_for_cdc_table(
    source_with_properties: &WithOptionsSecResolved,
    external_table_name: String,
) -> Result<WithOptionsSecResolved> {
    use source::cdc::{MYSQL_CDC_CONNECTOR, POSTGRES_CDC_CONNECTOR, SQL_SERVER_CDC_CONNECTOR};
    // we should remove the prefix from `full_table_name`
    let source_database_name: &str = source_with_properties
        .get("database.name")
        .ok_or_else(|| anyhow!("The source with properties does not contain 'database.name'"))?
        .as_str();
    let mut with_options = source_with_properties.clone();
    if let Some(connector) = source_with_properties.get(UPSTREAM_SOURCE_KEY) {
        match connector.as_str() {
            MYSQL_CDC_CONNECTOR => {
                // MySQL doesn't allow '.' in database name and table name, so we can split the
                // external table name by '.' to get the table name
                let (db_name, table_name) = external_table_name.split_once('.').ok_or_else(|| {
                    anyhow!("The upstream table name must contain database name prefix, e.g. 'database.table'")
                })?;
                // We allow multiple database names in the source definition
                if !source_database_name
                    .split(',')
                    .map(|s| s.trim())
                    .any(|name| name == db_name)
                {
                    return Err(anyhow!(
                        "The database name `{}` in the FROM clause is not included in the database name `{}` in source definition",
                        db_name,
                        source_database_name
                    ).into());
                }
                with_options.insert(DATABASE_NAME_KEY.into(), db_name.into());
                with_options.insert(TABLE_NAME_KEY.into(), table_name.into());
            }
            POSTGRES_CDC_CONNECTOR => {
                let (schema_name, table_name) = external_table_name
                    .split_once('.')
                    .ok_or_else(|| anyhow!("The upstream table name must contain schema name prefix, e.g. 'public.table'"))?;

                // insert 'schema.name' into connect properties
                with_options.insert(SCHEMA_NAME_KEY.into(), schema_name.into());
                with_options.insert(TABLE_NAME_KEY.into(), table_name.into());
            }
            SQL_SERVER_CDC_CONNECTOR => {
                // SQL Server external table name can be in different formats:
                // 1. 'databaseName.schemaName.tableName' (full format)
                // 2. 'schemaName.tableName' (schema and table only)
                // 3. 'tableName' (table only, will use default schema 'dbo')
                // We will auto-fill missing parts from source configuration
                let parts: Vec<&str> = external_table_name.split('.').collect();
                let (_, schema_name, table_name) = match parts.len() {
                    3 => {
                        // Full format: database.schema.table
                        let db_name = parts[0];
                        let schema_name = parts[1];
                        let table_name = parts[2];

                        // Verify database name matches source configuration
                        if db_name != source_database_name {
                            return Err(anyhow!(
                                "The database name `{}` in the FROM clause is not the same as the database name `{}` in source definition",
                                db_name,
                                source_database_name
                            ).into());
                        }
                        (db_name, schema_name, table_name)
                    }
                    2 => {
                        // Schema and table only: schema.table
                        let schema_name = parts[0];
                        let table_name = parts[1];
                        (source_database_name, schema_name, table_name)
                    }
                    1 => {
                        // Table only: table (use default schema 'dbo')
                        let table_name = parts[0];
                        (source_database_name, "dbo", table_name)
                    }
                    _ => {
                        return Err(anyhow!(
                            "The upstream table name must be in one of these formats: 'database.schema.table', 'schema.table', or 'table'"
                        ).into());
                    }
                };

                // insert 'schema.name' into connect properties
                with_options.insert(SCHEMA_NAME_KEY.into(), schema_name.into());
                with_options.insert(TABLE_NAME_KEY.into(), table_name.into());
            }
            _ => {
                return Err(RwError::from(anyhow!(
                    "connector {} is not supported for cdc table",
                    connector
                )));
            }
        };
    }
    Ok(with_options)
}
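
// Illustrative external table names accepted above (hypothetical values):
//   MySQL:      'mydb.orders'                                 (database.table)
//   Postgres:   'public.orders'                               (schema.table)
//   SQL Server: 'mydb.dbo.orders', 'dbo.orders', or 'orders'  (schema defaults to 'dbo')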

#[allow(clippy::too_many_arguments)]
pub(super) async fn handle_create_table_plan(
    handler_args: HandlerArgs,
    explain_options: ExplainOptions,
    format_encode: Option<FormatEncodeOptions>,
    cdc_table_info: Option<CdcTableInfo>,
    table_name: &ObjectName,
    column_defs: Vec<ColumnDef>,
    wildcard_idx: Option<usize>,
    constraints: Vec<TableConstraint>,
    source_watermarks: Vec<SourceWatermark>,
    append_only: bool,
    on_conflict: Option<OnConflict>,
    with_version_columns: Vec<String>,
    include_column_options: IncludeOption,
    webhook_info: Option<WebhookSourceInfo>,
    engine: Engine,
) -> Result<(
    PlanRef,
    Option<SourceCatalog>,
    TableCatalog,
    TableJobType,
    Option<SourceId>,
)> {
    let col_id_gen = ColumnIdGenerator::new_initial();
    let format_encode = check_create_table_with_source(
        &handler_args.with_options,
        format_encode,
        &include_column_options,
        &cdc_table_info,
    )?;
    let webhook_info = webhook_info
        .map(|info| bind_webhook_info(&handler_args.session, &column_defs, info))
        .transpose()?;

    let props = CreateTableProps {
        definition: handler_args.normalized_sql.clone(),
        append_only,
        on_conflict: on_conflict.into(),
        with_version_columns: with_version_columns.clone(),
        webhook_info,
        engine,
    };

    let ((plan, source, table), job_type, shared_source_id) = match (
        format_encode,
        cdc_table_info.as_ref(),
    ) {
        (Some(format_encode), None) => (
            gen_create_table_plan_with_source(
                handler_args,
                explain_options,
                table_name.clone(),
                column_defs,
                wildcard_idx,
                constraints,
                format_encode,
                source_watermarks,
                col_id_gen,
                include_column_options,
                props,
                SqlColumnStrategy::FollowChecked,
            )
            .await?,
            TableJobType::General,
            None,
        ),
        (None, None) => {
            let context = OptimizerContext::new(handler_args, explain_options);
            let (plan, table) = gen_create_table_plan(
                context,
                table_name.clone(),
                column_defs,
                constraints,
                col_id_gen,
                source_watermarks,
                props,
                false,
            )?;

            ((plan, None, table), TableJobType::General, None)
        }

        (None, Some(cdc_table)) => {
            sanity_check_for_table_on_cdc_source(
                append_only,
                &column_defs,
                &wildcard_idx,
                &constraints,
                &source_watermarks,
            )?;

            generated_columns_check_for_cdc_table(&column_defs)?;
            not_null_check_for_cdc_table(&wildcard_idx, &column_defs)?;

            let session = &handler_args.session;
            let db_name = &session.database();
            let user_name = &session.user_name();
            let search_path = session.config().search_path();
            let (schema_name, resolved_table_name) =
                Binder::resolve_schema_qualified_name(db_name, table_name)?;
            let (database_id, schema_id) =
                session.get_database_and_schema_id_for_create(schema_name.clone())?;

            // cdc table cannot be append-only
            let (source_schema, source_name) =
                Binder::resolve_schema_qualified_name(db_name, &cdc_table.source_name)?;

            let source = {
                let catalog_reader = session.env().catalog_reader().read_guard();
                let schema_path =
                    SchemaPath::new(source_schema.as_deref(), &search_path, user_name);

                let (source, _) = catalog_reader.get_source_by_name(
                    db_name,
                    schema_path,
                    source_name.as_str(),
                )?;
                source.clone()
            };
            let cdc_with_options: WithOptionsSecResolved = derive_with_options_for_cdc_table(
                &source.with_properties,
                cdc_table.external_table_name.clone(),
            )?;

            let (columns, pk_names) = match wildcard_idx {
                Some(_) => bind_cdc_table_schema_externally(cdc_with_options.clone()).await?,
                None => {
                    for column_def in &column_defs {
                        for option_def in &column_def.options {
                            if let ColumnOption::DefaultValue(_)
                            | ColumnOption::DefaultValueInternal { .. } = option_def.option
                            {
                                return Err(ErrorCode::NotSupported(
                                    "Default value for columns defined on the table created from a CDC source".into(),
                                    "Remove the default value expression in the column definitions".into(),
                                )
                                .into());
                            }
                        }
                    }

                    let (columns, pk_names) =
                        bind_cdc_table_schema(&column_defs, &constraints, false)?;
                    // read the default value definition from the external db
                    let (options, secret_refs) = cdc_with_options.clone().into_parts();
                    let _config = ExternalTableConfig::try_from_btreemap(options, secret_refs)
                        .context("failed to extract external table config")?;

                    (columns, pk_names)
                }
            };

            let context: OptimizerContextRef =
                OptimizerContext::new(handler_args, explain_options).into();
            let shared_source_id = source.id;
            let (plan, table) = gen_create_table_plan_for_cdc_table(
                context,
                source,
                cdc_table.external_table_name.clone(),
                column_defs,
                columns,
                pk_names,
                cdc_with_options,
                col_id_gen,
                on_conflict,
                with_version_columns,
                include_column_options,
                table_name.clone(),
                resolved_table_name,
                database_id,
                schema_id,
                TableId::placeholder(),
                engine,
            )?;

            (
                (plan, None, table),
                TableJobType::SharedCdcSource,
                Some(shared_source_id),
            )
        }
        (Some(_), Some(_)) => {
            return Err(ErrorCode::NotSupported(
                "Data format and encoding format doesn't apply to table created from a CDC source"
                    .into(),
                "Remove the FORMAT and ENCODE specification".into(),
            )
            .into());
        }
    };
    Ok((plan, source, table, job_type, shared_source_id))
}

// For both tables from a cdc source and tables with a cdc connector.
fn generated_columns_check_for_cdc_table(columns: &[ColumnDef]) -> Result<()> {
    let mut found_generated_column = false;
    for column in columns {
        let mut is_generated = false;

        for option_def in &column.options {
            if let ColumnOption::GeneratedColumns(_) = option_def.option {
                is_generated = true;
                break;
            }
        }

        if is_generated {
            found_generated_column = true;
        } else if found_generated_column {
            return Err(ErrorCode::NotSupported(
                "Non-generated column found after a generated column.".into(),
                "Ensure that all generated columns appear at the end of the cdc table definition."
                    .into(),
            )
            .into());
        }
    }
    Ok(())
}
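
// e.g. (illustrative column list): `(a INT, g INT AS a + 1, b INT)` is rejected by the
// check above because non-generated column `b` appears after generated column `g`.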

// For both tables from a cdc source and tables with a cdc connector.
fn not_null_check_for_cdc_table(
    wildcard_idx: &Option<usize>,
    column_defs: &[ColumnDef],
) -> Result<()> {
    if wildcard_idx.is_none()
        && column_defs.iter().any(|col| {
            col.options
                .iter()
                .any(|opt| matches!(opt.option, ColumnOption::NotNull))
        })
    {
        return Err(ErrorCode::NotSupported(
            "CDC table with NOT NULL constraint is not supported".to_owned(),
            "Please remove the NOT NULL constraint for columns".to_owned(),
        )
        .into());
    }
    Ok(())
}

// Only for tables from a cdc source.
fn sanity_check_for_table_on_cdc_source(
    append_only: bool,
    column_defs: &[ColumnDef],
    wildcard_idx: &Option<usize>,
    constraints: &[TableConstraint],
    source_watermarks: &[SourceWatermark],
) -> Result<()> {
    // wildcard cannot be used together with column definitions
    if wildcard_idx.is_some() && !column_defs.is_empty() {
        return Err(ErrorCode::NotSupported(
            "wildcard(*) and column definitions cannot be used together".to_owned(),
            "Remove the wildcard or column definitions".to_owned(),
        )
        .into());
    }

    // a cdc table must have a primary key constraint or a primary key column
    if wildcard_idx.is_none()
        && !constraints.iter().any(|c| {
            matches!(
                c,
                TableConstraint::Unique {
                    is_primary: true,
                    ..
                }
            )
        })
        && !column_defs.iter().any(|col| {
            col.options
                .iter()
                .any(|opt| matches!(opt.option, ColumnOption::Unique { is_primary: true }))
        })
    {
        return Err(ErrorCode::NotSupported(
            "CDC table without primary key constraint is not supported".to_owned(),
            "Please define a primary key".to_owned(),
        )
        .into());
    }

    if append_only {
        return Err(ErrorCode::NotSupported(
            "append only modifier on the table created from a CDC source".into(),
            "Remove the APPEND ONLY clause".into(),
        )
        .into());
    }

    if !source_watermarks.is_empty() {
        return Err(ErrorCode::NotSupported(
            "watermark defined on the table created from a CDC source".into(),
            "Remove the Watermark definitions".into(),
        )
        .into());
    }

    Ok(())
}
1361
1362/// Derive schema for cdc table when create a new Table or alter an existing Table
1363async fn bind_cdc_table_schema_externally(
1364    cdc_with_options: WithOptionsSecResolved,
1365) -> Result<(Vec<ColumnCatalog>, Vec<String>)> {
1366    // read cdc table schema from external db or parsing the schema from SQL definitions
1367    let (options, secret_refs) = cdc_with_options.into_parts();
1368    let config = ExternalTableConfig::try_from_btreemap(options, secret_refs)
1369        .context("failed to extract external table config")?;
1370
1371    let table = ExternalTableImpl::connect(config)
1372        .await
1373        .context("failed to auto derive table schema")?;
1374
1375    Ok((
1376        table
1377            .column_descs()
1378            .iter()
1379            .cloned()
1380            .map(|column_desc| ColumnCatalog {
1381                column_desc,
1382                is_hidden: false,
1383            })
1384            .collect(),
1385        table.pk_names().clone(),
1386    ))
1387}
1388
/// Derive the schema of a CDC table from the SQL column definitions and constraints,
/// used when creating a new table or altering an existing one.
1390fn bind_cdc_table_schema(
1391    column_defs: &Vec<ColumnDef>,
1392    constraints: &Vec<TableConstraint>,
1393    is_for_replace_plan: bool,
1394) -> Result<(Vec<ColumnCatalog>, Vec<String>)> {
1395    let columns = bind_sql_columns(column_defs, is_for_replace_plan)?;
1396
1397    let pk_names = bind_sql_pk_names(column_defs, bind_table_constraints(constraints)?)?;
1398    Ok((columns, pk_names))
1399}
1400
1401#[allow(clippy::too_many_arguments)]
1402pub async fn handle_create_table(
1403    handler_args: HandlerArgs,
1404    table_name: ObjectName,
1405    column_defs: Vec<ColumnDef>,
1406    wildcard_idx: Option<usize>,
1407    constraints: Vec<TableConstraint>,
1408    if_not_exists: bool,
1409    format_encode: Option<FormatEncodeOptions>,
1410    source_watermarks: Vec<SourceWatermark>,
1411    append_only: bool,
1412    on_conflict: Option<OnConflict>,
1413    with_version_columns: Vec<String>,
1414    cdc_table_info: Option<CdcTableInfo>,
1415    include_column_options: IncludeOption,
1416    webhook_info: Option<WebhookSourceInfo>,
1417    ast_engine: risingwave_sqlparser::ast::Engine,
1418) -> Result<RwPgResponse> {
1419    let session = handler_args.session.clone();
1420
1421    if append_only {
1422        session.notice_to_user("APPEND ONLY TABLE is currently an experimental feature.");
1423    }
1424
1425    session.check_cluster_limits().await?;
1426
1427    let engine = match ast_engine {
1428        risingwave_sqlparser::ast::Engine::Hummock => Engine::Hummock,
1429        risingwave_sqlparser::ast::Engine::Iceberg => Engine::Iceberg,
1430    };
1431
1432    if let Either::Right(resp) = session.check_relation_name_duplicated(
1433        table_name.clone(),
1434        StatementType::CREATE_TABLE,
1435        if_not_exists,
1436    )? {
1437        return Ok(resp);
1438    }
1439
1440    let (graph, source, hummock_table, job_type, shared_source_id) = {
1441        let (plan, source, table, job_type, shared_source_id) = handle_create_table_plan(
1442            handler_args.clone(),
1443            ExplainOptions::default(),
1444            format_encode,
1445            cdc_table_info,
1446            &table_name,
1447            column_defs.clone(),
1448            wildcard_idx,
1449            constraints.clone(),
1450            source_watermarks,
1451            append_only,
1452            on_conflict,
1453            with_version_columns,
1454            include_column_options,
1455            webhook_info,
1456            engine,
1457        )
1458        .await?;
1459        tracing::trace!("table_plan: {:?}", plan.explain_to_string());
1460
1461        let graph = build_graph(plan, Some(GraphJobType::Table))?;
1462
1463        (graph, source, table, job_type, shared_source_id)
1464    };
1465
1466    tracing::trace!(
1467        "name={}, graph=\n{}",
1468        table_name,
1469        serde_json::to_string_pretty(&graph).unwrap()
1470    );
1471
1472    let dependencies = shared_source_id
1473        .map(|id| HashSet::from([id.as_object_id()]))
1474        .unwrap_or_default();
1475
1476    // Handle engine
1477    match engine {
1478        Engine::Hummock => {
1479            let catalog_writer = session.catalog_writer()?;
1480            let action = match job_type {
1481                TableJobType::SharedCdcSource => LongRunningNotificationAction::MonitorBackfillJob,
1482                _ => LongRunningNotificationAction::DiagnoseBarrierLatency,
1483            };
1484            execute_with_long_running_notification(
1485                catalog_writer.create_table(
1486                    source.map(|s| s.to_prost()),
1487                    hummock_table.to_prost(),
1488                    graph,
1489                    job_type,
1490                    if_not_exists,
1491                    dependencies,
1492                ),
1493                &session,
1494                "CREATE TABLE",
1495                action,
1496            )
1497            .await?;
1498        }
1499        Engine::Iceberg => {
1500            let hummock_table_name = hummock_table.name.clone();
1501            session.create_staging_table(hummock_table.clone());
1502            let res = create_iceberg_engine_table(
1503                session.clone(),
1504                handler_args,
1505                source.map(|s| s.to_prost()),
1506                hummock_table,
1507                graph,
1508                table_name,
1509                job_type,
1510                if_not_exists,
1511            )
1512            .await;
1513            session.drop_staging_table(&hummock_table_name);
1514            res?
1515        }
1516    }
1517
1518    Ok(PgResponse::empty_result(StatementType::CREATE_TABLE))
1519}
1520
/// An iceberg engine table is composed of a hummock table, an iceberg sink, and an iceberg source:
///
/// 1. Fetch the iceberg engine options from the meta node, or use the iceberg engine connection provided by the user.
/// 2. Create a hummock table.
/// 3. Create an iceberg sink.
/// 4. Create an iceberg source.
1527///
1528/// See <https://github.com/risingwavelabs/risingwave/issues/21586> for an architecture diagram.
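///
/// For a table named `t`, the companion sink and source reuse the table name with the
/// `ICEBERG_SINK_PREFIX` and `ICEBERG_SOURCE_PREFIX` prefixes respectively, so the
/// three objects can be tied back together.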
1529#[allow(clippy::too_many_arguments)]
1530pub async fn create_iceberg_engine_table(
1531    session: Arc<SessionImpl>,
1532    handler_args: HandlerArgs,
1533    mut source: Option<PbSource>,
1534    table: TableCatalog,
1535    graph: StreamFragmentGraph,
1536    table_name: ObjectName,
1537    job_type: PbTableJobType,
1538    if_not_exists: bool,
1539) -> Result<()> {
1540    let meta_client = session.env().meta_client();
1541    let meta_store_endpoint = meta_client.get_meta_store_endpoint().await?;
1542
1543    let meta_store_endpoint = url::Url::parse(&meta_store_endpoint).map_err(|_| {
1544        ErrorCode::InternalError("failed to parse the meta store endpoint".to_owned())
1545    })?;
1546    let meta_store_backend = meta_store_endpoint.scheme().to_owned();
1547    let meta_store_user = meta_store_endpoint.username().to_owned();
1548    let meta_store_password = match meta_store_endpoint.password() {
1549        Some(password) => percent_decode_str(password)
1550            .decode_utf8()
1551            .map_err(|_| {
1552                ErrorCode::InternalError(
1553                    "failed to parse password from meta store endpoint".to_owned(),
1554                )
1555            })?
1556            .into_owned(),
1557        None => "".to_owned(),
1558    };
1559    let meta_store_host = meta_store_endpoint
1560        .host_str()
1561        .ok_or_else(|| {
1562            ErrorCode::InternalError("failed to parse host from meta store endpoint".to_owned())
1563        })?
1564        .to_owned();
1565    let meta_store_port = meta_store_endpoint.port().ok_or_else(|| {
1566        ErrorCode::InternalError("failed to parse port from meta store endpoint".to_owned())
1567    })?;
1568    let meta_store_database = meta_store_endpoint
1569        .path()
1570        .trim_start_matches('/')
1571        .to_owned();
1572
1573    let Ok(meta_backend) = MetaBackend::from_str(&meta_store_backend, true) else {
1574        bail!("failed to parse meta backend: {}", meta_store_backend);
1575    };
1576
1577    let catalog_uri = match meta_backend {
1578        MetaBackend::Postgres => {
1579            format!(
1580                "jdbc:postgresql://{}:{}/{}",
1581                meta_store_host.clone(),
1582                meta_store_port.clone(),
1583                meta_store_database.clone()
1584            )
1585        }
1586        MetaBackend::Mysql => {
1587            format!(
1588                "jdbc:mysql://{}:{}/{}",
1589                meta_store_host.clone(),
1590                meta_store_port.clone(),
1591                meta_store_database.clone()
1592            )
1593        }
1594        MetaBackend::Sqlite | MetaBackend::Sql | MetaBackend::Mem => {
1595            bail!(
1596                "Unsupported meta backend for iceberg engine table: {}",
1597                meta_store_backend
1598            );
1599        }
1600    };
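    // For instance, a (hypothetical) meta store endpoint
    // `postgresql://meta_user:meta_pass@meta-host:5432/metadata` yields the catalog URI
    // `jdbc:postgresql://meta-host:5432/metadata`.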
1601
1602    let rw_db_name = session
1603        .env()
1604        .catalog_reader()
1605        .read_guard()
1606        .get_database_by_id(table.database_id)?
1607        .name()
1608        .to_owned();
1609    let rw_schema_name = session
1610        .env()
1611        .catalog_reader()
1612        .read_guard()
1613        .get_schema_by_id(table.database_id, table.schema_id)?
1614        .name()
1615        .clone();
1616    let iceberg_catalog_name = rw_db_name.clone();
1617    let iceberg_database_name = rw_schema_name.clone();
1618    let iceberg_table_name = table_name.0.last().unwrap().real_value();
1619
1620    let iceberg_engine_connection: String = session.config().iceberg_engine_connection();
1621    let sink_decouple = session.config().sink_decouple();
1622    if matches!(sink_decouple, SinkDecouple::Disable) {
1623        bail!(
            "Iceberg engine tables are only supported with sink decouple enabled; try `SET sink_decouple = true` to resolve it"
1625        );
1626    }
1627
1628    let mut connection_ref = BTreeMap::new();
1629    let with_common = if iceberg_engine_connection.is_empty() {
        bail!("to use an iceberg engine table, the session variable `iceberg_engine_connection` must be set.");
1631    } else {
1632        let parts: Vec<&str> = iceberg_engine_connection.split('.').collect();
1633        assert_eq!(parts.len(), 2);
1634        let connection_catalog =
1635            session.get_connection_by_name(Some(parts[0].to_owned()), parts[1])?;
1636        if let ConnectionInfo::ConnectionParams(params) = &connection_catalog.info {
1637            if params.connection_type == ConnectionType::Iceberg as i32 {
1638                // With iceberg engine connection:
1639                connection_ref.insert(
1640                    "connection".to_owned(),
1641                    ConnectionRefValue {
1642                        connection_name: ObjectName::from(vec![
1643                            Ident::from(parts[0]),
1644                            Ident::from(parts[1]),
1645                        ]),
1646                    },
1647                );
1648
1649                let mut with_common = BTreeMap::new();
1650                with_common.insert("connector".to_owned(), "iceberg".to_owned());
1651                with_common.insert("database.name".to_owned(), iceberg_database_name);
1652                with_common.insert("table.name".to_owned(), iceberg_table_name);
1653
1654                if let Some(s) = params.properties.get("hosted_catalog")
1655                    && s.eq_ignore_ascii_case("true")
1656                {
1657                    with_common.insert("catalog.type".to_owned(), "jdbc".to_owned());
1658                    with_common.insert("catalog.uri".to_owned(), catalog_uri);
1659                    with_common.insert("catalog.jdbc.user".to_owned(), meta_store_user);
1660                    with_common.insert(
1661                        "catalog.jdbc.password".to_owned(),
1662                        meta_store_password.clone(),
1663                    );
1664                    with_common.insert("catalog.name".to_owned(), iceberg_catalog_name);
1665                }
1666
1667                with_common
1668            } else {
1669                return Err(RwError::from(ErrorCode::InvalidParameterValue(
                    "Only an iceberg connection can be used with the iceberg engine".to_owned(),
1671                )));
1672            }
1673        } else {
1674            return Err(RwError::from(ErrorCode::InvalidParameterValue(
1675                "Private Link Service has been deprecated. Please create a new connection instead."
1676                    .to_owned(),
1677            )));
1678        }
1679    };
1680
    // Iceberg sinks require a primary key; if none is provided, we fall back to the
    // hidden `_row_id` column. Fetch the primary key from the table columns first.
1683    let mut pks = table
1684        .pk_column_names()
1685        .iter()
1686        .map(|c| c.to_string())
1687        .collect::<Vec<String>>();
1688
    // For a table without a primary key, use `_row_id` as the primary key.
1690    let sink_from = if pks.len() == 1 && pks[0].eq(ROW_ID_COLUMN_NAME) {
1691        pks = vec![RISINGWAVE_ICEBERG_ROW_ID.to_owned()];
1692        let [stmt]: [_; 1] = Parser::parse_sql(&format!(
1693            "select {} as {}, * from {}",
1694            ROW_ID_COLUMN_NAME, RISINGWAVE_ICEBERG_ROW_ID, table_name
1695        ))
1696        .context("unable to parse query")?
1697        .try_into()
1698        .unwrap();
1699
1700        let Statement::Query(query) = &stmt else {
1701            panic!("unexpected statement: {:?}", stmt);
1702        };
1703        CreateSink::AsQuery(query.clone())
1704    } else {
1705        CreateSink::From(table_name.clone())
1706    };
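    // e.g. for a PK-less table `t`, the sink reads from
    // `SELECT _row_id AS <RISINGWAVE_ICEBERG_ROW_ID>, * FROM t`, materializing the
    // hidden row id as an explicit column in the iceberg table.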
1707
1708    let mut sink_name = table_name.clone();
1709    *sink_name.0.last_mut().unwrap() = Ident::from(
1710        (ICEBERG_SINK_PREFIX.to_owned() + &sink_name.0.last().unwrap().real_value()).as_str(),
1711    );
1712    let create_sink_stmt = CreateSinkStatement {
1713        if_not_exists: false,
1714        sink_name,
1715        with_properties: WithProperties(vec![]),
1716        sink_from,
1717        columns: vec![],
1718        emit_mode: None,
1719        sink_schema: None,
1720        into_table_name: None,
1721    };
1722
1723    let mut sink_handler_args = handler_args.clone();
1724
1725    let mut sink_with = with_common.clone();
1726
1727    if table.append_only {
1728        sink_with.insert("type".to_owned(), "append-only".to_owned());
1729    } else {
1730        sink_with.insert("primary_key".to_owned(), pks.join(","));
1731        sink_with.insert("type".to_owned(), "upsert".to_owned());
1732    }
1733    // sink_with.insert(SINK_SNAPSHOT_OPTION.to_owned(), "false".to_owned());
1734    //
1735    // Note: in theory, we don't need to backfill from the table to the sink,
    // but we don't have atomic DDL yet (https://github.com/risingwavelabs/risingwave/issues/21863),
    // so there may be a potential data loss problem on the first barrier.
1738    //
    // For a non-append-only table, we can always solve it via the initial sink backfill,
    // since the data will be present in the hummock table.
1741    //
    // For an append-only table, we need to be more careful.
1743    //
1744    // The possible cases for a table:
    // - For a table without a connector: it doesn't matter, since there's no data before the table is created.
    // - For a table with a connector: we worked around it by setting SOURCE_RATE_LIMIT to 0.
    //   + If we ever support blocking DDL for tables with connectors, we need to be careful.
1748    // - For table with an upstream job: Specifically, CDC table from shared CDC source.
1749    //   + Data may come from both upstream connector, and CDC table backfill, so we need to pause both of them.
1750    //   + For now we don't support APPEND ONLY CDC table, so it's safe.
1751    let commit_checkpoint_interval = handler_args
1752        .with_options
1753        .get(COMMIT_CHECKPOINT_INTERVAL)
1754        .map(|v| v.to_owned())
1755        .unwrap_or_else(|| "60".to_owned());
1756    let commit_checkpoint_interval = commit_checkpoint_interval.parse::<u32>().map_err(|_| {
1757        ErrorCode::InvalidInputSyntax(format!(
1758            "commit_checkpoint_interval must be greater than 0: {}",
1759            commit_checkpoint_interval
1760        ))
1761    })?;
1762
1763    if commit_checkpoint_interval == 0 {
1764        bail!("commit_checkpoint_interval must be greater than 0");
1765    }
1766
1767    // remove commit_checkpoint_interval from source options, otherwise it will be considered as an unknown field.
1768    source
1769        .as_mut()
1770        .map(|x| x.with_properties.remove(COMMIT_CHECKPOINT_INTERVAL));
1771
1772    let sink_decouple = session.config().sink_decouple();
1773    if matches!(sink_decouple, SinkDecouple::Disable) && commit_checkpoint_interval > 1 {
1774        bail!(
1775            "config conflict: `commit_checkpoint_interval` larger than 1 means that sink decouple must be enabled, but session config sink_decouple is disabled"
1776        )
1777    }
1778
1779    sink_with.insert(
1780        COMMIT_CHECKPOINT_INTERVAL.to_owned(),
1781        commit_checkpoint_interval.to_string(),
1782    );
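    // e.g. with the default interval of 60 and a typical one-second barrier interval,
    // the sink commits a new iceberg snapshot roughly once per minute (illustrative numbers).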
1783    sink_with.insert("create_table_if_not_exists".to_owned(), "true".to_owned());
1784
1785    sink_with.insert("is_exactly_once".to_owned(), "true".to_owned());
1786
1787    if let Some(enable_compaction) = handler_args.with_options.get(ENABLE_COMPACTION) {
1788        match enable_compaction.to_lowercase().as_str() {
1789            "true" => {
1790                sink_with.insert(ENABLE_COMPACTION.to_owned(), "true".to_owned());
1791            }
1792            "false" => {
1793                sink_with.insert(ENABLE_COMPACTION.to_owned(), "false".to_owned());
1794            }
1795            _ => {
1796                return Err(ErrorCode::InvalidInputSyntax(format!(
1797                    "enable_compaction must be true or false: {}",
1798                    enable_compaction
1799                ))
1800                .into());
1801            }
1802        }
1803
1804        // remove enable_compaction from source options, otherwise it will be considered as an unknown field.
1805        source
1806            .as_mut()
1807            .map(|x| x.with_properties.remove("enable_compaction"));
1808    } else {
1809        sink_with.insert(ENABLE_COMPACTION.to_owned(), "true".to_owned());
1810    }
1811
1812    if let Some(compaction_interval_sec) = handler_args.with_options.get(COMPACTION_INTERVAL_SEC) {
1813        let compaction_interval_sec = compaction_interval_sec.parse::<u64>().map_err(|_| {
1814            ErrorCode::InvalidInputSyntax(format!(
                "compaction_interval_sec must be greater than 0: {}",
                compaction_interval_sec
1817            ))
1818        })?;
1819        if compaction_interval_sec == 0 {
1820            bail!("compaction_interval_sec must be greater than 0");
1821        }
1822        sink_with.insert(
1823            "compaction_interval_sec".to_owned(),
1824            compaction_interval_sec.to_string(),
1825        );
1826        // remove compaction_interval_sec from source options, otherwise it will be considered as an unknown field.
1827        source
1828            .as_mut()
1829            .map(|x| x.with_properties.remove("compaction_interval_sec"));
1830    }
1831
1832    let has_enabled_snapshot_expiration = if let Some(enable_snapshot_expiration) =
1833        handler_args.with_options.get(ENABLE_SNAPSHOT_EXPIRATION)
1834    {
1835        // remove enable_snapshot_expiration from source options, otherwise it will be considered as an unknown field.
1836        source
1837            .as_mut()
1838            .map(|x| x.with_properties.remove(ENABLE_SNAPSHOT_EXPIRATION));
1839        match enable_snapshot_expiration.to_lowercase().as_str() {
1840            "true" => {
1841                sink_with.insert(ENABLE_SNAPSHOT_EXPIRATION.to_owned(), "true".to_owned());
1842                true
1843            }
1844            "false" => {
1845                sink_with.insert(ENABLE_SNAPSHOT_EXPIRATION.to_owned(), "false".to_owned());
1846                false
1847            }
1848            _ => {
1849                return Err(ErrorCode::InvalidInputSyntax(format!(
1850                    "enable_snapshot_expiration must be true or false: {}",
1851                    enable_snapshot_expiration
1852                ))
1853                .into());
1854            }
1855        }
1856    } else {
1857        sink_with.insert(ENABLE_SNAPSHOT_EXPIRATION.to_owned(), "true".to_owned());
1858        true
1859    };
1860
1861    if has_enabled_snapshot_expiration {
1862        // configuration for snapshot expiration
1863        if let Some(snapshot_expiration_retain_last) = handler_args
1864            .with_options
1865            .get(SNAPSHOT_EXPIRATION_RETAIN_LAST)
1866        {
1867            sink_with.insert(
1868                SNAPSHOT_EXPIRATION_RETAIN_LAST.to_owned(),
1869                snapshot_expiration_retain_last.to_owned(),
1870            );
1871            // remove snapshot_expiration_retain_last from source options, otherwise it will be considered as an unknown field.
1872            source
1873                .as_mut()
1874                .map(|x| x.with_properties.remove(SNAPSHOT_EXPIRATION_RETAIN_LAST));
1875        }
1876
1877        if let Some(snapshot_expiration_max_age) = handler_args
1878            .with_options
1879            .get(SNAPSHOT_EXPIRATION_MAX_AGE_MILLIS)
1880        {
1881            sink_with.insert(
1882                SNAPSHOT_EXPIRATION_MAX_AGE_MILLIS.to_owned(),
1883                snapshot_expiration_max_age.to_owned(),
1884            );
1885            // remove snapshot_expiration_max_age from source options, otherwise it will be considered as an unknown field.
1886            source
1887                .as_mut()
1888                .map(|x| x.with_properties.remove(SNAPSHOT_EXPIRATION_MAX_AGE_MILLIS));
1889        }
1890
1891        if let Some(snapshot_expiration_clear_expired_files) = handler_args
1892            .with_options
1893            .get(SNAPSHOT_EXPIRATION_CLEAR_EXPIRED_FILES)
1894        {
1895            sink_with.insert(
1896                SNAPSHOT_EXPIRATION_CLEAR_EXPIRED_FILES.to_owned(),
1897                snapshot_expiration_clear_expired_files.to_owned(),
1898            );
1899            // remove snapshot_expiration_clear_expired_files from source options, otherwise it will be considered as an unknown field.
1900            source.as_mut().map(|x| {
1901                x.with_properties
1902                    .remove(SNAPSHOT_EXPIRATION_CLEAR_EXPIRED_FILES)
1903            });
1904        }
1905
1906        if let Some(snapshot_expiration_clear_expired_meta_data) = handler_args
1907            .with_options
1908            .get(SNAPSHOT_EXPIRATION_CLEAR_EXPIRED_META_DATA)
1909        {
1910            sink_with.insert(
1911                SNAPSHOT_EXPIRATION_CLEAR_EXPIRED_META_DATA.to_owned(),
1912                snapshot_expiration_clear_expired_meta_data.to_owned(),
1913            );
1914            // remove snapshot_expiration_clear_expired_meta_data from source options, otherwise it will be considered as an unknown field.
1915            source.as_mut().map(|x| {
1916                x.with_properties
1917                    .remove(SNAPSHOT_EXPIRATION_CLEAR_EXPIRED_META_DATA)
1918            });
1919        }
1920    }
1921
1922    if let Some(write_mode) = handler_args.with_options.get(WRITE_MODE) {
1923        let write_mode = IcebergWriteMode::try_from(write_mode.as_str()).map_err(|_| {
1924            ErrorCode::InvalidInputSyntax(format!(
1925                "invalid write_mode: {}, must be one of: {}, {}",
1926                write_mode, ICEBERG_WRITE_MODE_MERGE_ON_READ, ICEBERG_WRITE_MODE_COPY_ON_WRITE
1927            ))
1928        })?;
1929
1930        match write_mode {
1931            IcebergWriteMode::MergeOnRead => {
1932                sink_with.insert(WRITE_MODE.to_owned(), write_mode.as_str().to_owned());
1933            }
1934
1935            IcebergWriteMode::CopyOnWrite => {
1936                if table.append_only {
1937                    return Err(ErrorCode::NotSupported(
1938                        "COPY ON WRITE is not supported for append-only iceberg table".to_owned(),
1939                        "Please use MERGE ON READ instead".to_owned(),
1940                    )
1941                    .into());
1942                }
1943
1944                sink_with.insert(WRITE_MODE.to_owned(), write_mode.as_str().to_owned());
1945            }
1946        }
1947
1948        // remove write_mode from source options, otherwise it will be considered as an unknown field.
1949        source
1950            .as_mut()
1951            .map(|x| x.with_properties.remove("write_mode"));
1952    } else {
1953        sink_with.insert(
1954            WRITE_MODE.to_owned(),
1955            ICEBERG_WRITE_MODE_MERGE_ON_READ.to_owned(),
1956        );
1957    }
1958
1959    if let Some(max_snapshots_num_before_compaction) =
1960        handler_args.with_options.get(COMPACTION_MAX_SNAPSHOTS_NUM)
1961    {
1962        let max_snapshots_num_before_compaction = max_snapshots_num_before_compaction
1963            .parse::<u32>()
1964            .map_err(|_| {
1965                ErrorCode::InvalidInputSyntax(format!(
1966                    "{} must be greater than 0: {}",
1967                    COMPACTION_MAX_SNAPSHOTS_NUM, max_snapshots_num_before_compaction
1968                ))
1969            })?;
1970
1971        if max_snapshots_num_before_compaction == 0 {
1972            bail!(format!(
1973                "{} must be greater than 0",
1974                COMPACTION_MAX_SNAPSHOTS_NUM
1975            ));
1976        }
1977
1978        sink_with.insert(
1979            COMPACTION_MAX_SNAPSHOTS_NUM.to_owned(),
1980            max_snapshots_num_before_compaction.to_string(),
1981        );
1982
1983        // remove from source options, otherwise it will be considered as an unknown field.
1984        source
1985            .as_mut()
1986            .map(|x| x.with_properties.remove(COMPACTION_MAX_SNAPSHOTS_NUM));
1987    }
1988
1989    if let Some(small_files_threshold_mb) = handler_args
1990        .with_options
1991        .get(COMPACTION_SMALL_FILES_THRESHOLD_MB)
1992    {
1993        let small_files_threshold_mb = small_files_threshold_mb.parse::<u64>().map_err(|_| {
1994            ErrorCode::InvalidInputSyntax(format!(
1995                "{} must be greater than 0: {}",
1996                COMPACTION_SMALL_FILES_THRESHOLD_MB, small_files_threshold_mb
1997            ))
1998        })?;
1999        if small_files_threshold_mb == 0 {
2000            bail!(format!(
                "{} must be greater than 0",
2002                COMPACTION_SMALL_FILES_THRESHOLD_MB
2003            ));
2004        }
2005        sink_with.insert(
2006            COMPACTION_SMALL_FILES_THRESHOLD_MB.to_owned(),
2007            small_files_threshold_mb.to_string(),
2008        );
2009
2010        // remove from source options, otherwise it will be considered as an unknown field.
2011        source.as_mut().map(|x| {
2012            x.with_properties
2013                .remove(COMPACTION_SMALL_FILES_THRESHOLD_MB)
2014        });
2015    }
2016
2017    if let Some(delete_files_count_threshold) = handler_args
2018        .with_options
2019        .get(COMPACTION_DELETE_FILES_COUNT_THRESHOLD)
2020    {
2021        let delete_files_count_threshold =
2022            delete_files_count_threshold.parse::<usize>().map_err(|_| {
2023                ErrorCode::InvalidInputSyntax(format!(
2024                    "{} must be greater than 0: {}",
2025                    COMPACTION_DELETE_FILES_COUNT_THRESHOLD, delete_files_count_threshold
2026                ))
2027            })?;
2028        if delete_files_count_threshold == 0 {
2029            bail!(format!(
2030                "{} must be greater than 0",
2031                COMPACTION_DELETE_FILES_COUNT_THRESHOLD
2032            ));
2033        }
2034        sink_with.insert(
2035            COMPACTION_DELETE_FILES_COUNT_THRESHOLD.to_owned(),
2036            delete_files_count_threshold.to_string(),
2037        );
2038
2039        // remove from source options, otherwise it will be considered as an unknown field.
2040        source.as_mut().map(|x| {
2041            x.with_properties
2042                .remove(COMPACTION_DELETE_FILES_COUNT_THRESHOLD)
2043        });
2044    }
2045
2046    if let Some(trigger_snapshot_count) = handler_args
2047        .with_options
2048        .get(COMPACTION_TRIGGER_SNAPSHOT_COUNT)
2049    {
2050        let trigger_snapshot_count = trigger_snapshot_count.parse::<usize>().map_err(|_| {
2051            ErrorCode::InvalidInputSyntax(format!(
2052                "{} must be greater than 0: {}",
2053                COMPACTION_TRIGGER_SNAPSHOT_COUNT, trigger_snapshot_count
2054            ))
2055        })?;
2056        if trigger_snapshot_count == 0 {
2057            bail!(format!(
2058                "{} must be greater than 0",
2059                COMPACTION_TRIGGER_SNAPSHOT_COUNT
2060            ));
2061        }
2062        sink_with.insert(
2063            COMPACTION_TRIGGER_SNAPSHOT_COUNT.to_owned(),
2064            trigger_snapshot_count.to_string(),
2065        );
2066
2067        // remove from source options, otherwise it will be considered as an unknown field.
2068        source
2069            .as_mut()
2070            .map(|x| x.with_properties.remove(COMPACTION_TRIGGER_SNAPSHOT_COUNT));
2071    }
2072
2073    if let Some(target_file_size_mb) = handler_args
2074        .with_options
2075        .get(COMPACTION_TARGET_FILE_SIZE_MB)
2076    {
2077        let target_file_size_mb = target_file_size_mb.parse::<u64>().map_err(|_| {
2078            ErrorCode::InvalidInputSyntax(format!(
2079                "{} must be greater than 0: {}",
2080                COMPACTION_TARGET_FILE_SIZE_MB, target_file_size_mb
2081            ))
2082        })?;
2083        if target_file_size_mb == 0 {
2084            bail!(format!(
2085                "{} must be greater than 0",
2086                COMPACTION_TARGET_FILE_SIZE_MB
2087            ));
2088        }
2089        sink_with.insert(
2090            COMPACTION_TARGET_FILE_SIZE_MB.to_owned(),
2091            target_file_size_mb.to_string(),
2092        );
2093        // remove from source options, otherwise it will be considered as an unknown field.
2094        source
2095            .as_mut()
2096            .map(|x| x.with_properties.remove(COMPACTION_TARGET_FILE_SIZE_MB));
2097    }
2098
2099    if let Some(compaction_type) = handler_args.with_options.get(COMPACTION_TYPE) {
2100        let compaction_type = CompactionType::try_from(compaction_type.as_str()).map_err(|_| {
2101            ErrorCode::InvalidInputSyntax(format!(
2102                "invalid compaction_type: {}, must be one of {:?}",
2103                compaction_type,
2104                &[
2105                    CompactionType::Full,
2106                    CompactionType::SmallFiles,
2107                    CompactionType::FilesWithDelete
2108                ]
2109            ))
2110        })?;
2111
2112        sink_with.insert(
2113            COMPACTION_TYPE.to_owned(),
2114            compaction_type.as_str().to_owned(),
2115        );
2116
2117        // remove from source options, otherwise it will be considered as an unknown field.
2118        source
2119            .as_mut()
2120            .map(|x| x.with_properties.remove(COMPACTION_TYPE));
2121    }
2122
2123    let partition_by = handler_args
2124        .with_options
2125        .get("partition_by")
2126        .map(|v| v.to_owned());
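    // e.g. WITH (partition_by = 'c1, bucket(8, c2)'); this is illustrative only, the
    // accepted column/transform forms are defined by `parse_partition_by_exprs`.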
2127
2128    if let Some(partition_by) = &partition_by {
2129        let mut partition_columns = vec![];
2130        for (column, _) in parse_partition_by_exprs(partition_by.clone())? {
2131            table
2132                .columns()
2133                .iter()
2134                .find(|col| col.name().eq_ignore_ascii_case(&column))
2135                .ok_or_else(|| {
2136                    ErrorCode::InvalidInputSyntax(format!(
                        "Partition column does not exist in the table schema: {}",
2138                        column
2139                    ))
2140                })?;
2141
2142            partition_columns.push(column);
2143        }
2144
2145        ensure_partition_columns_are_prefix_of_primary_key(&partition_columns, &pks).map_err(
2146            |_| {
2147                ErrorCode::InvalidInputSyntax(
                    "The partition columns must be a prefix of the primary key".to_owned(),
2149                )
2150            },
2151        )?;
2152
2153        sink_with.insert("partition_by".to_owned(), partition_by.to_owned());
2154
2155        // remove partition_by from source options, otherwise it will be considered as an unknown field.
2156        source
2157            .as_mut()
2158            .map(|x| x.with_properties.remove("partition_by"));
2159    }
2160
2161    sink_handler_args.with_options =
2162        WithOptions::new(sink_with, Default::default(), connection_ref.clone());
2163    let SinkPlanContext {
2164        sink_plan,
2165        sink_catalog,
2166        ..
2167    } = gen_sink_plan(sink_handler_args, create_sink_stmt, None, true).await?;
2168    let sink_graph = build_graph(sink_plan, Some(GraphJobType::Sink))?;
2169
2170    let mut source_name = table_name.clone();
2171    *source_name.0.last_mut().unwrap() = Ident::from(
2172        (ICEBERG_SOURCE_PREFIX.to_owned() + &source_name.0.last().unwrap().real_value()).as_str(),
2173    );
2174    let create_source_stmt = CreateSourceStatement {
2175        temporary: false,
2176        if_not_exists: false,
2177        columns: vec![],
2178        source_name,
2179        wildcard_idx: Some(0),
2180        constraints: vec![],
2181        with_properties: WithProperties(vec![]),
2182        format_encode: CompatibleFormatEncode::V2(FormatEncodeOptions::none()),
2183        source_watermarks: vec![],
2184        include_column_options: vec![],
2185    };
2186
2187    let mut source_handler_args = handler_args.clone();
2188    let source_with = with_common;
2189    source_handler_args.with_options =
2190        WithOptions::new(source_with, Default::default(), connection_ref);
2191
2192    let overwrite_options = OverwriteOptions::new(&mut source_handler_args);
2193    let format_encode = create_source_stmt.format_encode.into_v2_with_warning();
2194    let (with_properties, refresh_mode) =
2195        bind_connector_props(&source_handler_args, &format_encode, true)?;
2196
    // Create the physical iceberg table via the sink, which the iceberg source below binds its columns from. See `bind_columns_from_source_for_non_cdc` for more details.
    // TODO: we could derive the columns directly from the table definition in the future, so that we don't need to pre-create the table catalog.
2199    let (iceberg_catalog, table_identifier) = {
2200        let sink_param = SinkParam::try_from_sink_catalog(sink_catalog.clone())?;
2201        let iceberg_sink = IcebergSink::try_from(sink_param)?;
2202        iceberg_sink.create_table_if_not_exists().await?;
2203
2204        let iceberg_catalog = iceberg_sink.config.create_catalog().await?;
2205        let table_identifier = iceberg_sink.config.full_table_name()?;
2206        (iceberg_catalog, table_identifier)
2207    };
2208
2209    let create_source_type = CreateSourceType::for_newly_created(&session, &*with_properties);
2210    let (columns_from_resolve_source, source_info) = bind_columns_from_source(
2211        &session,
2212        &format_encode,
2213        Either::Left(&with_properties),
2214        create_source_type,
2215    )
2216    .await?;
2217    let mut col_id_gen = ColumnIdGenerator::new_initial();
2218
2219    let iceberg_source_catalog = bind_create_source_or_table_with_connector(
2220        source_handler_args,
2221        create_source_stmt.source_name,
2222        format_encode,
2223        with_properties,
2224        &create_source_stmt.columns,
2225        create_source_stmt.constraints,
2226        create_source_stmt.wildcard_idx,
2227        create_source_stmt.source_watermarks,
2228        columns_from_resolve_source,
2229        source_info,
2230        create_source_stmt.include_column_options,
2231        &mut col_id_gen,
2232        create_source_type,
2233        overwrite_options.source_rate_limit,
2234        SqlColumnStrategy::FollowChecked,
2235        refresh_mode,
2236    )
2237    .await?;
2238
    // Before we create the table, ensure the JVM is initialized, as we use the JDBC catalog right now.
    // If the JVM fails to initialize, the current non-atomic DDL would leave a partially created iceberg engine table behind.
2241    let _ = Jvm::get_or_init()?;
2242
2243    let catalog_writer = session.catalog_writer()?;
2244    let action = match job_type {
2245        TableJobType::SharedCdcSource => LongRunningNotificationAction::MonitorBackfillJob,
2246        _ => LongRunningNotificationAction::DiagnoseBarrierLatency,
2247    };
2248    let res = execute_with_long_running_notification(
2249        catalog_writer.create_iceberg_table(
2250            PbTableJobInfo {
2251                source,
2252                table: Some(table.to_prost()),
2253                fragment_graph: Some(graph),
2254                job_type: job_type as _,
2255            },
2256            PbSinkJobInfo {
2257                sink: Some(sink_catalog.to_proto()),
2258                fragment_graph: Some(sink_graph),
2259            },
2260            iceberg_source_catalog.to_prost(),
2261            if_not_exists,
2262        ),
2263        &session,
2264        "CREATE TABLE",
2265        action,
2266    )
2267    .await;
2268
2269    if res.is_err() {
2270        let _ = iceberg_catalog
2271            .drop_table(&table_identifier)
2272            .await
2273            .inspect_err(|err| {
2274                tracing::error!(
                    "failed to clean up iceberg table {} after iceberg engine table creation failed: {}",
2276                    table_identifier,
2277                    err.as_report()
2278                );
2279            });
2280        res?
2281    }
2282
2283    Ok(())
2284}
2285
2286pub fn check_create_table_with_source(
2287    with_options: &WithOptions,
2288    format_encode: Option<FormatEncodeOptions>,
2289    include_column_options: &IncludeOption,
2290    cdc_table_info: &Option<CdcTableInfo>,
2291) -> Result<Option<FormatEncodeOptions>> {
    // Skip the check for CDC tables.
2293    if cdc_table_info.is_some() {
2294        return Ok(format_encode);
2295    }
2296    let defined_source = with_options.is_source_connector();
2297
2298    if !include_column_options.is_empty() && !defined_source {
2299        return Err(ErrorCode::InvalidInputSyntax(
2300            "INCLUDE should be used with a connector".to_owned(),
2301        )
2302        .into());
2303    }
2304    if defined_source {
2305        format_encode.as_ref().ok_or_else(|| {
2306            ErrorCode::InvalidInputSyntax("Please specify a source schema using FORMAT".to_owned())
2307        })?;
2308    }
2309    Ok(format_encode)
2310}
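
// Illustrative outcomes of the check above (hypothetical DDL):
//
//   CREATE TABLE t (v INT) WITH (connector = 'kafka', ...);                          -- error: FORMAT ... ENCODE ... required
//   CREATE TABLE t (v INT) INCLUDE timestamp;                                        -- error: INCLUDE requires a connector
//   CREATE TABLE t (v INT) WITH (connector = 'kafka', ...) FORMAT PLAIN ENCODE JSON; -- accepted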
2311
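/// For example, with primary key `["a", "b", "c"]`, partition columns `["a"]` or
/// `["a", "b"]` are accepted, while `["b"]` or `["a", "c"]` are rejected.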
2312fn ensure_partition_columns_are_prefix_of_primary_key(
2313    partition_columns: &[String],
2314    primary_key_columns: &[String],
2315) -> std::result::Result<(), String> {
2316    if partition_columns.len() > primary_key_columns.len() {
        return Err("There cannot be more partition columns than primary key columns.".to_owned());
2318    }
2319
2320    for (i, partition_col) in partition_columns.iter().enumerate() {
2321        if primary_key_columns.get(i) != Some(partition_col) {
2322            return Err(format!(
                "Partition column '{}' does not match the primary key prefix.",
2324                partition_col
2325            ));
2326        }
2327    }
2328
2329    Ok(())
2330}
2331
2332#[allow(clippy::too_many_arguments)]
2333pub async fn generate_stream_graph_for_replace_table(
2334    _session: &Arc<SessionImpl>,
2335    table_name: ObjectName,
2336    original_catalog: &Arc<TableCatalog>,
2337    handler_args: HandlerArgs,
2338    statement: Statement,
2339    col_id_gen: ColumnIdGenerator,
2340    sql_column_strategy: SqlColumnStrategy,
2341) -> Result<(
2342    StreamFragmentGraph,
2343    TableCatalog,
2344    Option<SourceCatalog>,
2345    TableJobType,
2346)> {
2347    let Statement::CreateTable {
2348        columns,
2349        constraints,
2350        source_watermarks,
2351        append_only,
2352        on_conflict,
2353        with_version_columns,
2354        wildcard_idx,
2355        cdc_table_info,
2356        format_encode,
2357        include_column_options,
2358        engine,
2359        with_options,
2360        ..
2361    } = statement
2362    else {
2363        panic!("unexpected statement type: {:?}", statement);
2364    };
2365
2366    let format_encode = format_encode
2367        .clone()
2368        .map(|format_encode| format_encode.into_v2_with_warning());
2369
2370    let engine = match engine {
2371        risingwave_sqlparser::ast::Engine::Hummock => Engine::Hummock,
2372        risingwave_sqlparser::ast::Engine::Iceberg => Engine::Iceberg,
2373    };
2374
2375    let is_drop_connector =
2376        original_catalog.associated_source_id().is_some() && format_encode.is_none();
2377    if is_drop_connector {
2378        debug_assert!(
2379            source_watermarks.is_empty()
2380                && include_column_options.is_empty()
2381                && with_options
2382                    .iter()
2383                    .all(|opt| opt.name.real_value().to_lowercase() != "connector")
2384        );
2385    }
2386
2387    let props = CreateTableProps {
2388        definition: handler_args.normalized_sql.clone(),
2389        append_only,
2390        on_conflict: on_conflict.into(),
2391        with_version_columns: with_version_columns
2392            .iter()
2393            .map(|col| col.real_value())
2394            .collect(),
2395        webhook_info: original_catalog.webhook_info.clone(),
2396        engine,
2397    };
2398
2399    let ((plan, mut source, mut table), job_type) = match (format_encode, cdc_table_info.as_ref()) {
2400        (Some(format_encode), None) => (
2401            gen_create_table_plan_with_source(
2402                handler_args,
2403                ExplainOptions::default(),
2404                table_name,
2405                columns,
2406                wildcard_idx,
2407                constraints,
2408                format_encode,
2409                source_watermarks,
2410                col_id_gen,
2411                include_column_options,
2412                props,
2413                sql_column_strategy,
2414            )
2415            .await?,
2416            TableJobType::General,
2417        ),
2418        (None, None) => {
2419            let context = OptimizerContext::from_handler_args(handler_args);
2420            let (plan, table) = gen_create_table_plan(
2421                context,
2422                table_name,
2423                columns,
2424                constraints,
2425                col_id_gen,
2426                source_watermarks,
2427                props,
2428                true,
2429            )?;
2430            ((plan, None, table), TableJobType::General)
2431        }
2432        (None, Some(cdc_table)) => {
2433            let session = &handler_args.session;
2434            let (source, resolved_table_name) =
2435                get_source_and_resolved_table_name(session, cdc_table.clone(), table_name.clone())?;
2436
2437            let cdc_with_options = derive_with_options_for_cdc_table(
2438                &source.with_properties,
2439                cdc_table.external_table_name.clone(),
2440            )?;
2441
2442            let (column_catalogs, pk_names) = bind_cdc_table_schema(&columns, &constraints, true)?;
2443
2444            let context: OptimizerContextRef =
2445                OptimizerContext::new(handler_args, ExplainOptions::default()).into();
2446            let (plan, table) = gen_create_table_plan_for_cdc_table(
2447                context,
2448                source,
2449                cdc_table.external_table_name.clone(),
2450                columns,
2451                column_catalogs,
2452                pk_names,
2453                cdc_with_options,
2454                col_id_gen,
2455                on_conflict,
2456                with_version_columns
2457                    .iter()
2458                    .map(|col| col.real_value())
2459                    .collect(),
2460                include_column_options,
2461                table_name,
2462                resolved_table_name,
2463                original_catalog.database_id,
2464                original_catalog.schema_id,
2465                original_catalog.id(),
2466                engine,
2467            )?;
2468
2469            ((plan, None, table), TableJobType::SharedCdcSource)
2470        }
2471        (Some(_), Some(_)) => {
2472            return Err(ErrorCode::NotSupported(
                "Data format and encoding format don't apply to a table created from a CDC source"
2474                    .into(),
2475                "Remove the FORMAT and ENCODE specification".into(),
2476            )
2477            .into());
2478        }
2479    };
2480
2481    if table.pk_column_ids() != original_catalog.pk_column_ids() {
2482        Err(ErrorCode::InvalidInputSyntax(
            "altering the primary key of a table is not supported".to_owned(),
2484        ))?
2485    }
2486
2487    let graph = build_graph(plan, Some(GraphJobType::Table))?;
2488
2489    // Fill the original table ID.
2490    table.id = original_catalog.id();
2491    if !is_drop_connector && let Some(source_id) = original_catalog.associated_source_id() {
2492        table.associated_source_id = Some(source_id);
2493
2494        let source = source.as_mut().unwrap();
2495        source.id = source_id;
2496        source.associated_table_id = Some(table.id());
2497    }
2498
2499    Ok((graph, table, source, job_type))
2500}
2501
2502fn get_source_and_resolved_table_name(
2503    session: &Arc<SessionImpl>,
2504    cdc_table: CdcTableInfo,
2505    table_name: ObjectName,
2506) -> Result<(Arc<SourceCatalog>, String)> {
2507    let db_name = &session.database();
2508    let (_, resolved_table_name) = Binder::resolve_schema_qualified_name(db_name, &table_name)?;
2509
2510    let (source_schema, source_name) =
2511        Binder::resolve_schema_qualified_name(db_name, &cdc_table.source_name)?;
2512
2513    let source = {
2514        let catalog_reader = session.env().catalog_reader().read_guard();
2515        let schema_name = source_schema.unwrap_or(DEFAULT_SCHEMA_NAME.to_owned());
2516        let (source, _) = catalog_reader.get_source_by_name(
2517            db_name,
2518            SchemaPath::Name(schema_name.as_str()),
2519            source_name.as_str(),
2520        )?;
2521        source.clone()
2522    };
2523
2524    Ok((source, resolved_table_name))
2525}
2526
// Validate the `webhook_info` and bind it into protobuf form.
2528fn bind_webhook_info(
2529    session: &Arc<SessionImpl>,
2530    columns_defs: &[ColumnDef],
2531    webhook_info: WebhookSourceInfo,
2532) -> Result<PbWebhookSourceInfo> {
2533    // validate columns
2534    if columns_defs.len() != 1 || columns_defs[0].data_type.as_ref().unwrap() != &AstDataType::Jsonb
2535    {
2536        return Err(ErrorCode::InvalidInputSyntax(
2537            "Table with webhook source should have exactly one JSONB column".to_owned(),
2538        )
2539        .into());
2540    }
2541
2542    let WebhookSourceInfo {
2543        secret_ref,
2544        signature_expr,
2545        wait_for_persistence,
2546        is_batched,
2547    } = webhook_info;
2548
2549    // validate secret_ref
2550    let (pb_secret_ref, secret_name) = if let Some(secret_ref) = secret_ref {
2551        let db_name = &session.database();
2552        let (schema_name, secret_name) =
2553            Binder::resolve_schema_qualified_name(db_name, &secret_ref.secret_name)?;
2554        let secret_catalog = session.get_secret_by_name(schema_name, &secret_name)?;
2555        (
2556            Some(PbSecretRef {
2557                secret_id: secret_catalog.id,
2558                ref_as: match secret_ref.ref_as {
2559                    SecretRefAsType::Text => PbRefAsType::Text,
2560                    SecretRefAsType::File => PbRefAsType::File,
2561                }
2562                .into(),
2563            }),
2564            Some(secret_name),
2565        )
2566    } else {
2567        (None, None)
2568    };
2569
2570    let secure_compare_context = SecureCompareContext {
2571        column_name: columns_defs[0].name.real_value(),
2572        secret_name,
2573    };
2574    let mut binder = Binder::new_for_ddl(session).with_secure_compare(secure_compare_context);
2575    let expr = binder.bind_expr(&signature_expr)?;
2576
2577    // validate expr, ensuring it is SECURE_COMPARE()
2578    if expr.as_function_call().is_none()
2579        || expr.as_function_call().unwrap().func_type()
2580            != crate::optimizer::plan_node::generic::ExprType::SecureCompare
2581    {
2582        return Err(ErrorCode::InvalidInputSyntax(
2583            "The signature verification function must be SECURE_COMPARE()".to_owned(),
2584        )
2585        .into());
2586    }
2587
2588    let pb_webhook_info = PbWebhookSourceInfo {
2589        secret_ref: pb_secret_ref,
2590        signature_expr: Some(expr.to_expr_proto()),
2591        wait_for_persistence,
2592        is_batched,
2593    };
2594
2595    Ok(pb_webhook_info)
2596}
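
// Illustrative webhook DDL that passes the validation above (all names hypothetical):
//
//   CREATE TABLE wh (payload JSONB)
//   WITH (connector = 'webhook')
//   VALIDATE SECRET my_secret AS SECURE_COMPARE(headers->>'x-signature', my_secret);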
2597
2598#[cfg(test)]
2599mod tests {
2600    use risingwave_common::catalog::{
2601        DEFAULT_DATABASE_NAME, ROW_ID_COLUMN_NAME, RW_TIMESTAMP_COLUMN_NAME,
2602    };
2603    use risingwave_common::types::{DataType, StructType};
2604
2605    use super::*;
2606    use crate::test_utils::{LocalFrontend, PROTO_FILE_DATA, create_proto_file};
2607
2608    #[tokio::test]
2609    async fn test_create_table_handler() {
2610        let sql =
2611            "create table t (v1 smallint, v2 struct<v3 bigint, v4 float, v5 double>) append only;";
2612        let frontend = LocalFrontend::new(Default::default()).await;
2613        frontend.run_sql(sql).await.unwrap();
2614
2615        let session = frontend.session_ref();
2616        let catalog_reader = session.env().catalog_reader().read_guard();
2617        let schema_path = SchemaPath::Name(DEFAULT_SCHEMA_NAME);
2618
2619        // Check table exists.
2620        let (table, _) = catalog_reader
2621            .get_created_table_by_name(DEFAULT_DATABASE_NAME, schema_path, "t")
2622            .unwrap();
2623        assert_eq!(table.name(), "t");
2624
2625        let columns = table
2626            .columns
2627            .iter()
2628            .map(|col| (col.name(), col.data_type().clone()))
2629            .collect::<HashMap<&str, DataType>>();
2630
2631        let expected_columns = maplit::hashmap! {
2632            ROW_ID_COLUMN_NAME => DataType::Serial,
2633            "v1" => DataType::Int16,
2634            "v2" => StructType::new(
2635                vec![("v3", DataType::Int64),("v4", DataType::Float64),("v5", DataType::Float64)],
2636            )
2637            .with_ids([3, 4, 5].map(ColumnId::new))
2638            .into(),
2639            RW_TIMESTAMP_COLUMN_NAME => DataType::Timestamptz,
2640        };
2641
2642        assert_eq!(columns, expected_columns, "{columns:#?}");
2643    }
2644
2645    #[test]
2646    fn test_bind_primary_key() {
2647        // Note: Column ID 0 is reserved for row ID column.
2648
2649        for (sql, expected) in [
2650            ("create table t (v1 int, v2 int)", Ok(&[0] as &[_])),
2651            ("create table t (v1 int primary key, v2 int)", Ok(&[1])),
2652            ("create table t (v1 int, v2 int primary key)", Ok(&[2])),
2653            (
2654                "create table t (v1 int primary key, v2 int primary key)",
2655                Err("multiple primary keys are not allowed"),
2656            ),
2657            (
2658                "create table t (v1 int primary key primary key, v2 int)",
2659                Err("multiple primary keys are not allowed"),
2660            ),
2661            (
2662                "create table t (v1 int, v2 int, primary key (v1))",
2663                Ok(&[1]),
2664            ),
2665            (
2666                "create table t (v1 int, primary key (v2), v2 int)",
2667                Ok(&[2]),
2668            ),
2669            (
2670                "create table t (primary key (v2, v1), v1 int, v2 int)",
2671                Ok(&[2, 1]),
2672            ),
2673            (
2674                "create table t (v1 int, primary key (v1), v2 int, primary key (v1))",
2675                Err("multiple primary keys are not allowed"),
2676            ),
2677            (
2678                "create table t (v1 int primary key, primary key (v1), v2 int)",
2679                Err("multiple primary keys are not allowed"),
2680            ),
2681            (
2682                "create table t (v1 int, primary key (V3), v2 int)",
2683                Err("column \"v3\" named in key does not exist"),
2684            ),
2685        ] {
2686            let mut ast = risingwave_sqlparser::parser::Parser::parse_sql(sql).unwrap();
2687            let risingwave_sqlparser::ast::Statement::CreateTable {
2688                columns: column_defs,
2689                constraints,
2690                ..
2691            } = ast.remove(0)
2692            else {
2693                panic!("test case should be create table")
2694            };
2695            let actual: Result<_> = (|| {
2696                let mut columns = bind_sql_columns(&column_defs, false)?;
2697                let mut col_id_gen = ColumnIdGenerator::new_initial();
2698                for c in &mut columns {
2699                    col_id_gen.generate(c)?;
2700                }
2701
2702                let pk_names =
2703                    bind_sql_pk_names(&column_defs, bind_table_constraints(&constraints)?)?;
2704                let (_, pk_column_ids, _) =
2705                    bind_pk_and_row_id_on_relation(columns, pk_names, true)?;
2706                Ok(pk_column_ids)
2707            })();
2708            match (expected, actual) {
2709                (Ok(expected), Ok(actual)) => assert_eq!(
2710                    expected.iter().copied().map(ColumnId::new).collect_vec(),
2711                    actual,
2712                    "sql: {sql}"
2713                ),
2714                (Ok(_), Err(actual)) => panic!("sql: {sql}\nunexpected error: {actual:?}"),
2715                (Err(_), Ok(actual)) => panic!("sql: {sql}\nexpects error but got: {actual:?}"),
2716                (Err(expected), Err(actual)) => assert!(
2717                    actual.to_string().contains(expected),
2718                    "sql: {sql}\nexpected: {expected:?}\nactual: {actual:?}"
2719                ),
2720            }
2721        }
2722    }
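
    #[test]
    fn test_partition_columns_are_prefix_of_pk() {
        // A minimal sketch of the partition-prefix rule enforced for iceberg engine
        // tables; the column names here are arbitrary.
        let pk = vec!["a".to_owned(), "b".to_owned(), "c".to_owned()];

        // An empty partition list and proper prefixes are accepted.
        assert!(ensure_partition_columns_are_prefix_of_primary_key(&[], &pk).is_ok());
        assert!(ensure_partition_columns_are_prefix_of_primary_key(&pk[..1], &pk).is_ok());
        assert!(ensure_partition_columns_are_prefix_of_primary_key(&pk[..2], &pk).is_ok());

        // Non-prefix or out-of-order partition columns are rejected.
        assert!(
            ensure_partition_columns_are_prefix_of_primary_key(&["b".to_owned()], &pk).is_err()
        );
        assert!(ensure_partition_columns_are_prefix_of_primary_key(
            &["a".to_owned(), "c".to_owned()],
            &pk
        )
        .is_err());

        // More partition columns than primary key columns are rejected.
        assert!(ensure_partition_columns_are_prefix_of_primary_key(
            &["a".to_owned(), "b".to_owned(), "c".to_owned(), "d".to_owned()],
            &pk
        )
        .is_err());
    }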
2723
2724    #[tokio::test]
2725    async fn test_duplicate_props_options() {
2726        let proto_file = create_proto_file(PROTO_FILE_DATA);
2727        let sql = format!(
2728            r#"CREATE TABLE t
2729    WITH (
2730        connector = 'kinesis',
2731        aws.region='user_test_topic',
2732        endpoint='172.10.1.1:9090,172.10.1.2:9090',
2733        aws.credentials.access_key_id = 'your_access_key_1',
2734        aws.credentials.secret_access_key = 'your_secret_key_1'
2735    )
2736    FORMAT PLAIN ENCODE PROTOBUF (
2737        message = '.test.TestRecord',
2738        aws.credentials.access_key_id = 'your_access_key_2',
2739        aws.credentials.secret_access_key = 'your_secret_key_2',
2740        schema.location = 'file://{}',
2741    )"#,
2742            proto_file.path().to_str().unwrap()
2743        );
2744        let frontend = LocalFrontend::new(Default::default()).await;
2745        frontend.run_sql(sql).await.unwrap();
2746
2747        let session = frontend.session_ref();
2748        let catalog_reader = session.env().catalog_reader().read_guard();
2749        let schema_path = SchemaPath::Name(DEFAULT_SCHEMA_NAME);
2750
2751        // Check source exists.
2752        let (source, _) = catalog_reader
2753            .get_source_by_name(DEFAULT_DATABASE_NAME, schema_path, "t")
2754            .unwrap();
2755        assert_eq!(source.name, "t");
2756
2757        // AwsAuth params exist in options.
2758        assert_eq!(
2759            source
2760                .info
2761                .format_encode_options
2762                .get("aws.credentials.access_key_id")
2763                .unwrap(),
2764            "your_access_key_2"
2765        );
2766        assert_eq!(
2767            source
2768                .info
2769                .format_encode_options
2770                .get("aws.credentials.secret_access_key")
2771                .unwrap(),
2772            "your_secret_key_2"
2773        );
2774
2775        // AwsAuth params exist in props.
2776        assert_eq!(
2777            source
2778                .with_properties
2779                .get("aws.credentials.access_key_id")
2780                .unwrap(),
2781            "your_access_key_1"
2782        );
2783        assert_eq!(
2784            source
2785                .with_properties
2786                .get("aws.credentials.secret_access_key")
2787                .unwrap(),
2788            "your_secret_key_1"
2789        );
2790
2791        // Options are not merged into props.
2792        assert!(!source.with_properties.contains_key("schema.location"));
2793    }
2794}