use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};
use std::sync::{Arc, LazyLock};

use either::Either;
use iceberg::arrow::type_to_arrow_type;
use iceberg::spec::Transform;
use itertools::Itertools;
use maplit::{convert_args, hashmap, hashset};
use pgwire::pg_response::{PgResponse, StatementType};
use risingwave_common::array::arrow::IcebergArrowConvert;
use risingwave_common::array::arrow::arrow_schema_iceberg::DataType as ArrowDataType;
use risingwave_common::bail;
use risingwave_common::catalog::{ColumnCatalog, ICEBERG_SINK_PREFIX, ObjectId, Schema, UserId};
use risingwave_common::license::Feature;
use risingwave_common::secret::LocalSecretManager;
use risingwave_common::system_param::reader::SystemParamsRead;
use risingwave_common::types::DataType;
use risingwave_connector::sink::catalog::{SinkCatalog, SinkFormatDesc};
use risingwave_connector::sink::file_sink::s3::SnowflakeSink;
use risingwave_connector::sink::iceberg::{ICEBERG_SINK, IcebergConfig};
use risingwave_connector::sink::kafka::KAFKA_SINK;
use risingwave_connector::sink::snowflake_redshift::redshift::RedshiftSink;
use risingwave_connector::sink::snowflake_redshift::snowflake::SnowflakeV2Sink;
use risingwave_connector::sink::{
    CONNECTOR_TYPE_KEY, SINK_SNAPSHOT_OPTION, SINK_TYPE_OPTION, SINK_USER_FORCE_APPEND_ONLY_OPTION,
    Sink, enforce_secret_sink,
};
use risingwave_connector::{
    AUTO_SCHEMA_CHANGE_KEY, SINK_CREATE_TABLE_IF_NOT_EXISTS_KEY, SINK_INTERMEDIATE_TABLE_NAME,
    SINK_TARGET_TABLE_NAME, WithPropertiesExt,
};
use risingwave_pb::catalog::connection_params::PbConnectionType;
use risingwave_pb::telemetry::TelemetryDatabaseObject;
use risingwave_sqlparser::ast::{
    CreateSink, CreateSinkStatement, EmitMode, Encode, ExplainOptions, Format, FormatEncodeOptions,
    ObjectName, Query,
};

use super::RwPgResponse;
use super::create_mv::get_column_names;
use super::create_source::UPSTREAM_SOURCE_KEY;
use super::util::gen_query_from_table_name;
use crate::binder::{Binder, Relation};
use crate::catalog::table_catalog::TableType;
use crate::error::{ErrorCode, Result, RwError};
use crate::expr::{ExprImpl, InputRef, rewrite_now_to_proctime};
use crate::handler::HandlerArgs;
use crate::handler::alter_table_column::fetch_table_catalog_for_alter;
use crate::handler::create_mv::parse_column_names;
use crate::handler::util::{
    LongRunningNotificationAction, check_connector_match_connection_type,
    ensure_connection_type_allowed, execute_with_long_running_notification,
};
use crate::optimizer::backfill_order_strategy::plan_backfill_order;
use crate::optimizer::plan_node::{
    IcebergPartitionInfo, LogicalSource, PartitionComputeInfo, StreamPlanRef as PlanRef,
    StreamProject, generic,
};
use crate::optimizer::{OptimizerContext, RelationCollectorVisitor};
use crate::scheduler::streaming_manager::CreatingStreamingJobInfo;
use crate::session::SessionImpl;
use crate::session::current::notice_to_user;
use crate::stream_fragmenter::{GraphJobType, build_graph_with_strategy};
use crate::utils::{resolve_connection_ref_and_secret_ref, resolve_privatelink_in_with_option};
use crate::{Explain, Planner, TableCatalog, WithOptions, WithOptionsSecResolved};

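/// Connection object types that a sink's `connection = ...` reference may point to.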
static SINK_ALLOWED_CONNECTION_CONNECTOR: LazyLock<HashSet<PbConnectionType>> =
    LazyLock::new(|| {
        hashset! {
            PbConnectionType::Unspecified,
            PbConnectionType::Kafka,
            PbConnectionType::Iceberg,
            PbConnectionType::Elasticsearch,
        }
    });

static SINK_ALLOWED_CONNECTION_SCHEMA_REGISTRY: LazyLock<HashSet<PbConnectionType>> =
    LazyLock::new(|| {
        hashset! {
            PbConnectionType::Unspecified,
            PbConnectionType::SchemaRegistry,
        }
    });

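/// The artifacts of planning a `CREATE SINK` statement: the bound query, the stream
/// plan, the sink catalog entry, the target table for `CREATE SINK ... INTO`, and the
/// set of objects the sink depends on.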
pub struct SinkPlanContext {
    pub query: Box<Query>,
    pub sink_plan: PlanRef,
    pub sink_catalog: SinkCatalog,
    pub target_table_catalog: Option<Arc<TableCatalog>>,
    pub dependencies: HashSet<ObjectId>,
}

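/// Binds and plans a `CREATE SINK` statement. `explain_options` is set when invoked
/// from `EXPLAIN CREATE SINK`; `is_iceberg_engine_internal` marks sinks created
/// internally by the Iceberg table engine.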
pub async fn gen_sink_plan(
    handler_args: HandlerArgs,
    stmt: CreateSinkStatement,
    explain_options: Option<ExplainOptions>,
    is_iceberg_engine_internal: bool,
) -> Result<SinkPlanContext> {
    let session = handler_args.session.clone();
    let session = session.as_ref();
    let user_specified_columns = !stmt.columns.is_empty();
    let db_name = &session.database();
    let (sink_schema_name, sink_table_name) =
        Binder::resolve_schema_qualified_name(db_name, &stmt.sink_name)?;

    let mut with_options = handler_args.with_options.clone();

    // Reject plain-text credentials if the cluster enforces secret usage.
    if session
        .env()
        .system_params_manager()
        .get_params()
        .load()
        .enforce_secret()
        && Feature::SecretManagement.check_available().is_ok()
    {
        enforce_secret_sink(&with_options)?;
    }

    resolve_privatelink_in_with_option(&mut with_options)?;
    let (mut resolved_with_options, connection_type, connector_conn_ref) =
        resolve_connection_ref_and_secret_ref(
            with_options,
            session,
            Some(TelemetryDatabaseObject::Sink),
        )?;
    ensure_connection_type_allowed(connection_type, &SINK_ALLOWED_CONNECTION_CONNECTOR)?;

    // If a connection object is referenced, the connector must match its type.
    if !matches!(connection_type, PbConnectionType::Unspecified) {
        let Some(connector) = resolved_with_options.get_connector() else {
            return Err(RwError::from(ErrorCode::ProtocolError(format!(
                "missing field '{}' in WITH clause",
                CONNECTOR_TYPE_KEY
            ))));
        };
        check_connector_match_connection_type(connector.as_str(), &connection_type)?;
    }

    let partition_info = get_partition_compute_info(&resolved_with_options).await?;

    let context = if let Some(explain_options) = explain_options {
        OptimizerContext::new(handler_args.clone(), explain_options)
    } else {
        OptimizerContext::from_handler_args(handler_args.clone())
    };

    let is_auto_schema_change = resolved_with_options
        .remove(AUTO_SCHEMA_CHANGE_KEY)
        .map(|value| {
            value.parse::<bool>().map_err(|_| {
                ErrorCode::InvalidInputSyntax(format!(
                    "invalid value {} of '{}' option, expected a boolean",
                    value, AUTO_SCHEMA_CHANGE_KEY
                ))
            })
        })
        .transpose()?
        .unwrap_or(false);

    if is_auto_schema_change {
        Feature::SinkAutoSchemaChange.check_available()?;
    }

    let sink_into_table_name = stmt.into_table_name.as_ref().map(|name| name.real_value());
    if sink_into_table_name.is_some() {
        let prev = resolved_with_options.insert(CONNECTOR_TYPE_KEY.to_owned(), "table".to_owned());

        if prev.is_some() {
            return Err(RwError::from(ErrorCode::BindError(
                "When sinking into a table, the 'connector' parameter must not be provided.".to_owned(),
            )));
        }
    }
    let connector = resolved_with_options
        .get(CONNECTOR_TYPE_KEY)
        .cloned()
        .ok_or_else(|| ErrorCode::BindError(format!("missing field '{CONNECTOR_TYPE_KEY}'")))?;

    let sink_from_table_name;
    // Set for `CREATE SINK ... FROM <table_or_mv>`; the flag records whether auto
    // schema change was requested.
    let direct_sink_from_name: Option<(ObjectName, bool)>;
    let query = match stmt.sink_from {
        CreateSink::From(from_name) => {
            sink_from_table_name = from_name.0.last().unwrap().real_value();
            direct_sink_from_name = Some((from_name.clone(), is_auto_schema_change));
            if is_auto_schema_change && sink_into_table_name.is_some() {
                return Err(RwError::from(ErrorCode::InvalidInputSyntax(
                    "auto schema change is not supported for sink-into-table".to_owned(),
                )));
            }
            // For Redshift/Snowflake sinks with `create_table_if_not_exists = true`,
            // auto-generate a unique intermediate table name when none is provided.
            if resolved_with_options
                .value_eq_ignore_case(SINK_CREATE_TABLE_IF_NOT_EXISTS_KEY, "true")
                && (connector == RedshiftSink::SINK_NAME
                    || connector == SnowflakeV2Sink::SINK_NAME)
            {
                if let Some(table_name) = resolved_with_options.get(SINK_TARGET_TABLE_NAME) {
                    if resolved_with_options
                        .get(SINK_INTERMEDIATE_TABLE_NAME)
                        .is_none()
                    {
                        let intermediate_table_name = format!(
                            "rw_{}_{}_{}",
                            sink_table_name,
                            table_name,
                            uuid::Uuid::new_v4()
                        );
                        resolved_with_options.insert(
                            SINK_INTERMEDIATE_TABLE_NAME.to_owned(),
                            intermediate_table_name,
                        );
                    }
                } else {
                    return Err(RwError::from(ErrorCode::BindError(
                        "'table.name' option must be specified.".to_owned(),
                    )));
                }
            }
            Box::new(gen_query_from_table_name(from_name))
        }
        CreateSink::AsQuery(query) => {
            if is_auto_schema_change {
                return Err(RwError::from(ErrorCode::InvalidInputSyntax(
                    "auto schema change is not supported for CREATE SINK AS QUERY".to_owned(),
                )));
            }
            sink_from_table_name = sink_table_name.clone();
            direct_sink_from_name = None;
            query
        }
    };

    let (sink_database_id, sink_schema_id) =
        session.get_database_and_schema_id_for_create(sink_schema_name.clone())?;

    let (dependent_relations, dependent_udfs, bound, auto_refresh_schema_from_table) = {
        let mut binder = Binder::new_for_stream(session);
        // Auto schema change requires sinking directly from a plain table in the same
        // database, with no visible generated or system columns.
        let auto_refresh_schema_from_table = if let Some((from_name, true)) = &direct_sink_from_name
        {
            let from_relation = binder.bind_relation_by_name(from_name, None, None, true)?;
            if let Relation::BaseTable(table) = from_relation {
                if table.table_catalog.table_type != TableType::Table {
                    return Err(ErrorCode::InvalidInputSyntax(format!(
                        "auto schema change is only supported on TABLE, but got {:?}",
                        table.table_catalog.table_type
                    ))
                    .into());
                }
                if table.table_catalog.database_id != sink_database_id {
                    return Err(ErrorCode::InvalidInputSyntax(
                        "auto schema change sinks cannot be created from a cross-database table".to_owned()
                    )
                    .into());
                }
                for col in &table.table_catalog.columns {
                    if !col.is_hidden() && (col.is_generated() || col.is_rw_sys_column()) {
                        return Err(ErrorCode::InvalidInputSyntax(format!(
                            "auto schema change is not supported for tables with non-hidden generated or system columns, but got {}",
                            col.name()
                        ))
                        .into());
                    }
                }
                Some(table.table_catalog)
            } else {
                return Err(RwError::from(ErrorCode::NotSupported(
                    "auto schema change is only supported for TABLE".to_owned(),
                    "try recreating the sink from a table".to_owned(),
                )));
            }
        } else {
            None
        };

        let bound = binder.bind_query(&query)?;

        (
            binder.included_relations().clone(),
            binder.included_udfs().clone(),
            bound,
            auto_refresh_schema_from_table,
        )
    };

    let col_names = if sink_into_table_name.is_some() {
        parse_column_names(&stmt.columns)
    } else {
        get_column_names(&bound, stmt.columns)?
    };

    let emit_on_window_close = stmt.emit_mode == Some(EmitMode::OnWindowClose);
    if emit_on_window_close {
        context.warn_to_user("EMIT ON WINDOW CLOSE is currently an experimental feature. Please use it with caution.");
    }

    let format_desc = match stmt.sink_schema {
        // Case A: the statement carries an explicit `FORMAT ... ENCODE ...` clause.
        Some(f) => {
            validate_compatibility(&connector, &f)?;
            Some(bind_sink_format_desc(session, f)?)
        }
        // Case B: fall back to the legacy `type = '...'` WITH option.
        None => match resolved_with_options.get(SINK_TYPE_OPTION) {
            Some(t) => SinkFormatDesc::from_legacy_type(&connector, t)?.map(|mut f| {
                session.notice_to_user("Consider using the newer syntax `FORMAT ... ENCODE ...` instead of `type = '...'`.");
                if let Some(v) = resolved_with_options.get(SINK_USER_FORCE_APPEND_ONLY_OPTION) {
                    f.options.insert(SINK_USER_FORCE_APPEND_ONLY_OPTION.into(), v.into());
                }
                f
            }),
            None => None,
        },
    };

    let definition = context.normalized_sql().to_owned();
    let mut plan_root = if is_iceberg_engine_internal {
        Planner::new_for_iceberg_table_engine_sink(context.into()).plan_query(bound)?
    } else {
        Planner::new_for_stream(context.into()).plan_query(bound)?
    };
    if let Some(col_names) = &col_names {
        plan_root.set_out_names(col_names.clone())?;
    }

    // `snapshot = false` skips backfilling existing data and only sinks new changes.
    let without_backfill = match resolved_with_options.remove(SINK_SNAPSHOT_OPTION) {
        Some(flag) if flag.eq_ignore_ascii_case("false") => {
            if direct_sink_from_name.is_some() || is_iceberg_engine_internal {
                true
            } else {
                return Err(ErrorCode::BindError(
                    "`snapshot = false` is only supported by `CREATE SINK FROM <mv_or_table>`".to_owned(),
                )
                .into());
            }
        }
        _ => false,
    };

    let target_table_catalog = stmt
        .into_table_name
        .as_ref()
        .map(|table_name| fetch_table_catalog_for_alter(session, table_name).map(|t| t.0))
        .transpose()?;

    if let Some(target_table_catalog) = &target_table_catalog {
        if let Some(col_names) = col_names {
            let target_table_columns = target_table_catalog
                .columns()
                .iter()
                .map(|c| c.name())
                .collect::<BTreeSet<_>>();
            for c in col_names {
                if !target_table_columns.contains(c.as_str()) {
                    return Err(RwError::from(ErrorCode::BindError(format!(
                        "Column {} not found in table {}",
                        c,
                        target_table_catalog.name()
                    ))));
                }
            }
        }
        if target_table_catalog
            .columns()
            .iter()
            .any(|col| !col.nullable())
        {
            notice_to_user(format!(
                "The target table `{}` contains columns with NOT NULL constraints. Rows that violate these constraints will be silently ignored.",
                target_table_catalog.name(),
            ));
        }
    }

    let allow_snapshot_backfill = target_table_catalog.is_none() && !is_iceberg_engine_internal;

    let sink_plan = plan_root.gen_sink_plan(
        sink_table_name,
        definition,
        resolved_with_options,
        emit_on_window_close,
        db_name.to_owned(),
        sink_from_table_name,
        format_desc,
        without_backfill,
        target_table_catalog.clone(),
        partition_info,
        user_specified_columns,
        auto_refresh_schema_from_table,
        allow_snapshot_backfill,
    )?;

    let sink_desc = sink_plan.sink_desc().clone();

    let mut sink_plan: PlanRef = sink_plan.into();

    let ctx = sink_plan.ctx();
    let explain_trace = ctx.is_explain_trace();
    if explain_trace {
        ctx.trace("Create Sink:");
        ctx.trace(sink_plan.explain_to_string());
    }
    tracing::trace!("sink_plan: {:?}", sink_plan.explain_to_string());

    // Collect all relations and UDFs the sink depends on for catalog bookkeeping.
    let dependencies =
        RelationCollectorVisitor::collect_with(dependent_relations, sink_plan.clone())
            .into_iter()
            .chain(dependent_udfs.iter().copied().map_into())
            .collect();

    let sink_catalog = sink_desc.into_catalog(
        sink_schema_id,
        sink_database_id,
        UserId::new(session.user_id()),
        connector_conn_ref,
    );

    if let Some(table_catalog) = &target_table_catalog {
        for column in sink_catalog.full_columns() {
            if !column.can_dml() {
                unreachable!(
                    "generated columns and the system column `_rw_timestamp` must not appear in a sink's catalog, but one was found"
                );
            }
        }

        let table_columns_without_rw_timestamp = table_catalog.columns_without_rw_timestamp();
        // Project the sink's output onto the target table's columns, filling columns
        // the sink does not provide with their default expressions.
        let exprs = derive_default_column_project_for_sink(
            &sink_catalog,
            sink_plan.schema(),
            &table_columns_without_rw_timestamp,
            user_specified_columns,
        )?;

        let logical_project = generic::Project::new(exprs, sink_plan);

        sink_plan = StreamProject::new(logical_project).into();

        // Re-evaluate the target table's generated columns on top of the projection.
        let exprs = LogicalSource::derive_output_exprs_from_generated_columns(
            &table_columns_without_rw_timestamp,
        )?;

        if let Some(exprs) = exprs {
            let logical_project = generic::Project::new(exprs, sink_plan);
            sink_plan = StreamProject::new(logical_project).into();
        }
    }

    Ok(SinkPlanContext {
        query,
        sink_plan,
        sink_catalog,
        target_table_catalog,
        dependencies,
    })
}

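/// Determines whether the sink's rows should be shuffled by a computed partition value
/// before writing. Currently only the Iceberg connector opts in; all other connectors
/// return `None`.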
pub async fn get_partition_compute_info(
    with_options: &WithOptionsSecResolved,
) -> Result<Option<PartitionComputeInfo>> {
    let (options, secret_refs) = with_options.clone().into_parts();
    let Some(connector) = options.get(UPSTREAM_SOURCE_KEY).cloned() else {
        return Ok(None);
    };
    let properties = LocalSecretManager::global().fill_secrets(options, secret_refs)?;
    match connector.as_str() {
        ICEBERG_SINK => {
            let iceberg_config = IcebergConfig::from_btreemap(properties)?;
            get_partition_compute_info_for_iceberg(&iceberg_config).await
        }
        _ => Ok(None),
    }
}

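/// Loads the Iceberg table referenced by the config and extracts its partition layout.
/// Returns `None` when there is nothing to pre-compute: the table is created on demand,
/// is unpartitioned, or has no "sparse" partition field (identity/truncate/bucket).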
async fn get_partition_compute_info_for_iceberg(
    iceberg_config: &IcebergConfig,
) -> Result<Option<PartitionComputeInfo>> {
    // The table may not exist yet, so there is no partition spec to inspect.
    if iceberg_config.create_table_if_not_exists {
        return Ok(None);
    }
    let table = iceberg_config.load_table().await?;
    let partition_spec = table.metadata().default_partition_spec();
    if partition_spec.is_unpartitioned() {
        return Ok(None);
    }

    // Only "sparse" transforms make pre-computing and shuffling by partition
    // worthwhile; range-style transforms (year/month/day/hour) are skipped.
    let has_sparse_partition = partition_spec.fields().iter().any(|f| match f.transform {
        Transform::Identity | Transform::Truncate(_) | Transform::Bucket(_) => true,
        Transform::Year
        | Transform::Month
        | Transform::Day
        | Transform::Hour
        | Transform::Void
        | Transform::Unknown => false,
    });
    if !has_sparse_partition {
        return Ok(None);
    }

    let arrow_type = type_to_arrow_type(&iceberg::spec::Type::Struct(
        table.metadata().default_partition_type().clone(),
    ))
    .map_err(|_| {
        RwError::from(ErrorCode::SinkError(
            "Fail to convert iceberg partition type to arrow type".into(),
        ))
    })?;
    let ArrowDataType::Struct(struct_fields) = arrow_type else {
        return Err(RwError::from(ErrorCode::SinkError(
            "Partition type of iceberg should be a struct type".into(),
        )));
    };

    let schema = table.metadata().current_schema();
    let partition_fields = partition_spec
        .fields()
        .iter()
        .map(|f| {
            let source_f =
                schema
                    .field_by_id(f.source_id)
                    .ok_or(RwError::from(ErrorCode::SinkError(
                        "Fail to look up iceberg partition field".into(),
                    )))?;
            Ok((source_f.name.clone(), f.transform))
        })
        .collect::<Result<Vec<_>>>()?;

    Ok(Some(PartitionComputeInfo::Iceberg(IcebergPartitionInfo {
        partition_type: IcebergArrowConvert.struct_from_fields(&struct_fields)?,
        partition_fields,
    })))
}

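/// Handles `CREATE SINK`: plans the sink via [`gen_sink_plan`], builds the streaming
/// graph, and submits the job to the meta service, notifying the user if creation
/// runs long. An illustrative statement (connector options vary per connector):
///
/// ```sql
/// CREATE SINK snk1 FROM mv1
/// WITH (connector = 'kafka', topic = 't', properties.bootstrap.server = 'broker:9092')
/// FORMAT PLAIN ENCODE JSON;
/// ```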
pub async fn handle_create_sink(
    handle_args: HandlerArgs,
    stmt: CreateSinkStatement,
    is_iceberg_engine_internal: bool,
) -> Result<RwPgResponse> {
    let session = handle_args.session.clone();

    session.check_cluster_limits().await?;

    let if_not_exists = stmt.if_not_exists;
    if let Either::Right(resp) = session.check_relation_name_duplicated(
        stmt.sink_name.clone(),
        StatementType::CREATE_SINK,
        if_not_exists,
    )? {
        return Ok(resp);
    }

    if stmt.sink_name.base_name().starts_with(ICEBERG_SINK_PREFIX) {
        return Err(RwError::from(ErrorCode::InvalidInputSyntax(format!(
            "Sink name cannot start with reserved prefix '{}'",
            ICEBERG_SINK_PREFIX
        ))));
    }

    let (mut sink, graph, target_table_catalog, dependencies) = {
        let backfill_order_strategy = handle_args.with_options.backfill_order_strategy();

        let SinkPlanContext {
            query,
            sink_plan: plan,
            sink_catalog: sink,
            target_table_catalog,
            dependencies,
        } = gen_sink_plan(handle_args, stmt, None, is_iceberg_engine_internal).await?;

        let has_order_by = !query.order_by.is_empty();
        if has_order_by {
            plan.ctx().warn_to_user(
                r#"The ORDER BY clause in the CREATE SINK statement has no effect at all."#
                    .to_owned(),
            );
        }

        let backfill_order =
            plan_backfill_order(session.as_ref(), backfill_order_strategy, plan.clone())?;

        let graph =
            build_graph_with_strategy(plan, Some(GraphJobType::Sink), Some(backfill_order))?;

        (sink, graph, target_table_catalog, dependencies)
    };

    if let Some(table_catalog) = target_table_catalog {
        sink.original_target_columns = table_catalog.columns_without_rw_timestamp();
    }

    let _job_guard =
        session
            .env()
            .creating_streaming_job_tracker()
            .guard(CreatingStreamingJobInfo::new(
                session.session_id(),
                sink.database_id,
                sink.schema_id,
                sink.name.clone(),
            ));

    let catalog_writer = session.catalog_writer()?;
    execute_with_long_running_notification(
        catalog_writer.create_sink(sink.to_proto(), graph, dependencies, if_not_exists),
        &session,
        "CREATE SINK",
        LongRunningNotificationAction::MonitorBackfillJob,
    )
    .await?;

    Ok(PgResponse::empty_result(StatementType::CREATE_SINK))
}

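/// Returns the catalogs of all sinks that write into `table`, i.e. those created with
/// `CREATE SINK ... INTO <table>`.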
pub fn fetch_incoming_sinks(
    session: &Arc<SessionImpl>,
    table: &TableCatalog,
) -> Result<Vec<Arc<SinkCatalog>>> {
    let reader = session.env().catalog_reader().read_guard();
    let schema = reader.get_schema_by_id(table.database_id, table.schema_id)?;
    let Some(incoming_sinks) = schema.table_incoming_sinks(table.id) else {
        return Ok(vec![]);
    };
    let mut sinks = vec![];
    for sink_id in incoming_sinks {
        sinks.push(
            schema
                .get_sink_by_id(*sink_id)
                .expect("should exist")
                .clone(),
        );
    }
    Ok(sinks)
}

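/// Builds an `InputRef` pointing at sink column `idx`, failing if its type does not
/// exactly match the target table column's type.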
fn derive_sink_to_table_expr(
    sink_schema: &Schema,
    idx: usize,
    target_type: &DataType,
) -> Result<ExprImpl> {
    let input_type = &sink_schema.fields()[idx].data_type;

    if !target_type.equals_datatype(input_type) {
        bail!(
            "column type mismatch: {:?} vs {:?}, column name: {:?}",
            target_type,
            input_type,
            sink_schema.fields()[idx].name
        );
    } else {
        Ok(ExprImpl::InputRef(Box::new(InputRef::new(
            idx,
            input_type.clone(),
        ))))
    }
}

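/// Builds the projection that maps a sink's output onto the target table's columns.
/// If the user listed columns explicitly, sink columns are matched to table columns
/// by name; otherwise they are matched by position. Table columns the sink does not
/// provide fall back to their default expressions (with `now()` rewritten to proctime).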
pub(crate) fn derive_default_column_project_for_sink(
    sink: &SinkCatalog,
    sink_schema: &Schema,
    columns: &[ColumnCatalog],
    user_specified_columns: bool,
) -> Result<Vec<ExprImpl>> {
    assert_eq!(sink.full_schema().len(), sink_schema.len());

    let default_column_exprs = TableCatalog::default_column_exprs(columns);

    let mut exprs = vec![];

    let sink_visible_col_idxes = sink
        .full_columns()
        .iter()
        .positions(|c| !c.is_hidden())
        .collect_vec();
    let sink_visible_col_idxes_by_name = sink
        .full_columns()
        .iter()
        .enumerate()
        .filter(|(_, c)| !c.is_hidden())
        .map(|(i, c)| (c.name(), i))
        .collect::<BTreeMap<_, _>>();

    for (idx, column) in columns.iter().enumerate() {
        // Skip columns that cannot be written to, e.g. generated columns.
        if !column.can_dml() {
            continue;
        }

        let default_col_expr =
            || -> ExprImpl { rewrite_now_to_proctime(default_column_exprs[idx].clone()) };

        let sink_col_expr = |sink_col_idx: usize| -> Result<ExprImpl> {
            derive_sink_to_table_expr(sink_schema, sink_col_idx, column.data_type())
        };

        #[allow(clippy::collapsible_else_if)]
        if user_specified_columns {
            // Match sink columns to table columns by name.
            if let Some(idx) = sink_visible_col_idxes_by_name.get(column.name()) {
                exprs.push(sink_col_expr(*idx)?);
            } else {
                exprs.push(default_col_expr());
            }
        } else {
            // Match sink columns to table columns by position.
            if idx < sink_visible_col_idxes.len() {
                exprs.push(sink_col_expr(sink_visible_col_idxes[idx])?);
            } else {
                exprs.push(default_col_expr());
            }
        }
    }
    Ok(exprs)
}

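/// Converts a parsed `FORMAT ... ENCODE ... (...)` clause into a [`SinkFormatDesc`],
/// resolving schema-registry connection and secret references among its options.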
fn bind_sink_format_desc(
    session: &SessionImpl,
    value: FormatEncodeOptions,
) -> Result<SinkFormatDesc> {
    use risingwave_connector::sink::catalog::{SinkEncode, SinkFormat};
    use risingwave_connector::sink::encoder::TimestamptzHandlingMode;
    use risingwave_sqlparser::ast::{Encode as E, Format as F};

    let format = match value.format {
        F::Plain => SinkFormat::AppendOnly,
        F::Upsert => SinkFormat::Upsert,
        F::Debezium => SinkFormat::Debezium,
        f @ (F::Native | F::DebeziumMongo | F::Maxwell | F::Canal | F::None) => {
            return Err(ErrorCode::BindError(format!("sink format unsupported: {f}")).into());
        }
    };
    let encode = match value.row_encode {
        E::Json => SinkEncode::Json,
        E::Protobuf => SinkEncode::Protobuf,
        E::Avro => SinkEncode::Avro,
        E::Template => SinkEncode::Template,
        E::Parquet => SinkEncode::Parquet,
        E::Bytes => SinkEncode::Bytes,
        e @ (E::Native | E::Csv | E::None | E::Text) => {
            return Err(ErrorCode::BindError(format!("sink encode unsupported: {e}")).into());
        }
    };

    let mut key_encode = None;
    if let Some(encode) = value.key_encode {
        match encode {
            E::Text => key_encode = Some(SinkEncode::Text),
            E::Bytes => key_encode = Some(SinkEncode::Bytes),
            _ => {
                return Err(ErrorCode::BindError(format!(
                    "sink key encode unsupported: {encode}, only TEXT and BYTES supported"
                ))
                .into());
            }
        }
    }

    let (props, connection_type_flag, schema_registry_conn_ref) =
        resolve_connection_ref_and_secret_ref(
            WithOptions::try_from(value.row_options.as_slice())?,
            session,
            Some(TelemetryDatabaseObject::Sink),
        )?;
    ensure_connection_type_allowed(
        connection_type_flag,
        &SINK_ALLOWED_CONNECTION_SCHEMA_REGISTRY,
    )?;
    let (mut options, secret_refs) = props.into_parts();

    options
        .entry(TimestamptzHandlingMode::OPTION_KEY.to_owned())
        .or_insert(TimestamptzHandlingMode::FRONTEND_DEFAULT.to_owned());

    Ok(SinkFormatDesc {
        format,
        encode,
        options,
        secret_refs,
        key_encode,
        connection_id: schema_registry_conn_ref,
    })
}

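/// `FORMAT`/`ENCODE` combinations accepted by each connector that supports the
/// `FORMAT ... ENCODE ...` syntax; consulted by [`validate_compatibility`].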
static CONNECTORS_COMPATIBLE_FORMATS: LazyLock<HashMap<String, HashMap<Format, Vec<Encode>>>> =
    LazyLock::new(|| {
        use risingwave_connector::sink::Sink as _;
        use risingwave_connector::sink::file_sink::azblob::AzblobSink;
        use risingwave_connector::sink::file_sink::fs::FsSink;
        use risingwave_connector::sink::file_sink::gcs::GcsSink;
        use risingwave_connector::sink::file_sink::opendal_sink::FileSink;
        use risingwave_connector::sink::file_sink::s3::S3Sink;
        use risingwave_connector::sink::file_sink::webhdfs::WebhdfsSink;
        use risingwave_connector::sink::google_pubsub::GooglePubSubSink;
        use risingwave_connector::sink::kafka::KafkaSink;
        use risingwave_connector::sink::kinesis::KinesisSink;
        use risingwave_connector::sink::mqtt::MqttSink;
        use risingwave_connector::sink::pulsar::PulsarSink;
        use risingwave_connector::sink::redis::RedisSink;

        convert_args!(hashmap!(
            GooglePubSubSink::SINK_NAME => hashmap!(
                Format::Plain => vec![Encode::Json],
            ),
            KafkaSink::SINK_NAME => hashmap!(
                Format::Plain => vec![Encode::Json, Encode::Avro, Encode::Protobuf, Encode::Bytes],
                Format::Upsert => vec![Encode::Json, Encode::Avro, Encode::Protobuf],
                Format::Debezium => vec![Encode::Json],
            ),
            FileSink::<S3Sink>::SINK_NAME => hashmap!(
                Format::Plain => vec![Encode::Parquet, Encode::Json],
            ),
            FileSink::<SnowflakeSink>::SINK_NAME => hashmap!(
                Format::Plain => vec![Encode::Parquet, Encode::Json],
            ),
            FileSink::<GcsSink>::SINK_NAME => hashmap!(
                Format::Plain => vec![Encode::Parquet, Encode::Json],
            ),
            FileSink::<AzblobSink>::SINK_NAME => hashmap!(
                Format::Plain => vec![Encode::Parquet, Encode::Json],
            ),
            FileSink::<WebhdfsSink>::SINK_NAME => hashmap!(
                Format::Plain => vec![Encode::Parquet, Encode::Json],
            ),
            FileSink::<FsSink>::SINK_NAME => hashmap!(
                Format::Plain => vec![Encode::Parquet, Encode::Json],
            ),
            KinesisSink::SINK_NAME => hashmap!(
                Format::Plain => vec![Encode::Json],
                Format::Upsert => vec![Encode::Json],
                Format::Debezium => vec![Encode::Json],
            ),
            MqttSink::SINK_NAME => hashmap!(
                Format::Plain => vec![Encode::Json, Encode::Protobuf],
            ),
            PulsarSink::SINK_NAME => hashmap!(
                Format::Plain => vec![Encode::Json],
                Format::Upsert => vec![Encode::Json],
                Format::Debezium => vec![Encode::Json],
            ),
            RedisSink::SINK_NAME => hashmap!(
                Format::Plain => vec![Encode::Json, Encode::Template],
                Format::Upsert => vec![Encode::Json, Encode::Template],
            ),
        ))
    });

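/// Checks the requested `FORMAT ... ENCODE ...` combination against
/// [`CONNECTORS_COMPATIBLE_FORMATS`], e.g. `FORMAT UPSERT ENCODE AVRO` passes for
/// `kafka` but is rejected for `redis`. Also restricts `KEY ENCODE BYTES` to kafka.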
pub fn validate_compatibility(connector: &str, format_desc: &FormatEncodeOptions) -> Result<()> {
    let compatible_formats = CONNECTORS_COMPATIBLE_FORMATS
        .get(connector)
        .ok_or_else(|| {
            ErrorCode::BindError(format!(
                "connector {} is not supported by FORMAT ... ENCODE ... syntax",
                connector
            ))
        })?;
    let compatible_encodes = compatible_formats.get(&format_desc.format).ok_or_else(|| {
        ErrorCode::BindError(format!(
            "connector {} does not support format {:?}",
            connector, format_desc.format
        ))
    })?;
    if !compatible_encodes.contains(&format_desc.row_encode) {
        return Err(ErrorCode::BindError(format!(
            "connector {} does not support format {:?} with encode {:?}",
            connector, format_desc.format, format_desc.row_encode
        ))
        .into());
    }

    if let Some(encode) = &format_desc.key_encode
        && connector != KAFKA_SINK
        && matches!(encode, Encode::Bytes)
    {
        return Err(ErrorCode::BindError(format!(
            "key encode bytes is only supported by the kafka connector, but found {}",
            connector
        ))
        .into());
    }

    Ok(())
}

#[cfg(test)]
pub mod tests {
    use risingwave_common::catalog::{DEFAULT_DATABASE_NAME, DEFAULT_SCHEMA_NAME};

    use crate::catalog::root_catalog::SchemaPath;
    use crate::test_utils::{LocalFrontend, PROTO_FILE_DATA, create_proto_file};

    #[tokio::test]
    async fn test_create_sink_handler() {
        let proto_file = create_proto_file(PROTO_FILE_DATA);
        let sql = format!(
            r#"CREATE SOURCE t1
    WITH (connector = 'kafka', kafka.topic = 'abc', kafka.brokers = 'localhost:1001')
    FORMAT PLAIN ENCODE PROTOBUF (message = '.test.TestRecord', schema.location = 'file://{}')"#,
            proto_file.path().to_str().unwrap()
        );
        let frontend = LocalFrontend::new(Default::default()).await;
        frontend.run_sql(sql).await.unwrap();

        let sql = "create materialized view mv1 as select t1.country from t1;";
        frontend.run_sql(sql).await.unwrap();

        let sql = r#"CREATE SINK snk1 FROM mv1
            WITH (connector = 'jdbc', mysql.endpoint = '127.0.0.1:3306', mysql.table =
            '<table_name>', mysql.database = '<database_name>', mysql.user = '<user_name>',
            mysql.password = '<password>', type = 'append-only', force_append_only = 'true');"#.to_owned();
        frontend.run_sql(sql).await.unwrap();

        let session = frontend.session_ref();
        let catalog_reader = session.env().catalog_reader().read_guard();
        let schema_path = SchemaPath::Name(DEFAULT_SCHEMA_NAME);

        // Check that the source exists.
        let (source, _) = catalog_reader
            .get_source_by_name(DEFAULT_DATABASE_NAME, schema_path, "t1")
            .unwrap();
        assert_eq!(source.name, "t1");

        // Check that the materialized view exists.
        let (table, schema_name) = catalog_reader
            .get_created_table_by_name(DEFAULT_DATABASE_NAME, schema_path, "mv1")
            .unwrap();
        assert_eq!(table.name(), "mv1");

        // Check that the sink exists.
        let (sink, _) = catalog_reader
            .get_created_sink_by_name(DEFAULT_DATABASE_NAME, SchemaPath::Name(schema_name), "snk1")
            .unwrap();
        assert_eq!(sink.name, "snk1");
    }
}
967}