use std::cmp::Ordering;
use std::collections::{HashMap, HashSet};
use std::num::NonZeroUsize;
use std::sync::Arc;
use std::sync::atomic::AtomicU64;
use std::time::Duration;

use anyhow::{Context, anyhow};
use await_tree::InstrumentAwait;
use either::Either;
use itertools::Itertools;
use risingwave_common::catalog::{
    AlterDatabaseParam, ColumnCatalog, ColumnId, Field, FragmentTypeFlag,
};
use risingwave_common::config::DefaultParallelism;
use risingwave_common::hash::VnodeCountCompat;
use risingwave_common::id::{JobId, TableId};
use risingwave_common::secret::{LocalSecretManager, SecretEncryption};
use risingwave_common::system_param::reader::SystemParamsRead;
use risingwave_common::util::stream_graph_visitor::visit_stream_node_cont_mut;
use risingwave_common::{bail, bail_not_implemented};
use risingwave_connector::WithOptionsSecResolved;
use risingwave_connector::connector_common::validate_connection;
use risingwave_connector::sink::SinkParam;
use risingwave_connector::sink::iceberg::IcebergSink;
use risingwave_connector::source::cdc::CdcScanOptions;
use risingwave_connector::source::{
    ConnectorProperties, SourceEnumeratorContext, UPSTREAM_SOURCE_KEY,
};
use risingwave_meta_model::object::ObjectType;
use risingwave_meta_model::{
    ConnectionId, DatabaseId, DispatcherType, FragmentId, FunctionId, IndexId, JobStatus, ObjectId,
    SchemaId, SecretId, SinkId, SourceId, StreamingParallelism, SubscriptionId, UserId, ViewId,
    streaming_job,
};
use risingwave_pb::catalog::{
    Comment, Connection, CreateType, Database, Function, PbTable, Schema, Secret, Source,
    Subscription, Table, View,
};
use risingwave_pb::ddl_service::alter_owner_request::Object;
use risingwave_pb::ddl_service::{
    DdlProgress, TableJobType, WaitVersion, alter_name_request, alter_set_schema_request,
    alter_swap_rename_request, streaming_job_resource_type,
};
use risingwave_pb::meta::table_fragments::fragment::FragmentDistributionType as PbFragmentDistributionType;
use risingwave_pb::plan_common::PbColumnCatalog;
use risingwave_pb::stream_plan::stream_node::NodeBody;
use risingwave_pb::stream_plan::{
    PbDispatchOutputMapping, PbStreamFragmentGraph, PbStreamNode, PbUpstreamSinkInfo,
    StreamFragmentGraph as StreamFragmentGraphProto,
};
use risingwave_pb::telemetry::{PbTelemetryDatabaseObject, PbTelemetryEventStage};
use strum::Display;
use thiserror_ext::AsReport;
use tokio::sync::{OwnedSemaphorePermit, Semaphore};
use tokio::time::sleep;
use tracing::Instrument;

use crate::barrier::{BarrierManagerRef, Command};
use crate::controller::catalog::{DropTableConnectorContext, ReleaseContext};
use crate::controller::cluster::StreamingClusterInfo;
use crate::controller::streaming_job::{FinishAutoRefreshSchemaSinkContext, SinkIntoTableContext};
use crate::controller::utils::build_select_node_list;
use crate::error::{MetaErrorInner, bail_invalid_parameter, bail_unavailable};
use crate::manager::iceberg_compaction::IcebergCompactionManagerRef;
use crate::manager::sink_coordination::SinkCoordinatorManager;
use crate::manager::{
    IGNORED_NOTIFICATION_VERSION, LocalNotification, MetaSrvEnv, MetadataManager,
    NotificationVersion, StreamingJob, StreamingJobType,
};
use crate::model::{
    DownstreamFragmentRelation, FragmentDownstreamRelation, FragmentId as CatalogFragmentId,
    StreamContext, StreamJobFragments, StreamJobFragmentsToCreate, TableParallelism,
};
use crate::stream::cdc::{
    parallel_cdc_table_backfill_fragment, try_init_parallel_cdc_table_snapshot_splits,
};
use crate::stream::{
    ActorGraphBuildResult, ActorGraphBuilder, AutoRefreshSchemaSinkContext,
    CompleteStreamFragmentGraph, CreateStreamingJobContext, CreateStreamingJobOption,
    FragmentGraphDownstreamContext, FragmentGraphUpstreamContext, GlobalStreamManagerRef,
    ReplaceStreamJobContext, ReschedulePolicy, SourceChange, SourceManagerRef, StreamFragmentGraph,
    UpstreamSinkInfo, check_sink_fragments_support_refresh_schema, create_source_worker,
    rewrite_refresh_schema_sink_fragment, state_match, validate_sink,
};
use crate::telemetry::report_event;
use crate::{MetaError, MetaResult};

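/// Whether a `DROP` statement should fail when dependent objects exist (`Restrict`),
/// or drop the dependents along with the target object (`Cascade`).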
#[derive(PartialEq)]
pub enum DropMode {
    Restrict,
    Cascade,
}

impl DropMode {
    pub fn from_request_setting(cascade: bool) -> DropMode {
        if cascade {
            DropMode::Cascade
        } else {
            DropMode::Restrict
        }
    }
}

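/// Identifier of a streaming job targeted by a DDL command, tagged with the job's kind.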
#[derive(strum::AsRefStr)]
pub enum StreamingJobId {
    MaterializedView(TableId),
    Sink(SinkId),
    Table(Option<SourceId>, TableId),
    Index(IndexId),
}

impl std::fmt::Display for StreamingJobId {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.as_ref())?;
        write!(f, "({})", self.id())
    }
}

impl StreamingJobId {
    fn id(&self) -> JobId {
        match self {
            StreamingJobId::MaterializedView(id) | StreamingJobId::Table(_, id) => id.as_job_id(),
            StreamingJobId::Index(id) => id.as_job_id(),
            StreamingJobId::Sink(id) => id.as_job_id(),
        }
    }
}

pub struct ReplaceStreamJobInfo {
    pub streaming_job: StreamingJob,
    pub fragment_graph: StreamFragmentGraphProto,
}

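/// A DDL command to be executed by [`DdlController::run_command`], one variant per
/// supported DDL statement.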
#[derive(Display)]
pub enum DdlCommand {
    CreateDatabase(Database),
    DropDatabase(DatabaseId),
    CreateSchema(Schema),
    DropSchema(SchemaId, DropMode),
    CreateNonSharedSource(Source),
    DropSource(SourceId, DropMode),
    ResetSource(SourceId),
    CreateFunction(Function),
    DropFunction(FunctionId, DropMode),
    CreateView(View, HashSet<ObjectId>),
    DropView(ViewId, DropMode),
    CreateStreamingJob {
        stream_job: StreamingJob,
        fragment_graph: StreamFragmentGraphProto,
        dependencies: HashSet<ObjectId>,
        resource_type: streaming_job_resource_type::ResourceType,
        if_not_exists: bool,
    },
    DropStreamingJob {
        job_id: StreamingJobId,
        drop_mode: DropMode,
    },
    AlterName(alter_name_request::Object, String),
    AlterSwapRename(alter_swap_rename_request::Object),
    ReplaceStreamJob(ReplaceStreamJobInfo),
    AlterNonSharedSource(Source),
    AlterObjectOwner(Object, UserId),
    AlterSetSchema(alter_set_schema_request::Object, SchemaId),
    CreateConnection(Connection),
    DropConnection(ConnectionId, DropMode),
    CreateSecret(Secret),
    AlterSecret(Secret),
    DropSecret(SecretId, DropMode),
    CommentOn(Comment),
    CreateSubscription(Subscription),
    DropSubscription(SubscriptionId, DropMode),
    AlterSubscriptionRetention {
        subscription_id: SubscriptionId,
        retention_seconds: u64,
        definition: String,
    },
    AlterDatabaseParam(DatabaseId, AlterDatabaseParam),
    AlterStreamingJobConfig(JobId, HashMap<String, String>, Vec<String>),
}

impl DdlCommand {
    fn object(&self) -> Either<String, ObjectId> {
        use Either::*;
        match self {
            DdlCommand::CreateDatabase(database) => Left(database.name.clone()),
            DdlCommand::DropDatabase(id) => Right(id.as_object_id()),
            DdlCommand::CreateSchema(schema) => Left(schema.name.clone()),
            DdlCommand::DropSchema(id, _) => Right(id.as_object_id()),
            DdlCommand::CreateNonSharedSource(source) => Left(source.name.clone()),
            DdlCommand::DropSource(id, _) => Right(id.as_object_id()),
            DdlCommand::ResetSource(id) => Right(id.as_object_id()),
            DdlCommand::CreateFunction(function) => Left(function.name.clone()),
            DdlCommand::DropFunction(id, _) => Right(id.as_object_id()),
            DdlCommand::CreateView(view, _) => Left(view.name.clone()),
            DdlCommand::DropView(id, _) => Right(id.as_object_id()),
            DdlCommand::CreateStreamingJob { stream_job, .. } => Left(stream_job.name()),
            DdlCommand::DropStreamingJob { job_id, .. } => Right(job_id.id().as_object_id()),
            DdlCommand::AlterName(object, _) => Left(format!("{object:?}")),
            DdlCommand::AlterSwapRename(object) => Left(format!("{object:?}")),
            DdlCommand::ReplaceStreamJob(info) => Left(info.streaming_job.name()),
            DdlCommand::AlterNonSharedSource(source) => Left(source.name.clone()),
            DdlCommand::AlterObjectOwner(object, _) => Left(format!("{object:?}")),
            DdlCommand::AlterSetSchema(object, _) => Left(format!("{object:?}")),
            DdlCommand::CreateConnection(connection) => Left(connection.name.clone()),
            DdlCommand::DropConnection(id, _) => Right(id.as_object_id()),
            DdlCommand::CreateSecret(secret) => Left(secret.name.clone()),
            DdlCommand::AlterSecret(secret) => Left(secret.name.clone()),
            DdlCommand::DropSecret(id, _) => Right(id.as_object_id()),
            DdlCommand::CommentOn(comment) => Right(comment.table_id.into()),
            DdlCommand::CreateSubscription(subscription) => Left(subscription.name.clone()),
            DdlCommand::DropSubscription(id, _) => Right(id.as_object_id()),
            DdlCommand::AlterSubscriptionRetention {
                subscription_id, ..
            } => Right(subscription_id.as_object_id()),
            DdlCommand::AlterDatabaseParam(id, _) => Right(id.as_object_id()),
            DdlCommand::AlterStreamingJobConfig(job_id, _, _) => Right(job_id.as_object_id()),
        }
    }

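    /// Returns whether the command is allowed to run while the cluster is still recovering.
    /// Commands that need a running barrier manager (e.g. creating or replacing streaming
    /// jobs) must wait until recovery completes.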
    fn allow_in_recovery(&self) -> bool {
        match self {
            DdlCommand::DropDatabase(_)
            | DdlCommand::DropSchema(_, _)
            | DdlCommand::DropSource(_, _)
            | DdlCommand::DropFunction(_, _)
            | DdlCommand::DropView(_, _)
            | DdlCommand::DropStreamingJob { .. }
            | DdlCommand::DropConnection(_, _)
            | DdlCommand::DropSecret(_, _)
            | DdlCommand::DropSubscription(_, _)
            | DdlCommand::AlterName(_, _)
            | DdlCommand::AlterObjectOwner(_, _)
            | DdlCommand::AlterSetSchema(_, _)
            | DdlCommand::CreateDatabase(_)
            | DdlCommand::CreateSchema(_)
            | DdlCommand::CreateFunction(_)
            | DdlCommand::CreateView(_, _)
            | DdlCommand::CreateConnection(_)
            | DdlCommand::CommentOn(_)
            | DdlCommand::CreateSecret(_)
            | DdlCommand::AlterSecret(_)
            | DdlCommand::AlterSwapRename(_)
            | DdlCommand::AlterDatabaseParam(_, _)
            | DdlCommand::AlterStreamingJobConfig(_, _, _)
            | DdlCommand::AlterSubscriptionRetention { .. } => true,
            DdlCommand::CreateStreamingJob { .. }
            | DdlCommand::CreateNonSharedSource(_)
            | DdlCommand::ReplaceStreamJob(_)
            | DdlCommand::AlterNonSharedSource(_)
            | DdlCommand::ResetSource(_)
            | DdlCommand::CreateSubscription(_) => false,
        }
    }
}

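/// The controller on the meta node that executes DDL commands, coordinating the catalog,
/// the stream manager, the source manager and the barrier manager.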
#[derive(Clone)]
pub struct DdlController {
    pub(crate) env: MetaSrvEnv,

    pub(crate) metadata_manager: MetadataManager,
    pub(crate) stream_manager: GlobalStreamManagerRef,
    pub(crate) source_manager: SourceManagerRef,
    barrier_manager: BarrierManagerRef,
    sink_manager: SinkCoordinatorManager,
    iceberg_compaction_manager: IcebergCompactionManagerRef,

    pub(crate) creating_streaming_job_permits: Arc<CreatingStreamingJobPermit>,

    seq: Arc<AtomicU64>,
}

#[derive(Clone)]
pub struct CreatingStreamingJobPermit {
    pub(crate) semaphore: Arc<Semaphore>,
}

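/// Limits how many streaming jobs may be in the creating state at the same time. The limit
/// follows the `max_concurrent_creating_streaming_jobs` system parameter, where `0` means
/// unlimited; a background task resizes the semaphore when the parameter changes.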
impl CreatingStreamingJobPermit {
    async fn new(env: &MetaSrvEnv) -> Self {
        let mut permits = env
            .system_params_reader()
            .await
            .max_concurrent_creating_streaming_jobs() as usize;
        if permits == 0 {
            // `max_concurrent_creating_streaming_jobs` being 0 means unlimited.
            permits = Semaphore::MAX_PERMITS;
        }
        let semaphore = Arc::new(Semaphore::new(permits));

        let (local_notification_tx, mut local_notification_rx) =
            tokio::sync::mpsc::unbounded_channel();
        env.notification_manager()
            .insert_local_sender(local_notification_tx);
        let semaphore_clone = semaphore.clone();
        tokio::spawn(async move {
            while let Some(notification) = local_notification_rx.recv().await {
                let LocalNotification::SystemParamsChange(p) = &notification else {
                    continue;
                };
                let mut new_permits = p.max_concurrent_creating_streaming_jobs() as usize;
                if new_permits == 0 {
                    new_permits = Semaphore::MAX_PERMITS;
                }
                match permits.cmp(&new_permits) {
                    Ordering::Less => {
                        semaphore_clone.add_permits(new_permits - permits);
                    }
                    Ordering::Equal => continue,
                    Ordering::Greater => {
                        let to_release = permits - new_permits;
                        let reduced = semaphore_clone.forget_permits(to_release);
                        if reduced != to_release {
                            tracing::warn!(
                                "not enough permits to release, expected {}, but reduced {}",
                                to_release,
                                reduced
                            );
                        }
                    }
                }
                tracing::info!(
                    "max_concurrent_creating_streaming_jobs changed from {} to {}",
                    permits,
                    new_permits
                );
                permits = new_permits;
            }
        });

        Self { semaphore }
    }
}

impl DdlController {
    pub async fn new(
        env: MetaSrvEnv,
        metadata_manager: MetadataManager,
        stream_manager: GlobalStreamManagerRef,
        source_manager: SourceManagerRef,
        barrier_manager: BarrierManagerRef,
        sink_manager: SinkCoordinatorManager,
        iceberg_compaction_manager: IcebergCompactionManagerRef,
    ) -> Self {
        let creating_streaming_job_permits = Arc::new(CreatingStreamingJobPermit::new(&env).await);
        Self {
            env,
            metadata_manager,
            stream_manager,
            source_manager,
            barrier_manager,
            sink_manager,
            iceberg_compaction_manager,
            creating_streaming_job_permits,
            seq: Arc::new(AtomicU64::new(0)),
        }
    }

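    /// Returns the next monotonically increasing sequence number for a DDL command, used to
    /// key the command in the await-tree registry.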
    pub fn next_seq(&self) -> u64 {
        self.seq.fetch_add(1, std::sync::atomic::Ordering::Relaxed)
    }

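    /// Executes the given [`DdlCommand`] in a dedicated tokio task, so that execution
    /// continues even if the RPC that issued it is dropped. Returns the version that the
    /// frontend should wait for to observe the catalog change.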
    #[allow(clippy::large_stack_frames)]
    pub async fn run_command(&self, command: DdlCommand) -> MetaResult<Option<WaitVersion>> {
        if !command.allow_in_recovery() {
            self.barrier_manager.check_status_running()?;
        }

        let await_tree_key = format!("DDL Command {}", self.next_seq());
        let await_tree_span = await_tree::span!("{command}({})", command.object());

        let ctrl = self.clone();
        let fut = Box::pin(async move {
            match command {
                DdlCommand::CreateDatabase(database) => ctrl.create_database(database).await,
                DdlCommand::DropDatabase(database_id) => ctrl.drop_database(database_id).await,
                DdlCommand::CreateSchema(schema) => ctrl.create_schema(schema).await,
                DdlCommand::DropSchema(schema_id, drop_mode) => {
                    ctrl.drop_schema(schema_id, drop_mode).await
                }
                DdlCommand::CreateNonSharedSource(source) => {
                    ctrl.create_non_shared_source(source).await
                }
                DdlCommand::DropSource(source_id, drop_mode) => {
                    ctrl.drop_source(source_id, drop_mode).await
                }
                DdlCommand::ResetSource(source_id) => ctrl.reset_source(source_id).await,
                DdlCommand::CreateFunction(function) => ctrl.create_function(function).await,
                DdlCommand::DropFunction(function_id, drop_mode) => {
                    ctrl.drop_function(function_id, drop_mode).await
                }
                DdlCommand::CreateView(view, dependencies) => {
                    ctrl.create_view(view, dependencies).await
                }
                DdlCommand::DropView(view_id, drop_mode) => {
                    ctrl.drop_view(view_id, drop_mode).await
                }
                DdlCommand::CreateStreamingJob {
                    stream_job,
                    fragment_graph,
                    dependencies,
                    resource_type,
                    if_not_exists,
                } => {
                    ctrl.create_streaming_job(
                        stream_job,
                        fragment_graph,
                        dependencies,
                        resource_type,
                        if_not_exists,
                    )
                    .await
                }
                DdlCommand::DropStreamingJob { job_id, drop_mode } => {
                    ctrl.drop_streaming_job(job_id, drop_mode).await
                }
                DdlCommand::ReplaceStreamJob(ReplaceStreamJobInfo {
                    streaming_job,
                    fragment_graph,
                }) => ctrl.replace_job(streaming_job, fragment_graph).await,
                DdlCommand::AlterName(relation, name) => ctrl.alter_name(relation, &name).await,
                DdlCommand::AlterObjectOwner(object, owner_id) => {
                    ctrl.alter_owner(object, owner_id).await
                }
                DdlCommand::AlterSetSchema(object, new_schema_id) => {
                    ctrl.alter_set_schema(object, new_schema_id).await
                }
                DdlCommand::CreateConnection(connection) => {
                    ctrl.create_connection(connection).await
                }
                DdlCommand::DropConnection(connection_id, drop_mode) => {
                    ctrl.drop_connection(connection_id, drop_mode).await
                }
                DdlCommand::CreateSecret(secret) => ctrl.create_secret(secret).await,
                DdlCommand::DropSecret(secret_id, drop_mode) => {
                    ctrl.drop_secret(secret_id, drop_mode).await
                }
                DdlCommand::AlterSecret(secret) => ctrl.alter_secret(secret).await,
                DdlCommand::AlterNonSharedSource(source) => {
                    ctrl.alter_non_shared_source(source).await
                }
                DdlCommand::CommentOn(comment) => ctrl.comment_on(comment).await,
                DdlCommand::CreateSubscription(subscription) => {
                    ctrl.create_subscription(subscription).await
                }
                DdlCommand::DropSubscription(subscription_id, drop_mode) => {
                    ctrl.drop_subscription(subscription_id, drop_mode).await
                }
                DdlCommand::AlterSubscriptionRetention {
                    subscription_id,
                    retention_seconds,
                    definition,
                } => {
                    ctrl.alter_subscription_retention(
                        subscription_id,
                        retention_seconds,
                        definition,
                    )
                    .await
                }
                DdlCommand::AlterSwapRename(objects) => ctrl.alter_swap_rename(objects).await,
                DdlCommand::AlterDatabaseParam(database_id, param) => {
                    ctrl.alter_database_param(database_id, param).await
                }
                DdlCommand::AlterStreamingJobConfig(job_id, entries_to_add, keys_to_remove) => {
                    ctrl.alter_streaming_job_config(job_id, entries_to_add, keys_to_remove)
                        .await
                }
            }
        })
        .in_current_span();
        let fut = (self.env.await_tree_reg())
            .register(await_tree_key, await_tree_span)
            .instrument(Box::pin(fut));
        let notification_version = tokio::spawn(fut).await.map_err(|e| anyhow!(e))??;
        Ok(Some(WaitVersion {
            catalog_version: notification_version,
            hummock_version_id: self.barrier_manager.get_hummock_version_id().await,
        }))
    }

    pub async fn get_ddl_progress(&self) -> MetaResult<Vec<DdlProgress>> {
        self.barrier_manager.get_ddl_progress().await
    }

    async fn create_database(&self, database: Database) -> MetaResult<NotificationVersion> {
        let (version, updated_db) = self
            .metadata_manager
            .catalog_controller
            .create_database(database)
            .await?;
        self.barrier_manager
            .update_database_barrier(
                updated_db.database_id,
                updated_db.barrier_interval_ms.map(|v| v as u32),
                updated_db.checkpoint_frequency.map(|v| v as u64),
            )
            .await?;
        Ok(version)
    }

    #[tracing::instrument(skip(self), level = "debug")]
    pub async fn reschedule_streaming_job(
        &self,
        job_id: JobId,
        target: ReschedulePolicy,
        mut deferred: bool,
    ) -> MetaResult<()> {
        tracing::info!("altering parallelism for job {}", job_id);
        if self.barrier_manager.check_status_running().is_err() {
            tracing::info!(
                "alter parallelism is set to deferred mode because the system is in recovery state"
            );
            deferred = true;
        }

        self.stream_manager
            .reschedule_streaming_job(job_id, target, deferred)
            .await
    }

    pub async fn reschedule_streaming_job_backfill_parallelism(
        &self,
        job_id: JobId,
        parallelism: Option<StreamingParallelism>,
        mut deferred: bool,
    ) -> MetaResult<()> {
        tracing::info!("altering backfill parallelism for job {}", job_id);
        if self.barrier_manager.check_status_running().is_err() {
            tracing::info!(
                "alter backfill parallelism is set to deferred mode because the system is in recovery state"
            );
            deferred = true;
        }

        self.stream_manager
            .reschedule_streaming_job_backfill_parallelism(job_id, parallelism, deferred)
            .await
    }

    pub async fn reschedule_cdc_table_backfill(
        &self,
        job_id: JobId,
        target: ReschedulePolicy,
    ) -> MetaResult<()> {
        tracing::info!("altering CDC table backfill parallelism for job {}", job_id);
        if self.barrier_manager.check_status_running().is_err() {
            return Err(anyhow::anyhow!(
                "CDC table backfill reschedule is unavailable because the system is in recovery state"
            )
            .into());
        }
        self.stream_manager
            .reschedule_cdc_table_backfill(job_id, target)
            .await
    }

    pub async fn reschedule_fragments(
        &self,
        fragment_targets: HashMap<FragmentId, Option<StreamingParallelism>>,
    ) -> MetaResult<()> {
        tracing::info!(
            "altering parallelism for fragments {:?}",
            fragment_targets.keys()
        );
        let fragment_targets = fragment_targets
            .into_iter()
            .map(|(fragment_id, parallelism)| (fragment_id as CatalogFragmentId, parallelism))
            .collect();

        self.stream_manager
            .reschedule_fragments(fragment_targets)
            .await
    }

    async fn drop_database(&self, database_id: DatabaseId) -> MetaResult<NotificationVersion> {
        self.drop_object(ObjectType::Database, database_id, DropMode::Cascade)
            .await
    }

    async fn create_schema(&self, schema: Schema) -> MetaResult<NotificationVersion> {
        self.metadata_manager
            .catalog_controller
            .create_schema(schema)
            .await
    }

    async fn drop_schema(
        &self,
        schema_id: SchemaId,
        drop_mode: DropMode,
    ) -> MetaResult<NotificationVersion> {
        self.drop_object(ObjectType::Schema, schema_id, drop_mode)
            .await
    }

    async fn create_non_shared_source(&self, source: Source) -> MetaResult<NotificationVersion> {
        let handle = create_source_worker(&source, self.source_manager.metrics.clone())
            .await
            .context("failed to create source worker")?;

        let (source_id, version) = self
            .metadata_manager
            .catalog_controller
            .create_source(source)
            .await?;
        self.source_manager
            .register_source_with_handle(source_id, handle)
            .await;
        Ok(version)
    }

    async fn drop_source(
        &self,
        source_id: SourceId,
        drop_mode: DropMode,
    ) -> MetaResult<NotificationVersion> {
        self.drop_object(ObjectType::Source, source_id, drop_mode)
            .await
    }

    async fn reset_source(&self, source_id: SourceId) -> MetaResult<NotificationVersion> {
        tracing::info!(source_id = %source_id, "resetting CDC source offset to latest");

        let database_id = self
            .metadata_manager
            .catalog_controller
            .get_object_database_id(source_id)
            .await?;

        self.stream_manager
            .barrier_scheduler
            .run_command(database_id, Command::ResetSource { source_id })
            .await?;

        let version = self
            .metadata_manager
            .catalog_controller
            .notify_frontend_trivial()
            .await;
        Ok(version)
    }

    async fn alter_non_shared_source(&self, source: Source) -> MetaResult<NotificationVersion> {
        self.metadata_manager
            .catalog_controller
            .alter_non_shared_source(source)
            .await
    }

    async fn create_function(&self, function: Function) -> MetaResult<NotificationVersion> {
        self.metadata_manager
            .catalog_controller
            .create_function(function)
            .await
    }

    async fn drop_function(
        &self,
        function_id: FunctionId,
        drop_mode: DropMode,
    ) -> MetaResult<NotificationVersion> {
        self.drop_object(ObjectType::Function, function_id, drop_mode)
            .await
    }

    async fn create_view(
        &self,
        view: View,
        dependencies: HashSet<ObjectId>,
    ) -> MetaResult<NotificationVersion> {
        self.metadata_manager
            .catalog_controller
            .create_view(view, dependencies)
            .await
    }

    async fn drop_view(
        &self,
        view_id: ViewId,
        drop_mode: DropMode,
    ) -> MetaResult<NotificationVersion> {
        self.drop_object(ObjectType::View, view_id, drop_mode).await
    }

    async fn create_connection(&self, connection: Connection) -> MetaResult<NotificationVersion> {
        validate_connection(&connection).await?;
        self.metadata_manager
            .catalog_controller
            .create_connection(connection)
            .await
    }

    async fn drop_connection(
        &self,
        connection_id: ConnectionId,
        drop_mode: DropMode,
    ) -> MetaResult<NotificationVersion> {
        self.drop_object(ObjectType::Connection, connection_id, drop_mode)
            .await
    }

    async fn alter_database_param(
        &self,
        database_id: DatabaseId,
        param: AlterDatabaseParam,
    ) -> MetaResult<NotificationVersion> {
        let (version, updated_db) = self
            .metadata_manager
            .catalog_controller
            .alter_database_param(database_id, param)
            .await?;
        self.barrier_manager
            .update_database_barrier(
                database_id,
                updated_db.barrier_interval_ms.map(|v| v as u32),
                updated_db.checkpoint_frequency.map(|v| v as u64),
            )
            .await?;
        Ok(version)
    }

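    /// Encrypts the plaintext secret value with the meta node's `secret_store_private_key`
    /// and returns the serialized ciphertext.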
    fn get_encrypted_payload(&self, secret: &Secret) -> MetaResult<Vec<u8>> {
        let secret_store_private_key = self
            .env
            .opts
            .secret_store_private_key
            .clone()
            .ok_or_else(|| anyhow!("secret_store_private_key is not configured"))?;

        let encrypted_payload = SecretEncryption::encrypt(
            secret_store_private_key.as_slice(),
            secret.get_value().as_slice(),
        )
        .context(format!("failed to encrypt secret {}", secret.name))?;
        Ok(encrypted_payload
            .serialize()
            .context(format!("failed to serialize secret {}", secret.name))?)
    }

    async fn create_secret(&self, mut secret: Secret) -> MetaResult<NotificationVersion> {
        let secret_plain_payload = secret.value.clone();
        let encrypted_payload = self.get_encrypted_payload(&secret)?;
        secret.value = encrypted_payload;

        self.metadata_manager
            .catalog_controller
            .create_secret(secret, secret_plain_payload)
            .await
    }

    async fn drop_secret(
        &self,
        secret_id: SecretId,
        drop_mode: DropMode,
    ) -> MetaResult<NotificationVersion> {
        self.drop_object(ObjectType::Secret, secret_id, drop_mode)
            .await
    }

    async fn alter_secret(&self, mut secret: Secret) -> MetaResult<NotificationVersion> {
        let secret_plain_payload = secret.value.clone();
        let encrypted_payload = self.get_encrypted_payload(&secret)?;
        secret.value = encrypted_payload;
        self.metadata_manager
            .catalog_controller
            .alter_secret(secret, secret_plain_payload)
            .await
    }

    async fn create_subscription(
        &self,
        mut subscription: Subscription,
    ) -> MetaResult<NotificationVersion> {
        tracing::debug!("create subscription");
        let _permit = self
            .creating_streaming_job_permits
            .semaphore
            .acquire()
            .await
            .unwrap();
        let _reschedule_job_lock = self.stream_manager.reschedule_lock_read_guard().await;
        self.metadata_manager
            .catalog_controller
            .create_subscription_catalog(&mut subscription)
            .await?;
        if let Err(err) = self.stream_manager.create_subscription(&subscription).await {
            tracing::debug!(error = %err.as_report(), "failed to create subscription");
            let _ = self
                .metadata_manager
                .catalog_controller
                .try_abort_creating_subscription(subscription.id)
                .await
                .inspect_err(|e| {
                    tracing::error!(
                        error = %e.as_report(),
                        "failed to abort create subscription after failure"
                    );
                });
            return Err(err);
        }

        let version = self
            .metadata_manager
            .catalog_controller
            .notify_create_subscription(subscription.id)
            .await?;
        tracing::debug!("finish create subscription");
        Ok(version)
    }

    async fn drop_subscription(
        &self,
        subscription_id: SubscriptionId,
        drop_mode: DropMode,
    ) -> MetaResult<NotificationVersion> {
        tracing::debug!("preparing drop subscription");
        let _reschedule_job_lock = self.stream_manager.reschedule_lock_read_guard().await;
        let subscription = self
            .metadata_manager
            .catalog_controller
            .get_subscription_by_id(subscription_id)
            .await?;
        let table_id = subscription.dependent_table_id;
        let database_id = subscription.database_id;
        let (_, version) = self
            .metadata_manager
            .catalog_controller
            .drop_object(ObjectType::Subscription, subscription_id, drop_mode)
            .await?;
        self.stream_manager
            .drop_subscription(database_id, subscription_id, table_id)
            .await;
        tracing::debug!("finish drop subscription");
        Ok(version)
    }

    async fn alter_subscription_retention(
        &self,
        subscription_id: SubscriptionId,
        retention_seconds: u64,
        definition: String,
    ) -> MetaResult<NotificationVersion> {
        tracing::debug!("alter subscription retention");
        let _reschedule_job_lock = self.stream_manager.reschedule_lock_read_guard().await;
        let (version, subscription) = self
            .metadata_manager
            .catalog_controller
            .alter_subscription_retention(subscription_id, retention_seconds, definition)
            .await?;
        self.stream_manager
            .alter_subscription_retention(
                subscription.database_id,
                subscription.id,
                subscription.dependent_table_id,
                subscription.retention_seconds,
            )
            .await?;
        tracing::debug!("finish alter subscription retention");
        Ok(version)
    }

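    /// Validates a CDC table before creation: checks that the fragment graph contains
    /// exactly one stream scan fragment with a proper distribution, and verifies
    /// connectivity to the upstream database by constructing a split enumerator.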
    #[await_tree::instrument]
    pub(crate) async fn validate_cdc_table(
        &self,
        table: &Table,
        table_fragments: &StreamJobFragments,
    ) -> MetaResult<()> {
        let stream_scan_fragment = table_fragments
            .fragments
            .values()
            .filter(|f| {
                f.fragment_type_mask.contains(FragmentTypeFlag::StreamScan)
                    || f.fragment_type_mask
                        .contains(FragmentTypeFlag::StreamCdcScan)
            })
            .exactly_one()
            .ok()
            .with_context(|| {
                format!(
                    "expect exactly one stream scan fragment, got: {:?}",
                    table_fragments.fragments
                )
            })?;

        fn assert_parallelism(
            distribution_type: PbFragmentDistributionType,
            node_body: &Option<NodeBody>,
        ) {
            if let Some(NodeBody::StreamCdcScan(node)) = node_body {
                if let Some(o) = node.options
                    && CdcScanOptions::from_proto(&o).is_parallelized_backfill()
                {
                    // Parallelized CDC backfill: any distribution is acceptable.
                } else {
                    assert_eq!(
                        distribution_type,
                        PbFragmentDistributionType::Single,
                        "Non-parallelized CDC scan fragment should have Single distribution"
                    );
                }
            }
        }

        let mut found_cdc_scan = false;
        match &stream_scan_fragment.nodes.node_body {
            Some(NodeBody::StreamCdcScan(_)) => {
                assert_parallelism(
                    stream_scan_fragment.distribution_type,
                    &stream_scan_fragment.nodes.node_body,
                );
                if self
                    .validate_cdc_table_inner(&stream_scan_fragment.nodes.node_body, table.id)
                    .await?
                {
                    found_cdc_scan = true;
                }
            }
            // When the fragment root is a `Project` node, the CDC scans are its inputs.
            Some(NodeBody::Project(_)) => {
                for input in &stream_scan_fragment.nodes.input {
                    assert_parallelism(stream_scan_fragment.distribution_type, &input.node_body);
                    if self
                        .validate_cdc_table_inner(&input.node_body, table.id)
                        .await?
                    {
                        found_cdc_scan = true;
                    }
                }
            }
            _ => {
                bail!("Unexpected node body for stream cdc scan");
            }
        };
        if !found_cdc_scan {
            bail!("No stream cdc scan node found in stream scan fragment");
        }
        Ok(())
    }

    async fn validate_cdc_table_inner(
        &self,
        node_body: &Option<NodeBody>,
        table_id: TableId,
    ) -> MetaResult<bool> {
        if let Some(NodeBody::StreamCdcScan(stream_cdc_scan)) = node_body
            && let Some(ref cdc_table_desc) = stream_cdc_scan.cdc_table_desc
        {
            let options_with_secret = WithOptionsSecResolved::new(
                cdc_table_desc.connect_properties.clone(),
                cdc_table_desc.secret_refs.clone(),
            );

            let mut props = ConnectorProperties::extract(options_with_secret, true)?;
            props.init_from_pb_cdc_table_desc(cdc_table_desc);

            // Try creating a split enumerator to validate connectivity to the upstream database.
            let _enumerator = props
                .create_split_enumerator(SourceEnumeratorContext::dummy().into())
                .await?;

            tracing::debug!(?table_id, "validate cdc table success");
            Ok(true)
        } else {
            Ok(false)
        }
    }

    pub async fn validate_table_for_sink(&self, table_id: TableId) -> MetaResult<()> {
        let migrated = self
            .metadata_manager
            .catalog_controller
            .has_table_been_migrated(table_id)
            .await?;
        if !migrated {
            Err(anyhow::anyhow!(
                "Creating a sink into the unmigrated table {} is not allowed. Please migrate it first.",
                table_id
            )
            .into())
        } else {
            Ok(())
        }
    }

    #[await_tree::instrument(boxed, "create_streaming_job({streaming_job})")]
    pub async fn create_streaming_job(
        &self,
        mut streaming_job: StreamingJob,
        fragment_graph: StreamFragmentGraphProto,
        dependencies: HashSet<ObjectId>,
        resource_type: streaming_job_resource_type::ResourceType,
        if_not_exists: bool,
    ) -> MetaResult<NotificationVersion> {
        if let StreamingJob::Sink(sink) = &streaming_job
            && let Some(target_table) = sink.target_table
        {
            self.validate_table_for_sink(target_table).await?;
        }
        let ctx = StreamContext::from_protobuf(fragment_graph.get_ctx().unwrap());
        let streaming_job_model = match self
            .metadata_manager
            .catalog_controller
            .create_job_catalog(
                &mut streaming_job,
                &ctx,
                &fragment_graph.parallelism,
                fragment_graph.max_parallelism as _,
                dependencies,
                resource_type.clone(),
                &fragment_graph.backfill_parallelism,
            )
            .await
        {
            Ok(model) => model,
            Err(meta_err) => {
                if !if_not_exists {
                    return Err(meta_err);
                }
                // `IF NOT EXISTS`: on a duplicated job, wait for the existing one instead of failing.
                return if let MetaErrorInner::Duplicated(_, _, Some(job_id)) = meta_err.inner() {
                    if streaming_job.create_type() == CreateType::Foreground {
                        let database_id = streaming_job.database_id();
                        self.metadata_manager
                            .wait_streaming_job_finished(database_id, *job_id)
                            .await
                    } else {
                        Ok(IGNORED_NOTIFICATION_VERSION)
                    }
                } else {
                    Err(meta_err)
                };
            }
        };
        let job_id = streaming_job.id();
        tracing::debug!(
            id = %job_id,
            definition = streaming_job.definition(),
            create_type = streaming_job.create_type().as_str_name(),
            job_type = ?streaming_job.job_type(),
            "starting streaming job",
        );
        let permit = self
            .creating_streaming_job_permits
            .semaphore
            .clone()
            .acquire_owned()
            .instrument_await("acquire_creating_streaming_job_permit")
            .await
            .unwrap();
        let _reschedule_job_lock = self.stream_manager.reschedule_lock_read_guard().await;

        let name = streaming_job.name();
        let definition = streaming_job.definition();
        let source_id = match &streaming_job {
            StreamingJob::Table(Some(src), _, _) | StreamingJob::Source(src) => Some(src.id),
            _ => None,
        };

        match self
            .create_streaming_job_inner(
                ctx,
                streaming_job,
                fragment_graph,
                resource_type,
                permit,
                streaming_job_model,
            )
            .await
        {
            Ok(version) => Ok(version),
            Err(err) => {
                tracing::error!(id = %job_id, error = %err.as_report(), "failed to create streaming job");
                let event = risingwave_pb::meta::event_log::EventCreateStreamJobFail {
                    id: job_id,
                    name,
                    definition,
                    error: err.as_report().to_string(),
                };
                self.env.event_log_manager_ref().add_event_logs(vec![
                    risingwave_pb::meta::event_log::Event::CreateStreamJobFail(event),
                ]);
                let (aborted, _) = self
                    .metadata_manager
                    .catalog_controller
                    .try_abort_creating_streaming_job(job_id, false)
                    .await?;
                if aborted {
                    tracing::warn!(id = %job_id, "aborted streaming job");
                    // Also clean up the source embedded in the aborted job, if any.
                    if let Some(source_id) = source_id {
                        self.source_manager
                            .apply_source_change(SourceChange::DropSource {
                                dropped_source_ids: vec![source_id],
                            })
                            .await;
                    }
                }
                Err(err)
            }
        }
    }

    #[await_tree::instrument(boxed)]
    async fn create_streaming_job_inner(
        &self,
        ctx: StreamContext,
        mut streaming_job: StreamingJob,
        fragment_graph: StreamFragmentGraphProto,
        resource_type: streaming_job_resource_type::ResourceType,
        permit: OwnedSemaphorePermit,
        streaming_job_model: streaming_job::Model,
    ) -> MetaResult<NotificationVersion> {
        let mut fragment_graph =
            StreamFragmentGraph::new(&self.env, fragment_graph, &streaming_job)?;
        streaming_job.set_info_from_graph(&fragment_graph);

        // Create catalogs for the internal tables and refill their IDs into the fragment graph.
        let incomplete_internal_tables = fragment_graph
            .incomplete_internal_tables()
            .into_values()
            .collect_vec();
        let table_id_map = self
            .metadata_manager
            .catalog_controller
            .create_internal_table_catalog(&streaming_job, incomplete_internal_tables)
            .await?;
        fragment_graph.refill_internal_table_ids(table_id_map);

        tracing::debug!(id = %streaming_job.id(), "building streaming job");
        let (ctx, stream_job_fragments) = self
            .build_stream_job(
                ctx,
                streaming_job,
                fragment_graph,
                resource_type,
                streaming_job_model,
            )
            .await?;

        let streaming_job = &ctx.streaming_job;

        match streaming_job {
            StreamingJob::Table(None, table, TableJobType::SharedCdcSource) => {
                self.validate_cdc_table(table, &stream_job_fragments)
                    .await?;
            }
            StreamingJob::Table(Some(source), ..) => {
                // Register the table's embedded source and report telemetry.
                self.source_manager.register_source(source).await?;
                let connector_name = source
                    .get_with_properties()
                    .get(UPSTREAM_SOURCE_KEY)
                    .cloned();
                let attr = source.info.as_ref().map(|source_info| {
                    jsonbb::json!({
                        "format": source_info.format().as_str_name(),
                        "encode": source_info.row_encode().as_str_name(),
                    })
                });
                report_create_object(
                    streaming_job.id(),
                    "source",
                    PbTelemetryDatabaseObject::Source,
                    connector_name,
                    attr,
                );
            }
            StreamingJob::Sink(sink) => {
                if sink.auto_refresh_schema_from_table.is_some() {
                    check_sink_fragments_support_refresh_schema(&stream_job_fragments.fragments)?
                }
                // Validate the sink against the external system before creating it.
                validate_sink(sink).await?;
                let connector_name = sink.get_properties().get(UPSTREAM_SOURCE_KEY).cloned();
                let attr = sink.format_desc.as_ref().map(|sink_info| {
                    jsonbb::json!({
                        "format": sink_info.format().as_str_name(),
                        "encode": sink_info.encode().as_str_name(),
                    })
                });
                report_create_object(
                    streaming_job.id(),
                    "sink",
                    PbTelemetryDatabaseObject::Sink,
                    connector_name,
                    attr,
                );
            }
            StreamingJob::Source(source) => {
                // Register the source and report telemetry.
                self.source_manager.register_source(source).await?;
                let connector_name = source
                    .get_with_properties()
                    .get(UPSTREAM_SOURCE_KEY)
                    .cloned();
                let attr = source.info.as_ref().map(|source_info| {
                    jsonbb::json!({
                        "format": source_info.format().as_str_name(),
                        "encode": source_info.row_encode().as_str_name(),
                    })
                });
                report_create_object(
                    streaming_job.id(),
                    "source",
                    PbTelemetryDatabaseObject::Source,
                    connector_name,
                    attr,
                );
            }
            _ => {}
        }

        let backfill_orders = ctx.fragment_backfill_ordering.to_meta_model();
        self.metadata_manager
            .catalog_controller
            .prepare_stream_job_fragments(
                &stream_job_fragments,
                streaming_job,
                false,
                Some(backfill_orders),
            )
            .await?;

        let version = self
            .stream_manager
            .create_streaming_job(stream_job_fragments, ctx, permit)
            .await?;

        Ok(version)
    }

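    /// Drops a catalog object and, according to `drop_mode`, its dependents, then tears
    /// down the affected streaming jobs, source workers, sink coordinators and cached
    /// secrets. For Iceberg table sinks, this also attempts to drop the backing Iceberg
    /// table.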
    pub async fn drop_object(
        &self,
        object_type: ObjectType,
        object_id: impl Into<ObjectId>,
        drop_mode: DropMode,
    ) -> MetaResult<NotificationVersion> {
        let object_id = object_id.into();
        let _reschedule_job_lock = self.stream_manager.reschedule_lock_read_guard().await;
        let _source_tick_pause_guard = self.source_manager.pause_tick().await;

        let (release_ctx, version) = self
            .metadata_manager
            .catalog_controller
            .drop_object(object_type, object_id, drop_mode)
            .await?;

        if object_type == ObjectType::Source {
            self.env
                .notification_manager_ref()
                .notify_local_subscribers(LocalNotification::SourceDropped(object_id));
        }

        let ReleaseContext {
            database_id,
            removed_streaming_job_ids,
            removed_state_table_ids,
            removed_source_ids,
            removed_secret_ids: secret_ids,
            removed_source_fragments,
            removed_fragments,
            removed_sink_fragment_by_targets,
            removed_iceberg_table_sinks,
        } = release_ctx;

        self.stream_manager
            .drop_streaming_jobs(
                database_id,
                removed_streaming_job_ids,
                removed_state_table_ids,
                removed_fragments.iter().map(|id| *id as _).collect(),
                removed_sink_fragment_by_targets
                    .into_iter()
                    .map(|(target, sinks)| {
                        (target as _, sinks.into_iter().map(|id| id as _).collect())
                    })
                    .collect(),
            )
            .await;

        self.source_manager
            .apply_source_change(SourceChange::DropSource {
                dropped_source_ids: removed_source_ids.into_iter().map(|id| id as _).collect(),
            })
            .await;

        // Unregister the dropped fragments from the sources they consume.
        let dropped_source_fragments = removed_source_fragments;
        self.source_manager
            .apply_source_change(SourceChange::DropMv {
                dropped_source_fragments,
            })
            .await;

        let iceberg_sink_ids: Vec<SinkId> = removed_iceberg_table_sinks
            .iter()
            .map(|sink| sink.id)
            .collect();

        for sink in removed_iceberg_table_sinks {
            let sink_param = SinkParam::try_from_sink_catalog(sink.into())
                .expect("Iceberg sink should be valid");
            let iceberg_sink =
                IcebergSink::try_from(sink_param).expect("Iceberg sink should be valid");
            if let Ok(iceberg_catalog) = iceberg_sink.config.create_catalog().await {
                let table_identifier = iceberg_sink.config.full_table_name().unwrap();
                tracing::info!(
                    "dropping iceberg table {} for dropped sink",
                    table_identifier
                );

                let _ = iceberg_catalog
                    .drop_table(&table_identifier)
                    .await
                    .inspect_err(|err| {
                        tracing::error!(
                            "failed to drop iceberg table {} during cleanup: {}",
                            table_identifier,
                            err.as_report()
                        );
                    });
            }
        }

        if !iceberg_sink_ids.is_empty() {
            self.sink_manager
                .stop_sink_coordinator(iceberg_sink_ids.clone())
                .await;

            for sink_id in iceberg_sink_ids {
                self.iceberg_compaction_manager
                    .clear_iceberg_commits_by_sink_id(sink_id);
            }
        }

        for secret in secret_ids {
            LocalSecretManager::global().remove_secret(secret);
        }
        Ok(version)
    }

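    /// Replaces an existing streaming job with a new fragment graph, e.g. for `ALTER TABLE`
    /// schema changes. The new job is first created under a temporary id; on success the
    /// catalogs are swapped, and on failure the temporary job is aborted.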
    #[await_tree::instrument(boxed, "replace_streaming_job({streaming_job})")]
    pub async fn replace_job(
        &self,
        mut streaming_job: StreamingJob,
        fragment_graph: StreamFragmentGraphProto,
    ) -> MetaResult<NotificationVersion> {
        match &streaming_job {
            StreamingJob::Table(..)
            | StreamingJob::Source(..)
            | StreamingJob::MaterializedView(..) => {}
            StreamingJob::Sink(..) | StreamingJob::Index(..) => {
                bail_not_implemented!("schema change for {}", streaming_job.job_type_str())
            }
        }

        let job_id = streaming_job.id();

        let _reschedule_job_lock = self.stream_manager.reschedule_lock_read_guard().await;
        let ctx = StreamContext::from_protobuf(fragment_graph.get_ctx().unwrap());

        // The max parallelism must stay the same across the replacement.
        let original_max_parallelism = self
            .metadata_manager
            .get_job_max_parallelism(streaming_job.id())
            .await?;
        let fragment_graph = PbStreamFragmentGraph {
            max_parallelism: original_max_parallelism as _,
            ..fragment_graph
        };

        let fragment_graph = StreamFragmentGraph::new(&self.env, fragment_graph, &streaming_job)?;
        streaming_job.set_info_from_graph(&fragment_graph);

        let streaming_job = streaming_job;

        let auto_refresh_schema_sinks = if let StreamingJob::Table(_, table, _) = &streaming_job {
            let auto_refresh_schema_sinks = self
                .metadata_manager
                .catalog_controller
                .get_sink_auto_refresh_schema_from(table.id)
                .await?;
            if !auto_refresh_schema_sinks.is_empty() {
                let original_table_columns = self
                    .metadata_manager
                    .catalog_controller
                    .get_table_columns(table.id)
                    .await?;
                let original_table_column_ids: HashSet<_> = original_table_columns
                    .iter()
                    .map(|col| col.column_id())
                    .collect();
                let new_table_column_ids: HashSet<_> = table
                    .columns
                    .iter()
                    .map(|col| ColumnId::new(col.column_desc.as_ref().unwrap().column_id as _))
                    .collect();
                let newly_added_columns = table
                    .columns
                    .iter()
                    .filter(|col| {
                        !original_table_column_ids.contains(&ColumnId::new(
                            col.column_desc.as_ref().unwrap().column_id as _,
                        ))
                    })
                    .map(|col| ColumnCatalog::from(col.clone()))
                    .collect_vec();
                let removed_columns = original_table_columns
                    .iter()
                    .filter(|col| !new_table_column_ids.contains(&col.column_id()))
                    .cloned()
                    .collect_vec();
                // Only adding columns is supported: the new schema must be a superset of the original.
                if !removed_columns.is_empty() {
                    return Err(anyhow!(
                        "new table columns do not contain all original columns. new: {:?}, original: {:?}, not included: {:?}",
                        table.columns,
                        original_table_columns,
                        removed_columns
                            .iter()
                            .map(|col| col.column_id())
                            .collect_vec()
                    )
                    .into());
                }
                let mut sinks = Vec::with_capacity(auto_refresh_schema_sinks.len());
                for sink in auto_refresh_schema_sinks {
                    let sink_job_fragments = self
                        .metadata_manager
                        .get_job_fragments_by_id(sink.id.as_job_id())
                        .await?;
                    if sink_job_fragments.fragments.len() != 1 {
                        return Err(anyhow!(
                            "auto schema refresh sink must have only one fragment, but got {}",
                            sink_job_fragments.fragments.len()
                        )
                        .into());
                    }
                    let sink_ctx = sink_job_fragments.ctx;
                    let original_sink_fragment =
                        sink_job_fragments.fragments.into_values().next().unwrap();
                    let (new_sink_fragment, new_schema, new_log_store_table) =
                        rewrite_refresh_schema_sink_fragment(
                            &original_sink_fragment,
                            &sink,
                            &newly_added_columns,
                            &removed_columns,
                            table,
                            fragment_graph.table_fragment_id(),
                            self.env.id_gen_manager(),
                        )?;

                    let streaming_job = StreamingJob::Sink(sink);

                    let tmp_sink_model = self
                        .metadata_manager
                        .catalog_controller
                        .create_job_catalog_for_replace(&streaming_job, None, None, None)
                        .await?;
                    let tmp_sink_id = tmp_sink_model.job_id.as_sink_id();
                    let StreamingJob::Sink(sink) = streaming_job else {
                        unreachable!()
                    };

                    sinks.push(AutoRefreshSchemaSinkContext {
                        tmp_sink_id,
                        original_sink: sink,
                        original_fragment: original_sink_fragment,
                        new_schema,
                        newly_add_fields: newly_added_columns
                            .iter()
                            .map(|col| Field::from(&col.column_desc))
                            .collect(),
                        removed_column_names: removed_columns
                            .iter()
                            .map(|col| col.name.clone())
                            .collect(),
                        new_fragment: new_sink_fragment,
                        new_log_store_table,
                        ctx: sink_ctx,
                    });
                }
                Some(sinks)
            } else {
                None
            }
        } else {
            None
        };

        let streaming_job_model = self
            .metadata_manager
            .catalog_controller
            .create_job_catalog_for_replace(
                &streaming_job,
                Some(&ctx),
                fragment_graph.specified_parallelism().as_ref(),
                Some(fragment_graph.max_parallelism()),
            )
            .await?;
        let tmp_id = streaming_job_model.job_id;

        let tmp_sink_ids = auto_refresh_schema_sinks.as_ref().map(|sinks| {
            sinks
                .iter()
                .map(|sink| sink.tmp_sink_id.as_object_id())
                .collect_vec()
        });

        tracing::debug!(id = %job_id, "building replace streaming job");
        let mut updated_sink_catalogs = vec![];

        let mut drop_table_connector_ctx = None;
        let result: MetaResult<_> = try {
            let (mut ctx, mut stream_job_fragments) = self
                .build_replace_job(
                    ctx,
                    &streaming_job,
                    fragment_graph,
                    tmp_id,
                    auto_refresh_schema_sinks,
                    streaming_job_model,
                )
                .await?;
            drop_table_connector_ctx = ctx.drop_table_connector_ctx.clone();
            let auto_refresh_schema_sink_finish_ctx =
                ctx.auto_refresh_schema_sinks.as_ref().map(|sinks| {
                    sinks
                        .iter()
                        .map(|sink| FinishAutoRefreshSchemaSinkContext {
                            tmp_sink_id: sink.tmp_sink_id,
                            original_sink_id: sink.original_sink.id,
                            columns: sink.new_schema.clone(),
                            new_log_store_table: sink
                                .new_log_store_table
                                .as_ref()
                                .map(|table| (table.id, table.columns.clone())),
                        })
                        .collect()
                });

            // Rewire any sinks that sink into the replaced table.
            if let StreamingJob::Table(_, table, ..) = &streaming_job {
                let union_fragment = stream_job_fragments.inner.union_fragment_for_table();
                let upstream_infos = self
                    .metadata_manager
                    .catalog_controller
                    .get_all_upstream_sink_infos(table, union_fragment.fragment_id as _)
                    .await?;
                refill_upstream_sink_union_in_table(&mut union_fragment.nodes, &upstream_infos);

                for upstream_info in &upstream_infos {
                    let upstream_fragment_id = upstream_info.sink_fragment_id;
                    ctx.upstream_fragment_downstreams
                        .entry(upstream_fragment_id)
                        .or_default()
                        .push(upstream_info.new_sink_downstream.clone());
                    if upstream_info.sink_original_target_columns.is_empty() {
                        updated_sink_catalogs.push(upstream_info.sink_id);
                    }
                }
            }

            let replace_upstream = ctx.replace_upstream.clone();

            if let Some(sinks) = &ctx.auto_refresh_schema_sinks {
                let empty_downstreams = FragmentDownstreamRelation::default();
                for sink in sinks {
                    self.metadata_manager
                        .catalog_controller
                        .prepare_streaming_job(
                            sink.tmp_sink_id.as_job_id(),
                            || [&sink.new_fragment].into_iter(),
                            &empty_downstreams,
                            true,
                            None,
                            None,
                        )
                        .await?;
                }
            }

            self.metadata_manager
                .catalog_controller
                .prepare_stream_job_fragments(&stream_job_fragments, &streaming_job, true, None)
                .await?;

            self.stream_manager
                .replace_stream_job(stream_job_fragments, ctx)
                .await?;
            (replace_upstream, auto_refresh_schema_sink_finish_ctx)
        };

        match result {
            Ok((replace_upstream, auto_refresh_schema_sink_finish_ctx)) => {
                let version = self
                    .metadata_manager
                    .catalog_controller
                    .finish_replace_streaming_job(
                        tmp_id,
                        streaming_job,
                        replace_upstream,
                        SinkIntoTableContext {
                            updated_sink_catalogs,
                        },
                        drop_table_connector_ctx.as_ref(),
                        auto_refresh_schema_sink_finish_ctx,
                    )
                    .await?;
                if let Some(drop_table_connector_ctx) = &drop_table_connector_ctx {
                    self.source_manager
                        .apply_source_change(SourceChange::DropSource {
                            dropped_source_ids: vec![drop_table_connector_ctx.to_remove_source_id],
                        })
                        .await;
                }
                Ok(version)
            }
            Err(err) => {
                tracing::error!(id = %job_id, error = ?err.as_report(), "failed to replace job");
                let _ = self
                    .metadata_manager
                    .catalog_controller
                    .try_abort_replacing_streaming_job(tmp_id, tmp_sink_ids)
                    .await
                    .inspect_err(|err| {
                        tracing::error!(id = %job_id, error = ?err.as_report(), "failed to abort replacing job");
                    });
                Err(err)
            }
        }
    }

    #[await_tree::instrument(boxed, "drop_streaming_job{}({job_id})", if let DropMode::Cascade = drop_mode { "_cascade" } else { "" })]
    async fn drop_streaming_job(
        &self,
        job_id: StreamingJobId,
        drop_mode: DropMode,
    ) -> MetaResult<NotificationVersion> {
        let (object_id, object_type) = match job_id {
            StreamingJobId::MaterializedView(id) => (id.as_object_id(), ObjectType::Table),
            StreamingJobId::Sink(id) => (id.as_object_id(), ObjectType::Sink),
            StreamingJobId::Table(_, id) => (id.as_object_id(), ObjectType::Table),
            StreamingJobId::Index(idx) => (idx.as_object_id(), ObjectType::Index),
        };

        let job_status = self
            .metadata_manager
            .catalog_controller
            .get_streaming_job_status(job_id.id())
            .await?;
        let version = match job_status {
            JobStatus::Initial => {
                unreachable!(
                    "jobs with Initial status are never notified to the frontend and therefore cannot be dropped here"
                );
            }
            JobStatus::Creating => {
                self.stream_manager
                    .cancel_streaming_jobs(vec![job_id.id()])
                    .await?;
                IGNORED_NOTIFICATION_VERSION
            }
            JobStatus::Created => self.drop_object(object_type, object_id, drop_mode).await?,
        };

        Ok(version)
    }

    fn resolve_stream_parallelism(
        &self,
        specified: Option<NonZeroUsize>,
        max: NonZeroUsize,
        cluster_info: &StreamingClusterInfo,
        resource_group: String,
    ) -> MetaResult<NonZeroUsize> {
        let available = NonZeroUsize::new(cluster_info.parallelism(&resource_group));
        DdlController::resolve_stream_parallelism_inner(
            specified,
            max,
            available,
            &self.env.opts.default_parallelism,
            &resource_group,
        )
    }

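    /// Resolves the parallelism to schedule a new job with, from the user-specified
    /// parallelism (if any), the job's max parallelism, the slots available in the resource
    /// group, and the configured default parallelism. A specified parallelism must not
    /// exceed the max parallelism; a resolved default is capped to it.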
    fn resolve_stream_parallelism_inner(
        specified: Option<NonZeroUsize>,
        max: NonZeroUsize,
        available: Option<NonZeroUsize>,
        default_parallelism: &DefaultParallelism,
        resource_group: &str,
    ) -> MetaResult<NonZeroUsize> {
        let Some(available) = available else {
            bail_unavailable!(
                "no available slots to schedule in resource group \"{}\", \
                 have you allocated any compute nodes within this resource group?",
                resource_group
            );
        };

        if let Some(specified) = specified {
            if specified > max {
                bail_invalid_parameter!(
                    "specified parallelism {} should not exceed max parallelism {}",
                    specified,
                    max,
                );
            }
            if specified > available {
                tracing::warn!(
                    resource_group,
                    specified_parallelism = specified.get(),
                    available_parallelism = available.get(),
                    "specified parallelism exceeds available slots, scheduling with specified value",
                );
            }
            return Ok(specified);
        }

        let default_parallelism = match default_parallelism {
            DefaultParallelism::Full => available,
            DefaultParallelism::Default(num) => {
                if *num > available {
                    tracing::warn!(
                        resource_group,
                        configured_parallelism = num.get(),
                        available_parallelism = available.get(),
                        "default parallelism exceeds available slots, scheduling with configured value",
                    );
                }
                *num
            }
        };

        if default_parallelism > max {
            tracing::warn!(
                max_parallelism = max.get(),
                resource_group,
                "default parallelism exceeds max parallelism, capping to max",
            );
        }
        Ok(default_parallelism.min(max))
    }

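    /// Builds the actor graph and the context needed to create a streaming job from its
    /// fragment graph: resolves parallelism, wires the graph to its upstream root
    /// fragments, and prepares CDC backfill splits and sink-into-table info where needed.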
    #[await_tree::instrument]
    pub(crate) async fn build_stream_job(
        &self,
        stream_ctx: StreamContext,
        mut stream_job: StreamingJob,
        fragment_graph: StreamFragmentGraph,
        resource_type: streaming_job_resource_type::ResourceType,
        streaming_job_model: streaming_job::Model,
    ) -> MetaResult<(CreateStreamingJobContext, StreamJobFragmentsToCreate)> {
        let id = stream_job.id();
        let specified_parallelism = fragment_graph.specified_parallelism();
        let specified_backfill_parallelism = fragment_graph.specified_backfill_parallelism();
        let max_parallelism = NonZeroUsize::new(fragment_graph.max_parallelism())
            .expect("max parallelism must be positive");

        let fragment_backfill_ordering = fragment_graph.create_fragment_backfill_ordering();

        let (snapshot_backfill_info, cross_db_snapshot_backfill_info) =
            fragment_graph.collect_snapshot_backfill_info()?;
        assert!(
            snapshot_backfill_info
                .iter()
                .chain([&cross_db_snapshot_backfill_info])
                .flat_map(|info| info.upstream_mv_table_id_to_backfill_epoch.values())
                .all(|backfill_epoch| backfill_epoch.is_none()),
            "should not set backfill epoch when initially building the job: {:?} {:?}",
            snapshot_backfill_info,
            cross_db_snapshot_backfill_info
        );

        let locality_fragment_state_table_mapping =
            fragment_graph.find_locality_provider_fragment_state_table_mapping();

        self.metadata_manager
            .catalog_controller
            .validate_cross_db_snapshot_backfill(&cross_db_snapshot_backfill_info)
            .await?;

        let upstream_table_ids = fragment_graph
            .dependent_table_ids()
            .iter()
            .filter(|id| {
                !cross_db_snapshot_backfill_info
                    .upstream_mv_table_id_to_backfill_epoch
                    .contains_key(*id)
            })
            .cloned()
            .collect();

        let upstream_root_fragments = self
            .metadata_manager
            .get_upstream_root_fragments(&upstream_table_ids)
            .await?;

        if snapshot_backfill_info.is_some() {
            match stream_job {
                StreamingJob::MaterializedView(_)
                | StreamingJob::Sink(_)
                | StreamingJob::Index(_, _) => {}
                StreamingJob::Table(_, _, _) | StreamingJob::Source(_) => {
                    return Err(anyhow!(
                        "snapshot backfill is not supported for tables and sources"
                    )
                    .into());
                }
            }
        }

        let complete_graph = CompleteStreamFragmentGraph::with_upstreams(
            fragment_graph,
            FragmentGraphUpstreamContext {
                upstream_root_fragments,
            },
            (&stream_job).into(),
        )?;
        let resource_group = if let Some(group) = resource_type.resource_group() {
            group
        } else {
            self.metadata_manager
                .get_database_resource_group(stream_job.database_id())
                .await?
        };
        let is_serverless_backfill = matches!(
            &resource_type,
            streaming_job_resource_type::ResourceType::ServerlessBackfillResourceGroup(_)
        );

        let cluster_info = self.metadata_manager.get_streaming_cluster_info().await?;

        let initial_parallelism = specified_backfill_parallelism.or(specified_parallelism);
        let parallelism = self.resolve_stream_parallelism(
            initial_parallelism,
            max_parallelism,
            &cluster_info,
            resource_group.clone(),
        )?;

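        // If a parallelism was specified for the job (or its backfill), use it as-is; otherwise
        // apply the adaptive parallelism strategy from the stream context, falling back to the
        // system parameter.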
        let parallelism = if initial_parallelism.is_some() {
            parallelism.get()
        } else {
            let adaptive_strategy = match stream_ctx.adaptive_parallelism_strategy {
                Some(strategy) => strategy,
                None => self
                    .env
                    .system_params_reader()
                    .await
                    .adaptive_parallelism_strategy(),
            };
            adaptive_strategy.compute_target_parallelism(parallelism.get())
        };

        let parallelism = NonZeroUsize::new(parallelism).expect("parallelism must be positive");
        let actor_graph_builder = ActorGraphBuilder::new(id, complete_graph, parallelism)?;

        let ActorGraphBuildResult {
            graph,
            downstream_fragment_relations,
            upstream_fragment_downstreams,
            replace_upstream,
        } = actor_graph_builder.generate_graph()?;
        assert!(replace_upstream.is_empty());

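        // A job stays adaptive only when no parallelism was specified and the configured default
        // is `Full`; any explicit or configured value pins the job to a fixed parallelism.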
        let table_parallelism = match (specified_parallelism, &self.env.opts.default_parallelism) {
            (None, DefaultParallelism::Full) => TableParallelism::Adaptive,
            _ => TableParallelism::Fixed(parallelism.get()),
        };

        let stream_job_fragments = StreamJobFragments::new(
            id,
            graph,
            stream_ctx.clone(),
            table_parallelism,
            max_parallelism.get(),
        );

        if let Some(mview_fragment) = stream_job_fragments.mview_fragment() {
            stream_job.set_table_vnode_count(mview_fragment.vnode_count());
        }

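        // For a sink that writes into a table, build the upstream sink info that wires the new
        // sink fragment into the target table's materialize fragment.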
        let new_upstream_sink = if let StreamingJob::Sink(sink) = &stream_job
            && let Ok(table_id) = sink.get_target_table()
        {
            let tables = self
                .metadata_manager
                .get_table_catalog_by_ids(&[*table_id])
                .await?;
            let target_table = tables
                .first()
                .ok_or_else(|| MetaError::catalog_id_not_found("table", *table_id))?;
            let sink_fragment = stream_job_fragments
                .sink_fragment()
                .ok_or_else(|| anyhow::anyhow!("sink fragment not found for sink {}", sink.id))?;
            let mview_fragment_id = self
                .metadata_manager
                .catalog_controller
                .get_mview_fragment_by_id(table_id.as_job_id())
                .await?;
            let upstream_sink_info = build_upstream_sink_info(
                sink.id,
                sink.original_target_columns.clone(),
                sink_fragment.fragment_id as _,
                target_table,
                mview_fragment_id,
            )?;
            Some(upstream_sink_info)
        } else {
            None
        };

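        // For a table backfilled in parallel from a shared CDC source, initialize the snapshot
        // splits up front so that the backfill executors can pick them up.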
        let mut cdc_table_snapshot_splits = None;
        if let StreamingJob::Table(None, table, TableJobType::SharedCdcSource) = &stream_job
            && let Some((_, stream_cdc_scan)) =
                parallel_cdc_table_backfill_fragment(stream_job_fragments.fragments.values())
        {
            let splits = try_init_parallel_cdc_table_snapshot_splits(
                table.id,
                stream_cdc_scan.cdc_table_desc.as_ref().unwrap(),
                self.env.meta_store_ref(),
                stream_cdc_scan.options.as_ref().unwrap(),
                self.env.opts.cdc_table_split_init_insert_batch_size,
                self.env.opts.cdc_table_split_init_sleep_interval_splits,
                self.env.opts.cdc_table_split_init_sleep_duration_millis,
            )
            .await?;
            cdc_table_snapshot_splits = Some(splits);
        }

        let ctx = CreateStreamingJobContext {
            upstream_fragment_downstreams,
            database_resource_group: resource_group,
            definition: stream_job.definition(),
            create_type: stream_job.create_type(),
            job_type: (&stream_job).into(),
            streaming_job: stream_job,
            new_upstream_sink,
            option: CreateStreamingJobOption {},
            snapshot_backfill_info,
            cross_db_snapshot_backfill_info,
            fragment_backfill_ordering,
            locality_fragment_state_table_mapping,
            cdc_table_snapshot_splits,
            is_serverless_backfill,
            streaming_job_model,
        };

        Ok((
            ctx,
            StreamJobFragmentsToCreate {
                inner: stream_job_fragments,
                downstreams: downstream_fragment_relations,
            },
        ))
    }

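    /// Builds the actor graph and [`ReplaceStreamJobContext`] for replacing an existing
    /// streaming job in place (e.g. for schema changes). The new graph inherits the old job's
    /// parallelism and is reconnected to the old job's upstream and downstream fragments.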
    pub(crate) async fn build_replace_job(
        &self,
        stream_ctx: StreamContext,
        stream_job: &StreamingJob,
        mut fragment_graph: StreamFragmentGraph,
        tmp_job_id: JobId,
        auto_refresh_schema_sinks: Option<Vec<AutoRefreshSchemaSinkContext>>,
        streaming_job_model: streaming_job::Model,
    ) -> MetaResult<(ReplaceStreamJobContext, StreamJobFragmentsToCreate)> {
        match &stream_job {
            StreamingJob::Table(..)
            | StreamingJob::Source(..)
            | StreamingJob::MaterializedView(..) => {}
            StreamingJob::Sink(..) | StreamingJob::Index(..) => {
                bail_not_implemented!("schema change for {}", stream_job.job_type_str())
            }
        }

        let id = stream_job.id();

        let mut drop_table_associated_source_id = None;
        if let StreamingJob::Table(None, _, _) = &stream_job {
            drop_table_associated_source_id = self
                .metadata_manager
                .get_table_associated_source_id(id.as_mv_table_id())
                .await?;
        }

        let old_fragments = self.metadata_manager.get_job_fragments_by_id(id).await?;
        let old_internal_table_ids = old_fragments.internal_table_ids();

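        // Carry the old job's internal state tables over to the new graph: drop the state table
        // of the associated source if the table's connector is being removed, run a full state
        // graph match for materialized views, or fit the internal tables trivially otherwise.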
        let mut drop_table_connector_ctx = None;
        if let Some(to_remove_source_id) = drop_table_associated_source_id {
            debug_assert!(old_internal_table_ids.len() == 1);

            drop_table_connector_ctx = Some(DropTableConnectorContext {
                to_change_streaming_job_id: id,
                to_remove_state_table_id: old_internal_table_ids[0],
                to_remove_source_id,
            });
        } else if stream_job.is_materialized_view() {
            let old_fragments_upstreams = self
                .metadata_manager
                .catalog_controller
                .upstream_fragments(old_fragments.fragment_ids())
                .await?;

            let old_state_graph =
                state_match::Graph::from_existing(&old_fragments, &old_fragments_upstreams);
            let new_state_graph = state_match::Graph::from_building(&fragment_graph);
            let result = state_match::match_graph(&new_state_graph, &old_state_graph)
                .context("incompatible altering on the streaming job states")?;

            fragment_graph.fit_internal_table_ids_with_mapping(result.table_matches);
            fragment_graph.fit_snapshot_backfill_epochs(result.snapshot_backfill_epochs);
        } else {
            let old_internal_tables = self
                .metadata_manager
                .get_table_catalog_by_ids(&old_internal_table_ids)
                .await?;
            fragment_graph.fit_internal_tables_trivial(old_internal_tables)?;
        }

        let original_root_fragment = old_fragments
            .root_fragment()
            .expect("root fragment not found");

        let job_type = StreamingJobType::from(stream_job);

        let mut downstream_fragments = self.metadata_manager.get_downstream_fragments(id).await?;

        if let Some(auto_refresh_schema_sinks) = &auto_refresh_schema_sinks {
            let mut remaining_fragment: HashSet<_> = auto_refresh_schema_sinks
                .iter()
                .map(|sink| sink.original_fragment.fragment_id)
                .collect();
            for (_, downstream_fragment) in &mut downstream_fragments {
                if let Some(sink) = auto_refresh_schema_sinks.iter().find(|sink| {
                    sink.original_fragment.fragment_id == downstream_fragment.fragment_id
                }) {
                    assert!(remaining_fragment.remove(&downstream_fragment.fragment_id));
                    *downstream_fragment = sink.new_fragment.clone();
                }
            }
            assert!(remaining_fragment.is_empty());
        }

        let complete_graph = match &job_type {
            StreamingJobType::Table(TableJobType::General) | StreamingJobType::Source => {
                CompleteStreamFragmentGraph::with_downstreams(
                    fragment_graph,
                    FragmentGraphDownstreamContext {
                        original_root_fragment_id: original_root_fragment.fragment_id,
                        downstream_fragments,
                    },
                    job_type,
                )?
            }
            StreamingJobType::Table(TableJobType::SharedCdcSource)
            | StreamingJobType::MaterializedView => {
                let upstream_root_fragments = self
                    .metadata_manager
                    .get_upstream_root_fragments(fragment_graph.dependent_table_ids())
                    .await?;

                CompleteStreamFragmentGraph::with_upstreams_and_downstreams(
                    fragment_graph,
                    FragmentGraphUpstreamContext {
                        upstream_root_fragments,
                    },
                    FragmentGraphDownstreamContext {
                        original_root_fragment_id: original_root_fragment.fragment_id,
                        downstream_fragments,
                    },
                    job_type,
                )?
            }
            _ => unreachable!(),
        };

        let resource_group = self
            .metadata_manager
            .get_database_resource_group(stream_job.database_id())
            .await?;

        let parallelism = NonZeroUsize::new(match old_fragments.assigned_parallelism {
            TableParallelism::Fixed(n) => n,
            TableParallelism::Adaptive | TableParallelism::Custom => 1,
        })
        .expect("The number of actors in the original table fragment should be greater than 0");

        let actor_graph_builder = ActorGraphBuilder::new(id, complete_graph, parallelism)?;

        let ActorGraphBuildResult {
            graph,
            downstream_fragment_relations,
            upstream_fragment_downstreams,
            mut replace_upstream,
        } = actor_graph_builder.generate_graph()?;
        if matches!(
            job_type,
            StreamingJobType::Source | StreamingJobType::Table(TableJobType::General)
        ) {
            assert!(upstream_fragment_downstreams.is_empty());
        }

        let stream_job_fragments = StreamJobFragments::new(
            tmp_job_id,
            graph,
            stream_ctx,
            old_fragments.assigned_parallelism,
            old_fragments.max_parallelism,
        );

        if let Some(sinks) = &auto_refresh_schema_sinks {
            for sink in sinks {
                replace_upstream
                    .remove(&sink.new_fragment.fragment_id)
                    .expect("should exist");
            }
        }

        let ctx = ReplaceStreamJobContext {
            old_fragments,
            replace_upstream,
            upstream_fragment_downstreams,
            streaming_job: stream_job.clone(),
            database_resource_group: resource_group,
            tmp_id: tmp_job_id,
            drop_table_connector_ctx,
            auto_refresh_schema_sinks,
            streaming_job_model,
        };

        Ok((
            ctx,
            StreamJobFragmentsToCreate {
                inner: stream_job_fragments,
                downstreams: downstream_fragment_relations,
            },
        ))
    }

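    /// Renames a catalog object, dispatching on the object type carried in the request.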
    async fn alter_name(
        &self,
        relation: alter_name_request::Object,
        new_name: &str,
    ) -> MetaResult<NotificationVersion> {
        let (obj_type, id): (ObjectType, ObjectId) = match relation {
            alter_name_request::Object::TableId(id) => (ObjectType::Table, id.into()),
            alter_name_request::Object::ViewId(id) => (ObjectType::View, id.into()),
            alter_name_request::Object::IndexId(id) => (ObjectType::Index, id.into()),
            alter_name_request::Object::SinkId(id) => (ObjectType::Sink, id.into()),
            alter_name_request::Object::SourceId(id) => (ObjectType::Source, id.into()),
            alter_name_request::Object::SchemaId(id) => (ObjectType::Schema, id.into()),
            alter_name_request::Object::DatabaseId(id) => (ObjectType::Database, id.into()),
            alter_name_request::Object::SubscriptionId(id) => (ObjectType::Subscription, id.into()),
        };
        self.metadata_manager
            .catalog_controller
            .alter_name(obj_type, id, new_name)
            .await
    }

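    /// Swaps the names of two catalog objects of the same type by delegating to the catalog
    /// controller.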
    async fn alter_swap_rename(
        &self,
        object: alter_swap_rename_request::Object,
    ) -> MetaResult<NotificationVersion> {
        let (obj_type, src_id, dst_id) = match object {
            alter_swap_rename_request::Object::Schema(_) => unimplemented!("schema swap"),
            alter_swap_rename_request::Object::Table(objs) => {
                let (src_id, dst_id) = (objs.src_object_id, objs.dst_object_id);
                (ObjectType::Table, src_id, dst_id)
            }
            alter_swap_rename_request::Object::View(objs) => {
                let (src_id, dst_id) = (objs.src_object_id, objs.dst_object_id);
                (ObjectType::View, src_id, dst_id)
            }
            alter_swap_rename_request::Object::Source(objs) => {
                let (src_id, dst_id) = (objs.src_object_id, objs.dst_object_id);
                (ObjectType::Source, src_id, dst_id)
            }
            alter_swap_rename_request::Object::Sink(objs) => {
                let (src_id, dst_id) = (objs.src_object_id, objs.dst_object_id);
                (ObjectType::Sink, src_id, dst_id)
            }
            alter_swap_rename_request::Object::Subscription(objs) => {
                let (src_id, dst_id) = (objs.src_object_id, objs.dst_object_id);
                (ObjectType::Subscription, src_id, dst_id)
            }
        };

        self.metadata_manager
            .catalog_controller
            .alter_swap_rename(obj_type, src_id, dst_id)
            .await
    }

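    /// Changes the owner of a catalog object to the given user.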
    async fn alter_owner(
        &self,
        object: Object,
        owner_id: UserId,
    ) -> MetaResult<NotificationVersion> {
        let (obj_type, id): (ObjectType, ObjectId) = match object {
            Object::TableId(id) => (ObjectType::Table, id.into()),
            Object::ViewId(id) => (ObjectType::View, id.into()),
            Object::SourceId(id) => (ObjectType::Source, id.into()),
            Object::SinkId(id) => (ObjectType::Sink, id.into()),
            Object::SchemaId(id) => (ObjectType::Schema, id.into()),
            Object::DatabaseId(id) => (ObjectType::Database, id.into()),
            Object::SubscriptionId(id) => (ObjectType::Subscription, id.into()),
            Object::ConnectionId(id) => (ObjectType::Connection, id.into()),
            Object::FunctionId(id) => (ObjectType::Function, id.into()),
            Object::SecretId(id) => (ObjectType::Secret, id.into()),
        };
        self.metadata_manager
            .catalog_controller
            .alter_owner(obj_type, id, owner_id as _)
            .await
    }

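    /// Moves a catalog object into a different schema.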
    async fn alter_set_schema(
        &self,
        object: alter_set_schema_request::Object,
        new_schema_id: SchemaId,
    ) -> MetaResult<NotificationVersion> {
        let (obj_type, id): (ObjectType, ObjectId) = match object {
            alter_set_schema_request::Object::TableId(id) => (ObjectType::Table, id.into()),
            alter_set_schema_request::Object::ViewId(id) => (ObjectType::View, id.into()),
            alter_set_schema_request::Object::SourceId(id) => (ObjectType::Source, id.into()),
            alter_set_schema_request::Object::SinkId(id) => (ObjectType::Sink, id.into()),
            alter_set_schema_request::Object::FunctionId(id) => (ObjectType::Function, id.into()),
            alter_set_schema_request::Object::ConnectionId(id) => {
                (ObjectType::Connection, id.into())
            }
            alter_set_schema_request::Object::SubscriptionId(id) => {
                (ObjectType::Subscription, id.into())
            }
        };
        self.metadata_manager
            .catalog_controller
            .alter_schema(obj_type, id, new_schema_id)
            .await
    }

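    /// Waits until no background creating jobs remain, polling once per millisecond for up to
    /// 30 minutes, then returns the catalog and hummock versions for the frontend to wait for.
    /// Returns a cancellation error if the timeout elapses first.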
    pub async fn wait(&self) -> MetaResult<WaitVersion> {
        let timeout_ms = 30 * 60 * 1000;
        for _ in 0..timeout_ms {
            if self
                .metadata_manager
                .catalog_controller
                .list_background_creating_jobs(true, None)
                .await?
                .is_empty()
            {
                let catalog_version = self
                    .metadata_manager
                    .catalog_controller
                    .notify_frontend_trivial()
                    .await;
                let hummock_version_id = self.barrier_manager.get_hummock_version_id().await;
                return Ok(WaitVersion {
                    catalog_version,
                    hummock_version_id,
                });
            }

            sleep(Duration::from_millis(1)).await;
        }
        Err(MetaError::cancelled(format!(
            "timeout after {timeout_ms}ms"
        )))
    }

    async fn comment_on(&self, comment: Comment) -> MetaResult<NotificationVersion> {
        self.metadata_manager
            .catalog_controller
            .comment_on(comment)
            .await
    }

    async fn alter_streaming_job_config(
        &self,
        job_id: JobId,
        entries_to_add: HashMap<String, String>,
        keys_to_remove: Vec<String>,
    ) -> MetaResult<NotificationVersion> {
        self.metadata_manager
            .catalog_controller
            .alter_streaming_job_config(job_id, entries_to_add, keys_to_remove)
            .await
    }
}

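/// Reports a telemetry event for the creation of a streaming job object.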
fn report_create_object(
    job_id: JobId,
    event_name: &str,
    obj_type: PbTelemetryDatabaseObject,
    connector_name: Option<String>,
    attr_info: Option<jsonbb::Value>,
) {
    report_event(
        PbTelemetryEventStage::CreateStreamJob,
        event_name,
        job_id.as_raw_id() as _,
        connector_name,
        Some(obj_type),
        attr_info,
    );
}

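/// Builds the [`UpstreamSinkInfo`] that connects a sink fragment to the materialize fragment of
/// its target table: the sink's output schema, a hash dispatcher keyed on the target table's
/// distribution key (remapped to positions in the sink's output), and the projection from the
/// sink's columns to the table's current columns.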
pub fn build_upstream_sink_info(
    sink_id: SinkId,
    original_target_columns: Vec<PbColumnCatalog>,
    sink_fragment_id: FragmentId,
    target_table: &PbTable,
    target_fragment_id: FragmentId,
) -> MetaResult<UpstreamSinkInfo> {
    let sink_columns = if !original_target_columns.is_empty() {
        original_target_columns.clone()
    } else {
        target_table.columns.clone()
    };

    let sink_output_fields = sink_columns
        .iter()
        .map(|col| Field::from(col.column_desc.as_ref().unwrap()).to_prost())
        .collect_vec();
    let output_indices = (0..sink_output_fields.len())
        .map(|i| i as u32)
        .collect_vec();

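    // Map each distribution-key column of the target table to its position in the sink's
    // output, matching columns by column id.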
    let dist_key_indices: anyhow::Result<Vec<u32>> = try {
        let sink_idx_by_col_id = sink_columns
            .iter()
            .enumerate()
            .map(|(idx, col)| {
                let column_id = col.column_desc.as_ref().unwrap().column_id;
                (column_id, idx as u32)
            })
            .collect::<HashMap<_, _>>();
        target_table
            .distribution_key
            .iter()
            .map(|dist_idx| {
                let column_id = target_table.columns[*dist_idx as usize]
                    .column_desc
                    .as_ref()
                    .unwrap()
                    .column_id;
                let sink_idx = sink_idx_by_col_id
                    .get(&column_id)
                    .ok_or_else(|| anyhow::anyhow!("column id {} not found in sink", column_id))?;
                Ok(*sink_idx)
            })
            .collect::<anyhow::Result<Vec<_>>>()?
    };
    let dist_key_indices =
        dist_key_indices.map_err(|e| e.context("failed to get distribution key indices"))?;
    let downstream_fragment_id = target_fragment_id as _;
    let new_downstream_relation = DownstreamFragmentRelation {
        downstream_fragment_id,
        dispatcher_type: DispatcherType::Hash,
        dist_key_indices,
        output_mapping: PbDispatchOutputMapping::simple(output_indices),
    };
    let current_target_columns = target_table.get_columns();
    let project_exprs = build_select_node_list(&sink_columns, current_target_columns)?;
    Ok(UpstreamSinkInfo {
        sink_id,
        sink_fragment_id: sink_fragment_id as _,
        sink_output_fields,
        sink_original_target_columns: original_target_columns,
        project_exprs,
        new_sink_downstream: new_downstream_relation,
    })
}

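/// Refills the `UpstreamSinkUnion` node under the given union fragment root so that its initial
/// upstreams reflect the provided sinks' fragment ids, output schemas, and projection exprs.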
pub fn refill_upstream_sink_union_in_table(
    union_fragment_root: &mut PbStreamNode,
    upstream_sink_infos: &[UpstreamSinkInfo],
) {
    visit_stream_node_cont_mut(union_fragment_root, |node| {
        if let Some(NodeBody::UpstreamSinkUnion(upstream_sink_union)) = &mut node.node_body {
            let init_upstreams = upstream_sink_infos
                .iter()
                .map(|info| PbUpstreamSinkInfo {
                    upstream_fragment_id: info.sink_fragment_id,
                    sink_output_schema: info.sink_output_fields.clone(),
                    project_exprs: info.project_exprs.clone(),
                })
                .collect();
            upstream_sink_union.init_upstreams = init_upstreams;
            false
        } else {
            true
        }
    });
}

#[cfg(test)]
mod tests {
    use std::num::NonZeroUsize;

    use super::*;

    #[test]
    fn test_specified_parallelism_exceeds_available() {
        let result = DdlController::resolve_stream_parallelism_inner(
            Some(NonZeroUsize::new(100).unwrap()),
            NonZeroUsize::new(256).unwrap(),
            Some(NonZeroUsize::new(4).unwrap()),
            &DefaultParallelism::Full,
            "default",
        )
        .unwrap();
        assert_eq!(result.get(), 100);
    }

    #[test]
    fn test_allows_default_parallelism_over_available() {
        let result = DdlController::resolve_stream_parallelism_inner(
            None,
            NonZeroUsize::new(256).unwrap(),
            Some(NonZeroUsize::new(4).unwrap()),
            &DefaultParallelism::Default(NonZeroUsize::new(50).unwrap()),
            "default",
        )
        .unwrap();
        assert_eq!(result.get(), 50);
    }

    #[test]
    fn test_full_parallelism_capped_by_max() {
        let result = DdlController::resolve_stream_parallelism_inner(
            None,
            NonZeroUsize::new(6).unwrap(),
            Some(NonZeroUsize::new(10).unwrap()),
            &DefaultParallelism::Full,
            "default",
        )
        .unwrap();
        assert_eq!(result.get(), 6);
    }

    #[test]
    fn test_no_available_slots_returns_error() {
        let result = DdlController::resolve_stream_parallelism_inner(
            None,
            NonZeroUsize::new(4).unwrap(),
            None,
            &DefaultParallelism::Full,
            "default",
        );
        assert!(matches!(
            result,
            Err(ref e) if matches!(e.inner(), MetaErrorInner::Unavailable(_))
        ));
    }

    #[test]
    fn test_specified_over_max_returns_error() {
        let result = DdlController::resolve_stream_parallelism_inner(
            Some(NonZeroUsize::new(8).unwrap()),
            NonZeroUsize::new(4).unwrap(),
            Some(NonZeroUsize::new(10).unwrap()),
            &DefaultParallelism::Full,
            "default",
        );
        assert!(matches!(
            result,
            Err(ref e) if matches!(e.inner(), MetaErrorInner::InvalidParameter(_))
        ));
    }
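
    // A sketch of the remaining capping branch, following the pattern of the tests above: with
    // no specified parallelism and a configured default larger than the job's max parallelism,
    // the result is capped to the max.
    #[test]
    fn test_default_parallelism_capped_by_max() {
        let result = DdlController::resolve_stream_parallelism_inner(
            None,
            NonZeroUsize::new(4).unwrap(),
            Some(NonZeroUsize::new(10).unwrap()),
            &DefaultParallelism::Default(NonZeroUsize::new(8).unwrap()),
            "default",
        )
        .unwrap();
        assert_eq!(result.get(), 4);
    }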
}