1use std::collections::hash_map::Entry;
16use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};
17use std::num::NonZeroUsize;
18use std::sync::Arc;
19
20use anyhow::{Context, anyhow};
21use futures::TryStreamExt;
22use itertools::Itertools;
23use risingwave_common::bail;
24use risingwave_common::bitmap::Bitmap;
25use risingwave_common::catalog::{FragmentTypeFlag, FragmentTypeMask};
26use risingwave_common::hash::{VnodeCount, VnodeCountCompat};
27use risingwave_common::id::JobId;
28use risingwave_common::system_param::reader::SystemParamsRead;
29use risingwave_common::util::stream_graph_visitor::visit_stream_node_body;
30use risingwave_connector::source::SplitImpl;
31use risingwave_connector::source::cdc::CdcScanOptions;
32use risingwave_meta_model::fragment::DistributionType;
33use risingwave_meta_model::object::ObjectType;
34use risingwave_meta_model::prelude::{
35 Fragment as FragmentModel, FragmentRelation, FragmentSplits, Sink, StreamingJob,
36};
37use risingwave_meta_model::{
38 ActorId, ConnectorSplits, DatabaseId, DispatcherType, ExprContext, FragmentId, I32Array,
39 JobStatus, ObjectId, SchemaId, SinkId, SourceId, StreamNode, StreamingParallelism, TableId,
40 VnodeBitmap, WorkerId, database, fragment, fragment_relation, fragment_splits, object, sink,
41 source, streaming_job, table,
42};
43use risingwave_meta_model_migration::{ExprTrait, OnConflict, SimpleExpr};
44use risingwave_pb::catalog::PbTable;
45use risingwave_pb::common::PbActorLocation;
46use risingwave_pb::meta::subscribe_response::{
47 Info as NotificationInfo, Operation as NotificationOperation,
48};
49use risingwave_pb::meta::table_fragments::fragment::{
50 FragmentDistributionType, PbFragmentDistributionType,
51};
52use risingwave_pb::meta::table_fragments::{PbActorStatus, PbState};
53use risingwave_pb::meta::{FragmentDistribution, PbFragmentWorkerSlotMapping};
54use risingwave_pb::source::{ConnectorSplit, PbConnectorSplits};
55use risingwave_pb::stream_plan;
56use risingwave_pb::stream_plan::stream_node::NodeBody;
57use risingwave_pb::stream_plan::{
58 PbDispatchOutputMapping, PbDispatcherType, PbStreamNode, PbStreamScanType, StreamScanType,
59};
60use sea_orm::ActiveValue::Set;
61use sea_orm::sea_query::Expr;
62use sea_orm::{
63 ColumnTrait, ConnectionTrait, EntityTrait, FromQueryResult, JoinType, PaginatorTrait,
64 QueryFilter, QuerySelect, RelationTrait, TransactionTrait,
65};
66use serde::{Deserialize, Serialize};
67
68use crate::barrier::{SharedActorInfos, SharedFragmentInfo, SnapshotBackfillInfo};
69use crate::controller::catalog::CatalogController;
70use crate::controller::scale::{
71 FragmentRenderMap, LoadedFragmentContext, NoShuffleEnsemble, RenderedGraph, WorkerInfo,
72 find_fragment_no_shuffle_dags_detailed, load_fragment_context_for_jobs,
73 render_actor_assignments, resolve_streaming_job_definition,
74};
75use crate::controller::utils::{
76 FragmentDesc, PartialActorLocation, PartialFragmentStateTables, compose_dispatchers,
77 get_sink_fragment_by_ids, has_table_been_migrated, rebuild_fragment_mapping,
78 resolve_no_shuffle_actor_dispatcher,
79};
80use crate::error::MetaError;
81use crate::manager::{ActiveStreamingWorkerNodes, LocalNotification, NotificationManager};
82use crate::model::{
83 DownstreamFragmentRelation, Fragment, FragmentActorDispatchers, FragmentDownstreamRelation,
84 StreamActor, StreamContext, StreamJobFragments, StreamingJobModelContextExt as _,
85 TableParallelism,
86};
87use crate::rpc::ddl_controller::build_upstream_sink_info;
88use crate::stream::UpstreamSinkInfo;
89use crate::{MetaResult, model};
90
91#[derive(Debug)]
93pub struct InflightActorInfo {
94 pub worker_id: WorkerId,
95 pub vnode_bitmap: Option<Bitmap>,
96 pub splits: Vec<SplitImpl>,
97}
98
99#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
100struct ActorInfo {
101 pub actor_id: ActorId,
102 pub fragment_id: FragmentId,
103 pub splits: ConnectorSplits,
104 pub worker_id: WorkerId,
105 pub vnode_bitmap: Option<VnodeBitmap>,
106 pub expr_context: ExprContext,
107 pub config_override: Arc<str>,
108}
109
110#[derive(Debug)]
111pub struct InflightFragmentInfo {
112 pub fragment_id: FragmentId,
113 pub distribution_type: DistributionType,
114 pub fragment_type_mask: FragmentTypeMask,
115 pub vnode_count: usize,
116 pub nodes: PbStreamNode,
117 pub actors: HashMap<ActorId, InflightActorInfo>,
118 pub state_table_ids: HashSet<TableId>,
119}
120
121#[derive(Clone, Debug)]
122pub struct FragmentParallelismInfo {
123 pub distribution_type: FragmentDistributionType,
124 pub actor_count: usize,
125 pub vnode_count: usize,
126}
127
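/// SQL helpers that build SeaORM filter expressions over the `fragment_type_mask`
/// column: whether the mask intersects one flag, intersects any of a set of flags,
/// or is disjoint from a flag.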
128#[easy_ext::ext(FragmentTypeMaskExt)]
129pub impl FragmentTypeMask {
130 fn intersects(flag: FragmentTypeFlag) -> SimpleExpr {
131 Expr::col(fragment::Column::FragmentTypeMask)
132 .bit_and(Expr::value(flag as i32))
133 .ne(0)
134 }
135
136 fn intersects_any(flags: impl IntoIterator<Item = FragmentTypeFlag>) -> SimpleExpr {
137 Expr::col(fragment::Column::FragmentTypeMask)
138 .bit_and(Expr::value(FragmentTypeFlag::raw_flag(flags) as i32))
139 .ne(0)
140 }
141
142 fn disjoint(flag: FragmentTypeFlag) -> SimpleExpr {
143 Expr::col(fragment::Column::FragmentTypeMask)
144 .bit_and(Expr::value(flag as i32))
145 .eq(0)
146 }
147}
148
149#[derive(Clone, Debug, FromQueryResult, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct StreamingJobInfo {
152 pub job_id: JobId,
153 pub obj_type: ObjectType,
154 pub name: String,
155 pub job_status: JobStatus,
156 pub parallelism: StreamingParallelism,
157 pub max_parallelism: i32,
158 pub resource_group: String,
159 pub config_override: String,
160 pub database_id: DatabaseId,
161 pub schema_id: SchemaId,
162}
163
164impl NotificationManager {
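    /// Notify frontend nodes of the given fragment worker-slot mappings and fan out a
    /// corresponding local upsert/delete notification. No-op when `fragment_mappings`
    /// is empty.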
165 pub(crate) fn notify_fragment_mapping(
166 &self,
167 operation: NotificationOperation,
168 fragment_mappings: Vec<PbFragmentWorkerSlotMapping>,
169 ) {
170 let fragment_ids = fragment_mappings
171 .iter()
172 .map(|mapping| mapping.fragment_id)
173 .collect_vec();
174 if fragment_ids.is_empty() {
175 return;
176 }
177 for fragment_mapping in fragment_mappings {
179 self.notify_frontend_without_version(
180 operation,
181 NotificationInfo::StreamingWorkerSlotMapping(fragment_mapping),
182 );
183 }
184
185 match operation {
187 NotificationOperation::Add | NotificationOperation::Update => {
188 self.notify_local_subscribers(LocalNotification::FragmentMappingsUpsert(
189 fragment_ids,
190 ));
191 }
192 NotificationOperation::Delete => {
193 self.notify_local_subscribers(LocalNotification::FragmentMappingsDelete(
194 fragment_ids,
195 ));
196 }
197 op => {
198 tracing::warn!("unexpected fragment mapping op: {}", op.as_str_name());
199 }
200 }
201 }
202}
203
204impl CatalogController {
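    /// Convert the fragments of a new streaming job into `fragment::Model` rows,
    /// one per fragment, ready to be persisted.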
205 pub fn prepare_fragment_models_from_fragments(
206 job_id: JobId,
207 fragments: impl Iterator<Item = &Fragment>,
208 ) -> MetaResult<Vec<fragment::Model>> {
209 fragments
210 .map(|fragment| Self::prepare_fragment_model_for_new_job(job_id, fragment))
211 .try_collect()
212 }
213
214 pub fn prepare_fragment_model_for_new_job(
215 job_id: JobId,
216 fragment: &Fragment,
217 ) -> MetaResult<fragment::Model> {
218 let vnode_count = fragment.vnode_count();
219 let Fragment {
220 fragment_id: pb_fragment_id,
221 fragment_type_mask: pb_fragment_type_mask,
222 distribution_type: pb_distribution_type,
223 actors: pb_actors,
224 state_table_ids: pb_state_table_ids,
225 nodes,
226 ..
227 } = fragment;
228
229 let state_table_ids = pb_state_table_ids.clone().into();
230
231 assert!(!pb_actors.is_empty());
232
233 let fragment_parallelism = nodes
234 .node_body
235 .as_ref()
236 .and_then(|body| match body {
237 NodeBody::StreamCdcScan(node) => Some(node),
238 _ => None,
239 })
240 .and_then(|node| node.options.as_ref())
241 .map(CdcScanOptions::from_proto)
242 .filter(|opts| opts.is_parallelized_backfill())
243 .map(|opts| StreamingParallelism::Fixed(opts.backfill_parallelism as usize));
244
245 let stream_node = StreamNode::from(nodes);
246
247 let distribution_type = PbFragmentDistributionType::try_from(*pb_distribution_type)
248 .unwrap()
249 .into();
250
251 #[expect(deprecated)]
252 let fragment = fragment::Model {
253 fragment_id: *pb_fragment_id as _,
254 job_id,
255 fragment_type_mask: (*pb_fragment_type_mask).into(),
256 distribution_type,
257 stream_node,
258 state_table_ids,
259 upstream_fragment_id: Default::default(),
260 vnode_count: vnode_count as _,
261 parallelism: fragment_parallelism,
262 };
263
264 Ok(fragment)
265 }
266
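    /// Assemble a `StreamJobFragments` from the persisted fragment models and their
    /// in-memory actor infos, filling in per-actor status and the job's assigned
    /// parallelism.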
267 fn compose_table_fragments(
268 job_id: JobId,
269 state: PbState,
270 ctx: StreamContext,
271 fragments: Vec<(fragment::Model, Vec<ActorInfo>)>,
272 parallelism: StreamingParallelism,
273 max_parallelism: usize,
274 job_definition: Option<String>,
275 ) -> MetaResult<StreamJobFragments> {
276 let mut pb_fragments = BTreeMap::new();
277 let mut pb_actor_status = BTreeMap::new();
278
279 for (fragment, actors) in fragments {
280 let (fragment, fragment_actor_status, _) =
281 Self::compose_fragment(fragment, actors, job_definition.clone())?;
282
283 pb_fragments.insert(fragment.fragment_id, fragment);
284 pb_actor_status.extend(fragment_actor_status.into_iter());
285 }
286
287 let table_fragments = StreamJobFragments {
288 stream_job_id: job_id,
289 state: state as _,
290 fragments: pb_fragments,
291 actor_status: pb_actor_status,
292 ctx,
293 assigned_parallelism: match parallelism {
294 StreamingParallelism::Custom => TableParallelism::Custom,
295 StreamingParallelism::Adaptive => TableParallelism::Adaptive,
296 StreamingParallelism::Fixed(n) => TableParallelism::Fixed(n as _),
297 },
298 max_parallelism,
299 };
300
301 Ok(table_fragments)
302 }
303
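    /// Convert a persisted `fragment::Model` and its actors back into the in-memory
    /// `Fragment` representation, returning the fragment together with per-actor
    /// status (worker location) and per-actor connector splits.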
304 #[allow(clippy::type_complexity)]
305 fn compose_fragment(
306 fragment: fragment::Model,
307 actors: Vec<ActorInfo>,
308 job_definition: Option<String>,
309 ) -> MetaResult<(
310 Fragment,
311 HashMap<crate::model::ActorId, PbActorStatus>,
312 HashMap<crate::model::ActorId, PbConnectorSplits>,
313 )> {
314 let fragment::Model {
315 fragment_id,
316 fragment_type_mask,
317 distribution_type,
318 stream_node,
319 state_table_ids,
320 vnode_count,
321 ..
322 } = fragment;
323
324 let stream_node = stream_node.to_protobuf();
325 let mut upstream_fragments = HashSet::new();
326 visit_stream_node_body(&stream_node, |body| {
327 if let NodeBody::Merge(m) = body {
328 assert!(
329 upstream_fragments.insert(m.upstream_fragment_id),
330 "non-duplicate upstream fragment"
331 );
332 }
333 });
334
335 let mut pb_actors = vec![];
336
337 let mut pb_actor_status = HashMap::new();
338 let mut pb_actor_splits = HashMap::new();
339
340 for actor in actors {
341 if actor.fragment_id != fragment_id {
342 bail!(
343 "fragment id {} from actor {} is different from fragment {}",
344 actor.fragment_id,
345 actor.actor_id,
346 fragment_id
347 )
348 }
349
350 let ActorInfo {
351 actor_id,
352 fragment_id,
353 worker_id,
354 splits,
355 vnode_bitmap,
356 expr_context,
357 config_override,
358 ..
359 } = actor;
360
361 let vnode_bitmap =
362 vnode_bitmap.map(|vnode_bitmap| Bitmap::from(vnode_bitmap.to_protobuf()));
363 let pb_expr_context = Some(expr_context.to_protobuf());
364
365 pb_actor_status.insert(
366 actor_id as _,
367 PbActorStatus {
368 location: PbActorLocation::from_worker(worker_id),
369 },
370 );
371
372 pb_actor_splits.insert(actor_id as _, splits.to_protobuf());
373
374 pb_actors.push(StreamActor {
375 actor_id: actor_id as _,
376 fragment_id: fragment_id as _,
377 vnode_bitmap,
378 mview_definition: job_definition.clone().unwrap_or("".to_owned()),
379 expr_context: pb_expr_context,
380 config_override,
381 })
382 }
383
384 let pb_state_table_ids = state_table_ids.0;
385 let pb_distribution_type = PbFragmentDistributionType::from(distribution_type) as _;
386 let pb_fragment = Fragment {
387 fragment_id: fragment_id as _,
388 fragment_type_mask: fragment_type_mask.into(),
389 distribution_type: pb_distribution_type,
390 actors: pb_actors,
391 state_table_ids: pb_state_table_ids,
392 maybe_vnode_count: VnodeCount::set(vnode_count).to_protobuf(),
393 nodes: stream_node,
394 };
395
396 Ok((pb_fragment, pb_actor_status, pb_actor_splits))
397 }
398
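    /// Collect the current parallelism info (distribution type, actor count, vnode count)
    /// of running fragments from the shared actor info, optionally restricted to `id_filter`.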
399 pub fn running_fragment_parallelisms(
400 &self,
401 id_filter: Option<HashSet<FragmentId>>,
402 ) -> MetaResult<HashMap<FragmentId, FragmentParallelismInfo>> {
403 let info = self.env.shared_actor_infos().read_guard();
404
405 let mut result = HashMap::new();
406 for (fragment_id, fragment) in info.iter_over_fragments() {
407 if let Some(id_filter) = &id_filter
408 && !id_filter.contains(&(*fragment_id as _))
409 {
                continue;
            }
412
413 result.insert(
414 *fragment_id as _,
415 FragmentParallelismInfo {
416 distribution_type: fragment.distribution_type.into(),
417 actor_count: fragment.actors.len() as _,
418 vnode_count: fragment.vnode_count,
419 },
420 );
421 }
422
423 Ok(result)
424 }
425
426 pub async fn fragment_job_mapping(&self) -> MetaResult<HashMap<FragmentId, JobId>> {
427 let inner = self.inner.read().await;
428 let fragment_jobs: Vec<(FragmentId, JobId)> = FragmentModel::find()
429 .select_only()
430 .columns([fragment::Column::FragmentId, fragment::Column::JobId])
431 .into_tuple()
432 .all(&inner.db)
433 .await?;
434 Ok(fragment_jobs.into_iter().collect())
435 }
436
437 pub async fn get_fragment_job_id(
438 &self,
439 fragment_ids: Vec<FragmentId>,
440 ) -> MetaResult<Vec<ObjectId>> {
441 let inner = self.inner.read().await;
442
443 let object_ids: Vec<ObjectId> = FragmentModel::find()
444 .select_only()
445 .column(fragment::Column::JobId)
446 .filter(fragment::Column::FragmentId.is_in(fragment_ids))
447 .into_tuple()
448 .all(&inner.db)
449 .await?;
450
451 Ok(object_ids)
452 }
453
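    /// Fetch the description of a single fragment together with the ids of its direct
    /// upstream fragments. Returns `None` if the fragment does not exist.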
454 pub async fn get_fragment_desc_by_id(
455 &self,
456 fragment_id: FragmentId,
457 ) -> MetaResult<Option<(FragmentDesc, Vec<FragmentId>)>> {
458 let inner = self.inner.read().await;
459
460 let fragment_model = match FragmentModel::find_by_id(fragment_id)
461 .one(&inner.db)
462 .await?
463 {
464 Some(fragment) => fragment,
465 None => return Ok(None),
466 };
467
468 let job_parallelism: Option<StreamingParallelism> =
469 StreamingJob::find_by_id(fragment_model.job_id)
470 .select_only()
471 .column(streaming_job::Column::Parallelism)
472 .into_tuple()
473 .one(&inner.db)
474 .await?;
475
476 let upstream_entries: Vec<(FragmentId, DispatcherType)> = FragmentRelation::find()
477 .select_only()
478 .columns([
479 fragment_relation::Column::SourceFragmentId,
480 fragment_relation::Column::DispatcherType,
481 ])
482 .filter(fragment_relation::Column::TargetFragmentId.eq(fragment_id))
483 .into_tuple()
484 .all(&inner.db)
485 .await?;
486
487 let upstreams: Vec<_> = upstream_entries
488 .into_iter()
489 .map(|(source_id, _)| source_id)
490 .collect();
491
492 let root_fragment_map = find_fragment_no_shuffle_dags_detailed(&inner.db, &[fragment_id])
493 .await
494 .map(Self::collect_root_fragment_mapping)?;
495 let root_fragments = root_fragment_map
496 .get(&fragment_id)
497 .cloned()
498 .unwrap_or_default();
499
500 let info = self.env.shared_actor_infos().read_guard();
501 let SharedFragmentInfo { actors, .. } = info
502 .get_fragment(fragment_model.fragment_id as _)
503 .unwrap_or_else(|| {
504 panic!(
505 "Failed to retrieve fragment description: fragment {} (job_id {}) not found in shared actor info",
506 fragment_model.fragment_id,
507 fragment_model.job_id
508 )
509 });
510
511 let parallelism_policy = Self::format_fragment_parallelism_policy(
512 &fragment_model,
513 job_parallelism.as_ref(),
514 &root_fragments,
515 );
516
517 let fragment = FragmentDesc {
518 fragment_id: fragment_model.fragment_id,
519 job_id: fragment_model.job_id,
520 fragment_type_mask: fragment_model.fragment_type_mask,
521 distribution_type: fragment_model.distribution_type,
522 state_table_ids: fragment_model.state_table_ids.clone(),
523 parallelism: actors.len() as _,
524 vnode_count: fragment_model.vnode_count,
525 stream_node: fragment_model.stream_node.clone(),
526 parallelism_policy,
527 };
528
529 Ok(Some((fragment, upstreams)))
530 }
531
532 pub async fn list_fragment_database_ids(
533 &self,
534 select_fragment_ids: Option<Vec<FragmentId>>,
535 ) -> MetaResult<Vec<(FragmentId, DatabaseId)>> {
536 let inner = self.inner.read().await;
537 let select = FragmentModel::find()
538 .select_only()
539 .column(fragment::Column::FragmentId)
540 .column(object::Column::DatabaseId)
541 .join(JoinType::InnerJoin, fragment::Relation::Object.def());
542 let select = if let Some(select_fragment_ids) = select_fragment_ids {
543 select.filter(fragment::Column::FragmentId.is_in(select_fragment_ids))
544 } else {
545 select
546 };
547 Ok(select.into_tuple().all(&inner.db).await?)
548 }
549
550 pub async fn get_job_fragments_by_id(&self, job_id: JobId) -> MetaResult<StreamJobFragments> {
551 let inner = self.inner.read().await;
552
553 let fragments: Vec<_> = FragmentModel::find()
555 .filter(fragment::Column::JobId.eq(job_id))
556 .all(&inner.db)
557 .await?;
558
559 let job_info = StreamingJob::find_by_id(job_id)
560 .one(&inner.db)
561 .await?
562 .ok_or_else(|| anyhow::anyhow!("job {} not found in database", job_id))?;
563
564 let fragment_actors =
565 self.collect_fragment_actor_pairs(fragments, job_info.stream_context())?;
566
567 let job_definition = resolve_streaming_job_definition(&inner.db, &HashSet::from([job_id]))
568 .await?
569 .remove(&job_id);
570
571 Self::compose_table_fragments(
572 job_id,
573 job_info.job_status.into(),
574 job_info.stream_context(),
575 fragment_actors,
576 job_info.parallelism.clone(),
577 job_info.max_parallelism as _,
578 job_definition,
579 )
580 }
581
582 pub async fn get_fragment_actor_dispatchers(
583 &self,
584 fragment_ids: Vec<FragmentId>,
585 ) -> MetaResult<FragmentActorDispatchers> {
586 let inner = self.inner.read().await;
587
588 self.get_fragment_actor_dispatchers_txn(&inner.db, fragment_ids)
589 .await
590 }
591
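    /// Compose the dispatchers of all actors in the given source fragments from the
    /// persisted fragment relations and the shared actor info, using `c` for database
    /// access.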
592 pub async fn get_fragment_actor_dispatchers_txn(
593 &self,
594 c: &impl ConnectionTrait,
595 fragment_ids: Vec<FragmentId>,
596 ) -> MetaResult<FragmentActorDispatchers> {
597 let fragment_relations = FragmentRelation::find()
598 .filter(fragment_relation::Column::SourceFragmentId.is_in(fragment_ids))
599 .all(c)
600 .await?;
601
602 type FragmentActorInfo = (
603 DistributionType,
604 Arc<HashMap<crate::model::ActorId, Option<Bitmap>>>,
605 );
606
607 let shared_info = self.env.shared_actor_infos();
608 let mut fragment_actor_cache: HashMap<FragmentId, FragmentActorInfo> = HashMap::new();
609 let get_fragment_actors = |fragment_id: FragmentId| async move {
610 let result: MetaResult<FragmentActorInfo> = try {
611 let read_guard = shared_info.read_guard();
612
613 let fragment = read_guard.get_fragment(fragment_id as _).unwrap();
614
615 (
616 fragment.distribution_type,
617 Arc::new(
618 fragment
619 .actors
620 .iter()
621 .map(|(actor_id, actor_info)| {
622 (
623 *actor_id,
624 actor_info
625 .vnode_bitmap
626 .as_ref()
627 .map(|bitmap| Bitmap::from(bitmap.to_protobuf())),
628 )
629 })
630 .collect(),
631 ),
632 )
633 };
634 result
635 };
636
637 let mut actor_dispatchers_map: HashMap<_, HashMap<_, Vec<_>>> = HashMap::new();
638 for fragment_relation::Model {
639 source_fragment_id,
640 target_fragment_id,
641 dispatcher_type,
642 dist_key_indices,
643 output_indices,
644 output_type_mapping,
645 } in fragment_relations
646 {
647 let (source_fragment_distribution, source_fragment_actors) = {
648 let (distribution, actors) = {
649 match fragment_actor_cache.entry(source_fragment_id) {
650 Entry::Occupied(entry) => entry.into_mut(),
651 Entry::Vacant(entry) => {
652 entry.insert(get_fragment_actors(source_fragment_id).await?)
653 }
654 }
655 };
656 (*distribution, actors.clone())
657 };
658 let (target_fragment_distribution, target_fragment_actors) = {
659 let (distribution, actors) = {
660 match fragment_actor_cache.entry(target_fragment_id) {
661 Entry::Occupied(entry) => entry.into_mut(),
662 Entry::Vacant(entry) => {
663 entry.insert(get_fragment_actors(target_fragment_id).await?)
664 }
665 }
666 };
667 (*distribution, actors.clone())
668 };
669 let output_mapping = PbDispatchOutputMapping {
670 indices: output_indices.into_u32_array(),
671 types: output_type_mapping.unwrap_or_default().to_protobuf(),
672 };
673 let dispatchers = compose_dispatchers(
674 source_fragment_distribution,
675 &source_fragment_actors,
676 target_fragment_id as _,
677 target_fragment_distribution,
678 &target_fragment_actors,
679 dispatcher_type,
680 dist_key_indices.into_u32_array(),
681 output_mapping,
682 );
683 let actor_dispatchers_map = actor_dispatchers_map
684 .entry(source_fragment_id as _)
685 .or_default();
686 for (actor_id, dispatchers) in dispatchers {
687 actor_dispatchers_map
688 .entry(actor_id as _)
689 .or_default()
690 .push(dispatchers);
691 }
692 }
693 Ok(actor_dispatchers_map)
694 }
695
696 pub async fn get_fragment_downstream_relations(
697 &self,
698 fragment_ids: Vec<FragmentId>,
699 ) -> MetaResult<FragmentDownstreamRelation> {
700 let inner = self.inner.read().await;
701 let mut stream = FragmentRelation::find()
702 .filter(fragment_relation::Column::SourceFragmentId.is_in(fragment_ids))
703 .stream(&inner.db)
704 .await?;
705 let mut relations = FragmentDownstreamRelation::new();
706 while let Some(relation) = stream.try_next().await? {
707 relations
708 .entry(relation.source_fragment_id as _)
709 .or_default()
710 .push(DownstreamFragmentRelation {
711 downstream_fragment_id: relation.target_fragment_id as _,
712 dispatcher_type: relation.dispatcher_type,
713 dist_key_indices: relation.dist_key_indices.into_u32_array(),
714 output_mapping: PbDispatchOutputMapping {
715 indices: relation.output_indices.into_u32_array(),
716 types: relation
717 .output_type_mapping
718 .unwrap_or_default()
719 .to_protobuf(),
720 },
721 });
722 }
723 Ok(relations)
724 }
725
726 pub async fn get_job_fragment_backfill_scan_type(
727 &self,
728 job_id: JobId,
729 ) -> MetaResult<HashMap<crate::model::FragmentId, PbStreamScanType>> {
730 let inner = self.inner.read().await;
731 let fragments: Vec<_> = FragmentModel::find()
732 .filter(fragment::Column::JobId.eq(job_id))
733 .all(&inner.db)
734 .await?;
735
736 let mut result = HashMap::new();
737
738 for fragment::Model {
739 fragment_id,
740 stream_node,
741 ..
742 } in fragments
743 {
744 let stream_node = stream_node.to_protobuf();
745 visit_stream_node_body(&stream_node, |body| {
746 if let NodeBody::StreamScan(node) = body {
747 match node.stream_scan_type() {
748 StreamScanType::Unspecified => {}
749 scan_type => {
750 result.insert(fragment_id as crate::model::FragmentId, scan_type);
751 }
752 }
753 }
754 });
755 }
756
757 Ok(result)
758 }
759
760 pub async fn count_streaming_jobs(&self) -> MetaResult<usize> {
761 let inner = self.inner.read().await;
762 let count = StreamingJob::find().count(&inner.db).await?;
763 Ok(usize::try_from(count).context("streaming job count overflow")?)
764 }
765
766 pub async fn list_streaming_job_infos(&self) -> MetaResult<Vec<StreamingJobInfo>> {
767 let inner = self.inner.read().await;
768 let job_states = StreamingJob::find()
769 .select_only()
770 .column(streaming_job::Column::JobId)
771 .join(JoinType::InnerJoin, streaming_job::Relation::Object.def())
772 .join(JoinType::InnerJoin, object::Relation::Database2.def())
773 .column(object::Column::ObjType)
774 .join(JoinType::LeftJoin, table::Relation::Object1.def().rev())
775 .join(JoinType::LeftJoin, source::Relation::Object.def().rev())
776 .join(JoinType::LeftJoin, sink::Relation::Object.def().rev())
777 .column_as(
778 Expr::if_null(
779 Expr::col((table::Entity, table::Column::Name)),
780 Expr::if_null(
781 Expr::col((source::Entity, source::Column::Name)),
782 Expr::if_null(
783 Expr::col((sink::Entity, sink::Column::Name)),
784 Expr::val("<unknown>"),
785 ),
786 ),
787 ),
788 "name",
789 )
790 .columns([
791 streaming_job::Column::JobStatus,
792 streaming_job::Column::Parallelism,
793 streaming_job::Column::MaxParallelism,
794 ])
795 .column_as(
796 Expr::if_null(
797 Expr::col((
798 streaming_job::Entity,
799 streaming_job::Column::SpecificResourceGroup,
800 )),
801 Expr::col((database::Entity, database::Column::ResourceGroup)),
802 ),
803 "resource_group",
804 )
805 .column_as(
806 Expr::if_null(
807 Expr::col((streaming_job::Entity, streaming_job::Column::ConfigOverride)),
808 Expr::val(""),
809 ),
810 "config_override",
811 )
812 .column(object::Column::DatabaseId)
813 .column(object::Column::SchemaId)
814 .into_model()
815 .all(&inner.db)
816 .await?;
817 Ok(job_states)
818 }
819
820 pub async fn get_max_parallelism_by_id(&self, job_id: JobId) -> MetaResult<usize> {
821 let inner = self.inner.read().await;
822 let max_parallelism: i32 = StreamingJob::find_by_id(job_id)
823 .select_only()
824 .column(streaming_job::Column::MaxParallelism)
825 .into_tuple()
826 .one(&inner.db)
827 .await?
828 .ok_or_else(|| anyhow::anyhow!("job {} not found in database", job_id))?;
829 Ok(max_parallelism as usize)
830 }
831
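    /// Best-effort listing of the internal state table ids of each streaming job.
    /// Returns `None` if the catalog lock cannot be acquired without blocking or the
    /// query fails.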
832 pub async fn get_job_internal_table_ids(&self) -> Option<Vec<(JobId, Vec<TableId>)>> {
834 if let Ok(inner) = self.inner.try_read()
835 && let Ok(job_state_tables) = FragmentModel::find()
836 .select_only()
837 .columns([fragment::Column::JobId, fragment::Column::StateTableIds])
838 .into_tuple::<(JobId, I32Array)>()
839 .all(&inner.db)
840 .await
841 {
842 let mut job_internal_table_ids = HashMap::new();
843 for (job_id, state_table_ids) in job_state_tables {
844 job_internal_table_ids
845 .entry(job_id)
846 .or_insert_with(Vec::new)
847 .extend(
848 state_table_ids
849 .into_inner()
850 .into_iter()
851 .map(|table_id| TableId::new(table_id as _)),
852 );
853 }
854 return Some(job_internal_table_ids.into_iter().collect());
855 }
856 None
857 }
858
859 pub async fn has_any_running_jobs(&self) -> MetaResult<bool> {
860 let inner = self.inner.read().await;
861 let count = FragmentModel::find().count(&inner.db).await?;
862 Ok(count > 0)
863 }
864
865 pub fn worker_actor_count(&self) -> MetaResult<HashMap<WorkerId, usize>> {
866 let read_guard = self.env.shared_actor_infos().read_guard();
867 let actor_cnt: HashMap<WorkerId, _> = read_guard
868 .iter_over_fragments()
869 .flat_map(|(_, fragment)| {
870 fragment
871 .actors
872 .iter()
873 .map(|(actor_id, actor)| (actor.worker_id, *actor_id))
874 })
875 .into_group_map()
876 .into_iter()
877 .map(|(k, v)| (k, v.len()))
878 .collect();
879
880 Ok(actor_cnt)
881 }
882
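    /// Build the `ActorInfo` list for each of the given fragments from the shared actor
    /// info, attaching the expression context and config override derived from the
    /// stream context.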
883 fn collect_fragment_actor_map(
884 &self,
885 fragment_ids: &[FragmentId],
886 stream_context: StreamContext,
887 ) -> MetaResult<HashMap<FragmentId, Vec<ActorInfo>>> {
888 let guard = self.env.shared_actor_infos().read_guard();
889 let pb_expr_context = stream_context.to_expr_context();
890 let expr_context: ExprContext = (&pb_expr_context).into();
891
892 let mut actor_map = HashMap::with_capacity(fragment_ids.len());
893 for fragment_id in fragment_ids {
894 let fragment_info = guard.get_fragment(*fragment_id as _).ok_or_else(|| {
895 anyhow!("fragment {} not found in shared actor info", fragment_id)
896 })?;
897
898 let actors = fragment_info
899 .actors
900 .iter()
901 .map(|(actor_id, actor_info)| ActorInfo {
902 actor_id: *actor_id as _,
903 fragment_id: *fragment_id,
904 splits: ConnectorSplits::from(&PbConnectorSplits {
905 splits: actor_info.splits.iter().map(ConnectorSplit::from).collect(),
906 }),
907 worker_id: actor_info.worker_id as _,
908 vnode_bitmap: actor_info
909 .vnode_bitmap
910 .as_ref()
911 .map(|bitmap| VnodeBitmap::from(&bitmap.to_protobuf())),
912 expr_context: expr_context.clone(),
913 config_override: stream_context.config_override.clone(),
914 })
915 .collect();
916
917 actor_map.insert(*fragment_id, actors);
918 }
919
920 Ok(actor_map)
921 }
922
923 fn collect_fragment_actor_pairs(
924 &self,
925 fragments: Vec<fragment::Model>,
926 stream_context: StreamContext,
927 ) -> MetaResult<Vec<(fragment::Model, Vec<ActorInfo>)>> {
928 let fragment_ids: Vec<_> = fragments.iter().map(|f| f.fragment_id).collect();
929 let mut actor_map = self.collect_fragment_actor_map(&fragment_ids, stream_context)?;
930 fragments
931 .into_iter()
932 .map(|fragment| {
933 let actors = actor_map.remove(&fragment.fragment_id).ok_or_else(|| {
934 anyhow!(
935 "fragment {} missing in shared actor info map",
936 fragment.fragment_id
937 )
938 })?;
939 Ok((fragment, actors))
940 })
941 .collect()
942 }
943
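    /// Load the fragments of all streaming jobs and compose them into
    /// `StreamJobFragments`, keyed by job id.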
944 pub async fn table_fragments(&self) -> MetaResult<BTreeMap<JobId, StreamJobFragments>> {
946 let inner = self.inner.read().await;
947 let jobs = StreamingJob::find().all(&inner.db).await?;
948
949 let mut job_definition = resolve_streaming_job_definition(
950 &inner.db,
951 &HashSet::from_iter(jobs.iter().map(|job| job.job_id)),
952 )
953 .await?;
954
955 let mut table_fragments = BTreeMap::new();
956 for job in jobs {
957 let fragments = FragmentModel::find()
958 .filter(fragment::Column::JobId.eq(job.job_id))
959 .all(&inner.db)
960 .await?;
961
962 let fragment_actors =
963 self.collect_fragment_actor_pairs(fragments, job.stream_context())?;
964
965 table_fragments.insert(
966 job.job_id,
967 Self::compose_table_fragments(
968 job.job_id,
969 job.job_status.into(),
970 job.stream_context(),
971 fragment_actors,
972 job.parallelism.clone(),
973 job.max_parallelism as _,
974 job_definition.remove(&job.job_id),
975 )?,
976 );
977 }
978
979 Ok(table_fragments)
980 }
981
982 pub async fn upstream_fragments(
983 &self,
984 fragment_ids: impl Iterator<Item = crate::model::FragmentId>,
985 ) -> MetaResult<HashMap<crate::model::FragmentId, HashSet<crate::model::FragmentId>>> {
986 let inner = self.inner.read().await;
987 let mut stream = FragmentRelation::find()
988 .select_only()
989 .columns([
990 fragment_relation::Column::SourceFragmentId,
991 fragment_relation::Column::TargetFragmentId,
992 ])
993 .filter(
994 fragment_relation::Column::TargetFragmentId
995 .is_in(fragment_ids.map(|id| id as FragmentId)),
996 )
997 .into_tuple::<(FragmentId, FragmentId)>()
998 .stream(&inner.db)
999 .await?;
1000 let mut upstream_fragments: HashMap<_, HashSet<_>> = HashMap::new();
1001 while let Some((upstream_fragment_id, downstream_fragment_id)) = stream.try_next().await? {
1002 upstream_fragments
1003 .entry(downstream_fragment_id as crate::model::FragmentId)
1004 .or_default()
1005 .insert(upstream_fragment_id as crate::model::FragmentId);
1006 }
1007 Ok(upstream_fragments)
1008 }
1009
1010 pub fn list_actor_locations(&self) -> MetaResult<Vec<PartialActorLocation>> {
1011 let info = self.env.shared_actor_infos().read_guard();
1012
1013 let actor_locations = info
1014 .iter_over_fragments()
1015 .flat_map(|(fragment_id, fragment)| {
1016 fragment
1017 .actors
1018 .iter()
1019 .map(|(actor_id, actor)| PartialActorLocation {
1020 actor_id: *actor_id as _,
1021 fragment_id: *fragment_id as _,
1022 worker_id: actor.worker_id,
1023 })
1024 })
1025 .collect_vec();
1026
1027 Ok(actor_locations)
1028 }
1029
1030 pub async fn list_actor_info(
1031 &self,
1032 ) -> MetaResult<Vec<(ActorId, FragmentId, ObjectId, SchemaId, ObjectType)>> {
1033 let inner = self.inner.read().await;
1034
1035 let fragment_objects: Vec<(FragmentId, ObjectId, SchemaId, ObjectType)> =
1036 FragmentModel::find()
1037 .select_only()
1038 .join(JoinType::LeftJoin, fragment::Relation::Object.def())
1039 .column(fragment::Column::FragmentId)
1040 .column_as(object::Column::Oid, "job_id")
1041 .column_as(object::Column::SchemaId, "schema_id")
1042 .column_as(object::Column::ObjType, "type")
1043 .into_tuple()
1044 .all(&inner.db)
1045 .await?;
1046
1047 let actor_infos = {
1048 let info = self.env.shared_actor_infos().read_guard();
1049
1050 let mut result = Vec::new();
1051
1052 for (fragment_id, object_id, schema_id, object_type) in fragment_objects {
1053 let Some(fragment) = info.get_fragment(fragment_id as _) else {
1054 return Err(MetaError::unavailable(format!(
1055 "shared actor info missing for fragment {fragment_id} while listing actors"
1056 )));
1057 };
1058
1059 for actor_id in fragment.actors.keys() {
1060 result.push((
1061 *actor_id as _,
1062 fragment.fragment_id as _,
1063 object_id,
1064 schema_id,
1065 object_type,
1066 ));
1067 }
1068 }
1069
1070 result
1071 };
1072
1073 Ok(actor_infos)
1074 }
1075
1076 pub fn get_worker_slot_mappings(&self) -> Vec<PbFragmentWorkerSlotMapping> {
1077 let guard = self.env.shared_actor_info.read_guard();
1078 guard
1079 .iter_over_fragments()
1080 .map(|(_, fragment)| rebuild_fragment_mapping(fragment))
1081 .collect_vec()
1082 }
1083
1084 pub async fn list_fragment_descs(
1085 &self,
1086 is_creating: bool,
1087 ) -> MetaResult<Vec<(FragmentDistribution, Vec<FragmentId>)>> {
1088 let inner = self.inner.read().await;
1089
1090 let txn = inner.db.begin().await?;
1091
1092 let fragments: Vec<_> = if is_creating {
1093 FragmentModel::find()
1094 .join(JoinType::LeftJoin, fragment::Relation::Object.def())
1095 .join(JoinType::LeftJoin, object::Relation::StreamingJob.def())
1096 .filter(
1097 streaming_job::Column::JobStatus
1098 .eq(JobStatus::Initial)
1099 .or(streaming_job::Column::JobStatus.eq(JobStatus::Creating)),
1100 )
1101 .all(&txn)
1102 .await?
1103 } else {
1104 FragmentModel::find().all(&txn).await?
1105 };
1106
1107 let fragment_ids = fragments.iter().map(|f| f.fragment_id).collect_vec();
1108
1109 let job_parallelisms: HashMap<JobId, StreamingParallelism> = if fragments.is_empty() {
1110 HashMap::new()
1111 } else {
1112 let job_ids = fragments.iter().map(|f| f.job_id).unique().collect_vec();
1113 StreamingJob::find()
1114 .select_only()
1115 .columns([
1116 streaming_job::Column::JobId,
1117 streaming_job::Column::Parallelism,
1118 ])
1119 .filter(streaming_job::Column::JobId.is_in(job_ids))
1120 .into_tuple()
1121 .all(&txn)
1122 .await?
1123 .into_iter()
1124 .collect()
1125 };
1126
1127 let upstream_entries: Vec<(FragmentId, FragmentId, DispatcherType)> =
1128 if fragment_ids.is_empty() {
1129 Vec::new()
1130 } else {
1131 FragmentRelation::find()
1132 .select_only()
1133 .columns([
1134 fragment_relation::Column::TargetFragmentId,
1135 fragment_relation::Column::SourceFragmentId,
1136 fragment_relation::Column::DispatcherType,
1137 ])
1138 .filter(fragment_relation::Column::TargetFragmentId.is_in(fragment_ids.clone()))
1139 .into_tuple()
1140 .all(&txn)
1141 .await?
1142 };
1143
1144 let mut all_upstreams: HashMap<FragmentId, Vec<FragmentId>> = HashMap::new();
1145 for (target_id, source_id, _) in upstream_entries {
1146 all_upstreams.entry(target_id).or_default().push(source_id);
1147 }
1148
1149 let root_fragment_map = if fragment_ids.is_empty() {
1150 HashMap::new()
1151 } else {
1152 let ensembles = find_fragment_no_shuffle_dags_detailed(&txn, &fragment_ids).await?;
1153 Self::collect_root_fragment_mapping(ensembles)
1154 };
1155
1156 let guard = self.env.shared_actor_info.read_guard();
1157
1158 let mut result = Vec::new();
1159
1160 for fragment_desc in fragments {
1161 let parallelism = guard
1163 .get_fragment(fragment_desc.fragment_id as _)
1164 .map(|fragment| fragment.actors.len())
1165 .unwrap_or_default();
1166
1167 let root_fragments = root_fragment_map
1168 .get(&fragment_desc.fragment_id)
1169 .cloned()
1170 .unwrap_or_default();
1171
1172 let upstreams = all_upstreams
1173 .remove(&fragment_desc.fragment_id)
1174 .unwrap_or_default();
1175
1176 let parallelism_policy = Self::format_fragment_parallelism_policy(
1177 &fragment_desc,
1178 job_parallelisms.get(&fragment_desc.job_id),
1179 &root_fragments,
1180 );
1181
1182 let fragment = FragmentDistribution {
1183 fragment_id: fragment_desc.fragment_id,
1184 table_id: fragment_desc.job_id,
1185 distribution_type: PbFragmentDistributionType::from(fragment_desc.distribution_type)
1186 as _,
1187 state_table_ids: fragment_desc.state_table_ids.0,
1188 upstream_fragment_ids: upstreams.clone(),
1189 fragment_type_mask: fragment_desc.fragment_type_mask as _,
1190 parallelism: parallelism as _,
1191 vnode_count: fragment_desc.vnode_count as _,
1192 node: Some(fragment_desc.stream_node.to_protobuf()),
1193 parallelism_policy,
1194 };
1195
1196 result.push((fragment, upstreams));
1197 }
1198 Ok(result)
1199 }
1200
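    /// For each fragment in the given no-shuffle ensembles, map it to the entry (root)
    /// fragments of its ensemble; root fragments themselves map to an empty list.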
1201 fn collect_root_fragment_mapping(
1203 ensembles: Vec<NoShuffleEnsemble>,
1204 ) -> HashMap<FragmentId, Vec<FragmentId>> {
1205 let mut mapping = HashMap::new();
1206
1207 for ensemble in ensembles {
1208 let mut roots: Vec<_> = ensemble.entry_fragments().collect();
1209 roots.sort_unstable();
1210 roots.dedup();
1211
1212 if roots.is_empty() {
1213 continue;
1214 }
1215
1216 let root_set: HashSet<_> = roots.iter().copied().collect();
1217
1218 for fragment_id in ensemble.component_fragments() {
1219 if root_set.contains(&fragment_id) {
1220 mapping.insert(fragment_id, Vec::new());
1221 } else {
1222 mapping.insert(fragment_id, roots.clone());
1223 }
1224 }
1225 }
1226
1227 mapping
1228 }
1229
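    /// Render a human-readable parallelism policy for a fragment: `single` for singleton
    /// fragments, `override(..)` for a fragment-level override, `upstream_fragment([..])`
    /// when the parallelism follows no-shuffle root fragments, otherwise `inherit(..)`
    /// from the job-level parallelism.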
1230 fn format_fragment_parallelism_policy(
1231 fragment: &fragment::Model,
1232 job_parallelism: Option<&StreamingParallelism>,
1233 root_fragments: &[FragmentId],
1234 ) -> String {
1235 if fragment.distribution_type == DistributionType::Single {
1236 return "single".to_owned();
1237 }
1238
1239 if let Some(parallelism) = fragment.parallelism.as_ref() {
1240 return format!(
1241 "override({})",
1242 Self::format_streaming_parallelism(parallelism)
1243 );
1244 }
1245
1246 if !root_fragments.is_empty() {
1247 let mut upstreams = root_fragments.to_vec();
1248 upstreams.sort_unstable();
1249 upstreams.dedup();
1250
1251 return format!("upstream_fragment({upstreams:?})");
1252 }
1253
1254 let inherited = job_parallelism
1255 .map(Self::format_streaming_parallelism)
1256 .unwrap_or_else(|| "unknown".to_owned());
1257 format!("inherit({inherited})")
1258 }
1259
1260 fn format_streaming_parallelism(parallelism: &StreamingParallelism) -> String {
1261 match parallelism {
1262 StreamingParallelism::Adaptive => "adaptive".to_owned(),
1263 StreamingParallelism::Fixed(n) => format!("fixed({n})"),
1264 StreamingParallelism::Custom => "custom".to_owned(),
1265 }
1266 }
1267
1268 pub async fn list_sink_actor_mapping(
1269 &self,
1270 ) -> MetaResult<HashMap<SinkId, (String, Vec<ActorId>)>> {
1271 let inner = self.inner.read().await;
1272 let sink_id_names: Vec<(SinkId, String)> = Sink::find()
1273 .select_only()
1274 .columns([sink::Column::SinkId, sink::Column::Name])
1275 .into_tuple()
1276 .all(&inner.db)
1277 .await?;
1278 let (sink_ids, _): (Vec<_>, Vec<_>) = sink_id_names.iter().cloned().unzip();
1279
1280 let sink_name_mapping: HashMap<SinkId, String> = sink_id_names.into_iter().collect();
1281
1282 let actor_with_type: Vec<(ActorId, SinkId)> = {
1283 let info = self.env.shared_actor_infos().read_guard();
1284
1285 info.iter_over_fragments()
1286 .filter(|(_, fragment)| {
1287 sink_ids.contains(&fragment.job_id.as_sink_id())
1288 && fragment.fragment_type_mask.contains(FragmentTypeFlag::Sink)
1289 })
1290 .flat_map(|(_, fragment)| {
1291 fragment
1292 .actors
1293 .keys()
1294 .map(move |actor_id| (*actor_id as _, fragment.job_id.as_sink_id()))
1295 })
1296 .collect()
1297 };
1298
1299 let mut sink_actor_mapping = HashMap::new();
1300 for (actor_id, sink_id) in actor_with_type {
1301 sink_actor_mapping
1302 .entry(sink_id)
1303 .or_insert_with(|| (sink_name_mapping.get(&sink_id).unwrap().clone(), vec![]))
1304 .1
1305 .push(actor_id);
1306 }
1307
1308 Ok(sink_actor_mapping)
1309 }
1310
1311 pub async fn list_fragment_state_tables(&self) -> MetaResult<Vec<PartialFragmentStateTables>> {
1312 let inner = self.inner.read().await;
1313 let fragment_state_tables: Vec<PartialFragmentStateTables> = FragmentModel::find()
1314 .select_only()
1315 .columns([
1316 fragment::Column::FragmentId,
1317 fragment::Column::JobId,
1318 fragment::Column::StateTableIds,
1319 ])
1320 .into_partial_model()
1321 .all(&inner.db)
1322 .await?;
1323 Ok(fragment_state_tables)
1324 }
1325
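    /// Re-render actor assignments for all streaming jobs (or those of one database)
    /// against the currently schedulable streaming workers, using the adaptive
    /// parallelism strategy from the system parameters.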
1326 pub async fn load_all_actors_dynamic(
1329 &self,
1330 database_id: Option<DatabaseId>,
1331 worker_nodes: &ActiveStreamingWorkerNodes,
1332 ) -> MetaResult<FragmentRenderMap> {
1333 let loaded = self.load_fragment_context(database_id).await?;
1334
1335 if loaded.is_empty() {
1336 return Ok(HashMap::new());
1337 }
1338
1339 let adaptive_parallelism_strategy = {
1340 let system_params_reader = self.env.system_params_reader().await;
1341 system_params_reader.adaptive_parallelism_strategy()
1342 };
1343
1344 let available_workers: BTreeMap<_, _> = worker_nodes
1345 .current()
1346 .values()
1347 .filter(|worker| worker.is_streaming_schedulable())
1348 .map(|worker| {
1349 (
1350 worker.id,
1351 WorkerInfo {
1352 parallelism: NonZeroUsize::new(worker.compute_node_parallelism()).unwrap(),
1353 resource_group: worker.resource_group(),
1354 },
1355 )
1356 })
1357 .collect();
1358
1359 let RenderedGraph { fragments, .. } = render_actor_assignments(
1360 self.env.actor_id_generator(),
1361 &available_workers,
1362 adaptive_parallelism_strategy,
1363 &loaded,
1364 )?;
1365
1366 tracing::trace!(?fragments, "reload all actors");
1367
1368 Ok(fragments)
1369 }
1370
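    /// Load the fragment context of every streaming job, optionally restricted to a
    /// single database.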
1371 pub async fn load_fragment_context(
1373 &self,
1374 database_id: Option<DatabaseId>,
1375 ) -> MetaResult<LoadedFragmentContext> {
1376 let inner = self.inner.read().await;
1377 let txn = inner.db.begin().await?;
1378
1379 let mut query = StreamingJob::find()
1380 .select_only()
1381 .column(streaming_job::Column::JobId);
1382
1383 if let Some(database_id) = database_id {
1384 query = query
1385 .join(JoinType::InnerJoin, streaming_job::Relation::Object.def())
1386 .filter(object::Column::DatabaseId.eq(database_id));
1387 }
1388
1389 let jobs: Vec<JobId> = query.into_tuple().all(&txn).await?;
1390
1391 let jobs: HashSet<JobId> = jobs.into_iter().collect();
1392
1393 if jobs.is_empty() {
1394 return Ok(LoadedFragmentContext::default());
1395 }
1396
1397 load_fragment_context_for_jobs(&txn, jobs).await
1398 }
1399
1400 #[await_tree::instrument]
1401 pub async fn fill_snapshot_backfill_epoch(
1402 &self,
1403 fragment_ids: impl Iterator<Item = FragmentId>,
1404 snapshot_backfill_info: Option<&SnapshotBackfillInfo>,
1405 cross_db_snapshot_backfill_info: &SnapshotBackfillInfo,
1406 ) -> MetaResult<()> {
1407 let inner = self.inner.write().await;
1408 let txn = inner.db.begin().await?;
1409 for fragment_id in fragment_ids {
1410 let fragment = FragmentModel::find_by_id(fragment_id)
1411 .one(&txn)
1412 .await?
1413 .context(format!("fragment {} not found", fragment_id))?;
1414 let mut node = fragment.stream_node.to_protobuf();
1415 if crate::stream::fill_snapshot_backfill_epoch(
1416 &mut node,
1417 snapshot_backfill_info,
1418 cross_db_snapshot_backfill_info,
1419 )? {
1420 let node = StreamNode::from(&node);
1421 FragmentModel::update(fragment::ActiveModel {
1422 fragment_id: Set(fragment_id),
1423 stream_node: Set(node),
1424 ..Default::default()
1425 })
1426 .exec(&txn)
1427 .await?;
1428 }
1429 }
1430 txn.commit().await?;
1431 Ok(())
1432 }
1433
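    /// Get the ids of all running actors of the given fragment from the shared actor info.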
1434 pub fn get_running_actors_of_fragment(
1436 &self,
1437 fragment_id: FragmentId,
1438 ) -> MetaResult<HashSet<model::ActorId>> {
1439 let info = self.env.shared_actor_infos().read_guard();
1440
1441 let actors = info
1442 .get_fragment(fragment_id as _)
1443 .map(|SharedFragmentInfo { actors, .. }| actors.keys().copied().collect())
1444 .unwrap_or_default();
1445
1446 Ok(actors)
1447 }
1448
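    /// Resolve the (source-backfill actor, upstream source actor) pairs for the
    /// no-shuffle edge between a source fragment and its source-backfill fragment;
    /// errors if the edge is missing or is not a no-shuffle dispatch.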
1449 pub async fn get_running_actors_for_source_backfill(
1452 &self,
1453 source_backfill_fragment_id: FragmentId,
1454 source_fragment_id: FragmentId,
1455 ) -> MetaResult<Vec<(ActorId, ActorId)>> {
1456 let inner = self.inner.read().await;
1457 let txn = inner.db.begin().await?;
1458
1459 let fragment_relation: DispatcherType = FragmentRelation::find()
1460 .select_only()
1461 .column(fragment_relation::Column::DispatcherType)
1462 .filter(fragment_relation::Column::SourceFragmentId.eq(source_fragment_id))
1463 .filter(fragment_relation::Column::TargetFragmentId.eq(source_backfill_fragment_id))
1464 .into_tuple()
1465 .one(&txn)
1466 .await?
1467 .ok_or_else(|| {
1468 anyhow!(
1469 "no fragment connection from source fragment {} to source backfill fragment {}",
1470 source_fragment_id,
1471 source_backfill_fragment_id
1472 )
1473 })?;
1474
1475 if fragment_relation != DispatcherType::NoShuffle {
            return Err(anyhow!("expected NoShuffle dispatcher but got {:?}", fragment_relation).into());
1477 }
1478
1479 let load_fragment_distribution_type = |txn, fragment_id: FragmentId| async move {
1480 let result: MetaResult<DistributionType> = try {
1481 FragmentModel::find_by_id(fragment_id)
1482 .select_only()
1483 .column(fragment::Column::DistributionType)
1484 .into_tuple()
1485 .one(txn)
1486 .await?
1487 .ok_or_else(|| anyhow!("failed to find fragment: {}", fragment_id))?
1488 };
1489 result
1490 };
1491
1492 let source_backfill_distribution_type =
1493 load_fragment_distribution_type(&txn, source_backfill_fragment_id).await?;
1494 let source_distribution_type =
1495 load_fragment_distribution_type(&txn, source_fragment_id).await?;
1496
1497 let load_fragment_actor_distribution =
1498 |actor_info: &SharedActorInfos,
1499 fragment_id: FragmentId|
1500 -> HashMap<crate::model::ActorId, Option<Bitmap>> {
1501 let guard = actor_info.read_guard();
1502
1503 guard
1504 .get_fragment(fragment_id as _)
1505 .map(|fragment| {
1506 fragment
1507 .actors
1508 .iter()
1509 .map(|(actor_id, actor)| {
1510 (
1511 *actor_id as _,
1512 actor
1513 .vnode_bitmap
1514 .as_ref()
1515 .map(|bitmap| Bitmap::from(bitmap.to_protobuf())),
1516 )
1517 })
1518 .collect()
1519 })
1520 .unwrap_or_default()
1521 };
1522
1523 let source_backfill_actors: HashMap<crate::model::ActorId, Option<Bitmap>> =
1524 load_fragment_actor_distribution(
1525 self.env.shared_actor_infos(),
1526 source_backfill_fragment_id,
1527 );
1528
1529 let source_actors =
1530 load_fragment_actor_distribution(self.env.shared_actor_infos(), source_fragment_id);
1531
1532 Ok(resolve_no_shuffle_actor_dispatcher(
1533 source_distribution_type,
1534 &source_actors,
1535 source_backfill_distribution_type,
1536 &source_backfill_actors,
1537 )
1538 .into_iter()
1539 .map(|(source_actor, source_backfill_actor)| {
1540 (source_backfill_actor as _, source_actor as _)
1541 })
1542 .collect())
1543 }
1544
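    /// Find the root fragment (Mview/Sink, falling back to Source) of each given job,
    /// returning the shared fragment info and stream node per job, along with the
    /// worker location of every known actor.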
1545 pub async fn get_root_fragments(
1558 &self,
1559 job_ids: Vec<JobId>,
1560 ) -> MetaResult<(
1561 HashMap<JobId, (SharedFragmentInfo, PbStreamNode)>,
1562 HashMap<ActorId, WorkerId>,
1563 )> {
1564 let inner = self.inner.read().await;
1565
1566 let all_fragments = FragmentModel::find()
1567 .filter(fragment::Column::JobId.is_in(job_ids))
1568 .all(&inner.db)
1569 .await?;
1570 let mut root_fragments = HashMap::<JobId, fragment::Model>::new();
1572 for fragment in all_fragments {
1573 let mask = FragmentTypeMask::from(fragment.fragment_type_mask);
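            // An Mview/Sink fragment always wins as the root fragment; a Source fragment
            // is only kept as a fallback when the job has no Mview/Sink fragment.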
1574 if mask.contains_any([FragmentTypeFlag::Mview, FragmentTypeFlag::Sink]) {
1575 _ = root_fragments.insert(fragment.job_id, fragment);
1576 } else if mask.contains(FragmentTypeFlag::Source) {
1577 _ = root_fragments.try_insert(fragment.job_id, fragment);
1580 }
1581 }
1582
1583 let mut root_fragments_pb = HashMap::new();
1584
1585 let info = self.env.shared_actor_infos().read_guard();
1586
1587 let root_fragment_to_jobs: HashMap<_, _> = root_fragments
1588 .iter()
1589 .map(|(job_id, fragment)| (fragment.fragment_id, *job_id))
1590 .collect();
1591
1592 for fragment in root_fragment_to_jobs.keys() {
1593 let fragment_info = info.get_fragment(*fragment).context(format!(
1594 "root fragment {} not found in shared actor info",
1595 fragment
1596 ))?;
1597
1598 let job_id = root_fragment_to_jobs[&fragment_info.fragment_id];
1599 let fragment = root_fragments
1600 .get(&job_id)
1601 .context(format!("root fragment for job {} not found", job_id))?;
1602
1603 root_fragments_pb.insert(
1604 job_id,
1605 (fragment_info.clone(), fragment.stream_node.to_protobuf()),
1606 );
1607 }
1608
1609 let mut all_actor_locations = HashMap::new();
1610
1611 for (_, SharedFragmentInfo { actors, .. }) in info.iter_over_fragments() {
1612 for (actor_id, actor_info) in actors {
1613 all_actor_locations.insert(*actor_id as ActorId, actor_info.worker_id);
1614 }
1615 }
1616
1617 Ok((root_fragments_pb, all_actor_locations))
1618 }
1619
1620 pub async fn get_root_fragment(
1621 &self,
1622 job_id: JobId,
1623 ) -> MetaResult<(SharedFragmentInfo, HashMap<ActorId, WorkerId>)> {
1624 let (mut root_fragments, actors) = self.get_root_fragments(vec![job_id]).await?;
1625 let (root_fragment, _) = root_fragments
1626 .remove(&job_id)
1627 .context(format!("root fragment for job {} not found", job_id))?;
1628
1629 Ok((root_fragment, actors))
1630 }
1631
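    /// Get the fragments directly downstream of the given job's root fragment, together
    /// with the dispatcher type and stream node of each, plus the worker locations of
    /// all known actors.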
1632 pub async fn get_downstream_fragments(
1634 &self,
1635 job_id: JobId,
1636 ) -> MetaResult<(
1637 Vec<(
1638 stream_plan::DispatcherType,
1639 SharedFragmentInfo,
1640 PbStreamNode,
1641 )>,
1642 HashMap<ActorId, WorkerId>,
1643 )> {
1644 let (root_fragment, actor_locations) = self.get_root_fragment(job_id).await?;
1645
1646 let inner = self.inner.read().await;
1647 let txn = inner.db.begin().await?;
1648 let downstream_fragment_relations: Vec<fragment_relation::Model> = FragmentRelation::find()
1649 .filter(
1650 fragment_relation::Column::SourceFragmentId
1651 .eq(root_fragment.fragment_id as FragmentId),
1652 )
1653 .all(&txn)
1654 .await?;
1655
1656 let downstream_fragment_ids = downstream_fragment_relations
1657 .iter()
1658 .map(|model| model.target_fragment_id as FragmentId)
1659 .collect::<HashSet<_>>();
1660
1661 let downstream_fragment_nodes: Vec<(FragmentId, StreamNode)> = FragmentModel::find()
1662 .select_only()
1663 .columns([fragment::Column::FragmentId, fragment::Column::StreamNode])
1664 .filter(fragment::Column::FragmentId.is_in(downstream_fragment_ids))
1665 .into_tuple()
1666 .all(&txn)
1667 .await?;
1668
1669 let downstream_fragment_nodes: HashMap<_, _> =
1670 downstream_fragment_nodes.into_iter().collect();
1671
1672 let mut downstream_fragments = vec![];
1673
1674 let info = self.env.shared_actor_infos().read_guard();
1675
1676 let fragment_map: HashMap<_, _> = downstream_fragment_relations
1677 .iter()
1678 .map(|model| (model.target_fragment_id, model.dispatcher_type))
1679 .collect();
1680
1681 for fragment_id in fragment_map.keys() {
1682 let fragment_info @ SharedFragmentInfo { actors, .. } =
1683 info.get_fragment(*fragment_id).unwrap();
1684
1685 let dispatcher_type = fragment_map[fragment_id];
1686
1687 if actors.is_empty() {
                bail!("no running actors found for fragment {}", fragment_id);
1689 }
1690
1691 let dispatch_type = PbDispatcherType::from(dispatcher_type);
1692
1693 let nodes = downstream_fragment_nodes
1694 .get(fragment_id)
1695 .context(format!(
1696 "downstream fragment node for id {} not found",
1697 fragment_id
1698 ))?
1699 .to_protobuf();
1700
1701 downstream_fragments.push((dispatch_type, fragment_info.clone(), nodes));
1702 }
1703 Ok((downstream_fragments, actor_locations))
1704 }
1705
1706 pub async fn load_source_fragment_ids(
1707 &self,
1708 ) -> MetaResult<HashMap<SourceId, BTreeSet<FragmentId>>> {
1709 let inner = self.inner.read().await;
1710 let fragments: Vec<(FragmentId, StreamNode)> = FragmentModel::find()
1711 .select_only()
1712 .columns([fragment::Column::FragmentId, fragment::Column::StreamNode])
1713 .filter(FragmentTypeMask::intersects(FragmentTypeFlag::Source))
1714 .into_tuple()
1715 .all(&inner.db)
1716 .await?;
1717
1718 let mut source_fragment_ids = HashMap::new();
1719 for (fragment_id, stream_node) in fragments {
1720 if let Some(source_id) = stream_node.to_protobuf().find_stream_source() {
1721 source_fragment_ids
1722 .entry(source_id)
1723 .or_insert_with(BTreeSet::new)
1724 .insert(fragment_id);
1725 }
1726 }
1727 Ok(source_fragment_ids)
1728 }
1729
1730 pub async fn load_backfill_fragment_ids(
1731 &self,
1732 ) -> MetaResult<HashMap<SourceId, BTreeSet<(FragmentId, FragmentId)>>> {
1733 let inner = self.inner.read().await;
1734 let fragments: Vec<(FragmentId, StreamNode)> = FragmentModel::find()
1735 .select_only()
1736 .columns([fragment::Column::FragmentId, fragment::Column::StreamNode])
1737 .filter(FragmentTypeMask::intersects(FragmentTypeFlag::SourceScan))
1738 .into_tuple()
1739 .all(&inner.db)
1740 .await?;
1741
1742 let mut source_fragment_ids = HashMap::new();
1743 for (fragment_id, stream_node) in fragments {
1744 if let Some((source_id, upstream_source_fragment_id)) =
1745 stream_node.to_protobuf().find_source_backfill()
1746 {
1747 source_fragment_ids
1748 .entry(source_id)
1749 .or_insert_with(BTreeSet::new)
1750 .insert((fragment_id, upstream_source_fragment_id));
1751 }
1752 }
1753 Ok(source_fragment_ids)
1754 }
1755
1756 pub async fn get_all_upstream_sink_infos(
1757 &self,
1758 target_table: &PbTable,
1759 target_fragment_id: FragmentId,
1760 ) -> MetaResult<Vec<UpstreamSinkInfo>> {
1761 let incoming_sinks = self.get_table_incoming_sinks(target_table.id).await?;
1762
1763 let inner = self.inner.read().await;
1764 let txn = inner.db.begin().await?;
1765
1766 let sink_ids = incoming_sinks.iter().map(|s| s.id).collect_vec();
1767 let sink_fragment_ids = get_sink_fragment_by_ids(&txn, sink_ids).await?;
1768
1769 let mut upstream_sink_infos = Vec::with_capacity(incoming_sinks.len());
1770 for pb_sink in &incoming_sinks {
1771 let sink_fragment_id =
1772 sink_fragment_ids
1773 .get(&pb_sink.id)
1774 .cloned()
                    .ok_or_else(|| {
                        anyhow::anyhow!("sink fragment not found for sink id {}", pb_sink.id)
                    })?;
1779 let upstream_info = build_upstream_sink_info(
1780 pb_sink,
1781 sink_fragment_id,
1782 target_table,
1783 target_fragment_id,
1784 )?;
1785 upstream_sink_infos.push(upstream_info);
1786 }
1787
1788 Ok(upstream_sink_infos)
1789 }
1790
1791 pub async fn get_mview_fragment_by_id(&self, job_id: JobId) -> MetaResult<FragmentId> {
1792 let inner = self.inner.read().await;
1793 let txn = inner.db.begin().await?;
1794
1795 let mview_fragment: Vec<FragmentId> = FragmentModel::find()
1796 .select_only()
1797 .column(fragment::Column::FragmentId)
1798 .filter(
1799 fragment::Column::JobId
1800 .eq(job_id)
1801 .and(FragmentTypeMask::intersects(FragmentTypeFlag::Mview)),
1802 )
1803 .into_tuple()
1804 .all(&txn)
1805 .await?;
1806
1807 if mview_fragment.len() != 1 {
1808 return Err(anyhow::anyhow!(
1809 "expected exactly one mview fragment for job {}, found {}",
1810 job_id,
1811 mview_fragment.len()
1812 )
1813 .into());
1814 }
1815
1816 Ok(mview_fragment.into_iter().next().unwrap())
1817 }
1818
1819 pub async fn has_table_been_migrated(&self, table_id: TableId) -> MetaResult<bool> {
1820 let inner = self.inner.read().await;
1821 let txn = inner.db.begin().await?;
1822 has_table_been_migrated(&txn, table_id).await
1823 }
1824
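    /// Upsert the connector splits of the given fragments in a single
    /// `INSERT .. ON CONFLICT` statement; no-op when `fragment_splits` is empty.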
1825 pub async fn update_fragment_splits<C>(
1826 &self,
1827 txn: &C,
1828 fragment_splits: &HashMap<FragmentId, Vec<SplitImpl>>,
1829 ) -> MetaResult<()>
1830 where
1831 C: ConnectionTrait,
1832 {
1833 if fragment_splits.is_empty() {
1834 return Ok(());
1835 }
1836
1837 let models: Vec<fragment_splits::ActiveModel> = fragment_splits
1838 .iter()
1839 .map(|(fragment_id, splits)| fragment_splits::ActiveModel {
1840 fragment_id: Set(*fragment_id as _),
1841 splits: Set(Some(ConnectorSplits::from(&PbConnectorSplits {
1842 splits: splits.iter().map(Into::into).collect_vec(),
1843 }))),
1844 })
1845 .collect();
1846
1847 FragmentSplits::insert_many(models)
1848 .on_conflict(
1849 OnConflict::column(fragment_splits::Column::FragmentId)
1850 .update_column(fragment_splits::Column::Splits)
1851 .to_owned(),
1852 )
1853 .exec(txn)
1854 .await?;
1855
1856 Ok(())
1857 }
1858}
1859
#[cfg(test)]
mod tests {
    use std::collections::{BTreeMap, HashMap, HashSet};

    use itertools::Itertools;
    use risingwave_common::catalog::{FragmentTypeFlag, FragmentTypeMask};
    use risingwave_common::hash::{ActorMapping, VirtualNode, VnodeCount};
    use risingwave_common::id::JobId;
    use risingwave_common::util::iter_util::ZipEqDebug;
    use risingwave_common::util::stream_graph_visitor::visit_stream_node_body;
    use risingwave_meta_model::fragment::DistributionType;
    use risingwave_meta_model::*;
    use risingwave_pb::meta::table_fragments::fragment::PbFragmentDistributionType;
    use risingwave_pb::plan_common::PbExprContext;
    use risingwave_pb::source::{PbConnectorSplit, PbConnectorSplits};
    use risingwave_pb::stream_plan::stream_node::PbNodeBody;
    use risingwave_pb::stream_plan::{MergeNode, PbStreamNode, PbUnionNode};

    use super::ActorInfo;
    use crate::MetaResult;
    use crate::controller::catalog::CatalogController;
    use crate::model::{Fragment, StreamActor};

    type ActorUpstreams = BTreeMap<crate::model::FragmentId, HashSet<crate::model::ActorId>>;

    type FragmentActorUpstreams = HashMap<crate::model::ActorId, ActorUpstreams>;

    const TEST_FRAGMENT_ID: FragmentId = FragmentId::new(1);

    const TEST_UPSTREAM_FRAGMENT_ID: FragmentId = FragmentId::new(2);

    const TEST_JOB_ID: JobId = JobId::new(1);

    const TEST_STATE_TABLE_ID: TableId = TableId::new(1000);

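    /// Builds a two-entry upstream map for `actor_id`: one upstream actor per test
    /// upstream fragment, with actor ids offset by 100 and 200 respectively.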
    fn generate_upstream_actor_ids_for_actor(actor_id: ActorId) -> ActorUpstreams {
        let mut upstream_actor_ids = BTreeMap::new();
        upstream_actor_ids.insert(
            TEST_UPSTREAM_FRAGMENT_ID,
            HashSet::from_iter([(actor_id + 100)]),
        );
        upstream_actor_ids.insert(
            (TEST_UPSTREAM_FRAGMENT_ID + 1) as _,
            HashSet::from_iter([(actor_id + 200)]),
        );
        upstream_actor_ids
    }

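    /// Builds a `Union` stream node whose inputs are one `Merge` node per upstream
    /// fragment in `actor_upstream_actor_ids`.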
    fn generate_merger_stream_node(actor_upstream_actor_ids: &ActorUpstreams) -> PbStreamNode {
        let mut input = vec![];
        for &upstream_fragment_id in actor_upstream_actor_ids.keys() {
            input.push(PbStreamNode {
                node_body: Some(PbNodeBody::Merge(Box::new(MergeNode {
                    upstream_fragment_id,
                    ..Default::default()
                }))),
                ..Default::default()
            });
        }

        PbStreamNode {
            input,
            node_body: Some(PbNodeBody::Union(PbUnionNode {})),
            ..Default::default()
        }
    }

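    /// Converts a `Fragment` into its model representation via
    /// `prepare_fragment_model_for_new_job` and checks that the fragment metadata
    /// is preserved.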
    #[tokio::test]
    async fn test_extract_fragment() -> MetaResult<()> {
        let actor_count = 3u32;
        let upstream_actor_ids: FragmentActorUpstreams = (0..actor_count)
            .map(|actor_id| {
                (
                    actor_id.into(),
                    generate_upstream_actor_ids_for_actor(actor_id.into()),
                )
            })
            .collect();

        let actor_bitmaps = ActorMapping::new_uniform(
            (0..actor_count).map(|i| i.into()),
            VirtualNode::COUNT_FOR_TEST,
        )
        .to_bitmaps();

        let stream_node = generate_merger_stream_node(upstream_actor_ids.values().next().unwrap());

        let pb_actors = (0..actor_count)
            .map(|actor_id| StreamActor {
                actor_id: actor_id.into(),
                fragment_id: TEST_FRAGMENT_ID as _,
                vnode_bitmap: actor_bitmaps.get(&actor_id.into()).cloned(),
                mview_definition: "".to_owned(),
                expr_context: Some(PbExprContext {
                    time_zone: String::from("America/New_York"),
                    strict_mode: false,
                }),
                config_override: "".into(),
            })
            .collect_vec();

        let pb_fragment = Fragment {
            fragment_id: TEST_FRAGMENT_ID as _,
            fragment_type_mask: FragmentTypeMask::from(FragmentTypeFlag::Source as u32),
            distribution_type: PbFragmentDistributionType::Hash as _,
            actors: pb_actors,
            state_table_ids: vec![TEST_STATE_TABLE_ID as _],
            maybe_vnode_count: VnodeCount::for_test().to_protobuf(),
            nodes: stream_node,
        };

        let fragment =
            CatalogController::prepare_fragment_model_for_new_job(TEST_JOB_ID, &pb_fragment)?;

        check_fragment(fragment, pb_fragment);

        Ok(())
    }

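    /// Composes a fragment model and its `ActorInfo`s back into protobuf form via
    /// `compose_fragment`, then checks the fragment, actor statuses, splits, and
    /// actor fields.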
    #[tokio::test]
    async fn test_compose_fragment() -> MetaResult<()> {
        let actor_count = 3u32;

        let upstream_actor_ids: FragmentActorUpstreams = (0..actor_count)
            .map(|actor_id| {
                (
                    actor_id.into(),
                    generate_upstream_actor_ids_for_actor(actor_id.into()),
                )
            })
            .collect();

        let mut actor_bitmaps = ActorMapping::new_uniform(
            (0..actor_count).map(|i| i.into()),
            VirtualNode::COUNT_FOR_TEST,
        )
        .to_bitmaps();

        let actors = (0..actor_count)
            .map(|actor_id| {
                let actor_splits = ConnectorSplits::from(&PbConnectorSplits {
                    splits: vec![PbConnectorSplit {
                        split_type: "dummy".to_owned(),
                        ..Default::default()
                    }],
                });

                ActorInfo {
                    actor_id: actor_id.into(),
                    fragment_id: TEST_FRAGMENT_ID,
                    splits: actor_splits,
                    worker_id: 0.into(),
                    vnode_bitmap: actor_bitmaps
                        .remove(&actor_id.into())
                        .map(|bitmap| bitmap.to_protobuf())
                        .as_ref()
                        .map(VnodeBitmap::from),
                    expr_context: ExprContext::from(&PbExprContext {
                        time_zone: String::from("America/New_York"),
                        strict_mode: false,
                    }),
                    config_override: "a.b.c = true".into(),
                }
            })
            .collect_vec();

        let stream_node = {
            let template_actor = actors.first().cloned().unwrap();

            let template_upstream_actor_ids = upstream_actor_ids
                .get(&(template_actor.actor_id as _))
                .unwrap();

            generate_merger_stream_node(template_upstream_actor_ids)
        };

        #[expect(deprecated)]
        let fragment = fragment::Model {
            fragment_id: TEST_FRAGMENT_ID,
            job_id: TEST_JOB_ID,
            fragment_type_mask: 0,
            distribution_type: DistributionType::Hash,
            stream_node: StreamNode::from(&stream_node),
            state_table_ids: TableIdArray(vec![TEST_STATE_TABLE_ID]),
            upstream_fragment_id: Default::default(),
            vnode_count: VirtualNode::COUNT_FOR_TEST as _,
            parallelism: None,
        };

        let (pb_fragment, pb_actor_status, pb_actor_splits) =
            CatalogController::compose_fragment(fragment.clone(), actors.clone(), None).unwrap();

        assert_eq!(pb_actor_status.len(), actor_count as usize);
        assert!(
            pb_actor_status
                .values()
                .all(|actor_status| actor_status.location.is_some())
        );
        assert_eq!(pb_actor_splits.len(), actor_count as usize);

        let pb_actors = pb_fragment.actors.clone();

        check_fragment(fragment, pb_fragment);
        check_actors(
            actors,
            &upstream_actor_ids,
            pb_actors,
            pb_actor_splits,
            &stream_node,
        );

        Ok(())
    }

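    /// Asserts that each composed `StreamActor` matches its source `ActorInfo`:
    /// ids, vnode bitmaps, expr contexts, splits, and merge-node upstreams.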
    fn check_actors(
        actors: Vec<ActorInfo>,
        actor_upstreams: &FragmentActorUpstreams,
        pb_actors: Vec<StreamActor>,
        pb_actor_splits: HashMap<ActorId, PbConnectorSplits>,
        stream_node: &PbStreamNode,
    ) {
        for (
            ActorInfo {
                actor_id,
                fragment_id,
                splits,
                worker_id: _,
                vnode_bitmap,
                expr_context,
                ..
            },
            StreamActor {
                actor_id: pb_actor_id,
                fragment_id: pb_fragment_id,
                vnode_bitmap: pb_vnode_bitmap,
                mview_definition,
                expr_context: pb_expr_context,
                ..
            },
        ) in actors.into_iter().zip_eq_debug(pb_actors.into_iter())
        {
            assert_eq!(actor_id, pb_actor_id as ActorId);
            assert_eq!(fragment_id, pb_fragment_id as FragmentId);

            assert_eq!(
                vnode_bitmap.map(|bitmap| bitmap.to_protobuf().into()),
                pb_vnode_bitmap,
            );

            assert_eq!(mview_definition, "");

            visit_stream_node_body(stream_node, |body| {
                if let PbNodeBody::Merge(m) = body {
                    assert!(
                        actor_upstreams
                            .get(&(actor_id as _))
                            .unwrap()
                            .contains_key(&m.upstream_fragment_id)
                    );
                }
            });

            assert_eq!(
                splits,
                pb_actor_splits
                    .get(&pb_actor_id)
                    .map(ConnectorSplits::from)
                    .unwrap_or_default()
            );

            assert_eq!(Some(expr_context.to_protobuf()), pb_expr_context);
        }
    }

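    /// Asserts that the composed `Fragment` protobuf agrees with the fragment model
    /// on id, type mask, distribution type, state table ids, and stream node.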
    fn check_fragment(fragment: fragment::Model, pb_fragment: Fragment) {
        let Fragment {
            fragment_id,
            fragment_type_mask,
            distribution_type: pb_distribution_type,
            actors: _,
            state_table_ids: pb_state_table_ids,
            maybe_vnode_count: _,
            nodes,
        } = pb_fragment;

        assert_eq!(fragment_id, TEST_FRAGMENT_ID);
        assert_eq!(fragment_type_mask, fragment.fragment_type_mask.into());
        assert_eq!(
            pb_distribution_type,
            PbFragmentDistributionType::from(fragment.distribution_type)
        );

        assert_eq!(pb_state_table_ids, fragment.state_table_ids.0);
        assert_eq!(fragment.stream_node.to_protobuf(), nodes);
    }

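    /// With a job-level parallelism of `Fixed(4)` and no upstream root fragments,
    /// the formatted policy is expected to be `inherit(fixed(4))`.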
    #[test]
    fn test_parallelism_policy_with_root_fragments() {
        #[expect(deprecated)]
        let fragment = fragment::Model {
            fragment_id: 3.into(),
            job_id: TEST_JOB_ID,
            fragment_type_mask: 0,
            distribution_type: DistributionType::Hash,
            stream_node: StreamNode::from(&PbStreamNode::default()),
            state_table_ids: TableIdArray::default(),
            upstream_fragment_id: Default::default(),
            vnode_count: 0,
            parallelism: None,
        };

        let job_parallelism = StreamingParallelism::Fixed(4);

        let policy = super::CatalogController::format_fragment_parallelism_policy(
            &fragment,
            Some(&job_parallelism),
            &[],
        );

        assert_eq!(policy, "inherit(fixed(4))");
    }

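    /// Without a job-level parallelism, the policy is derived from the upstream
    /// root fragments; duplicate ids are removed and the rest sorted, yielding
    /// `upstream_fragment([1, 2, 3])`.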
    #[test]
    fn test_parallelism_policy_with_upstream_roots() {
        #[expect(deprecated)]
        let fragment = fragment::Model {
            fragment_id: 5.into(),
            job_id: TEST_JOB_ID,
            fragment_type_mask: 0,
            distribution_type: DistributionType::Hash,
            stream_node: StreamNode::from(&PbStreamNode::default()),
            state_table_ids: TableIdArray::default(),
            upstream_fragment_id: Default::default(),
            vnode_count: 0,
            parallelism: None,
        };

        let policy = super::CatalogController::format_fragment_parallelism_policy(
            &fragment,
            None,
            &[3.into(), 1.into(), 2.into(), 1.into()],
        );

        assert_eq!(policy, "upstream_fragment([1, 2, 3])");
    }
}