use std::sync::Arc;
use std::time::Duration;

use otlp_embedded::TraceServiceServer;
use regex::Regex;
use risingwave_common::monitor::{RouterExt, TcpConfig};
use risingwave_common::secret::LocalSecretManager;
use risingwave_common::session_config::SessionConfig;
use risingwave_common::system_param::reader::SystemParamsRead;
use risingwave_common::telemetry::manager::TelemetryManager;
use risingwave_common::telemetry::{report_scarf_enabled, report_to_scarf, telemetry_env_enabled};
use risingwave_common::util::tokio_util::sync::CancellationToken;
use risingwave_common_service::{MetricsManager, TracingExtractLayer};
use risingwave_meta::MetaStoreBackend;
use risingwave_meta::barrier::GlobalBarrierManager;
use risingwave_meta::controller::catalog::CatalogController;
use risingwave_meta::controller::cluster::ClusterController;
use risingwave_meta::hummock::IcebergCompactorManager;
use risingwave_meta::manager::iceberg_compaction::IcebergCompactionManager;
use risingwave_meta::manager::{META_NODE_ID, MetadataManager};
use risingwave_meta::rpc::ElectionClientRef;
use risingwave_meta::rpc::election::dummy::DummyElectionClient;
use risingwave_meta::rpc::intercept::MetricsMiddlewareLayer;
use risingwave_meta::stream::{GlobalRefreshManager, ScaleController};
use risingwave_meta_service::AddressInfo;
use risingwave_meta_service::backup_service::BackupServiceImpl;
use risingwave_meta_service::cloud_service::CloudServiceImpl;
use risingwave_meta_service::cluster_limit_service::ClusterLimitServiceImpl;
use risingwave_meta_service::cluster_service::ClusterServiceImpl;
use risingwave_meta_service::ddl_service::DdlServiceImpl;
use risingwave_meta_service::event_log_service::EventLogServiceImpl;
use risingwave_meta_service::health_service::HealthServiceImpl;
use risingwave_meta_service::heartbeat_service::HeartbeatServiceImpl;
use risingwave_meta_service::hosted_iceberg_catalog_service::HostedIcebergCatalogServiceImpl;
use risingwave_meta_service::hummock_service::HummockServiceImpl;
use risingwave_meta_service::meta_member_service::MetaMemberServiceImpl;
use risingwave_meta_service::monitor_service::MonitorServiceImpl;
use risingwave_meta_service::notification_service::NotificationServiceImpl;
use risingwave_meta_service::scale_service::ScaleServiceImpl;
use risingwave_meta_service::serving_service::ServingServiceImpl;
use risingwave_meta_service::session_config::SessionParamsServiceImpl;
use risingwave_meta_service::sink_coordination_service::SinkCoordinationServiceImpl;
use risingwave_meta_service::stream_service::StreamServiceImpl;
use risingwave_meta_service::system_params_service::SystemParamsServiceImpl;
use risingwave_meta_service::telemetry_service::TelemetryInfoServiceImpl;
use risingwave_meta_service::user_service::UserServiceImpl;
use risingwave_pb::backup_service::backup_service_server::BackupServiceServer;
use risingwave_pb::cloud_service::cloud_service_server::CloudServiceServer;
use risingwave_pb::connector_service::sink_coordination_service_server::SinkCoordinationServiceServer;
use risingwave_pb::ddl_service::ddl_service_server::DdlServiceServer;
use risingwave_pb::health::health_server::HealthServer;
use risingwave_pb::hummock::hummock_manager_service_server::HummockManagerServiceServer;
use risingwave_pb::meta::SystemParams;
use risingwave_pb::meta::cluster_limit_service_server::ClusterLimitServiceServer;
use risingwave_pb::meta::cluster_service_server::ClusterServiceServer;
use risingwave_pb::meta::event_log_service_server::EventLogServiceServer;
use risingwave_pb::meta::heartbeat_service_server::HeartbeatServiceServer;
use risingwave_pb::meta::hosted_iceberg_catalog_service_server::HostedIcebergCatalogServiceServer;
use risingwave_pb::meta::meta_member_service_server::MetaMemberServiceServer;
use risingwave_pb::meta::notification_service_server::NotificationServiceServer;
use risingwave_pb::meta::scale_service_server::ScaleServiceServer;
use risingwave_pb::meta::serving_service_server::ServingServiceServer;
use risingwave_pb::meta::session_param_service_server::SessionParamServiceServer;
use risingwave_pb::meta::stream_manager_service_server::StreamManagerServiceServer;
use risingwave_pb::meta::system_params_service_server::SystemParamsServiceServer;
use risingwave_pb::meta::telemetry_info_service_server::TelemetryInfoServiceServer;
use risingwave_pb::monitor_service::monitor_service_server::MonitorServiceServer;
use risingwave_pb::user::user_service_server::UserServiceServer;
use sea_orm::{ConnectionTrait, DbBackend};
use thiserror_ext::AsReport;
use tokio::sync::watch;

use crate::backup_restore::BackupManager;
use crate::barrier::BarrierScheduler;
use crate::controller::SqlMetaStore;
use crate::controller::system_param::SystemParamsController;
use crate::hummock::HummockManager;
use crate::manager::sink_coordination::SinkCoordinatorManager;
use crate::manager::{IdleManager, MetaOpts, MetaSrvEnv};
use crate::rpc::election::sql::{MySqlDriver, PostgresDriver, SqlBackendElectionClient};
use crate::rpc::metrics::{GLOBAL_META_METRICS, start_info_monitor, start_worker_info_monitor};
use crate::serving::ServingVnodeMapping;
use crate::stream::{GlobalStreamManager, SourceManager};
use crate::telemetry::{MetaReportCreator, MetaTelemetryInfoFetcher};
use crate::{MetaError, MetaResult, hummock, serving};

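/// Process-wide flag recording whether the meta node's RPC services have been
/// started, either as election leader or as follower.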
pub mod started {
    use std::sync::atomic::AtomicBool;
    use std::sync::atomic::Ordering::Relaxed;

    static STARTED: AtomicBool = AtomicBool::new(false);

    /// Mark the services as started.
    pub(crate) fn set() {
        STARTED.store(true, Relaxed);
    }

    /// Check whether the services have been started.
    pub fn get() -> bool {
        STARTED.load(Relaxed)
    }
}

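/// Starts the meta node RPC service: connects to the meta store, constructs an
/// election client matching the backend (a dummy, always-leader client for the
/// in-memory and SQLite backends, a SQL-based one for Postgres and MySQL), and
/// then delegates to [`rpc_serve_with_store`].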
pub async fn rpc_serve(
    address_info: AddressInfo,
    meta_store_backend: MetaStoreBackend,
    max_cluster_heartbeat_interval: Duration,
    lease_interval_secs: u64,
    server_config: risingwave_common::config::ServerConfig,
    opts: MetaOpts,
    init_system_params: SystemParams,
    init_session_config: SessionConfig,
    shutdown: CancellationToken,
) -> MetaResult<()> {
    let meta_store_impl = SqlMetaStore::connect(meta_store_backend.clone()).await?;

    let election_client = match meta_store_backend {
        MetaStoreBackend::Mem => {
            // In-memory backend: no real election, this node is always the leader.
            Arc::new(DummyElectionClient::new(
                address_info.advertise_addr.clone(),
            ))
        }
        MetaStoreBackend::Sql { .. } => {
            let id = address_info.advertise_addr.clone();
            let conn = meta_store_impl.conn.clone();
            let election_client: ElectionClientRef = match conn.get_database_backend() {
                // SQLite is effectively single-node, so the dummy election client suffices.
                DbBackend::Sqlite => Arc::new(DummyElectionClient::new(id)),
                DbBackend::Postgres => {
                    Arc::new(SqlBackendElectionClient::new(id, PostgresDriver::new(conn)))
                }
                DbBackend::MySql => {
                    Arc::new(SqlBackendElectionClient::new(id, MySqlDriver::new(conn)))
                }
            };
            election_client.init().await?;

            election_client
        }
    };

    rpc_serve_with_store(
        meta_store_impl,
        election_client,
        address_info,
        max_cluster_heartbeat_interval,
        lease_interval_secs,
        server_config,
        opts,
        init_system_params,
        init_session_config,
        shutdown,
    )
    .await
}

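/// Runs the meta node against an already-connected meta store and election
/// client: spawns the election loop, serves as a follower while another node
/// holds leadership, and switches to the leader services once elected. Returns
/// after `shutdown` is cancelled or leadership is lost.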
pub async fn rpc_serve_with_store(
    meta_store_impl: SqlMetaStore,
    election_client: ElectionClientRef,
    address_info: AddressInfo,
    max_cluster_heartbeat_interval: Duration,
    lease_interval_secs: u64,
    server_config: risingwave_common::config::ServerConfig,
    opts: MetaOpts,
    init_system_params: SystemParams,
    init_session_config: SessionConfig,
    shutdown: CancellationToken,
) -> MetaResult<()> {
    let (election_shutdown_tx, election_shutdown_rx) = watch::channel(());

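    // Drive the election in a background task. `run_once` is retried as long
    // as it returns an error; once it returns successfully (e.g. the lease is
    // lost or the election is shut down), the whole node is cancelled.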
    let election_handle = tokio::spawn({
        let shutdown = shutdown.clone();
        let election_client = election_client.clone();

        async move {
            while let Err(e) = election_client
                .run_once(lease_interval_secs as i64, election_shutdown_rx.clone())
                .await
            {
                tracing::error!(error = %e.as_report(), "election error happened");
            }
            // The election loop has exited, i.e. leadership can no longer be
            // held; shut down the whole node.
            shutdown.cancel();
        }
    });

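    // Not (yet) the leader: serve a minimal follower service set and block
    // until this node is elected, or until shutdown is requested.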
    if !election_client.is_leader() {
        let follower_shutdown = shutdown.child_token();

        let follower_handle = tokio::spawn(start_service_as_election_follower(
            follower_shutdown.clone(),
            address_info.clone(),
            election_client.clone(),
        ));

        let mut is_leader_watcher = election_client.subscribe();

        while !*is_leader_watcher.borrow_and_update() {
            tokio::select! {
                _ = shutdown.cancelled() => return Ok(()),

                res = is_leader_watcher.changed() => {
                    if res.is_err() {
                        tracing::error!("leader watcher recv failed");
                    }
                }
            }
        }

        tracing::info!("elected as leader, shutting down follower services");
        follower_shutdown.cancel();
        let _ = follower_handle.await;
    }

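    // This node now holds leadership: run the full leader service set until
    // shutdown.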
    let result = start_service_as_election_leader(
        meta_store_impl,
        address_info,
        max_cluster_heartbeat_interval,
        opts,
        init_system_params,
        init_session_config,
        server_config,
        election_client,
        shutdown,
    )
    .await;

    // Leader services have exited; stop the election loop as well.
    election_shutdown_tx.send(()).ok();
    let _ = election_handle.await;

    result
}

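/// Starts the services of the meta node as an election follower: only the meta
/// member service (for member discovery) and the health service are served,
/// until the given `shutdown` token is cancelled.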
pub async fn start_service_as_election_follower(
    shutdown: CancellationToken,
    address_info: AddressInfo,
    election_client: ElectionClientRef,
) {
    tracing::info!("starting follower services");

    let meta_member_srv = MetaMemberServiceImpl::new(election_client);

    let health_srv = HealthServiceImpl::new();

    let server = tonic::transport::Server::builder()
        .layer(MetricsMiddlewareLayer::new(Arc::new(
            GLOBAL_META_METRICS.clone(),
        )))
        .layer(TracingExtractLayer::new())
        .add_service(MetaMemberServiceServer::new(meta_member_srv))
        .add_service(HealthServer::new(health_srv))
        .monitored_serve_with_shutdown(
            address_info.listen_addr,
            "grpc-meta-follower-service",
            TcpConfig {
                tcp_nodelay: true,
                keepalive_duration: None,
            },
            shutdown.clone().cancelled_owned(),
        );
    let server_handle = tokio::spawn(server);
    started::set();

    // Wait for the shutdown signal, then wait for the server to exit gracefully.
    shutdown.cancelled().await;
    let _ = server_handle.await;
}

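/// Starts all services of the meta node as the election leader: the metadata,
/// hummock, barrier, stream and DDL managers, the background sub-tasks, and
/// the full gRPC service set. Returns once `shutdown` is cancelled or an
/// error occurs during startup.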
pub async fn start_service_as_election_leader(
    meta_store_impl: SqlMetaStore,
    address_info: AddressInfo,
    max_cluster_heartbeat_interval: Duration,
    opts: MetaOpts,
    init_system_params: SystemParams,
    init_session_config: SessionConfig,
    server_config: risingwave_common::config::ServerConfig,
    election_client: ElectionClientRef,
    shutdown: CancellationToken,
) -> MetaResult<()> {
    tracing::info!("starting leader services");

    let env = MetaSrvEnv::new(
        opts.clone(),
        init_system_params,
        init_session_config,
        meta_store_impl,
    )
    .await?;
    tracing::info!("MetaSrvEnv started");
    let _ = env.may_start_watch_license_key_file()?;
    let system_params_reader = env.system_params_reader().await;

    let data_directory = system_params_reader.data_directory();
    if !is_correct_data_directory(data_directory) {
        return Err(MetaError::system_params(format!(
            "The data directory {:?} is misconfigured.
            Please use only letters, digits, underscores, hyphens and slashes, i.e. [a-zA-Z0-9_/-].
            The string cannot start or end with '/', and consecutive '/' are not allowed.
            The data directory cannot be empty and its length should not exceed 800 characters.",
            data_directory
        )));
    }

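    // Bootstrap the metadata managers for cluster membership and the catalog.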
    let cluster_controller = Arc::new(
        ClusterController::new(env.clone(), max_cluster_heartbeat_interval)
            .await
            .unwrap(),
    );
    let catalog_controller = Arc::new(CatalogController::new(env.clone()).await?);
    let metadata_manager = MetadataManager::new(cluster_controller, catalog_controller);

    let serving_vnode_mapping = Arc::new(ServingVnodeMapping::default());
    let max_serving_parallelism = env
        .session_params_manager_impl_ref()
        .get_params()
        .await
        .batch_parallelism()
        .map(|p| p.get());
    serving::on_meta_start(
        env.notification_manager_ref(),
        &metadata_manager,
        serving_vnode_mapping.clone(),
        max_serving_parallelism,
    )
    .await;

    let compactor_manager = Arc::new(
        hummock::CompactorManager::with_meta(env.clone())
            .await
            .unwrap(),
    );
    tracing::info!("CompactorManager started");

    let heartbeat_srv = HeartbeatServiceImpl::new(metadata_manager.clone());
    tracing::info!("HeartbeatServiceImpl started");

    let (compactor_streams_change_tx, compactor_streams_change_rx) =
        tokio::sync::mpsc::unbounded_channel();

    let meta_metrics = Arc::new(GLOBAL_META_METRICS.clone());

    let hummock_manager = hummock::HummockManager::new(
        env.clone(),
        metadata_manager.clone(),
        meta_metrics.clone(),
        compactor_manager.clone(),
        compactor_streams_change_tx,
    )
    .await
    .unwrap();
    tracing::info!("HummockManager started");
    let object_store_media_type = hummock_manager.object_store_media_type();

    let meta_member_srv = MetaMemberServiceImpl::new(election_client.clone());

    let prometheus_client = opts.prometheus_endpoint.as_ref().map(|x| {
        use std::str::FromStr;
        prometheus_http_query::Client::from_str(x).unwrap()
    });
    let prometheus_selector = opts.prometheus_selector.unwrap_or_default();
    let diagnose_command = Arc::new(risingwave_meta::manager::diagnose::DiagnoseCommand::new(
        metadata_manager.clone(),
        env.await_tree_reg().clone(),
        hummock_manager.clone(),
        env.event_log_manager_ref(),
        prometheus_client.clone(),
        prometheus_selector.clone(),
        opts.redact_sql_option_keywords.clone(),
        env.system_params_manager_impl_ref(),
    ));

    let trace_state = otlp_embedded::State::new(otlp_embedded::Config {
        max_length: opts.cached_traces_num,
        max_memory_usage: opts.cached_traces_memory_limit_bytes,
    });
    let trace_srv = otlp_embedded::TraceServiceImpl::new(trace_state.clone());

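    // The embedded dashboard is only compiled and served outside of madsim
    // simulation.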
    #[cfg(not(madsim))]
    let _dashboard_task = if let Some(ref dashboard_addr) = address_info.dashboard_addr {
        use risingwave_common::config::RpcClientConfig;
        use risingwave_rpc_client::MonitorClientPool;

        let dashboard_service = crate::dashboard::DashboardService {
            await_tree_reg: env.await_tree_reg().clone(),
            dashboard_addr: *dashboard_addr,
            prometheus_client,
            prometheus_selector,
            metadata_manager: metadata_manager.clone(),
            hummock_manager: hummock_manager.clone(),
            monitor_clients: MonitorClientPool::new(1, RpcClientConfig::default()),
            diagnose_command,
            profile_service: risingwave_common_service::ProfileServiceImpl::new(
                server_config.clone(),
            ),
            trace_state,
        };
        let task = tokio::spawn(dashboard_service.serve());
        Some(task)
    } else {
        None
    };

    let (barrier_scheduler, scheduled_barriers) =
        BarrierScheduler::new_pair(hummock_manager.clone(), meta_metrics.clone());
    tracing::info!("BarrierScheduler started");

    let backup_manager = BackupManager::new(
        env.clone(),
        hummock_manager.clone(),
        meta_metrics.clone(),
        system_params_reader.backup_storage_url(),
        system_params_reader.backup_storage_directory(),
    )
    .await?;
    tracing::info!("BackupManager started");

    LocalSecretManager::init(
        opts.temp_secret_file_dir,
        env.cluster_id().to_string(),
        META_NODE_ID,
    );
    tracing::info!("LocalSecretManager started");

    let notification_srv = NotificationServiceImpl::new(
        env.clone(),
        metadata_manager.clone(),
        hummock_manager.clone(),
        backup_manager.clone(),
        serving_vnode_mapping.clone(),
    )
    .await?;
    tracing::info!("NotificationServiceImpl started");

    let source_manager = Arc::new(
        SourceManager::new(
            barrier_scheduler.clone(),
            metadata_manager.clone(),
            meta_metrics.clone(),
            env.clone(),
        )
        .await?,
    );
    tracing::info!("SourceManager started");

    let (iceberg_compaction_stat_tx, iceberg_compaction_stat_rx) =
        tokio::sync::mpsc::unbounded_channel();
    let (sink_manager, shutdown_handle) = SinkCoordinatorManager::start_worker(
        env.meta_store_ref().conn.clone(),
        hummock_manager.clone(),
        metadata_manager.clone(),
        iceberg_compaction_stat_tx,
    );
    tracing::info!("SinkCoordinatorManager started");
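    // Background sub-tasks are collected as pairs of a join handle and a
    // shutdown sender, so they can be stopped and awaited on shutdown.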
    let mut sub_tasks = vec![shutdown_handle];

    let iceberg_compactor_manager = Arc::new(IcebergCompactorManager::new());

    let (iceberg_compaction_mgr, iceberg_compactor_event_rx) = IcebergCompactionManager::build(
        env.clone(),
        metadata_manager.clone(),
        iceberg_compactor_manager.clone(),
        meta_metrics.clone(),
    );

    sub_tasks.push(IcebergCompactionManager::compaction_stat_loop(
        iceberg_compaction_mgr.clone(),
        iceberg_compaction_stat_rx,
    ));

    sub_tasks.push(IcebergCompactionManager::gc_loop(
        iceberg_compaction_mgr.clone(),
        env.opts.iceberg_gc_interval_sec,
    ));

    let refresh_scheduler_interval = Duration::from_secs(env.opts.refresh_scheduler_interval_sec);
    let (refresh_manager, refresh_handle, refresh_shutdown) = GlobalRefreshManager::start(
        metadata_manager.clone(),
        barrier_scheduler.clone(),
        &env,
        refresh_scheduler_interval,
    )
    .await?;
    sub_tasks.push((refresh_handle, refresh_shutdown));

    let scale_controller = Arc::new(ScaleController::new(
        &metadata_manager,
        source_manager.clone(),
        env.clone(),
    ));

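    // The global barrier manager drives the injection and collection of
    // barriers that checkpoint the streaming graph.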
    let (barrier_manager, join_handle, shutdown_rx) = GlobalBarrierManager::start(
        scheduled_barriers,
        env.clone(),
        metadata_manager.clone(),
        hummock_manager.clone(),
        source_manager.clone(),
        sink_manager.clone(),
        scale_controller.clone(),
        barrier_scheduler.clone(),
        refresh_manager.clone(),
    )
    .await;
    tracing::info!("GlobalBarrierManager started");
    sub_tasks.push((join_handle, shutdown_rx));

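    // Run the source manager's background loop (e.g. source split discovery).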
    {
        let source_manager = source_manager.clone();
        tokio::spawn(async move {
            source_manager.run().await.unwrap();
        });
    }

    let stream_manager = Arc::new(
        GlobalStreamManager::new(
            env.clone(),
            metadata_manager.clone(),
            barrier_scheduler.clone(),
            source_manager.clone(),
            scale_controller.clone(),
        )
        .unwrap(),
    );

    hummock_manager
        .may_fill_backward_state_table_info()
        .await
        .unwrap();

    let ddl_srv = DdlServiceImpl::new(
        env.clone(),
        metadata_manager.clone(),
        stream_manager.clone(),
        source_manager.clone(),
        barrier_manager.clone(),
        sink_manager.clone(),
        meta_metrics.clone(),
        iceberg_compaction_mgr.clone(),
        barrier_scheduler.clone(),
    )
    .await;

    if env.opts.enable_legacy_table_migration {
        sub_tasks.push(ddl_srv.start_migrate_table_fragments());
    }

    let user_srv = UserServiceImpl::new(metadata_manager.clone());

    let scale_srv = ScaleServiceImpl::new(
        metadata_manager.clone(),
        stream_manager.clone(),
        barrier_manager.clone(),
        env.clone(),
    );

    let cluster_srv = ClusterServiceImpl::new(metadata_manager.clone(), barrier_manager.clone());
    let stream_srv = StreamServiceImpl::new(
        env.clone(),
        barrier_scheduler.clone(),
        barrier_manager.clone(),
        stream_manager.clone(),
        metadata_manager.clone(),
        refresh_manager.clone(),
    );
    let sink_coordination_srv = SinkCoordinationServiceImpl::new(sink_manager);
    let hummock_srv = HummockServiceImpl::new(
        hummock_manager.clone(),
        metadata_manager.clone(),
        backup_manager.clone(),
        iceberg_compaction_mgr.clone(),
    );

    let health_srv = HealthServiceImpl::new();
    let backup_srv = BackupServiceImpl::new(backup_manager.clone());
    let telemetry_srv = TelemetryInfoServiceImpl::new(env.meta_store());
    let system_params_srv = SystemParamsServiceImpl::new(
        env.system_params_manager_impl_ref(),
        env.opts.license_key_path.is_some(),
    );
    let session_params_srv = SessionParamsServiceImpl::new(env.session_params_manager_impl_ref());
    let serving_srv =
        ServingServiceImpl::new(serving_vnode_mapping.clone(), metadata_manager.clone());
    let cloud_srv = CloudServiceImpl::new();
    let event_log_srv = EventLogServiceImpl::new(env.event_log_manager_ref());
    let cluster_limit_srv = ClusterLimitServiceImpl::new(env.clone(), metadata_manager.clone());
    let hosted_iceberg_catalog_srv = HostedIcebergCatalogServiceImpl::new(env.clone());
    let monitor_srv = MonitorServiceImpl::new(
        metadata_manager.clone(),
        env.await_tree_reg().clone(),
        server_config,
    );

    if let Some(prometheus_addr) = address_info.prometheus_addr {
        MetricsManager::boot_metrics_service(prometheus_addr.to_string())
    }

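    // Start the remaining background workers and monitors; their handles are
    // collected into `sub_tasks` as well.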
    sub_tasks.extend(hummock::start_hummock_workers(
        hummock_manager.clone(),
        backup_manager.clone(),
        &env.opts,
    ));
    sub_tasks.push(start_worker_info_monitor(
        metadata_manager.clone(),
        election_client.clone(),
        Duration::from_secs(env.opts.node_num_monitor_interval_sec),
        meta_metrics.clone(),
    ));
    sub_tasks.push(start_info_monitor(
        metadata_manager.clone(),
        hummock_manager.clone(),
        env.system_params_manager_impl_ref(),
        meta_metrics.clone(),
    ));
    sub_tasks.push(SystemParamsController::start_params_notifier(
        env.system_params_manager_impl_ref(),
    ));
    sub_tasks.push(HummockManager::hummock_timer_task(
        hummock_manager.clone(),
        Some(backup_manager),
    ));
    sub_tasks.extend(HummockManager::compaction_event_loop(
        hummock_manager.clone(),
        compactor_streams_change_rx,
    ));

    sub_tasks.extend(IcebergCompactionManager::iceberg_compaction_event_loop(
        iceberg_compaction_mgr.clone(),
        iceberg_compactor_event_rx,
    ));

    sub_tasks.push(serving::start_serving_vnode_mapping_worker(
        env.notification_manager_ref(),
        metadata_manager.clone(),
        serving_vnode_mapping,
        env.session_params_manager_impl_ref(),
    ));

    {
        sub_tasks.push(ClusterController::start_heartbeat_checker(
            metadata_manager.cluster_controller.clone(),
            Duration::from_secs(1),
        ));

        if !env.opts.disable_automatic_parallelism_control {
            sub_tasks.push(stream_manager.start_auto_parallelism_monitor());
        }
    }

    let _idle_checker_handle = IdleManager::start_idle_checker(
        env.idle_manager_ref(),
        Duration::from_secs(30),
        shutdown.clone(),
    );

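    // On shutdown, abort all pending notifications and compactor streams so
    // that the sub-tasks can terminate promptly.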
    let (abort_sender, abort_recv) = tokio::sync::oneshot::channel();
    let notification_mgr = env.notification_manager_ref();
    let stream_abort_handler = tokio::spawn(async move {
        let _ = abort_recv.await;
        notification_mgr.abort_all();
        compactor_manager.abort_all_compactors();
    });
    sub_tasks.push((stream_abort_handler, abort_sender));

    let telemetry_manager = TelemetryManager::new(
        Arc::new(MetaTelemetryInfoFetcher::new(env.cluster_id().clone())),
        Arc::new(MetaReportCreator::new(
            metadata_manager.clone(),
            object_store_media_type,
        )),
    );

    if env.opts.telemetry_enabled && telemetry_env_enabled() {
        sub_tasks.push(telemetry_manager.start().await);
    } else {
        tracing::info!("Telemetry didn't start due to config or environment");
    }
    if !cfg!(madsim) && report_scarf_enabled() {
        tokio::spawn(report_to_scarf());
    } else {
        tracing::info!("Scarf reporting is disabled");
    }

    if let Some(pair) = env.event_log_manager_ref().take_join_handle() {
        sub_tasks.push(pair);
    }

    tracing::info!("Assigned cluster id {:?}", *env.cluster_id());
    tracing::info!("Starting meta services");

    let event = risingwave_pb::meta::event_log::EventMetaNodeStart {
        advertise_addr: address_info.advertise_addr,
        listen_addr: address_info.listen_addr.to_string(),
        opts: serde_json::to_string(&env.opts).unwrap(),
    };
    env.event_log_manager_ref().add_event_logs(vec![
        risingwave_pb::meta::event_log::Event::MetaNodeStart(event),
    ]);

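    // Assemble the gRPC server with the full leader service set. Several
    // services lift tonic's default 4 MiB decoding limit since their messages
    // can be large.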
    let server_builder = tonic::transport::Server::builder()
        .layer(MetricsMiddlewareLayer::new(meta_metrics))
        .layer(TracingExtractLayer::new())
        .add_service(HeartbeatServiceServer::new(heartbeat_srv))
        .add_service(ClusterServiceServer::new(cluster_srv))
        .add_service(StreamManagerServiceServer::new(stream_srv))
        .add_service(
            HummockManagerServiceServer::new(hummock_srv).max_decoding_message_size(usize::MAX),
        )
        .add_service(NotificationServiceServer::new(notification_srv))
        .add_service(MetaMemberServiceServer::new(meta_member_srv))
        .add_service(DdlServiceServer::new(ddl_srv).max_decoding_message_size(usize::MAX))
        .add_service(UserServiceServer::new(user_srv))
        .add_service(CloudServiceServer::new(cloud_srv))
        .add_service(ScaleServiceServer::new(scale_srv).max_decoding_message_size(usize::MAX))
        .add_service(HealthServer::new(health_srv))
        .add_service(BackupServiceServer::new(backup_srv))
        .add_service(SystemParamsServiceServer::new(system_params_srv))
        .add_service(SessionParamServiceServer::new(session_params_srv))
        .add_service(TelemetryInfoServiceServer::new(telemetry_srv))
        .add_service(ServingServiceServer::new(serving_srv))
        .add_service(
            SinkCoordinationServiceServer::new(sink_coordination_srv)
                .max_decoding_message_size(usize::MAX),
        )
        .add_service(
            EventLogServiceServer::new(event_log_srv).max_decoding_message_size(usize::MAX),
        )
        .add_service(ClusterLimitServiceServer::new(cluster_limit_srv))
        .add_service(HostedIcebergCatalogServiceServer::new(
            hosted_iceberg_catalog_srv,
        ))
        .add_service(MonitorServiceServer::new(monitor_srv));

    #[cfg(not(madsim))]
    let server_builder = server_builder.add_service(TraceServiceServer::new(trace_srv));

    let server = server_builder.monitored_serve_with_shutdown(
        address_info.listen_addr,
        "grpc-meta-leader-service",
        TcpConfig {
            tcp_nodelay: true,
            keepalive_duration: None,
        },
        shutdown.clone().cancelled_owned(),
    );
    started::set();
    let _server_handle = tokio::spawn(server);

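    // Block until shutdown is requested; the gRPC server listens on the same
    // cancellation token and drains by itself.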
    shutdown.cancelled().await;
    Ok(())
}

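/// Validates the `data_directory` system parameter: only `[0-9a-zA-Z_/-]` is
/// allowed, it must not start or end with `'/'`, must not contain `"//"`, and
/// must be non-empty and at most 800 characters long.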
fn is_correct_data_directory(data_directory: &str) -> bool {
    let data_directory_regex = Regex::new(r"^[0-9a-zA-Z_/-]{1,}$").unwrap();
    if data_directory.is_empty()
        || !data_directory_regex.is_match(data_directory)
        || data_directory.ends_with('/')
        || data_directory.starts_with('/')
        || data_directory.contains("//")
        || data_directory.len() > 800
    {
        return false;
    }
    true
}