risingwave_meta/manager/env.rs

// Copyright 2025 RisingWave Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::ops::Deref;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::atomic::AtomicU32;

use anyhow::Context;
use risingwave_common::config::{
    CompactionConfig, DefaultParallelism, ObjectStoreConfig, RpcClientConfig,
};
use risingwave_common::session_config::SessionConfig;
use risingwave_common::system_param::reader::SystemParamsReader;
use risingwave_common::{bail, system_param};
use risingwave_meta_model::prelude::Cluster;
use risingwave_pb::meta::SystemParams;
use risingwave_rpc_client::{
    FrontendClientPool, FrontendClientPoolRef, StreamClientPool, StreamClientPoolRef,
};
use risingwave_sqlparser::ast::RedactSqlOptionKeywordsRef;
use sea_orm::EntityTrait;

use crate::MetaResult;
use crate::barrier::SharedActorInfos;
use crate::barrier::cdc_progress::{CdcTableBackfillTracker, CdcTableBackfillTrackerRef};
use crate::controller::SqlMetaStore;
use crate::controller::id::{
    IdGeneratorManager as SqlIdGeneratorManager, IdGeneratorManagerRef as SqlIdGeneratorManagerRef,
};
use crate::controller::session_params::{SessionParamsController, SessionParamsControllerRef};
use crate::controller::system_param::{SystemParamsController, SystemParamsControllerRef};
use crate::hummock::sequence::SequenceGenerator;
use crate::manager::event_log::{EventLogManagerRef, start_event_log_manager};
use crate::manager::{IdleManager, IdleManagerRef, NotificationManager, NotificationManagerRef};
use crate::model::ClusterId;
/// [`MetaSrvEnv`] is the global environment in the Meta service. The instance will be shared by
/// all kinds of managers inside Meta.
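///
/// All fields are cheaply cloneable handles (mostly `Arc`s), so cloning the environment is
/// inexpensive and every manager observes the same underlying state. A minimal sketch,
/// illustrative only and assuming `env` was obtained from [`MetaSrvEnv::new`]:
///
/// ```ignore
/// let env2 = env.clone(); // cheap: only shared handles are cloned
/// assert_eq!(env.cluster_id(), env2.cluster_id());
/// ```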
#[derive(Clone)]
pub struct MetaSrvEnv {
    /// id generator manager.
    id_gen_manager_impl: SqlIdGeneratorManagerRef,

    /// system param manager.
    system_param_manager_impl: SystemParamsControllerRef,

    /// session param manager.
    session_param_manager_impl: SessionParamsControllerRef,

    /// meta store.
    meta_store_impl: SqlMetaStore,

    /// notification manager.
    notification_manager: NotificationManagerRef,

    pub shared_actor_info: SharedActorInfos,

    /// stream client pool memoization.
    stream_client_pool: StreamClientPoolRef,

    /// rpc client pool for frontend nodes.
    frontend_client_pool: FrontendClientPoolRef,

    /// idle status manager.
    idle_manager: IdleManagerRef,

    event_log_manager: EventLogManagerRef,

    /// Unique identifier of the cluster.
    cluster_id: ClusterId,

    pub hummock_seq: Arc<SequenceGenerator>,

    /// The await-tree registry of the current meta node.
    await_tree_reg: await_tree::Registry,

    /// options read by all services
    pub opts: Arc<MetaOpts>,

    pub cdc_table_backfill_tracker: CdcTableBackfillTrackerRef,

    actor_id_generator: Arc<AtomicU32>,
}

/// Options shared by all meta service instances.
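///
/// Services read these immutable options through the shared environment. A minimal sketch,
/// illustrative only and assuming an `env: MetaSrvEnv` is at hand:
///
/// ```ignore
/// let period = env.opts.parallelism_control_trigger_period_sec;
/// ```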
#[derive(Clone, serde::Serialize)]
pub struct MetaOpts {
    /// Whether to enable the recovery of the cluster. If disabled, the meta service will exit in
    /// abnormal cases.
    pub enable_recovery: bool,
    /// Whether to disable the auto-scaling feature.
    pub disable_automatic_parallelism_control: bool,
    /// The number of streaming jobs per scaling operation.
    pub parallelism_control_batch_size: usize,
    /// The period of the parallelism control trigger.
    pub parallelism_control_trigger_period_sec: u64,
    /// The first delay of the parallelism control trigger.
    pub parallelism_control_trigger_first_delay_sec: u64,
    /// The maximum number of barriers in-flight in the compute nodes.
    pub in_flight_barrier_nums: usize,
    /// After the specified number of milliseconds of idle time (no mview or flush), the process
    /// will exit. 0 means infinite; the process will never exit due to long idle time.
    pub max_idle_ms: u64,
    /// Whether to run in compaction deterministic test mode.
    pub compaction_deterministic_test: bool,
    /// Default parallelism of units for all streaming jobs.
    pub default_parallelism: DefaultParallelism,

    /// Interval of invoking a vacuum job, to remove stale metadata from the meta store and
    /// objects from the object store.
    pub vacuum_interval_sec: u64,
    /// The spin interval inside a vacuum job. It prevents the vacuum job from monopolizing
    /// resources of the meta node.
    pub vacuum_spin_interval_ms: u64,
    /// Interval of invoking iceberg garbage collection, to expire old snapshots.
    pub iceberg_gc_interval_sec: u64,
    pub time_travel_vacuum_interval_sec: u64,
    /// Interval of hummock version checkpoint.
    pub hummock_version_checkpoint_interval_sec: u64,
    pub enable_hummock_data_archive: bool,
    pub hummock_time_travel_snapshot_interval: u64,
    pub hummock_time_travel_sst_info_fetch_batch_size: usize,
    pub hummock_time_travel_sst_info_insert_batch_size: usize,
    pub hummock_time_travel_epoch_version_insert_batch_size: usize,
    pub hummock_gc_history_insert_batch_size: usize,
    pub hummock_time_travel_filter_out_objects_batch_size: usize,
    pub hummock_time_travel_filter_out_objects_v1: bool,
    pub hummock_time_travel_filter_out_objects_list_version_batch_size: usize,
    pub hummock_time_travel_filter_out_objects_list_delta_batch_size: usize,
    /// The minimum number of delta logs a new checkpoint should compact; otherwise the checkpoint
    /// attempt is rejected. A greater value reduces object store IO, but results in more loss of
    /// the in-memory `HummockVersionCheckpoint::stale_objects` state when the meta node is
    /// restarted.
    pub min_delta_log_num_for_hummock_version_checkpoint: u64,
    /// Objects within `min_sst_retention_time_sec` won't be deleted by hummock full GC, even if
    /// they are dangling.
    pub min_sst_retention_time_sec: u64,
    /// Interval of automatic hummock full GC.
    pub full_gc_interval_sec: u64,
    /// Max number of objects a full GC job can fetch.
    pub full_gc_object_limit: u64,
    /// Duration in seconds to retain garbage collection history data.
    pub gc_history_retention_time_sec: u64,
    /// Max number of in-flight time travel queries.
    pub max_inflight_time_travel_query: u64,
    /// Enable sanity check when SSTs are committed.
    pub enable_committed_sst_sanity_check: bool,
    /// Schedule compaction for all compaction groups with this interval.
    pub periodic_compaction_interval_sec: u64,
    /// Interval of reporting the number of nodes in the cluster.
    pub node_num_monitor_interval_sec: u64,
    /// Whether to protect dropping a table that has incoming sinks.
    pub protect_drop_table_with_incoming_sink: bool,
    /// The Prometheus endpoint for the Meta Dashboard Service.
    /// The Dashboard service uses this in the following ways:
    /// 1. Query Prometheus for relevant metrics to find the Stream Graph bottleneck, and display it.
    /// 2. Provide cluster diagnostics at `/api/monitor/diagnose` to troubleshoot the cluster.
    ///
    /// These are just examples of how the Meta Dashboard Service queries Prometheus.
    pub prometheus_endpoint: Option<String>,

    /// The additional selector used when querying Prometheus.
    pub prometheus_selector: Option<String>,

    /// The VPC id of the cluster.
    pub vpc_id: Option<String>,

    /// A usable security group id to assign to a VPC endpoint.
    pub security_group_id: Option<String>,

    /// Default tags for the endpoint created when creating a privatelink connection.
    /// Will be appended to the tags specified in the `tags` field of the `WITH` clause in
    /// `CREATE CONNECTION`.
    pub privatelink_endpoint_default_tags: Option<Vec<(String, String)>>,

    /// Schedule `space_reclaim_compaction` for all compaction groups with this interval.
    pub periodic_space_reclaim_compaction_interval_sec: u64,

    /// Whether telemetry is enabled in the config file.
    pub telemetry_enabled: bool,
    /// Schedule `ttl_reclaim_compaction` for all compaction groups with this interval.
    pub periodic_ttl_reclaim_compaction_interval_sec: u64,

    /// Schedule `tombstone_reclaim_compaction` for all compaction groups with this interval.
    pub periodic_tombstone_reclaim_compaction_interval_sec: u64,

    /// Schedule compaction group splitting for all compaction groups with this interval.
    pub periodic_scheduling_compaction_group_split_interval_sec: u64,

    /// Whether to skip configuring the object storage bucket lifecycle that purges stale data.
    pub do_not_config_object_storage_lifecycle: bool,

    pub partition_vnode_count: u32,

    /// Threshold of high write throughput for a state table, in bytes/sec.
    pub table_high_write_throughput_threshold: u64,
    /// Threshold of low write throughput for a state table, in bytes/sec.
    pub table_low_write_throughput_threshold: u64,

    pub compaction_task_max_heartbeat_interval_secs: u64,
    pub compaction_task_max_progress_interval_secs: u64,
    pub compaction_config: Option<CompactionConfig>,

    /// Hybrid compaction group config.
    ///
    /// `hybrid_partition_vnode_count` determines the granularity of vnodes in the hybrid
    /// compaction group for SST alignment. When `hybrid_partition_vnode_count` > 0, in the hybrid
    /// compaction group:
    /// - tables with high write throughput will be split at vnode granularity;
    /// - tables with a large size will be split at table granularity.
    ///
    /// When `hybrid_partition_vnode_count` = 0, no special alignment is applied to the hybrid
    /// compaction group.
    pub hybrid_partition_node_count: u32,

    pub event_log_enabled: bool,
    pub event_log_channel_max_size: u32,
    pub advertise_addr: String,
    /// The number of traces to be cached in-memory by the tracing collector
    /// embedded in the meta node.
    pub cached_traces_num: u32,
    /// The maximum memory usage in bytes for the tracing collector embedded
    /// in the meta node.
    pub cached_traces_memory_limit_bytes: usize,

    /// Whether the L0 picker selects trivial move tasks.
    pub enable_trivial_move: bool,

    /// Whether the L0 multi-level picker checks overlap accuracy between sub-levels.
    pub enable_check_task_level_overlap: bool,
    pub enable_dropped_column_reclaim: bool,

    /// The size ratio threshold for splitting a compaction group when the size of the group
    /// exceeds it.
    pub split_group_size_ratio: f64,

    /// The interval in seconds for the refresh scheduler to check and trigger scheduled refreshes.
    pub refresh_scheduler_interval_sec: u64,

    /// Split the compaction group when the high-write-throughput ratio in its table statistics
    /// exceeds this threshold.
    pub table_stat_high_write_throughput_ratio_for_split: f64,

    /// Merge the compaction group when the low-write-throughput ratio in its table statistics
    /// exceeds this threshold.
    pub table_stat_low_write_throughput_ratio_for_merge: f64,

    /// The window, in seconds, of table throughput statistic history used for splitting
    /// compaction groups.
    pub table_stat_throuput_window_seconds_for_split: usize,

    /// The window, in seconds, of table throughput statistic history used for merging
    /// compaction groups.
    pub table_stat_throuput_window_seconds_for_merge: usize,

    /// The configuration of the object store.
    pub object_store_config: ObjectStoreConfig,

    /// The maximum number of trivial move tasks to be picked in a single loop.
    pub max_trivial_move_task_count_per_loop: usize,

    /// The maximum number of times to probe for `PullTaskEvent`.
    pub max_get_task_probe_times: usize,

    pub compact_task_table_size_partition_threshold_low: u64,
    pub compact_task_table_size_partition_threshold_high: u64,

    pub periodic_scheduling_compaction_group_merge_interval_sec: u64,

    pub compaction_group_merge_dimension_threshold: f64,

    /// The private key for the secret store, used when the secret is stored in the meta.
    pub secret_store_private_key: Option<Vec<u8>>,
    /// The path of the temp secret file directory.
    pub temp_secret_file_dir: String,

    // Cluster limits
    pub actor_cnt_per_worker_parallelism_hard_limit: usize,
    pub actor_cnt_per_worker_parallelism_soft_limit: usize,

    pub license_key_path: Option<PathBuf>,

    pub compute_client_config: RpcClientConfig,
    pub stream_client_config: RpcClientConfig,
    pub frontend_client_config: RpcClientConfig,
    pub redact_sql_option_keywords: RedactSqlOptionKeywordsRef,

    pub cdc_table_split_init_sleep_interval_splits: u64,
    pub cdc_table_split_init_sleep_duration_millis: u64,
    pub cdc_table_split_init_insert_batch_size: u64,

    pub enable_legacy_table_migration: bool,
    pub pause_on_next_bootstrap_offline: bool,
}

impl MetaOpts {
    /// Default opts for testing. Some tests need `enable_recovery=true`.
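    ///
    /// A minimal sketch of tweaking a knob on top of the test defaults (illustrative only;
    /// it relies on all fields being public):
    ///
    /// ```ignore
    /// let mut opts = MetaOpts::test(true); // recovery enabled for this test
    /// opts.max_idle_ms = 1_000;
    /// ```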
    pub fn test(enable_recovery: bool) -> Self {
        Self {
            enable_recovery,
            disable_automatic_parallelism_control: false,
            parallelism_control_batch_size: 1,
            parallelism_control_trigger_period_sec: 10,
            parallelism_control_trigger_first_delay_sec: 30,
            in_flight_barrier_nums: 40,
            max_idle_ms: 0,
            compaction_deterministic_test: false,
            default_parallelism: DefaultParallelism::Full,
            vacuum_interval_sec: 30,
            time_travel_vacuum_interval_sec: 30,
            vacuum_spin_interval_ms: 0,
            iceberg_gc_interval_sec: 3600,
            hummock_version_checkpoint_interval_sec: 30,
            enable_hummock_data_archive: false,
            hummock_time_travel_snapshot_interval: 0,
            hummock_time_travel_sst_info_fetch_batch_size: 10_000,
            hummock_time_travel_sst_info_insert_batch_size: 10,
            hummock_time_travel_epoch_version_insert_batch_size: 1000,
            hummock_gc_history_insert_batch_size: 1000,
            hummock_time_travel_filter_out_objects_batch_size: 1000,
            hummock_time_travel_filter_out_objects_v1: false,
            hummock_time_travel_filter_out_objects_list_version_batch_size: 10,
            hummock_time_travel_filter_out_objects_list_delta_batch_size: 1000,
            min_delta_log_num_for_hummock_version_checkpoint: 1,
            min_sst_retention_time_sec: 3600 * 24 * 7,
            full_gc_interval_sec: 3600 * 24 * 7,
            full_gc_object_limit: 100_000,
            gc_history_retention_time_sec: 3600 * 24 * 7,
            max_inflight_time_travel_query: 1000,
            enable_committed_sst_sanity_check: false,
            periodic_compaction_interval_sec: 60,
            node_num_monitor_interval_sec: 10,
            protect_drop_table_with_incoming_sink: false,
            prometheus_endpoint: None,
            prometheus_selector: None,
            vpc_id: None,
            security_group_id: None,
            privatelink_endpoint_default_tags: None,
            periodic_space_reclaim_compaction_interval_sec: 60,
            telemetry_enabled: false,
            periodic_ttl_reclaim_compaction_interval_sec: 60,
            periodic_tombstone_reclaim_compaction_interval_sec: 60,
            periodic_scheduling_compaction_group_split_interval_sec: 60,
            compact_task_table_size_partition_threshold_low: 128 * 1024 * 1024,
            compact_task_table_size_partition_threshold_high: 512 * 1024 * 1024,
            table_high_write_throughput_threshold: 128 * 1024 * 1024,
            table_low_write_throughput_threshold: 64 * 1024 * 1024,
            do_not_config_object_storage_lifecycle: true,
            partition_vnode_count: 32,
            compaction_task_max_heartbeat_interval_secs: 0,
            compaction_task_max_progress_interval_secs: 1,
            compaction_config: None,
            hybrid_partition_node_count: 4,
            event_log_enabled: false,
            event_log_channel_max_size: 1,
            advertise_addr: "".to_owned(),
            cached_traces_num: 1,
            cached_traces_memory_limit_bytes: usize::MAX,
            enable_trivial_move: true,
            enable_check_task_level_overlap: true,
            enable_dropped_column_reclaim: false,
            object_store_config: ObjectStoreConfig::default(),
            max_trivial_move_task_count_per_loop: 256,
            max_get_task_probe_times: 5,
            secret_store_private_key: Some(
                hex::decode("0123456789abcdef0123456789abcdef").unwrap(),
            ),
            temp_secret_file_dir: "./secrets".to_owned(),
            actor_cnt_per_worker_parallelism_hard_limit: usize::MAX,
            actor_cnt_per_worker_parallelism_soft_limit: usize::MAX,
            split_group_size_ratio: 0.9,
            table_stat_high_write_throughput_ratio_for_split: 0.5,
            table_stat_low_write_throughput_ratio_for_merge: 0.7,
            table_stat_throuput_window_seconds_for_split: 60,
            table_stat_throuput_window_seconds_for_merge: 240,
            periodic_scheduling_compaction_group_merge_interval_sec: 60 * 10,
            compaction_group_merge_dimension_threshold: 1.2,
            license_key_path: None,
            compute_client_config: RpcClientConfig::default(),
            stream_client_config: RpcClientConfig::default(),
            frontend_client_config: RpcClientConfig::default(),
            redact_sql_option_keywords: Arc::new(Default::default()),
            cdc_table_split_init_sleep_interval_splits: 1000,
            cdc_table_split_init_sleep_duration_millis: 10,
            cdc_table_split_init_insert_batch_size: 1000,
            enable_legacy_table_migration: true,
            refresh_scheduler_interval_sec: 60,
            pause_on_next_bootstrap_offline: false,
        }
    }
}

impl MetaSrvEnv {
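    /// Creates the global environment: brings the meta store schema up to date, persists or
    /// loads system and session parameters, and wires up the shared managers and client pools.
    ///
    /// A minimal sketch of a construction path, modeled on the test helpers below (the
    /// already-connected `meta_store` is an assumption):
    ///
    /// ```ignore
    /// let env = MetaSrvEnv::new(
    ///     MetaOpts::test(false),
    ///     risingwave_common::system_param::system_params_for_test(),
    ///     SessionConfig::default(),
    ///     meta_store,
    /// )
    /// .await?;
    /// ```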
    pub async fn new(
        opts: MetaOpts,
        mut init_system_params: SystemParams,
        init_session_config: SessionConfig,
        meta_store_impl: SqlMetaStore,
    ) -> MetaResult<Self> {
        let idle_manager = Arc::new(IdleManager::new(opts.max_idle_ms));
        let stream_client_pool =
            Arc::new(StreamClientPool::new(1, opts.stream_client_config.clone())); // typically no need for multiple clients
        let frontend_client_pool = Arc::new(FrontendClientPool::new(
            1,
            opts.frontend_client_config.clone(),
        ));
        let event_log_manager = Arc::new(start_event_log_manager(
            opts.event_log_enabled,
            opts.event_log_channel_max_size,
        ));

        // When a license key path is specified, the license key from system parameters could be
        // easily overwritten, so we simply reject this case.
        if opts.license_key_path.is_some()
            && init_system_params.license_key
                != system_param::default::license_key_opt().map(Into::into)
        {
            bail!(
                "argument `--license-key-path` (or env var `RW_LICENSE_KEY_PATH`) and \
                 system parameter `license_key` (or env var `RW_LICENSE_KEY`) may not \
                 be set at the same time"
            );
        }

        let cluster_first_launch = meta_store_impl.up().await.context(
            "Failed to initialize the meta store. \
            This may happen if there's existing metadata incompatible with the current version of RisingWave, \
            e.g., downgrading from a newer release or a nightly build to an older one. \
            For a single-node deployment, you may want to reset all data by deleting the data directory, \
            typically located at `~/.risingwave`.",
        )?;

        let notification_manager =
            Arc::new(NotificationManager::new(meta_store_impl.clone()).await);
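        // A `cluster` row is written while bringing the meta store up (`up()` above), so the
        // query is expected to return one; the `unwrap` relies on that invariant.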
        let cluster_id = Cluster::find()
            .one(&meta_store_impl.conn)
            .await?
            .map(|c| c.cluster_id.to_string().into())
            .unwrap();

        // For new clusters:
        // - the name of the object store needs to be prefixed according to the object id.
        //
        // For old clusters:
        // - the prefix is not divided, for the sake of compatibility.
        init_system_params.use_new_object_prefix_strategy = Some(cluster_first_launch);

        let system_param_controller = Arc::new(
            SystemParamsController::new(
                meta_store_impl.clone(),
                notification_manager.clone(),
                init_system_params,
            )
            .await?,
        );
        let session_param_controller = Arc::new(
            SessionParamsController::new(
                meta_store_impl.clone(),
                notification_manager.clone(),
                init_session_config,
            )
            .await?,
        );
        let cdc_table_backfill_tracker = CdcTableBackfillTracker::new(meta_store_impl.clone())
            .await?
            .into();
        Ok(Self {
            id_gen_manager_impl: Arc::new(SqlIdGeneratorManager::new(&meta_store_impl.conn).await?),
            system_param_manager_impl: system_param_controller,
            session_param_manager_impl: session_param_controller,
            meta_store_impl: meta_store_impl.clone(),
            shared_actor_info: SharedActorInfos::new(notification_manager.clone()),
            notification_manager,
            stream_client_pool,
            frontend_client_pool,
            idle_manager,
            event_log_manager,
            cluster_id,
            hummock_seq: Arc::new(SequenceGenerator::new(meta_store_impl.conn.clone())),
            opts: opts.into(),
            // Await trees on the meta node are lightweight, thus always enabled.
            await_tree_reg: await_tree::Registry::new(Default::default()),
            cdc_table_backfill_tracker,
            actor_id_generator: Arc::new(AtomicU32::new(0)),
        })
    }

    pub fn meta_store(&self) -> SqlMetaStore {
        self.meta_store_impl.clone()
    }

    pub fn meta_store_ref(&self) -> &SqlMetaStore {
        &self.meta_store_impl
    }

    pub fn id_gen_manager(&self) -> &SqlIdGeneratorManagerRef {
        &self.id_gen_manager_impl
    }

    pub fn notification_manager_ref(&self) -> NotificationManagerRef {
        self.notification_manager.clone()
    }

    pub fn notification_manager(&self) -> &NotificationManager {
        self.notification_manager.deref()
    }

    pub fn idle_manager_ref(&self) -> IdleManagerRef {
        self.idle_manager.clone()
    }

    pub fn idle_manager(&self) -> &IdleManager {
        self.idle_manager.deref()
    }

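    /// In-process counter backing actor id allocation.
    ///
    /// A hedged sketch of how a caller might draw a fresh id (the usage pattern is an
    /// assumption, not taken from this file):
    ///
    /// ```ignore
    /// use std::sync::atomic::Ordering;
    /// let next_actor_id = env.actor_id_generator().fetch_add(1, Ordering::Relaxed);
    /// ```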
    pub fn actor_id_generator(&self) -> &AtomicU32 {
        self.actor_id_generator.deref()
    }

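    /// Returns a read-only snapshot of the current system parameters.
    ///
    /// A minimal sketch, assuming an async context and a generated `SystemParamsReader`
    /// accessor such as `barrier_interval_ms` (the accessor name is an assumption):
    ///
    /// ```ignore
    /// let params = env.system_params_reader().await;
    /// let interval_ms = params.barrier_interval_ms();
    /// ```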
    pub async fn system_params_reader(&self) -> SystemParamsReader {
        self.system_param_manager_impl.get_params().await
    }

    pub fn system_params_manager_impl_ref(&self) -> SystemParamsControllerRef {
        self.system_param_manager_impl.clone()
    }

    pub fn session_params_manager_impl_ref(&self) -> SessionParamsControllerRef {
        self.session_param_manager_impl.clone()
    }

    pub fn stream_client_pool_ref(&self) -> StreamClientPoolRef {
        self.stream_client_pool.clone()
    }

    pub fn stream_client_pool(&self) -> &StreamClientPool {
        self.stream_client_pool.deref()
    }

    pub fn frontend_client_pool(&self) -> &FrontendClientPool {
        self.frontend_client_pool.deref()
    }

    pub fn cluster_id(&self) -> &ClusterId {
        &self.cluster_id
    }

    pub fn event_log_manager_ref(&self) -> EventLogManagerRef {
        self.event_log_manager.clone()
    }

    pub fn await_tree_reg(&self) -> &await_tree::Registry {
        &self.await_tree_reg
    }

    pub fn shared_actor_infos(&self) -> &SharedActorInfos {
        &self.shared_actor_info
    }

    pub fn cdc_table_backfill_tracker(&self) -> CdcTableBackfillTrackerRef {
        self.cdc_table_backfill_tracker.clone()
    }
}

#[cfg(any(test, feature = "test"))]
impl MetaSrvEnv {
    /// Instance for test.
    pub async fn for_test() -> Self {
        Self::for_test_opts(MetaOpts::test(false), |_| ()).await
    }

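    /// Like [`MetaSrvEnv::for_test`], but lets the caller customize the options and patch the
    /// test system parameters before the environment is built.
    ///
    /// A minimal sketch (the patched field is an assumption about `PbSystemParams`, shown for
    /// illustration only):
    ///
    /// ```ignore
    /// let env = MetaSrvEnv::for_test_opts(MetaOpts::test(true), |p| {
    ///     p.barrier_interval_ms = Some(250); // hypothetical tweak
    /// })
    /// .await;
    /// ```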
    pub async fn for_test_opts(
        opts: MetaOpts,
        on_test_system_params: impl FnOnce(&mut risingwave_pb::meta::PbSystemParams),
    ) -> Self {
        let mut system_params = risingwave_common::system_param::system_params_for_test();
        on_test_system_params(&mut system_params);
        Self::new(
            opts,
            system_params,
            Default::default(),
            SqlMetaStore::for_test().await,
        )
        .await
        .unwrap()
    }
}