risingwave_common/session_config/
mod.rs

1// Copyright 2022 RisingWave Labs
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7//     http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15mod iceberg_query_storage_mode;
16mod non_zero64;
17mod opt;
18pub mod parallelism;
19mod query_mode;
20mod search_path;
21pub mod sink_decouple;
22mod statement_timeout;
23mod transaction_isolation_level;
24mod visibility_mode;
25
26use chrono_tz::Tz;
27pub use iceberg_query_storage_mode::IcebergQueryStorageMode;
28use itertools::Itertools;
29pub use opt::OptionConfig;
30pub use query_mode::QueryMode;
31use risingwave_common_proc_macro::{ConfigDoc, SessionConfig};
32pub use search_path::{SearchPath, USER_NAME_WILD_CARD};
33use serde::{Deserialize, Serialize};
34pub use statement_timeout::StatementTimeout;
35use thiserror::Error;
36
37use self::non_zero64::ConfigNonZeroU64;
38use crate::config::mutate::TomlTableMutateExt;
39use crate::config::streaming::{JoinEncodingType, OverWindowCachePolicy};
40use crate::config::{ConfigMergeError, StreamingConfig, merge_streaming_config_section};
41use crate::hash::VirtualNode;
42use crate::session_config::parallelism::{ConfigBackfillParallelism, ConfigParallelism};
43use crate::session_config::sink_decouple::SinkDecouple;
44use crate::session_config::transaction_isolation_level::IsolationLevel;
45pub use crate::session_config::visibility_mode::VisibilityMode;
46use crate::{PG_VERSION, SERVER_ENCODING, SERVER_VERSION_NUM, STANDARD_CONFORMING_STRINGS};
47
/// Separator used for list-valued session config entries.
pub const SESSION_CONFIG_LIST_SEP: &str = ", ";
49
/// Errors that can occur when getting or setting a session config entry.
#[derive(Error, Debug)]
pub enum SessionConfigError {
    /// The provided value failed to parse or failed the entry's check hook.
    #[error("Invalid value `{value}` for `{entry}`")]
    InvalidValue {
        /// Name of the config entry being set.
        entry: &'static str,
        /// The offending value, as provided by the caller.
        value: String,
        /// Underlying parse/validation error.
        source: anyhow::Error,
    },

    /// No config entry with the given name (or alias) exists.
    #[error("Unrecognized config entry `{0}`")]
    UnrecognizedEntry(String),
}
62
/// Convenience alias for results of session config getters/setters.
type SessionConfigResult<T> = std::result::Result<T, SessionConfigError>;

// NOTE(kwannoel): We declare it separately as a constant,
// otherwise seems like it can't infer the type of -1 when written inline.
// A value of -1 disables the corresponding rate limit entirely.
const DISABLE_BACKFILL_RATE_LIMIT: i32 = -1;
const DISABLE_SOURCE_RATE_LIMIT: i32 = -1;
const DISABLE_DML_RATE_LIMIT: i32 = -1;
const DISABLE_SINK_RATE_LIMIT: i32 = -1;

/// Default to bypass cluster limits iff in debug mode.
const BYPASS_CLUSTER_LIMITS: bool = cfg!(debug_assertions);
74
75/// This is the Session Config of RisingWave.
76///
77/// All config entries implement `Display` and `FromStr` for getter and setter, to be read and
78/// altered within a session.
79///
80/// Users can change the default value of a configuration entry using `ALTER SYSTEM SET`. To
81/// facilitate this, a `serde` implementation is used as the wire format for retrieving initial
82/// configurations and updates from the meta service. It's important to note that the meta
83/// service stores the overridden value of each configuration entry per row with `Display` in
84/// the meta store, rather than using the `serde` format. However, we still delegate the `serde`
85/// impl of all fields to `Display`/`FromStr` to make it consistent.
#[serde_with::apply(_ => #[serde_as(as = "serde_with::DisplayFromStr")] )]
#[serde_with::serde_as]
#[derive(Clone, Debug, Deserialize, Serialize, SessionConfig, ConfigDoc, PartialEq)]
pub struct SessionConfig {
    /// If `RW_IMPLICIT_FLUSH` is on, then every INSERT/UPDATE/DELETE statement will block
    /// until the entire dataflow is refreshed. In other words, every related table & MV will
    /// be able to see the write.
    #[parameter(default = false, alias = "rw_implicit_flush")]
    implicit_flush: bool,

    /// If `CREATE_COMPACTION_GROUP_FOR_MV` is on, dedicated compaction groups will be created in
    /// MV creation.
    #[parameter(default = false)]
    create_compaction_group_for_mv: bool,

    /// A temporary config variable to force query running in either local or distributed mode.
    /// The default value is auto which means let the system decide to run batch queries in local
    /// or distributed mode automatically.
    #[parameter(default = QueryMode::default())]
    query_mode: QueryMode,

    /// For Iceberg engine tables, which storage to use for batch SELECT: Iceberg (columnar) or
    /// Hummock (row). Only affects batch SELECT on tables with ENGINE = ICEBERG.
    #[parameter(default = IcebergQueryStorageMode::default())]
    iceberg_query_storage_mode: IcebergQueryStorageMode,

    /// Sets the number of digits displayed for floating-point values.
    /// See <https://www.postgresql.org/docs/current/runtime-config-client.html#:~:text=for%20more%20information.-,extra_float_digits,-(integer)>
    #[parameter(default = 1)]
    extra_float_digits: i32,

    /// Sets the application name to be reported in statistics and logs.
    /// See <https://www.postgresql.org/docs/14/runtime-config-logging.html#:~:text=What%20to%20Log-,application_name,-(string)>
    #[parameter(default = "", flags = "REPORT")]
    application_name: String,

    /// It is typically set by an application upon connection to the server.
    /// see <https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-DATESTYLE>
    #[parameter(default = "", rename = "datestyle")]
    date_style: String,

    /// Force the use of lookup join instead of hash join when possible for local batch execution.
    #[parameter(default = true, alias = "rw_batch_enable_lookup_join")]
    batch_enable_lookup_join: bool,

    /// Enable usage of sortAgg instead of hash agg when order property is satisfied in batch
    /// execution
    #[parameter(default = true, alias = "rw_batch_enable_sort_agg")]
    batch_enable_sort_agg: bool,

    /// Enable distributed DML, so an insert, delete, and update statement can be executed in a distributed way (e.g. running in multiple compute nodes).
    /// No atomicity guarantee in this mode. Its goal is to gain the best ingestion performance for initial batch ingestion where users always can drop their table when failure happens.
    #[parameter(default = false, rename = "batch_enable_distributed_dml")]
    batch_enable_distributed_dml: bool,

    /// Evaluate expression in strict mode for batch queries.
    /// If set to false, an expression failure will not cause an error but leave a null value
    /// on the result set.
    #[parameter(default = true)]
    batch_expr_strict_mode: bool,

    /// The max gap allowed to transform small range scan into multi point lookup.
    #[parameter(default = 8)]
    max_split_range_gap: i32,

    /// Sets the order in which schemas are searched when an object (table, data type, function, etc.)
    /// is referenced by a simple name with no schema specified.
    /// See <https://www.postgresql.org/docs/14/runtime-config-client.html#GUC-SEARCH-PATH>
    #[parameter(default = SearchPath::default())]
    search_path: SearchPath,

    /// If `VISIBILITY_MODE` is all, we will support querying data without checkpoint.
    #[parameter(default = VisibilityMode::default())]
    visibility_mode: VisibilityMode,

    /// See <https://www.postgresql.org/docs/current/transaction-iso.html>
    #[parameter(default = IsolationLevel::default())]
    transaction_isolation: IsolationLevel,

    /// Select as of specific epoch.
    /// Sets the historical epoch for querying data. If 0, querying latest data.
    #[parameter(default = ConfigNonZeroU64::default())]
    query_epoch: ConfigNonZeroU64,

    /// Session timezone. Defaults to UTC.
    #[parameter(default = "UTC", check_hook = check_timezone)]
    timezone: String,

    /// The execution parallelism for streaming queries, including tables, materialized views,
    /// indexes, and sinks. Defaults to `default`, which preserves the legacy adaptive
    /// scheduling behavior during effective resolution.
    #[parameter(default = ConfigParallelism::Default)]
    streaming_parallelism: ConfigParallelism,

    /// Specific parallelism for backfill. Only `default` and a fixed positive integer are
    /// supported here. Adaptive backfill strategies are deferred to a later change.
    #[parameter(
        default = ConfigBackfillParallelism::Default,
        check_hook = check_streaming_parallelism_for_backfill
    )]
    streaming_parallelism_for_backfill: ConfigBackfillParallelism,

    /// Specific parallelism for table. Defaults to `default`, which preserves the legacy
    /// bounded adaptive behavior only when the global parallelism itself remains `default`.
    /// Otherwise it follows the explicit global parallelism.
    #[parameter(default = ConfigParallelism::Default)]
    streaming_parallelism_for_table: ConfigParallelism,

    /// Specific parallelism for sink. By default, it will fall back to `STREAMING_PARALLELISM`.
    #[parameter(default = ConfigParallelism::Default)]
    streaming_parallelism_for_sink: ConfigParallelism,

    /// Specific parallelism for index. By default, it will fall back to `STREAMING_PARALLELISM`.
    #[parameter(default = ConfigParallelism::Default)]
    streaming_parallelism_for_index: ConfigParallelism,

    /// Specific parallelism for source. Defaults to `default`, which preserves the legacy
    /// bounded adaptive behavior only when the global parallelism itself remains `default`.
    /// Otherwise it follows the explicit global parallelism.
    #[parameter(default = ConfigParallelism::Default)]
    streaming_parallelism_for_source: ConfigParallelism,

    /// Specific parallelism for materialized view. By default, it will fall back to `STREAMING_PARALLELISM`.
    #[parameter(default = ConfigParallelism::Default)]
    streaming_parallelism_for_materialized_view: ConfigParallelism,

    /// Enable delta join for streaming queries. Defaults to false.
    #[parameter(default = false, alias = "rw_streaming_enable_delta_join")]
    streaming_enable_delta_join: bool,

    /// Enable bushy join for streaming queries. Defaults to true.
    #[parameter(default = true, alias = "rw_streaming_enable_bushy_join")]
    streaming_enable_bushy_join: bool,

    /// Force filtering to be done inside the join whenever there's a choice between optimizations.
    /// Defaults to false.
    #[parameter(default = false, alias = "rw_streaming_force_filter_inside_join")]
    streaming_force_filter_inside_join: bool,

    /// Enable arrangement backfill for streaming queries. Defaults to true.
    /// When set to true, the parallelism of the upstream fragment will be
    /// decoupled from the parallelism of the downstream scan fragment.
    /// Or more generally, the parallelism of the upstream table / index / mv
    /// will be decoupled from the parallelism of the downstream table / index / mv / sink.
    #[parameter(default = true)]
    streaming_use_arrangement_backfill: bool,

    /// Enable snapshot backfill for streaming queries. Defaults to true.
    #[parameter(default = true)]
    streaming_use_snapshot_backfill: bool,

    /// Enable serverless backfill for streaming queries. Defaults to false.
    #[parameter(default = false)]
    enable_serverless_backfill: bool,

    /// Allow `jsonb` in stream key
    #[parameter(default = false, alias = "rw_streaming_allow_jsonb_in_stream_key")]
    streaming_allow_jsonb_in_stream_key: bool,

    /// Unsafe: allow impure expressions on non-append-only streams without materialization.
    ///
    /// This may lead to inconsistent results or panics due to re-evaluation on updates/retracts.
    #[parameter(default = false)]
    streaming_unsafe_allow_unmaterialized_impure_expr: bool,

    /// Separate consecutive `StreamHashJoin` by no-shuffle `StreamExchange`
    #[parameter(default = false)]
    streaming_separate_consecutive_join: bool,

    /// Separate `StreamSink` by no-shuffle `StreamExchange`
    #[parameter(default = false)]
    streaming_separate_sink: bool,

    /// Determine which encoding will be used to encode join rows in operator cache.
    ///
    /// This overrides the corresponding entry from the `[streaming.developer]` section in the config file,
    /// taking effect for new streaming jobs created in the current session.
    #[parameter(default = None)]
    streaming_join_encoding: OptionConfig<JoinEncodingType>,

    /// Enable join ordering for streaming and batch queries. Defaults to true.
    #[parameter(default = true, alias = "rw_enable_join_ordering")]
    enable_join_ordering: bool,

    /// Enable two phase agg optimization. Defaults to true.
    /// Setting this to true will always set `FORCE_TWO_PHASE_AGG` to false.
    #[parameter(default = true, flags = "SETTER", alias = "rw_enable_two_phase_agg")]
    enable_two_phase_agg: bool,

    /// Force two phase agg optimization whenever there's a choice between
    /// optimizations. Defaults to false.
    /// Setting this to true will always set `ENABLE_TWO_PHASE_AGG` to true.
    #[parameter(default = false, flags = "SETTER", alias = "rw_force_two_phase_agg")]
    force_two_phase_agg: bool,

    /// Enable sharing of common sub-plans.
    /// This means that DAG structured query plans can be constructed,
    /// rather than only tree structured query plans.
    #[parameter(default = true, alias = "rw_enable_share_plan")]
    enable_share_plan: bool,

    /// Force split distinct agg
    #[parameter(default = false, alias = "rw_force_split_distinct_agg")]
    force_split_distinct_agg: bool,

    /// See <https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-INTERVALSTYLE>
    #[parameter(default = "", rename = "intervalstyle")]
    interval_style: String,

    /// If `BATCH_PARALLELISM` is non-zero, batch queries will use this parallelism.
    #[parameter(default = ConfigNonZeroU64::default())]
    batch_parallelism: ConfigNonZeroU64,

    /// The version of PostgreSQL that Risingwave claims to be.
    #[parameter(default = PG_VERSION)]
    server_version: String,

    /// The version of PostgreSQL that Risingwave claims to be, in numeric format.
    #[parameter(default = SERVER_VERSION_NUM)]
    server_version_num: i32,

    /// see <https://www.postgresql.org/docs/15/runtime-config-client.html#GUC-CLIENT-MIN-MESSAGES>
    #[parameter(default = "notice")]
    client_min_messages: String,

    /// see <https://www.postgresql.org/docs/15/runtime-config-client.html#GUC-CLIENT-ENCODING>
    #[parameter(default = SERVER_ENCODING, check_hook = check_client_encoding)]
    client_encoding: String,

    /// Enable decoupling sink and internal streaming graph or not
    #[parameter(default = SinkDecouple::default())]
    sink_decouple: SinkDecouple,

    /// See <https://www.postgresql.org/docs/current/runtime-config-compatible.html#RUNTIME-CONFIG-COMPATIBLE-VERSION>
    /// Unused in RisingWave, support for compatibility.
    #[parameter(default = false)]
    synchronize_seqscans: bool,

    /// Abort query statement that takes more than the specified amount of time in sec. If
    /// `log_min_error_statement` is set to ERROR or lower, the statement that timed out will also be
    /// logged. If this value is specified without units, it is taken as milliseconds. A value of
    /// zero (the default) disables the timeout.
    #[parameter(default = StatementTimeout::default())]
    statement_timeout: StatementTimeout,

    /// Terminate any session that has been idle (that is, waiting for a client query) within an open transaction for longer than the specified amount of time in milliseconds.
    #[parameter(default = 60000u32)]
    idle_in_transaction_session_timeout: u32,

    /// See <https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-LOCK-TIMEOUT>
    /// Unused in RisingWave, support for compatibility.
    #[parameter(default = 0)]
    lock_timeout: i32,

    /// For limiting the startup time of a shareable CDC streaming source when the source is being created. Unit: seconds.
    #[parameter(default = 60)]
    cdc_source_wait_streaming_start_timeout: i32,

    /// see <https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-ROW-SECURITY>.
    /// Unused in RisingWave, support for compatibility.
    #[parameter(default = true)]
    row_security: bool,

    /// see <https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-STANDARD-CONFORMING-STRINGS>
    #[parameter(default = STANDARD_CONFORMING_STRINGS)]
    standard_conforming_strings: String,

    /// Set streaming rate limit (rows per second) for each parallelism for mv / source / sink backfilling
    /// If set to -1, disable rate limit.
    /// If set to 0, this pauses the snapshot read / source read.
    #[parameter(default = DISABLE_BACKFILL_RATE_LIMIT)]
    backfill_rate_limit: i32,

    /// Set streaming rate limit (rows per second) for each parallelism for mv / source backfilling, source reads.
    /// If set to -1, disable rate limit.
    /// If set to 0, this pauses the snapshot read / source read.
    #[parameter(default = DISABLE_SOURCE_RATE_LIMIT)]
    source_rate_limit: i32,

    /// Set streaming rate limit (rows per second) for each parallelism for table DML.
    /// If set to -1, disable rate limit.
    /// If set to 0, this pauses the DML.
    #[parameter(default = DISABLE_DML_RATE_LIMIT)]
    dml_rate_limit: i32,

    /// Set sink rate limit (rows per second) for each parallelism for external sink.
    /// If set to -1, disable rate limit.
    /// If set to 0, this pauses the sink.
    #[parameter(default = DISABLE_SINK_RATE_LIMIT)]
    sink_rate_limit: i32,

    /// Cache policy for partition cache in streaming over window.
    /// Can be `full`, `recent`, `recent_first_n` or `recent_last_n`.
    ///
    /// This overrides the corresponding entry from the `[streaming.developer]` section in the config file,
    /// taking effect for new streaming jobs created in the current session.
    #[parameter(default = None, alias = "rw_streaming_over_window_cache_policy")]
    streaming_over_window_cache_policy: OptionConfig<OverWindowCachePolicy>,

    /// Run DDL statements in background
    #[parameter(default = false)]
    background_ddl: bool,

    /// Enable shared source. Currently only for Kafka.
    ///
    /// When enabled, `CREATE SOURCE` will create a source streaming job, and `CREATE MATERIALIZED VIEWS` from the source
    /// will forward the data from the same source streaming job, and also backfill prior data from the external source.
    #[parameter(default = true)]
    streaming_use_shared_source: bool,

    /// Enable in-memory cache for `AsOf` join executor.
    ///
    /// When enabled (default), `AsOf` join uses the cache-based implementation.
    ///
    /// When disabled, `AsOf` join uses a no-cache implementation that directly queries
    /// the state table on-demand, reducing unnecessary data fetches for cache.
    #[parameter(default = true)]
    streaming_asof_join_use_cache: bool,

    /// Shows the server-side character set encoding. At present, this parameter can be shown but not set, because the encoding is determined at database creation time.
    #[parameter(default = SERVER_ENCODING)]
    server_encoding: String,

    /// Output format for `bytea` values. Only `hex` is supported.
    #[parameter(default = "hex", check_hook = check_bytea_output)]
    bytea_output: String,

    /// Bypass checks on cluster limits
    ///
    /// When enabled, `CREATE MATERIALIZED VIEW` will not fail if the cluster limit is hit.
    #[parameter(default = BYPASS_CLUSTER_LIMITS)]
    bypass_cluster_limits: bool,

    /// The maximum number of parallelism a streaming query can use. Defaults to 256.
    ///
    /// Compared to `STREAMING_PARALLELISM`, which configures the initial parallelism, this configures
    /// the maximum parallelism a streaming query can use in the future, if the cluster size changes or
    /// users manually change the parallelism with `ALTER .. SET PARALLELISM`.
    ///
    /// It's not always a good idea to set this to a very large number, as it may cause performance
    /// degradation when performing range scans on the table or the materialized view.
    // a.k.a. vnode count
    #[parameter(default = VirtualNode::COUNT_FOR_COMPAT, check_hook = check_streaming_max_parallelism)]
    streaming_max_parallelism: usize,

    /// Used to provide the connection information for the iceberg engine.
    /// Format: `iceberg_engine_connection` = `schema_name.connection_name`.
    #[parameter(default = "", check_hook = check_iceberg_engine_connection)]
    iceberg_engine_connection: String,

    /// Whether the streaming join should be unaligned or not.
    #[parameter(default = false)]
    streaming_enable_unaligned_join: bool,

    /// The timeout for reading from the buffer of the sync log store on barrier.
    /// Every epoch we will attempt to read the full buffer of the sync log store.
    /// If we hit the timeout, we will stop reading and continue.
    ///
    /// This overrides the corresponding entry from the `[streaming.developer]` section in the config file,
    /// taking effect for new streaming jobs created in the current session.
    #[parameter(default = None)]
    streaming_sync_log_store_pause_duration_ms: OptionConfig<usize>,

    /// The max buffer size for sync logstore, before we start flushing.
    ///
    /// This overrides the corresponding entry from the `[streaming.developer]` section in the config file,
    /// taking effect for new streaming jobs created in the current session.
    #[parameter(default = None)]
    streaming_sync_log_store_buffer_size: OptionConfig<usize>,

    /// Whether to disable purifying the definition of the table or source upon retrieval.
    /// Only set this if encountering issues with functionalities like `SHOW` or `ALTER TABLE/SOURCE`.
    /// This config may be removed in the future.
    #[parameter(default = false, flags = "NO_ALTER_SYS")]
    disable_purify_definition: bool,

    /// The `ef_search` used in querying hnsw vector index
    #[parameter(default = 40_usize)] // default value borrowed from pg_vector
    batch_hnsw_ef_search: usize,

    /// Enable index selection for queries
    #[parameter(default = true)]
    enable_index_selection: bool,

    /// Enable mv selection for queries
    #[parameter(default = false)]
    enable_mv_selection: bool,

    /// Enable locality backfill for streaming queries. Defaults to false.
    #[parameter(default = false)]
    enable_locality_backfill: bool,

    /// Duration in seconds before notifying the user that a long-running DDL operation (e.g., DROP TABLE, CANCEL JOBS)
    /// is still running. Set to 0 to disable notifications. Defaults to 30 seconds.
    #[parameter(default = 30u32)]
    slow_ddl_notification_secs: u32,

    /// Unsafe: Enable storage retention for non-append-only tables.
    /// Enabling this can lead to streaming inconsistency and node panic
    /// if there is any row INSERT/UPDATE/DELETE operation corresponding to the ttled primary key.
    #[parameter(default = false)]
    unsafe_enable_storage_retention_for_non_append_only_tables: bool,

    /// Enable DataFusion Engine
    /// When enabled, queries involving Iceberg tables will be executed using the DataFusion engine.
    #[parameter(default = true)]
    enable_datafusion_engine: bool,

    /// Prefer hash join over sort merge join in DataFusion engine
    /// When enabled, the DataFusion engine will prioritize hash joins for query execution plans,
    /// potentially improving performance for certain workloads, but may cause OOM for large datasets.
    #[parameter(default = true)]
    datafusion_prefer_hash_join: bool,

    /// Emit chunks in upsert format for `UPDATE` and `DELETE` DMLs.
    /// May lead to undefined behavior if the table is created with `ON CONFLICT DO NOTHING`.
    ///
    /// When enabled:
    /// - `UPDATE` will only emit `Insert` records for new rows, instead of `Update` records.
    /// - `DELETE` will only include key columns and pad the rest with NULL, instead of emitting complete rows.
    #[parameter(default = false)]
    upsert_dml: bool,
}
507
/// Validate the `ICEBERG_ENGINE_CONNECTION` value: either empty (unset) or exactly
/// `schema_name.connection_name`.
fn check_iceberg_engine_connection(val: &str) -> Result<(), String> {
    // An empty string means the connection is not configured; that's acceptable.
    if val.is_empty() {
        return Ok(());
    }

    // A well-formed value contains exactly one `.`, i.e. splits into two parts.
    match val.split('.').count() {
        2 => Ok(()),
        _ => Err("Invalid iceberg engine connection format, Should be set to this format: schema_name.connection_name.".to_owned()),
    }
}
520
521fn check_timezone(val: &str) -> Result<(), String> {
522    // Check if the provided string is a valid timezone.
523    Tz::from_str_insensitive(val).map_err(|_e| "Not a valid timezone")?;
524    Ok(())
525}
526
/// Validate `CLIENT_ENCODING`: only UTF-8 is accepted, in any spelling.
fn check_client_encoding(val: &str) -> Result<(), String> {
    // Postgres normalizes encoding names by dropping non-alphanumerics, so
    // "utf-8", "UTF_8", etc. all reduce to "utf8".
    // https://github.com/postgres/postgres/blob/REL_15_3/src/common/encnames.c#L525
    let normalized: String = val.chars().filter(|c| c.is_ascii_alphanumeric()).collect();
    if normalized.eq_ignore_ascii_case("UTF8") {
        Ok(())
    } else {
        Err("Only support 'UTF8' for CLIENT_ENCODING".to_owned())
    }
}
536
/// Validate `BYTEA_OUTPUT`: only the `hex` format is supported.
fn check_bytea_output(val: &str) -> Result<(), String> {
    match val {
        "hex" => Ok(()),
        _ => Err("Only support 'hex' for BYTEA_OUTPUT".to_owned()),
    }
}
544
545/// Check if the provided value is a valid max parallelism.
546fn check_streaming_max_parallelism(val: &usize) -> Result<(), String> {
547    match val {
548        // TODO(var-vnode): this is to prevent confusion with singletons, after we distinguish
549        // them better, we may allow 1 as the max parallelism (though not much point).
550        0 | 1 => Err("STREAMING_MAX_PARALLELISM must be greater than 1".to_owned()),
551        2..=VirtualNode::MAX_COUNT => Ok(()),
552        _ => Err(format!(
553            "STREAMING_MAX_PARALLELISM must be less than or equal to {}",
554            VirtualNode::MAX_COUNT
555        )),
556    }
557}
558
559fn check_streaming_parallelism_for_backfill(val: &ConfigBackfillParallelism) -> Result<(), String> {
560    match val {
561        ConfigBackfillParallelism::Default | ConfigBackfillParallelism::Fixed(_) => Ok(()),
562        ConfigBackfillParallelism::Adaptive
563        | ConfigBackfillParallelism::Bounded(_)
564        | ConfigBackfillParallelism::Ratio(_) => Err(
565            "Only `default` or fixed backfill parallelism is supported here; adaptive backfill strategy is deferred to a later change.".to_owned(),
566        ),
567    }
568}
569
impl SessionConfig {
    /// Set `FORCE_TWO_PHASE_AGG` to `val`.
    ///
    /// After storing the value via the macro-generated `set_force_two_phase_agg_inner`,
    /// this keeps the two flags consistent: if the resulting `force_two_phase_agg` is
    /// `true`, `ENABLE_TWO_PHASE_AGG` is also set to `true` (forcing implies enabling).
    pub fn set_force_two_phase_agg(
        &mut self,
        val: bool,
        reporter: &mut impl ConfigReporter,
    ) -> SessionConfigResult<bool> {
        let set_val = self.set_force_two_phase_agg_inner(val, reporter)?;
        if self.force_two_phase_agg {
            // Forcing two-phase aggregation only makes sense if it is enabled.
            self.set_enable_two_phase_agg(true, reporter)
        } else {
            Ok(set_val)
        }
    }

    /// Set `ENABLE_TWO_PHASE_AGG` to `val`.
    ///
    /// After storing the value via the macro-generated `set_enable_two_phase_agg_inner`,
    /// this re-syncs `FORCE_TWO_PHASE_AGG`: when `force_two_phase_agg` is currently
    /// `false`, it is (re)set to `false` through the public setter.
    // NOTE(review): the condition below checks the *current* `force_two_phase_agg` rather
    // than `val`, and the returned bool then comes from the force setter — confirm this
    // matches the intended coupling between the two flags.
    pub fn set_enable_two_phase_agg(
        &mut self,
        val: bool,
        reporter: &mut impl ConfigReporter,
    ) -> SessionConfigResult<bool> {
        let set_val = self.set_enable_two_phase_agg_inner(val, reporter)?;
        if !self.force_two_phase_agg {
            self.set_force_two_phase_agg(false, reporter)
        } else {
            Ok(set_val)
        }
    }
}
597
/// A single session-variable entry: name, current value, and description.
pub struct VariableInfo {
    /// Config entry name.
    pub name: String,
    /// Current value, as a string.
    pub setting: String,
    /// Human-readable description of the entry.
    pub description: String,
}
603
/// Report status or notice to caller.
pub trait ConfigReporter {
    /// Called when config entry `key` takes the new value `new_val`.
    fn report_status(&mut self, key: &str, new_val: String);
}
608
// Report nothing: the unit type serves as a no-op reporter that discards all updates.
impl ConfigReporter for () {
    fn report_status(&mut self, _key: &str, _new_val: String) {}
}
613
// Error type for `SessionConfig::to_initial_streaming_config_override`, defined as a
// newtype over `anyhow::Error` via the project-local `def_anyhow_newtype!` macro,
// with dedicated conversions for TOML serialization and config-merge failures.
def_anyhow_newtype! {
    pub SessionConfigToOverrideError,
    toml::ser::Error => "failed to serialize session config",
    ConfigMergeError => transparent,
}
619
impl SessionConfig {
    /// Generate an initial override for the streaming config from the session config.
    ///
    /// Collects the session-level streaming overrides that are set to a non-default
    /// value (join encoding, sync log store pause duration / buffer size, over-window
    /// cache policy) into a TOML string under `streaming.developer.*`, then validates
    /// the result by merging it into a default [`StreamingConfig`].
    ///
    /// # Errors
    /// Fails if TOML serialization fails, if the merge into the default config fails,
    /// or if the merged config reports unrecognized keys.
    pub fn to_initial_streaming_config_override(
        &self,
    ) -> Result<String, SessionConfigToOverrideError> {
        let mut table = toml::Table::new();

        // TODO: make this more type safe.
        // We `unwrap` here to assert the hard-coded keys are correct.
        if let Some(v) = self.streaming_join_encoding.as_ref() {
            table
                .upsert("streaming.developer.join_encoding_type", v)
                .unwrap();
        }
        if let Some(v) = self.streaming_sync_log_store_pause_duration_ms.as_ref() {
            table
                .upsert("streaming.developer.sync_log_store_pause_duration_ms", v)
                .unwrap();
        }
        if let Some(v) = self.streaming_sync_log_store_buffer_size.as_ref() {
            table
                .upsert("streaming.developer.sync_log_store_buffer_size", v)
                .unwrap();
        }
        if let Some(v) = self.streaming_over_window_cache_policy.as_ref() {
            table
                .upsert("streaming.developer.over_window_cache_policy", v)
                .unwrap();
        }

        let res = toml::to_string(&table)?;

        // Validate all fields are valid by trying to merge it to the default config.
        if !res.is_empty() {
            let merged =
                merge_streaming_config_section(&StreamingConfig::default(), res.as_str())?.unwrap();

            let unrecognized_keys = merged.unrecognized_keys().collect_vec();
            if !unrecognized_keys.is_empty() {
                bail!("unrecognized configs: {:?}", unrecognized_keys);
            }
        }

        Ok(res)
    }
}
666
667#[cfg(test)]
668mod test {
669    use expect_test::expect;
670
671    use super::*;
672
    /// Minimal config struct to exercise the `SessionConfig` derive macro in isolation.
    #[derive(SessionConfig)]
    struct TestConfig {
        // `NO_ALTER_SYS` forbids altering this entry at the system level;
        // two aliases are declared for name-resolution testing.
        #[parameter(default = 1, flags = "NO_ALTER_SYS", alias = "test_param_alias" | "alias_param_test")]
        test_param: i32,
    }
678
    /// Aliased names resolve to the same underlying entry for both `set` and `get`,
    /// and the `NO_ALTER_SYS` flag is queryable via the primary name.
    #[test]
    fn test_session_config_alias() {
        let mut config = TestConfig::default();
        // Set via the primary name, read back through an alias.
        config.set("test_param", "2".to_owned(), &mut ()).unwrap();
        assert_eq!(config.get("test_param_alias").unwrap(), "2");
        // Set via the second alias; the change is visible through the first alias.
        config
            .set("alias_param_test", "3".to_owned(), &mut ())
            .unwrap();
        assert_eq!(config.get("test_param_alias").unwrap(), "3");
        // The flag declared on the parameter is reported for the primary name.
        assert!(TestConfig::check_no_alter_sys("test_param").unwrap());
    }
690
691    #[test]
692    fn test_initial_streaming_config_override() {
693        let mut config = SessionConfig::default();
694        config
695            .set_streaming_join_encoding(Some(JoinEncodingType::Cpu).into(), &mut ())
696            .unwrap();
697        config
698            .set_streaming_over_window_cache_policy(
699                Some(OverWindowCachePolicy::RecentFirstN).into(),
700                &mut (),
701            )
702            .unwrap();
703
704        // Check the converted config override string.
705        let override_str = config.to_initial_streaming_config_override().unwrap();
706        expect![[r#"
707            [streaming.developer]
708            join_encoding_type = "cpu_optimized"
709            over_window_cache_policy = "recent_first_n"
710        "#]]
711        .assert_eq(&override_str);
712
713        // Try merging it to the default streaming config.
714        let merged = merge_streaming_config_section(&StreamingConfig::default(), &override_str)
715            .unwrap()
716            .unwrap();
717        assert_eq!(merged.developer.join_encoding_type, JoinEncodingType::Cpu);
718        assert_eq!(
719            merged.developer.over_window_cache_policy,
720            OverWindowCachePolicy::RecentFirstN
721        );
722    }
723
724    #[test]
725    fn test_streaming_parallelism_defaults() {
726        let config = SessionConfig::default();
727
728        assert_eq!(config.streaming_parallelism(), ConfigParallelism::Default);
729        assert_eq!(
730            config.streaming_parallelism_for_table(),
731            ConfigParallelism::Default
732        );
733        assert_eq!(
734            config.streaming_parallelism_for_source(),
735            ConfigParallelism::Default
736        );
737        assert_eq!(
738            config.streaming_parallelism_for_sink(),
739            ConfigParallelism::Default
740        );
741        assert_eq!(
742            config.streaming_parallelism_for_index(),
743            ConfigParallelism::Default
744        );
745        assert_eq!(
746            config.streaming_parallelism_for_materialized_view(),
747            ConfigParallelism::Default
748        );
749    }
750
751    #[test]
752    fn test_streaming_parallelism_default_round_trip() {
753        let mut config = SessionConfig::default();
754
755        assert_eq!(config.get("streaming_parallelism").unwrap(), "default");
756        assert_eq!(
757            config.get("streaming_parallelism_for_table").unwrap(),
758            "default"
759        );
760        assert_eq!(
761            config.get("streaming_parallelism_for_source").unwrap(),
762            "default"
763        );
764
765        config
766            .set("streaming_parallelism", "default".to_owned(), &mut ())
767            .unwrap();
768        assert_eq!(config.get("streaming_parallelism").unwrap(), "default");
769
770        config
771            .set("streaming_parallelism", "bounded(16)".to_owned(), &mut ())
772            .unwrap();
773        config
774            .set(
775                "streaming_parallelism_for_table",
776                "bounded(8)".to_owned(),
777                &mut (),
778            )
779            .unwrap();
780        config
781            .set(
782                "streaming_parallelism_for_source",
783                "bounded(8)".to_owned(),
784                &mut (),
785            )
786            .unwrap();
787
788        assert_eq!(
789            config.reset("streaming_parallelism", &mut ()).unwrap(),
790            "default"
791        );
792        assert_eq!(
793            config
794                .reset("streaming_parallelism_for_table", &mut ())
795                .unwrap(),
796            "default"
797        );
798        assert_eq!(
799            config
800                .reset("streaming_parallelism_for_source", &mut ())
801                .unwrap(),
802            "default"
803        );
804    }
805
806    #[test]
807    fn test_streaming_parallelism_for_backfill_accepts_default_and_fixed() {
808        let mut config = SessionConfig::default();
809
810        config
811            .set(
812                "streaming_parallelism_for_backfill",
813                "default".to_owned(),
814                &mut (),
815            )
816            .unwrap();
817        assert_eq!(
818            config.get("streaming_parallelism_for_backfill").unwrap(),
819            "default"
820        );
821
822        config
823            .set(
824                "streaming_parallelism_for_backfill",
825                "2".to_owned(),
826                &mut (),
827            )
828            .unwrap();
829        assert_eq!(config.streaming_parallelism_for_backfill().to_string(), "2");
830    }
831
832    #[test]
833    fn test_streaming_parallelism_for_backfill_rejects_adaptive_modes() {
834        let mut config = SessionConfig::default();
835        let expected = "Only `default` or fixed backfill parallelism is supported here; adaptive backfill strategy is deferred to a later change.";
836
837        for value in ["adaptive", "bounded(2)", "ratio(0.5)"] {
838            let err = config
839                .set(
840                    "streaming_parallelism_for_backfill",
841                    value.to_owned(),
842                    &mut (),
843                )
844                .unwrap_err();
845
846            match err {
847                SessionConfigError::InvalidValue {
848                    entry,
849                    value: actual_value,
850                    source,
851                } => {
852                    assert_eq!(entry, "streaming_parallelism_for_backfill");
853                    assert_eq!(actual_value, value);
854                    assert_eq!(source.to_string(), expected);
855                }
856                other => panic!("unexpected error: {other:?}"),
857            }
858        }
859    }
860}