risingwave_connector/
allow_alter_on_fly_fields.rs

1// Copyright 2025 RisingWave Labs
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7//     http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
// THIS FILE IS AUTO-GENERATED. DO NOT EDIT.
16// UPDATE WITH: ./risedev generate-with-options
17// This file is rewritten by `tests::test_allow_alter_on_fly_fields_rust_up_to_date` with
18// `UPDATE_EXPECT=1`.
19// To update content, change source/sink/connection WITH options definitions (for example,
20// `#[with_option(allow_alter_on_fly)]` on struct fields), then run `./risedev generate-with-options`.
21// `./risedev generate-with-options` runs two UPDATE_EXPECT tests:
22// 1) refresh `with_options_{source,sink,connection}.yaml`;
23// 2) regenerate this file from those YAML files.
24
25#![rustfmt::skip]
26
27use std::collections::{HashMap, HashSet};
28use std::sync::LazyLock;
29use crate::error::ConnectorError;
30use crate::sink::remote::JdbcSink;
31use crate::sink::Sink;
32
// Callback macro for `for_all_sources!`: for each `{ variant, PropType, SplitType }`
// entry it re-exports the property type (`$prop_name`) into the invoking scope, so
// `std::any::type_name::<PropType>()` can be called on it there.
// `$variant_name` and `$split` are part of the `for_all_sources!` entry shape and are
// intentionally unused here.
macro_rules! use_source_properties {
    ({ $({ $variant_name:ident, $prop_name:ty, $split:ty }),* }) => {
        $(
            #[allow(unused_imports)]
            pub(super) use $prop_name;
        )*
    };
}
41
/// Re-exports every source property type (via `for_all_sources!` +
/// `use_source_properties!`) and generates the source-name → property-type-name
/// lookup used by `check_source_allow_alter_on_fly_fields`.
mod source_properties {
    use crate::for_all_sources;
    use crate::source::base::SourceProperties;

    // Expand `use_source_properties!` over the full source list, bringing each
    // property type into this module's scope.
    for_all_sources!(use_source_properties);

    /// Implements a function that maps a source name string to the Rust type name of the corresponding property type.
    /// Usage: `impl_source_name_to_prop_type_name!();` will generate:
    /// ```ignore
    /// pub fn source_name_to_prop_type_name(source_name: &str) -> Option<&'static str>
    /// ```
    macro_rules! impl_source_name_to_prop_type_name_inner {
        ({ $({$variant:ident, $prop_name:ty, $split:ty}),* }) => {
            pub fn source_name_to_prop_type_name(source_name: &str) -> Option<&'static str> {
                match source_name {
                    $(
                        // `SOURCE_NAME` comes from the `SourceProperties` trait imported above.
                        <$prop_name>::SOURCE_NAME => Some(std::any::type_name::<$prop_name>()),
                    )*
                    _ => None,
                }
            }
        };
    }

    macro_rules! impl_source_name_to_prop_type_name {
        () => {
            $crate::for_all_sources! { impl_source_name_to_prop_type_name_inner }
        };
    }

    impl_source_name_to_prop_type_name!();
}
74
/// Re-exports every sink config type (via `use_all_sink_configs!`) and generates the
/// sink-name → config-type-name lookup used by `check_sink_allow_alter_on_fly_fields`.
mod sink_properties {
    use crate::use_all_sink_configs;
    use crate::sink::Sink;
    use crate::sink::file_sink::fs::FsSink;

    use_all_sink_configs!();

    /// Generates `sink_name_to_config_type_name(sink_name) -> Option<&'static str>`,
    /// mapping `SINK_NAME` of each sink type to the type name of its config type.
    macro_rules! impl_sink_name_to_config_type_name_inner {
        ({ $({ $variant_name:ident, $sink_type:ty, $config_type:ty }),* }) => {
            pub fn sink_name_to_config_type_name(sink_name: &str) -> Option<&'static str> {
                match sink_name {
                $(
                    <$sink_type>::SINK_NAME => Some(std::any::type_name::<$config_type>()),
                )*
                    _ => None,
                }
            }
        };
    }

    macro_rules! impl_sink_name_to_config_type_name {
        () => {
            $crate::for_all_sinks! { impl_sink_name_to_config_type_name_inner }
        };
    }

    impl_sink_name_to_config_type_name!();
}
103
/// Map of source property type names (keys are produced by `std::any::type_name`,
/// matching `source_properties::source_name_to_prop_type_name`) to the set of
/// WITH-option field names that may be altered on the fly.
pub static SOURCE_ALLOW_ALTER_ON_FLY_FIELDS: LazyLock<HashMap<String, HashSet<String>>> = LazyLock::new(|| {
    use source_properties::*;
    let mut map = HashMap::new();
    // `try_insert(..).unwrap()` panics if the generator ever emits a duplicate key.
    // CDC Properties - added for schema.change.failure.policy
    // NOTE(review): the field lists below do not contain `schema.change.failure.policy`
    // itself — possibly stale; confirm against the generator output.
    map.try_insert(
        std::any::type_name::<MysqlCdcProperties>().to_owned(),
        [
            "cdc.source.wait.streaming.start.timeout".to_owned(),
            "debezium.max.queue.size".to_owned(),
            "debezium.queue.memory.ratio".to_owned(),
            "password".to_owned(),
        ].into_iter().collect(),
    ).unwrap();
    map.try_insert(
        std::any::type_name::<PostgresCdcProperties>().to_owned(),
        [
            "cdc.source.wait.streaming.start.timeout".to_owned(),
            "debezium.max.queue.size".to_owned(),
            "debezium.queue.memory.ratio".to_owned(),
            "password".to_owned(),
        ].into_iter().collect(),
    ).unwrap();
    map.try_insert(
        std::any::type_name::<SqlServerCdcProperties>().to_owned(),
        [
            "cdc.source.wait.streaming.start.timeout".to_owned(),
            "debezium.max.queue.size".to_owned(),
            "debezium.queue.memory.ratio".to_owned(),
            "password".to_owned(),
        ].into_iter().collect(),
    ).unwrap();

    // MongoDB CDC: same as the other CDC sources minus `password`.
    map.try_insert(
        std::any::type_name::<MongodbCdcProperties>().to_owned(),
        [
            "cdc.source.wait.streaming.start.timeout".to_owned(),
            "debezium.max.queue.size".to_owned(),
            "debezium.queue.memory.ratio".to_owned(),
        ].into_iter().collect(),
    ).unwrap();
    // KafkaProperties
    map.try_insert(
        std::any::type_name::<KafkaProperties>().to_owned(),
        [
            "group.id.prefix".to_owned(),
            "properties.sync.call.timeout".to_owned(),
            "properties.security.protocol".to_owned(),
            "properties.ssl.endpoint.identification.algorithm".to_owned(),
            "properties.sasl.mechanism".to_owned(),
            "properties.sasl.username".to_owned(),
            "properties.sasl.password".to_owned(),
            "properties.message.max.bytes".to_owned(),
            "properties.receive.message.max.bytes".to_owned(),
            "properties.statistics.interval.ms".to_owned(),
            "properties.client.id".to_owned(),
            "properties.enable.ssl.certificate.verification".to_owned(),
            "properties.queued.min.messages".to_owned(),
            "properties.queued.max.messages.kbytes".to_owned(),
            "properties.fetch.wait.max.ms".to_owned(),
            "properties.fetch.queue.backoff.ms".to_owned(),
            "properties.fetch.max.bytes".to_owned(),
            "properties.enable.auto.commit".to_owned(),
        ].into_iter().collect(),
    ).unwrap();
    // PubsubProperties
    map.try_insert(
        std::any::type_name::<PubsubProperties>().to_owned(),
        [
            "pubsub.ack_deadline_seconds".to_owned(),
        ].into_iter().collect(),
    ).unwrap();
    map
});
178
/// Map of sink config type names (keys are produced by `std::any::type_name`,
/// matching `sink_properties::sink_name_to_config_type_name`) to the set of
/// WITH-option field names that may be altered on the fly.
///
/// Exception: the JDBC entry is keyed by `JdbcSink::SINK_NAME` rather than a type
/// name; see the TODO(#24846) notes in `check_sink_allow_alter_on_fly_fields`.
pub static SINK_ALLOW_ALTER_ON_FLY_FIELDS: LazyLock<HashMap<String, HashSet<String>>> = LazyLock::new(|| {
    use sink_properties::*;
    let mut map = HashMap::new();
    // `try_insert(..).unwrap()` panics if the generator ever emits a duplicate key.
    // ClickHouseConfig
    map.try_insert(
        std::any::type_name::<ClickHouseConfig>().to_owned(),
        [
            "commit_checkpoint_interval".to_owned(),
        ].into_iter().collect(),
    ).unwrap();
    // DeltaLakeConfig
    map.try_insert(
        std::any::type_name::<DeltaLakeConfig>().to_owned(),
        [
            "commit_checkpoint_interval".to_owned(),
        ].into_iter().collect(),
    ).unwrap();
    // DorisConfig
    map.try_insert(
        std::any::type_name::<DorisConfig>().to_owned(),
        [
            "doris.stream_load.http.timeout.ms".to_owned(),
        ].into_iter().collect(),
    ).unwrap();
    // IcebergConfig
    map.try_insert(
        std::any::type_name::<IcebergConfig>().to_owned(),
        [
            "commit_checkpoint_interval".to_owned(),
            "commit_checkpoint_size_threshold_mb".to_owned(),
            "enable_compaction".to_owned(),
            "compaction_interval_sec".to_owned(),
            "enable_snapshot_expiration".to_owned(),
            "snapshot_expiration_max_age_millis".to_owned(),
            "snapshot_expiration_retain_last".to_owned(),
            "snapshot_expiration_clear_expired_files".to_owned(),
            "snapshot_expiration_clear_expired_meta_data".to_owned(),
            "compaction.max_snapshots_num".to_owned(),
            "compaction.small_files_threshold_mb".to_owned(),
            "compaction.delete_files_count_threshold".to_owned(),
            "compaction.trigger_snapshot_count".to_owned(),
            "compaction.target_file_size_mb".to_owned(),
            "compaction.type".to_owned(),
            "compaction.write_parquet_compression".to_owned(),
            "compaction.write_parquet_max_row_group_rows".to_owned(),
            "compaction.write_parquet_max_row_group_bytes".to_owned(),
        ].into_iter().collect(),
    ).unwrap();
    // KafkaConfig
    map.try_insert(
        std::any::type_name::<KafkaConfig>().to_owned(),
        [
            "properties.sync.call.timeout".to_owned(),
            "properties.security.protocol".to_owned(),
            "properties.ssl.endpoint.identification.algorithm".to_owned(),
            "properties.sasl.mechanism".to_owned(),
            "properties.sasl.username".to_owned(),
            "properties.sasl.password".to_owned(),
            "properties.message.max.bytes".to_owned(),
            "properties.receive.message.max.bytes".to_owned(),
            "properties.statistics.interval.ms".to_owned(),
            "properties.client.id".to_owned(),
            "properties.enable.ssl.certificate.verification".to_owned(),
            "properties.allow.auto.create.topics".to_owned(),
            "properties.queue.buffering.max.messages".to_owned(),
            "properties.queue.buffering.max.kbytes".to_owned(),
            "properties.queue.buffering.max.ms".to_owned(),
            "properties.enable.idempotence".to_owned(),
            "properties.message.send.max.retries".to_owned(),
            "properties.retry.backoff.ms".to_owned(),
            "properties.batch.num.messages".to_owned(),
            "properties.batch.size".to_owned(),
            "properties.message.timeout.ms".to_owned(),
            "properties.max.in.flight.requests.per.connection".to_owned(),
            "properties.request.required.acks".to_owned(),
        ].into_iter().collect(),
    ).unwrap();
    // SnowflakeV2Config
    map.try_insert(
        std::any::type_name::<SnowflakeV2Config>().to_owned(),
        [
            "commit_checkpoint_interval".to_owned(),
        ].into_iter().collect(),
    ).unwrap();
    // StarrocksConfig
    map.try_insert(
        std::any::type_name::<StarrocksConfig>().to_owned(),
        [
            "starrocks.stream_load.http.timeout.ms".to_owned(),
            "commit_checkpoint_interval".to_owned(),
        ].into_iter().collect(),
    ).unwrap();
    // Jdbc
    // NOTE(review): `check_sink_allow_alter_on_fly_fields` resolves JDBC through
    // `CONNECTION_ALLOW_ALTER_ON_FLY_FIELDS`, not this entry — confirm whether this
    // duplicate entry is still needed (see TODO(#24846)).
    map.try_insert(
        JdbcSink::SINK_NAME.to_owned(),
        [
            "jdbc.url".to_owned(),
            "user".to_owned(),
            "password".to_owned(),
        ].into_iter().collect(),
    ).unwrap();
    map
});
283
/// Map of connection property type names (keys are produced by
/// `std::any::type_name`) to the set of WITH-option field names that may be altered
/// on the fly.
///
/// Exception: the JDBC entry is keyed by `JdbcSink::SINK_NAME`; it is read by the
/// JDBC special case in `check_sink_allow_alter_on_fly_fields` (see TODO(#24846)).
pub static CONNECTION_ALLOW_ALTER_ON_FLY_FIELDS: LazyLock<HashMap<String, HashSet<String>>> = LazyLock::new(|| {
    use crate::connector_common::*;
    let mut map = HashMap::new();
    // `try_insert(..).unwrap()` panics if the generator ever emits a duplicate key.
    // KafkaConnection
    map.try_insert(
        std::any::type_name::<KafkaConnection>().to_owned(),
        [
            "properties.security.protocol".to_owned(),
            "properties.ssl.endpoint.identification.algorithm".to_owned(),
            "properties.sasl.mechanism".to_owned(),
            "properties.sasl.username".to_owned(),
            "properties.sasl.password".to_owned(),
        ].into_iter().collect(),
    ).unwrap();
    // Jdbc
    map.try_insert(
        JdbcSink::SINK_NAME.to_owned(),
        [
            "jdbc.url".to_owned(),
            "user".to_owned(),
            "password".to_owned(),
        ].into_iter().collect(),
    ).unwrap();
    map
});
310
311/// Get all source connector names that have `allow_alter_on_fly` fields
312pub fn get_source_connectors_with_allow_alter_on_fly_fields() -> Vec<&'static str> {
313    SOURCE_ALLOW_ALTER_ON_FLY_FIELDS.keys().map(|s| s.as_str()).collect()
314}
315
316/// Get all sink connector names that have `allow_alter_on_fly` fields
317pub fn get_sink_connectors_with_allow_alter_on_fly_fields() -> Vec<&'static str> {
318    SINK_ALLOW_ALTER_ON_FLY_FIELDS.keys().map(|s| s.as_str()).collect()
319}
320
321/// Get all connection names that have `allow_alter_on_fly` fields
322pub fn get_connection_names_with_allow_alter_on_fly_fields() -> Vec<&'static str> {
323    CONNECTION_ALLOW_ALTER_ON_FLY_FIELDS.keys().map(|s| s.as_str()).collect()
324}
325
326/// Checks if all given fields are allowed to be altered on the fly for the specified source connector.
327/// Returns Ok(()) if all fields are allowed, otherwise returns a `ConnectorError`.
328pub fn check_source_allow_alter_on_fly_fields(
329    connector_name: &str,
330    fields: &[String],
331) -> crate::error::ConnectorResult<()> {
332    // Convert connector name to the type name key
333    let Some(type_name) = source_properties::source_name_to_prop_type_name(connector_name) else {
334        return Err(ConnectorError::from(anyhow::anyhow!(
335            "Unknown source connector: {connector_name}"
336        )));
337    };
338    let Some(allowed_fields) = SOURCE_ALLOW_ALTER_ON_FLY_FIELDS.get(type_name) else {
339    return Err(ConnectorError::from(anyhow::anyhow!(
340        "No allow_alter_on_fly fields registered for connector: {connector_name}"
341    )));
342    };
343    for field in fields {
344        if !allowed_fields.contains(field) {
345            return Err(ConnectorError::from(anyhow::anyhow!(
346                "Field '{field}' is not allowed to be altered on the fly for connector: {connector_name}"
347            )));
348        }
349    }
350    Ok(())
351}
352
353pub fn check_connection_allow_alter_on_fly_fields(
354    connection_name: &str,
355    fields: &[String],
356) -> crate::error::ConnectorResult<()> {
357    use crate::source::connection_name_to_prop_type_name;
358
359    // Convert connection name to the type name key
360    let Some(type_name) = connection_name_to_prop_type_name(connection_name) else {
361        return Err(ConnectorError::from(anyhow::anyhow!(
362            "Unknown connection: {connection_name}"
363        )));
364    };
365    let Some(allowed_fields) = CONNECTION_ALLOW_ALTER_ON_FLY_FIELDS.get(type_name) else {
366        return Err(ConnectorError::from(anyhow::anyhow!(
367            "No allow_alter_on_fly fields registered for connection: {connection_name}"
368        )));
369    };
370    for field in fields {
371        if !allowed_fields.contains(field) {
372            return Err(ConnectorError::from(anyhow::anyhow!(
373                "Field '{field}' is not allowed to be altered on the fly for connection: {connection_name}"
374            )));
375        }
376    }
377    Ok(())
378}
379
380/// Checks if all given fields are allowed to be altered on the fly for the specified sink connector.
381/// Returns Ok(()) if all fields are allowed, otherwise returns a `ConnectorError`.
382pub fn check_sink_allow_alter_on_fly_fields(
383    sink_name: &str,
384    fields: &[String],
385) -> crate::error::ConnectorResult<()> {
386    // TODO(#24846): JDBC sink currently uses `()` as sink config type in `for_all_sinks!`,
387    // so it cannot have an isolated key in `SINK_ALLOW_ALTER_ON_FLY_FIELDS`.
388    // Reuse the JDBC entry in `CONNECTION_ALLOW_ALTER_ON_FLY_FIELDS` for now.
389    // TODO(#24846): remove this special case after JDBC sink has a dedicated config type
390    // and allow-alter fields are generated directly into `SINK_ALLOW_ALTER_ON_FLY_FIELDS`.
391    let allowed_fields = if sink_name == JdbcSink::SINK_NAME {
392        CONNECTION_ALLOW_ALTER_ON_FLY_FIELDS.get(JdbcSink::SINK_NAME)
393    } else {
394        // Convert sink name to the type name key
395        let Some(type_name) = sink_properties::sink_name_to_config_type_name(sink_name) else {
396            return Err(ConnectorError::from(anyhow::anyhow!(
397                "Unknown sink connector: {sink_name}"
398            )));
399        };
400        SINK_ALLOW_ALTER_ON_FLY_FIELDS.get(type_name)
401    };
402    let Some(allowed_fields) = allowed_fields else {
403        return Err(ConnectorError::from(anyhow::anyhow!(
404            "No allow_alter_on_fly fields registered for sink: {sink_name}"
405        )));
406    };
407    for field in fields {
408        if !allowed_fields.contains(field) {
409            return Err(ConnectorError::from(anyhow::anyhow!(
410                "Field '{field}' is not allowed to be altered on the fly for sink: {sink_name}"
411            )));
412        }
413    }
414    Ok(())
415}