risingwave_connector/
allow_alter_on_fly_fields.rs

// Copyright 2025 RisingWave Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// THIS FILE IS AUTO_GENERATED. DO NOT EDIT
// UPDATE WITH: ./risedev generate-with-options

#![rustfmt::skip]

use std::collections::{HashMap, HashSet};
use std::sync::LazyLock;
use crate::error::ConnectorError;

macro_rules! use_source_properties {
    ({ $({ $variant_name:ident, $prop_name:ty, $split:ty }),* }) => {
        $(
            #[allow(unused_imports)]
            pub(super) use $prop_name;
        )*
    };
}

mod source_properties {
    use crate::for_all_sources;
    use crate::source::base::SourceProperties;

    for_all_sources!(use_source_properties);

    /// Implements a function that maps a source name string to the Rust type name of the corresponding property type.
    /// Usage: `impl_source_name_to_prop_type_name!();` will generate:
    /// ```ignore
    /// pub fn source_name_to_prop_type_name(source_name: &str) -> Option<&'static str>
    /// ```
    macro_rules! impl_source_name_to_prop_type_name_inner {
        ({ $({$variant:ident, $prop_name:ty, $split:ty}),* }) => {
            pub fn source_name_to_prop_type_name(source_name: &str) -> Option<&'static str> {
                match source_name {
                    $(
                        <$prop_name>::SOURCE_NAME => Some(std::any::type_name::<$prop_name>()),
                    )*
                    _ => None,
                }
            }
        };
    }

    macro_rules! impl_source_name_to_prop_type_name {
        () => {
            $crate::for_all_sources! { impl_source_name_to_prop_type_name_inner }
        };
    }

    impl_source_name_to_prop_type_name!();
}

mod sink_properties {
    use crate::use_all_sink_configs;
    use crate::sink::Sink;
    use crate::sink::file_sink::fs::FsSink;

    use_all_sink_configs!();

    macro_rules! impl_sink_name_to_config_type_name_inner {
        ({ $({ $variant_name:ident, $sink_type:ty, $config_type:ty }),* }) => {
            pub fn sink_name_to_config_type_name(sink_name: &str) -> Option<&'static str> {
                match sink_name {
                $(
                    <$sink_type>::SINK_NAME => Some(std::any::type_name::<$config_type>()),
                )*
                    _ => None,
                }
            }
        };
    }

    macro_rules! impl_sink_name_to_config_type_name {
        () => {
            $crate::for_all_sinks! { impl_sink_name_to_config_type_name_inner }
        };
    }

    impl_sink_name_to_config_type_name!();
}

/// Map from the property type name of a source connector to its `allow_alter_on_fly` field names
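///
/// Illustrative lookup sketch, using `KafkaProperties` and a field registered below:
///
/// ```ignore
/// let key = std::any::type_name::<source_properties::KafkaProperties>();
/// let fields = SOURCE_ALLOW_ALTER_ON_FLY_FIELDS.get(key).expect("kafka is registered");
/// assert!(fields.contains("group.id.prefix"));
/// ```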
pub static SOURCE_ALLOW_ALTER_ON_FLY_FIELDS: LazyLock<HashMap<String, HashSet<String>>> = LazyLock::new(|| {
    use source_properties::*;
    let mut map = HashMap::new();
    // CDC Properties - added for schema.change.failure.policy
    map.try_insert(
        std::any::type_name::<MysqlCdcProperties>().to_owned(),
        ["cdc.source.wait.streaming.start.timeout".to_owned()].into_iter().collect(),
    ).unwrap();
    map.try_insert(
        std::any::type_name::<PostgresCdcProperties>().to_owned(),
        ["cdc.source.wait.streaming.start.timeout".to_owned()].into_iter().collect(),
    ).unwrap();
    map.try_insert(
        std::any::type_name::<SqlServerCdcProperties>().to_owned(),
        ["cdc.source.wait.streaming.start.timeout".to_owned()].into_iter().collect(),
    ).unwrap();

    map.try_insert(
        std::any::type_name::<MongodbCdcProperties>().to_owned(),
        ["cdc.source.wait.streaming.start.timeout".to_owned()].into_iter().collect(),
    ).unwrap();
    // KafkaProperties
    map.try_insert(
        std::any::type_name::<KafkaProperties>().to_owned(),
        [
            "group.id.prefix".to_owned(),
            "properties.sync.call.timeout".to_owned(),
            "properties.security.protocol".to_owned(),
            "properties.ssl.endpoint.identification.algorithm".to_owned(),
            "properties.sasl.mechanism".to_owned(),
            "properties.sasl.username".to_owned(),
            "properties.sasl.password".to_owned(),
            "properties.message.max.bytes".to_owned(),
            "properties.receive.message.max.bytes".to_owned(),
            "properties.statistics.interval.ms".to_owned(),
            "properties.client.id".to_owned(),
            "properties.enable.ssl.certificate.verification".to_owned(),
            "properties.queued.min.messages".to_owned(),
            "properties.queued.max.messages.kbytes".to_owned(),
            "properties.fetch.wait.max.ms".to_owned(),
            "properties.fetch.queue.backoff.ms".to_owned(),
            "properties.fetch.max.bytes".to_owned(),
            "properties.enable.auto.commit".to_owned(),
        ].into_iter().collect(),
    ).unwrap();
    map
});

/// Map from the config type name of a sink connector to its `allow_alter_on_fly` field names
pub static SINK_ALLOW_ALTER_ON_FLY_FIELDS: LazyLock<HashMap<String, HashSet<String>>> = LazyLock::new(|| {
    use sink_properties::*;
    let mut map = HashMap::new();
    // ClickHouseConfig
    map.try_insert(
        std::any::type_name::<ClickHouseConfig>().to_owned(),
        [
            "commit_checkpoint_interval".to_owned(),
        ].into_iter().collect(),
    ).unwrap();
    // DeltaLakeConfig
    map.try_insert(
        std::any::type_name::<DeltaLakeConfig>().to_owned(),
        [
            "commit_checkpoint_interval".to_owned(),
        ].into_iter().collect(),
    ).unwrap();
    // IcebergConfig
    map.try_insert(
        std::any::type_name::<IcebergConfig>().to_owned(),
        [
            "commit_checkpoint_interval".to_owned(),
            "enable_compaction".to_owned(),
            "compaction_interval_sec".to_owned(),
            "enable_snapshot_expiration".to_owned(),
            "snapshot_expiration_max_age_millis".to_owned(),
            "snapshot_expiration_retain_last".to_owned(),
            "snapshot_expiration_clear_expired_files".to_owned(),
            "snapshot_expiration_clear_expired_meta_data".to_owned(),
            "max_snapshots_num_before_compaction".to_owned(),
        ].into_iter().collect(),
    ).unwrap();
    // KafkaConfig
    map.try_insert(
        std::any::type_name::<KafkaConfig>().to_owned(),
        [
            "properties.sync.call.timeout".to_owned(),
            "properties.security.protocol".to_owned(),
            "properties.ssl.endpoint.identification.algorithm".to_owned(),
            "properties.sasl.mechanism".to_owned(),
            "properties.sasl.username".to_owned(),
            "properties.sasl.password".to_owned(),
            "properties.message.max.bytes".to_owned(),
            "properties.receive.message.max.bytes".to_owned(),
            "properties.statistics.interval.ms".to_owned(),
            "properties.client.id".to_owned(),
            "properties.enable.ssl.certificate.verification".to_owned(),
            "properties.allow.auto.create.topics".to_owned(),
            "properties.queue.buffering.max.messages".to_owned(),
            "properties.queue.buffering.max.kbytes".to_owned(),
            "properties.queue.buffering.max.ms".to_owned(),
            "properties.enable.idempotence".to_owned(),
            "properties.message.send.max.retries".to_owned(),
            "properties.retry.backoff.ms".to_owned(),
            "properties.batch.num.messages".to_owned(),
            "properties.batch.size".to_owned(),
            "properties.message.timeout.ms".to_owned(),
            "properties.max.in.flight.requests.per.connection".to_owned(),
            "properties.request.required.acks".to_owned(),
        ].into_iter().collect(),
    ).unwrap();
    // SnowflakeV2Config
    map.try_insert(
        std::any::type_name::<SnowflakeV2Config>().to_owned(),
        [
            "commit_checkpoint_interval".to_owned(),
        ].into_iter().collect(),
    ).unwrap();
    // StarrocksConfig
    map.try_insert(
        std::any::type_name::<StarrocksConfig>().to_owned(),
        [
            "commit_checkpoint_interval".to_owned(),
        ].into_iter().collect(),
    ).unwrap();
    map
});

/// Map from the type name of a connection to its `allow_alter_on_fly` field names
pub static CONNECTION_ALLOW_ALTER_ON_FLY_FIELDS: LazyLock<HashMap<String, HashSet<String>>> = LazyLock::new(|| {
    use crate::connector_common::*;
    let mut map = HashMap::new();
    // KafkaConnection
    map.try_insert(
        std::any::type_name::<KafkaConnection>().to_owned(),
        [
            "properties.security.protocol".to_owned(),
            "properties.ssl.endpoint.identification.algorithm".to_owned(),
            "properties.sasl.mechanism".to_owned(),
            "properties.sasl.username".to_owned(),
            "properties.sasl.password".to_owned(),
        ].into_iter().collect(),
    ).unwrap();
    map
});

/// Get the property type names of all source connectors that have `allow_alter_on_fly` fields
pub fn get_source_connectors_with_allow_alter_on_fly_fields() -> Vec<&'static str> {
    SOURCE_ALLOW_ALTER_ON_FLY_FIELDS.keys().map(|s| s.as_str()).collect()
}

/// Get the config type names of all sink connectors that have `allow_alter_on_fly` fields
pub fn get_sink_connectors_with_allow_alter_on_fly_fields() -> Vec<&'static str> {
    SINK_ALLOW_ALTER_ON_FLY_FIELDS.keys().map(|s| s.as_str()).collect()
}

/// Get the type names of all connections that have `allow_alter_on_fly` fields
pub fn get_connection_names_with_allow_alter_on_fly_fields() -> Vec<&'static str> {
    CONNECTION_ALLOW_ALTER_ON_FLY_FIELDS.keys().map(|s| s.as_str()).collect()
}

/// Checks if all given fields are allowed to be altered on the fly for the specified source connector.
/// Returns `Ok(())` if all fields are allowed, otherwise returns a `ConnectorError`.
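///
/// Illustrative usage sketch, assuming `kafka` is the registered name of the Kafka source:
///
/// ```ignore
/// // Allowed: `group.id.prefix` is registered for `KafkaProperties` above.
/// check_source_allow_alter_on_fly_fields("kafka", &["group.id.prefix".to_owned()])?;
/// // Rejected: `topic` is not registered as alterable on the fly.
/// assert!(check_source_allow_alter_on_fly_fields("kafka", &["topic".to_owned()]).is_err());
/// ```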
pub fn check_source_allow_alter_on_fly_fields(
    connector_name: &str,
    fields: &[String],
) -> crate::error::ConnectorResult<()> {
    // Convert connector name to the type name key
    let Some(type_name) = source_properties::source_name_to_prop_type_name(connector_name) else {
        return Err(ConnectorError::from(anyhow::anyhow!(
            "Unknown source connector: {connector_name}"
        )));
    };
    let Some(allowed_fields) = SOURCE_ALLOW_ALTER_ON_FLY_FIELDS.get(type_name) else {
        return Err(ConnectorError::from(anyhow::anyhow!(
            "No allow_alter_on_fly fields registered for connector: {connector_name}"
        )));
    };
    for field in fields {
        if !allowed_fields.contains(field) {
            return Err(ConnectorError::from(anyhow::anyhow!(
                "Field '{field}' is not allowed to be altered on the fly for connector: {connector_name}"
            )));
        }
    }
    Ok(())
}

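/// Checks if all given fields are allowed to be altered on the fly for the specified connection.
/// Returns `Ok(())` if all fields are allowed, otherwise returns a `ConnectorError`.
///
/// Illustrative usage sketch, assuming `kafka` is the registered connection name for `KafkaConnection`:
///
/// ```ignore
/// check_connection_allow_alter_on_fly_fields("kafka", &["properties.sasl.username".to_owned()])?;
/// ```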
pub fn check_connection_allow_alter_on_fly_fields(
    connection_name: &str,
    fields: &[String],
) -> crate::error::ConnectorResult<()> {
    use crate::source::connection_name_to_prop_type_name;

    // Convert connection name to the type name key
    let Some(type_name) = connection_name_to_prop_type_name(connection_name) else {
        return Err(ConnectorError::from(anyhow::anyhow!(
            "Unknown connection: {connection_name}"
        )));
    };
    let Some(allowed_fields) = CONNECTION_ALLOW_ALTER_ON_FLY_FIELDS.get(type_name) else {
        return Err(ConnectorError::from(anyhow::anyhow!(
            "No allow_alter_on_fly fields registered for connection: {connection_name}"
        )));
    };
    for field in fields {
        if !allowed_fields.contains(field) {
            return Err(ConnectorError::from(anyhow::anyhow!(
                "Field '{field}' is not allowed to be altered on the fly for connection: {connection_name}"
            )));
        }
    }
    Ok(())
}

/// Checks if all given fields are allowed to be altered on the fly for the specified sink connector.
/// Returns `Ok(())` if all fields are allowed, otherwise returns a `ConnectorError`.
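///
/// Illustrative usage sketch, assuming `clickhouse` is the registered name of the ClickHouse sink:
///
/// ```ignore
/// // Allowed: `commit_checkpoint_interval` is registered for `ClickHouseConfig` above.
/// check_sink_allow_alter_on_fly_fields("clickhouse", &["commit_checkpoint_interval".to_owned()])?;
/// ```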
pub fn check_sink_allow_alter_on_fly_fields(
    sink_name: &str,
    fields: &[String],
) -> crate::error::ConnectorResult<()> {
    // Convert sink name to the type name key
    let Some(type_name) = sink_properties::sink_name_to_config_type_name(sink_name) else {
        return Err(ConnectorError::from(anyhow::anyhow!(
            "Unknown sink connector: {sink_name}"
        )));
    };
    let Some(allowed_fields) = SINK_ALLOW_ALTER_ON_FLY_FIELDS.get(type_name) else {
        return Err(ConnectorError::from(anyhow::anyhow!(
            "No allow_alter_on_fly fields registered for sink: {sink_name}"
        )));
    };
    for field in fields {
        if !allowed_fields.contains(field) {
            return Err(ConnectorError::from(anyhow::anyhow!(
                "Field '{field}' is not allowed to be altered on the fly for sink: {sink_name}"
            )));
        }
    }
    Ok(())
}