risingwave_connector/allow_alter_on_fly_fields.rs

// Copyright 2025 RisingWave Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// THIS FILE IS AUTO_GENERATED. DO NOT EDIT
// UPDATE WITH: ./risedev generate-with-options

#![rustfmt::skip]

use std::collections::{HashMap, HashSet};
use std::sync::LazyLock;
use crate::error::ConnectorError;

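/// Re-exports each source's property type (`$prop_name`) via `pub(super) use`. It is invoked
/// inside `mod source_properties` so the generated maps below can name those types through
/// `use source_properties::*`; `$variant_name` and `$split` are unused here.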
macro_rules! use_source_properties {
    ({ $({ $variant_name:ident, $prop_name:ty, $split:ty }),* }) => {
        $(
            #[allow(unused_imports)]
            pub(super) use $prop_name;
        )*
    };
}

mod source_properties {
    use crate::for_all_sources;
    use crate::source::base::SourceProperties;

    for_all_sources!(use_source_properties);

    /// Implements a function that maps a source name string to the Rust type name of the corresponding property type.
    /// Usage: `impl_source_name_to_prop_type_name!();` will generate:
    /// ```ignore
    /// pub fn source_name_to_prop_type_name(source_name: &str) -> Option<&'static str>
    /// ```
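    ///
    /// Illustrative call (a sketch, assuming `KafkaProperties::SOURCE_NAME` is `"kafka"`):
    /// ```ignore
    /// assert_eq!(
    ///     source_name_to_prop_type_name("kafka"),
    ///     Some(std::any::type_name::<KafkaProperties>()),
    /// );
    /// assert_eq!(source_name_to_prop_type_name("no-such-connector"), None);
    /// ```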
    macro_rules! impl_source_name_to_prop_type_name_inner {
        ({ $({$variant:ident, $prop_name:ty, $split:ty}),* }) => {
            pub fn source_name_to_prop_type_name(source_name: &str) -> Option<&'static str> {
                match source_name {
                    $(
                        <$prop_name>::SOURCE_NAME => Some(std::any::type_name::<$prop_name>()),
                    )*
                    _ => None,
                }
            }
        };
    }

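    /// Wrapper that feeds the full source list from `for_all_sources!` into the inner macro.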
    macro_rules! impl_source_name_to_prop_type_name {
        () => {
            $crate::for_all_sources! { impl_source_name_to_prop_type_name_inner }
        };
    }

    impl_source_name_to_prop_type_name!();
}

mod sink_properties {
    use crate::use_all_sink_configs;
    use crate::sink::Sink;
    use crate::sink::file_sink::fs::FsSink;

    use_all_sink_configs!();

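    /// Generates `sink_name_to_config_type_name`, which maps a sink connector name to the Rust
    /// type name of its config type (the sink-side counterpart of the source macro above).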
    macro_rules! impl_sink_name_to_config_type_name_inner {
        ({ $({ $variant_name:ident, $sink_type:ty, $config_type:ty }),* }) => {
            pub fn sink_name_to_config_type_name(sink_name: &str) -> Option<&'static str> {
                match sink_name {
                    $(
                        <$sink_type>::SINK_NAME => Some(std::any::type_name::<$config_type>()),
                    )*
                    _ => None,
                }
            }
        };
    }

    macro_rules! impl_sink_name_to_config_type_name {
        () => {
            $crate::for_all_sinks! { impl_sink_name_to_config_type_name_inner }
        };
    }

    impl_sink_name_to_config_type_name!();
}

/// Map from source property type name (as returned by `std::any::type_name`) to its set of `allow_alter_on_fly` field names
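///
/// Illustrative lookup (a sketch; assumes `KafkaProperties` is in scope, and note that keys are
/// full Rust type names rather than connector names such as `"kafka"`):
/// ```ignore
/// let kafka_fields = SOURCE_ALLOW_ALTER_ON_FLY_FIELDS
///     .get(std::any::type_name::<KafkaProperties>())
///     .expect("KafkaProperties is registered below");
/// assert!(kafka_fields.contains("properties.sync.call.timeout"));
/// ```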
pub static SOURCE_ALLOW_ALTER_ON_FLY_FIELDS: LazyLock<HashMap<String, HashSet<String>>> = LazyLock::new(|| {
    use source_properties::*;
    let mut map = HashMap::new();
    // CDC source properties (MySQL, Postgres, SQL Server, MongoDB)
    map.try_insert(
        std::any::type_name::<MysqlCdcProperties>().to_owned(),
        ["cdc.source.wait.streaming.start.timeout".to_owned()].into_iter().collect(),
    ).unwrap();
    map.try_insert(
        std::any::type_name::<PostgresCdcProperties>().to_owned(),
        ["cdc.source.wait.streaming.start.timeout".to_owned()].into_iter().collect(),
    ).unwrap();
    map.try_insert(
        std::any::type_name::<SqlServerCdcProperties>().to_owned(),
        ["cdc.source.wait.streaming.start.timeout".to_owned()].into_iter().collect(),
    ).unwrap();
    map.try_insert(
        std::any::type_name::<MongodbCdcProperties>().to_owned(),
        ["cdc.source.wait.streaming.start.timeout".to_owned()].into_iter().collect(),
    ).unwrap();
    // KafkaProperties
    map.try_insert(
        std::any::type_name::<KafkaProperties>().to_owned(),
        [
            "group.id.prefix".to_owned(),
            "properties.sync.call.timeout".to_owned(),
            "properties.security.protocol".to_owned(),
            "properties.ssl.endpoint.identification.algorithm".to_owned(),
            "properties.sasl.mechanism".to_owned(),
            "properties.sasl.username".to_owned(),
            "properties.sasl.password".to_owned(),
            "properties.message.max.bytes".to_owned(),
            "properties.receive.message.max.bytes".to_owned(),
            "properties.statistics.interval.ms".to_owned(),
            "properties.client.id".to_owned(),
            "properties.enable.ssl.certificate.verification".to_owned(),
            "properties.queued.min.messages".to_owned(),
            "properties.queued.max.messages.kbytes".to_owned(),
            "properties.fetch.wait.max.ms".to_owned(),
            "properties.fetch.queue.backoff.ms".to_owned(),
            "properties.fetch.max.bytes".to_owned(),
            "properties.enable.auto.commit".to_owned(),
        ].into_iter().collect(),
    ).unwrap();
    map
});

/// Map from sink config type name (as returned by `std::any::type_name`) to its set of `allow_alter_on_fly` field names
pub static SINK_ALLOW_ALTER_ON_FLY_FIELDS: LazyLock<HashMap<String, HashSet<String>>> = LazyLock::new(|| {
    use sink_properties::*;
    let mut map = HashMap::new();
    // ClickHouseConfig
    map.try_insert(
        std::any::type_name::<ClickHouseConfig>().to_owned(),
        [
            "commit_checkpoint_interval".to_owned(),
        ].into_iter().collect(),
    ).unwrap();
    // DeltaLakeConfig
    map.try_insert(
        std::any::type_name::<DeltaLakeConfig>().to_owned(),
        [
            "commit_checkpoint_interval".to_owned(),
        ].into_iter().collect(),
    ).unwrap();
    // IcebergConfig
    map.try_insert(
        std::any::type_name::<IcebergConfig>().to_owned(),
        [
            "commit_checkpoint_interval".to_owned(),
            "enable_compaction".to_owned(),
            "compaction_interval_sec".to_owned(),
            "enable_snapshot_expiration".to_owned(),
            "snapshot_expiration_max_age_millis".to_owned(),
            "snapshot_expiration_retain_last".to_owned(),
            "snapshot_expiration_clear_expired_files".to_owned(),
            "snapshot_expiration_clear_expired_meta_data".to_owned(),
        ].into_iter().collect(),
    ).unwrap();
    // KafkaConfig
    map.try_insert(
        std::any::type_name::<KafkaConfig>().to_owned(),
        [
            "properties.sync.call.timeout".to_owned(),
            "properties.security.protocol".to_owned(),
            "properties.ssl.endpoint.identification.algorithm".to_owned(),
            "properties.sasl.mechanism".to_owned(),
            "properties.sasl.username".to_owned(),
            "properties.sasl.password".to_owned(),
            "properties.message.max.bytes".to_owned(),
            "properties.receive.message.max.bytes".to_owned(),
            "properties.statistics.interval.ms".to_owned(),
            "properties.client.id".to_owned(),
            "properties.enable.ssl.certificate.verification".to_owned(),
            "properties.allow.auto.create.topics".to_owned(),
            "properties.queue.buffering.max.messages".to_owned(),
            "properties.queue.buffering.max.kbytes".to_owned(),
            "properties.queue.buffering.max.ms".to_owned(),
            "properties.enable.idempotence".to_owned(),
            "properties.message.send.max.retries".to_owned(),
            "properties.retry.backoff.ms".to_owned(),
            "properties.batch.num.messages".to_owned(),
            "properties.batch.size".to_owned(),
            "properties.message.timeout.ms".to_owned(),
            "properties.max.in.flight.requests.per.connection".to_owned(),
            "properties.request.required.acks".to_owned(),
        ].into_iter().collect(),
    ).unwrap();
    // SnowflakeV2Config
    map.try_insert(
        std::any::type_name::<SnowflakeV2Config>().to_owned(),
        [
            "commit_checkpoint_interval".to_owned(),
        ].into_iter().collect(),
    ).unwrap();
    // StarrocksConfig
    map.try_insert(
        std::any::type_name::<StarrocksConfig>().to_owned(),
        [
            "commit_checkpoint_interval".to_owned(),
        ].into_iter().collect(),
    ).unwrap();
    map
});

/// Map from connection type name (as returned by `std::any::type_name`) to its set of `allow_alter_on_fly` field names
pub static CONNECTION_ALLOW_ALTER_ON_FLY_FIELDS: LazyLock<HashMap<String, HashSet<String>>> = LazyLock::new(|| {
    use crate::connector_common::*;
    let mut map = HashMap::new();
    // KafkaConnection
    map.try_insert(
        std::any::type_name::<KafkaConnection>().to_owned(),
        [
            "properties.security.protocol".to_owned(),
            "properties.ssl.endpoint.identification.algorithm".to_owned(),
            "properties.sasl.mechanism".to_owned(),
            "properties.sasl.username".to_owned(),
            "properties.sasl.password".to_owned(),
        ].into_iter().collect(),
    ).unwrap();
    map
});

/// Get the property type names of all source connectors that have `allow_alter_on_fly` fields
pub fn get_source_connectors_with_allow_alter_on_fly_fields() -> Vec<&'static str> {
    SOURCE_ALLOW_ALTER_ON_FLY_FIELDS.keys().map(|s| s.as_str()).collect()
}

/// Get the config type names of all sink connectors that have `allow_alter_on_fly` fields
pub fn get_sink_connectors_with_allow_alter_on_fly_fields() -> Vec<&'static str> {
    SINK_ALLOW_ALTER_ON_FLY_FIELDS.keys().map(|s| s.as_str()).collect()
}

/// Get the type names of all connections that have `allow_alter_on_fly` fields
pub fn get_connection_names_with_allow_alter_on_fly_fields() -> Vec<&'static str> {
    CONNECTION_ALLOW_ALTER_ON_FLY_FIELDS.keys().map(|s| s.as_str()).collect()
}

/// Checks if all given fields are allowed to be altered on the fly for the specified source connector.
/// Returns Ok(()) if all fields are allowed, otherwise returns a `ConnectorError`.
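///
/// Illustrative usage (a sketch with hypothetical connector and field names):
/// ```ignore
/// check_source_allow_alter_on_fly_fields("kafka", &["group.id.prefix".to_owned()])?;
/// ```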
pub fn check_source_allow_alter_on_fly_fields(
    connector_name: &str,
    fields: &[String],
) -> crate::error::ConnectorResult<()> {
    // Convert connector name to the type name key
    let Some(type_name) = source_properties::source_name_to_prop_type_name(connector_name) else {
        return Err(ConnectorError::from(anyhow::anyhow!(
            "Unknown source connector: {connector_name}"
        )));
    };
    let Some(allowed_fields) = SOURCE_ALLOW_ALTER_ON_FLY_FIELDS.get(type_name) else {
        return Err(ConnectorError::from(anyhow::anyhow!(
            "No allow_alter_on_fly fields registered for connector: {connector_name}"
        )));
    };
    for field in fields {
        if !allowed_fields.contains(field) {
            return Err(ConnectorError::from(anyhow::anyhow!(
                "Field '{field}' is not allowed to be altered on the fly for connector: {connector_name}"
            )));
        }
    }
    Ok(())
}

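/// Checks if all given fields are allowed to be altered on the fly for the specified connection.
/// Returns Ok(()) if all fields are allowed, otherwise returns a `ConnectorError`.
///
/// Illustrative usage (a sketch with hypothetical connection and field names):
/// ```ignore
/// check_connection_allow_alter_on_fly_fields("kafka", &["properties.sasl.username".to_owned()])?;
/// ```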
pub fn check_connection_allow_alter_on_fly_fields(
    connection_name: &str,
    fields: &[String],
) -> crate::error::ConnectorResult<()> {
    use crate::source::connection_name_to_prop_type_name;

    // Convert connection name to the type name key
    let Some(type_name) = connection_name_to_prop_type_name(connection_name) else {
        return Err(ConnectorError::from(anyhow::anyhow!(
            "Unknown connection: {connection_name}"
        )));
    };
    let Some(allowed_fields) = CONNECTION_ALLOW_ALTER_ON_FLY_FIELDS.get(type_name) else {
        return Err(ConnectorError::from(anyhow::anyhow!(
            "No allow_alter_on_fly fields registered for connection: {connection_name}"
        )));
    };
    for field in fields {
        if !allowed_fields.contains(field) {
            return Err(ConnectorError::from(anyhow::anyhow!(
                "Field '{field}' is not allowed to be altered on the fly for connection: {connection_name}"
            )));
        }
    }
    Ok(())
}

/// Checks if all given fields are allowed to be altered on the fly for the specified sink connector.
/// Returns Ok(()) if all fields are allowed, otherwise returns a `ConnectorError`.
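///
/// Illustrative usage (a sketch with hypothetical sink and field names):
/// ```ignore
/// check_sink_allow_alter_on_fly_fields("iceberg", &["commit_checkpoint_interval".to_owned()])?;
/// ```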
pub fn check_sink_allow_alter_on_fly_fields(
    sink_name: &str,
    fields: &[String],
) -> crate::error::ConnectorResult<()> {
    // Convert sink name to the type name key
    let Some(type_name) = sink_properties::sink_name_to_config_type_name(sink_name) else {
        return Err(ConnectorError::from(anyhow::anyhow!(
            "Unknown sink connector: {sink_name}"
        )));
    };
    let Some(allowed_fields) = SINK_ALLOW_ALTER_ON_FLY_FIELDS.get(type_name) else {
        return Err(ConnectorError::from(anyhow::anyhow!(
            "No allow_alter_on_fly fields registered for sink: {sink_name}"
        )));
    };
    for field in fields {
        if !allowed_fields.contains(field) {
            return Err(ConnectorError::from(anyhow::anyhow!(
                "Field '{field}' is not allowed to be altered on the fly for sink: {sink_name}"
            )));
        }
    }
    Ok(())
}