1#![rustfmt::skip]
26
27use std::collections::{HashMap, HashSet};
28use std::sync::LazyLock;
29use crate::error::ConnectorError;
30use crate::sink::remote::JdbcSink;
31use crate::sink::Sink;
32
/// Callback macro for `for_all_sources!`: re-exports every source's
/// properties type (e.g. `KafkaProperties`) into the invoking module.
///
/// The `$variant` and `$split_ty` captures exist only to match the token
/// shape emitted by `for_all_sources!`; only the properties type is used.
macro_rules! use_source_properties {
    ({ $({ $variant:ident, $props:ty, $split_ty:ty }),* }) => {
        $(
            #[allow(unused_imports)]
            pub(super) use $props;
        )*
    };
}
41
42mod source_properties {
43 use crate::for_all_sources;
44 use crate::source::base::SourceProperties;
45
46 for_all_sources!(use_source_properties);
47
48 macro_rules! impl_source_name_to_prop_type_name_inner {
54 ({ $({$variant:ident, $prop_name:ty, $split:ty}),* }) => {
55 pub fn source_name_to_prop_type_name(source_name: &str) -> Option<&'static str> {
56 match source_name {
57 $(
58 <$prop_name>::SOURCE_NAME => Some(std::any::type_name::<$prop_name>()),
59 )*
60 _ => None,
61 }
62 }
63 };
64 }
65
66 macro_rules! impl_source_name_to_prop_type_name {
67 () => {
68 $crate::for_all_sources! { impl_source_name_to_prop_type_name_inner }
69 };
70 }
71
72 impl_source_name_to_prop_type_name!();
73}
74
75mod sink_properties {
76 use crate::use_all_sink_configs;
77 use crate::sink::Sink;
78 use crate::sink::file_sink::fs::FsSink;
79
80 use_all_sink_configs!();
81
82 macro_rules! impl_sink_name_to_config_type_name_inner {
83 ({ $({ $variant_name:ident, $sink_type:ty, $config_type:ty }),* }) => {
84 pub fn sink_name_to_config_type_name(sink_name: &str) -> Option<&'static str> {
85 match sink_name {
86 $(
87 <$sink_type>::SINK_NAME => Some(std::any::type_name::<$config_type>()),
88 )*
89 _ => None,
90 }
91 }
92 };
93 }
94
95 macro_rules! impl_sink_name_to_config_type_name {
96 () => {
97 $crate::for_all_sinks! { impl_sink_name_to_config_type_name_inner }
98 };
99 }
100
101 impl_sink_name_to_config_type_name!();
102}
103
104pub static SOURCE_ALLOW_ALTER_ON_FLY_FIELDS: LazyLock<HashMap<String, HashSet<String>>> = LazyLock::new(|| {
106 use source_properties::*;
107 let mut map = HashMap::new();
108 map.try_insert(
110 std::any::type_name::<MysqlCdcProperties>().to_owned(),
111 [
112 "cdc.source.wait.streaming.start.timeout".to_owned(),
113 "debezium.max.queue.size".to_owned(),
114 "debezium.queue.memory.ratio".to_owned(),
115 ].into_iter().collect(),
116 ).unwrap();
117 map.try_insert(
118 std::any::type_name::<PostgresCdcProperties>().to_owned(),
119 [
120 "cdc.source.wait.streaming.start.timeout".to_owned(),
121 "debezium.max.queue.size".to_owned(),
122 "debezium.queue.memory.ratio".to_owned(),
123 ].into_iter().collect(),
124 ).unwrap();
125 map.try_insert(
126 std::any::type_name::<SqlServerCdcProperties>().to_owned(),
127 [
128 "cdc.source.wait.streaming.start.timeout".to_owned(),
129 "debezium.max.queue.size".to_owned(),
130 "debezium.queue.memory.ratio".to_owned(),
131
132 ].into_iter().collect(),
133 ).unwrap();
134
135 map.try_insert(
136 std::any::type_name::<MongodbCdcProperties>().to_owned(),
137 [
138 "cdc.source.wait.streaming.start.timeout".to_owned(),
139 "debezium.max.queue.size".to_owned(),
140 "debezium.queue.memory.ratio".to_owned(),
141 ].into_iter().collect(),
142 ).unwrap();
143 map.try_insert(
145 std::any::type_name::<KafkaProperties>().to_owned(),
146 [
147 "group.id.prefix".to_owned(),
148 "properties.sync.call.timeout".to_owned(),
149 "properties.security.protocol".to_owned(),
150 "properties.ssl.endpoint.identification.algorithm".to_owned(),
151 "properties.sasl.mechanism".to_owned(),
152 "properties.sasl.username".to_owned(),
153 "properties.sasl.password".to_owned(),
154 "properties.message.max.bytes".to_owned(),
155 "properties.receive.message.max.bytes".to_owned(),
156 "properties.statistics.interval.ms".to_owned(),
157 "properties.client.id".to_owned(),
158 "properties.enable.ssl.certificate.verification".to_owned(),
159 "properties.queued.min.messages".to_owned(),
160 "properties.queued.max.messages.kbytes".to_owned(),
161 "properties.fetch.wait.max.ms".to_owned(),
162 "properties.fetch.queue.backoff.ms".to_owned(),
163 "properties.fetch.max.bytes".to_owned(),
164 "properties.enable.auto.commit".to_owned(),
165 ].into_iter().collect(),
166 ).unwrap();
167 map
168});
169
170pub static SINK_ALLOW_ALTER_ON_FLY_FIELDS: LazyLock<HashMap<String, HashSet<String>>> = LazyLock::new(|| {
172 use sink_properties::*;
173 let mut map = HashMap::new();
174 map.try_insert(
176 std::any::type_name::<ClickHouseConfig>().to_owned(),
177 [
178 "commit_checkpoint_interval".to_owned(),
179 ].into_iter().collect(),
180 ).unwrap();
181 map.try_insert(
183 std::any::type_name::<DeltaLakeConfig>().to_owned(),
184 [
185 "commit_checkpoint_interval".to_owned(),
186 ].into_iter().collect(),
187 ).unwrap();
188 map.try_insert(
190 std::any::type_name::<DorisConfig>().to_owned(),
191 [
192 "doris.stream_load.http.timeout.ms".to_owned(),
193 ].into_iter().collect(),
194 ).unwrap();
195 map.try_insert(
197 std::any::type_name::<IcebergConfig>().to_owned(),
198 [
199 "commit_checkpoint_interval".to_owned(),
200 "enable_compaction".to_owned(),
201 "compaction_interval_sec".to_owned(),
202 "enable_snapshot_expiration".to_owned(),
203 "snapshot_expiration_max_age_millis".to_owned(),
204 "snapshot_expiration_retain_last".to_owned(),
205 "snapshot_expiration_clear_expired_files".to_owned(),
206 "snapshot_expiration_clear_expired_meta_data".to_owned(),
207 "compaction.max_snapshots_num".to_owned(),
208 "compaction.small_files_threshold_mb".to_owned(),
209 "compaction.delete_files_count_threshold".to_owned(),
210 "compaction.trigger_snapshot_count".to_owned(),
211 "compaction.target_file_size_mb".to_owned(),
212 "compaction.type".to_owned(),
213 "compaction.write_parquet_compression".to_owned(),
214 "compaction.write_parquet_max_row_group_rows".to_owned(),
215 ].into_iter().collect(),
216 ).unwrap();
217 map.try_insert(
219 std::any::type_name::<KafkaConfig>().to_owned(),
220 [
221 "properties.sync.call.timeout".to_owned(),
222 "properties.security.protocol".to_owned(),
223 "properties.ssl.endpoint.identification.algorithm".to_owned(),
224 "properties.sasl.mechanism".to_owned(),
225 "properties.sasl.username".to_owned(),
226 "properties.sasl.password".to_owned(),
227 "properties.message.max.bytes".to_owned(),
228 "properties.receive.message.max.bytes".to_owned(),
229 "properties.statistics.interval.ms".to_owned(),
230 "properties.client.id".to_owned(),
231 "properties.enable.ssl.certificate.verification".to_owned(),
232 "properties.allow.auto.create.topics".to_owned(),
233 "properties.queue.buffering.max.messages".to_owned(),
234 "properties.queue.buffering.max.kbytes".to_owned(),
235 "properties.queue.buffering.max.ms".to_owned(),
236 "properties.enable.idempotence".to_owned(),
237 "properties.message.send.max.retries".to_owned(),
238 "properties.retry.backoff.ms".to_owned(),
239 "properties.batch.num.messages".to_owned(),
240 "properties.batch.size".to_owned(),
241 "properties.message.timeout.ms".to_owned(),
242 "properties.max.in.flight.requests.per.connection".to_owned(),
243 "properties.request.required.acks".to_owned(),
244 ].into_iter().collect(),
245 ).unwrap();
246 map.try_insert(
248 std::any::type_name::<SnowflakeV2Config>().to_owned(),
249 [
250 "commit_checkpoint_interval".to_owned(),
251 ].into_iter().collect(),
252 ).unwrap();
253 map.try_insert(
255 std::any::type_name::<StarrocksConfig>().to_owned(),
256 [
257 "starrocks.stream_load.http.timeout.ms".to_owned(),
258 "commit_checkpoint_interval".to_owned(),
259 ].into_iter().collect(),
260 ).unwrap();
261 map.try_insert(
263 JdbcSink::SINK_NAME.to_owned(),
264 [
265 "jdbc.url".to_owned(),
266 "user".to_owned(),
267 "password".to_owned(),
268 ].into_iter().collect(),
269 ).unwrap();
270 map
271});
272
273pub static CONNECTION_ALLOW_ALTER_ON_FLY_FIELDS: LazyLock<HashMap<String, HashSet<String>>> = LazyLock::new(|| {
275 use crate::connector_common::*;
276 let mut map = HashMap::new();
277 map.try_insert(
279 std::any::type_name::<KafkaConnection>().to_owned(),
280 [
281 "properties.security.protocol".to_owned(),
282 "properties.ssl.endpoint.identification.algorithm".to_owned(),
283 "properties.sasl.mechanism".to_owned(),
284 "properties.sasl.username".to_owned(),
285 "properties.sasl.password".to_owned(),
286 ].into_iter().collect(),
287 ).unwrap();
288 map.try_insert(
290 JdbcSink::SINK_NAME.to_owned(),
291 [
292 "jdbc.url".to_owned(),
293 "user".to_owned(),
294 "password".to_owned(),
295 ].into_iter().collect(),
296 ).unwrap();
297 map
298});
299
300pub fn get_source_connectors_with_allow_alter_on_fly_fields() -> Vec<&'static str> {
302 SOURCE_ALLOW_ALTER_ON_FLY_FIELDS.keys().map(|s| s.as_str()).collect()
303}
304
305pub fn get_sink_connectors_with_allow_alter_on_fly_fields() -> Vec<&'static str> {
307 SINK_ALLOW_ALTER_ON_FLY_FIELDS.keys().map(|s| s.as_str()).collect()
308}
309
310pub fn get_connection_names_with_allow_alter_on_fly_fields() -> Vec<&'static str> {
312 CONNECTION_ALLOW_ALTER_ON_FLY_FIELDS.keys().map(|s| s.as_str()).collect()
313}
314
315pub fn check_source_allow_alter_on_fly_fields(
318 connector_name: &str,
319 fields: &[String],
320) -> crate::error::ConnectorResult<()> {
321 let Some(type_name) = source_properties::source_name_to_prop_type_name(connector_name) else {
323 return Err(ConnectorError::from(anyhow::anyhow!(
324 "Unknown source connector: {connector_name}"
325 )));
326 };
327 let Some(allowed_fields) = SOURCE_ALLOW_ALTER_ON_FLY_FIELDS.get(type_name) else {
328 return Err(ConnectorError::from(anyhow::anyhow!(
329 "No allow_alter_on_fly fields registered for connector: {connector_name}"
330 )));
331 };
332 for field in fields {
333 if !allowed_fields.contains(field) {
334 return Err(ConnectorError::from(anyhow::anyhow!(
335 "Field '{field}' is not allowed to be altered on the fly for connector: {connector_name}"
336 )));
337 }
338 }
339 Ok(())
340}
341
342pub fn check_connection_allow_alter_on_fly_fields(
343 connection_name: &str,
344 fields: &[String],
345) -> crate::error::ConnectorResult<()> {
346 use crate::source::connection_name_to_prop_type_name;
347
348 let Some(type_name) = connection_name_to_prop_type_name(connection_name) else {
350 return Err(ConnectorError::from(anyhow::anyhow!(
351 "Unknown connection: {connection_name}"
352 )));
353 };
354 let Some(allowed_fields) = CONNECTION_ALLOW_ALTER_ON_FLY_FIELDS.get(type_name) else {
355 return Err(ConnectorError::from(anyhow::anyhow!(
356 "No allow_alter_on_fly fields registered for connection: {connection_name}"
357 )));
358 };
359 for field in fields {
360 if !allowed_fields.contains(field) {
361 return Err(ConnectorError::from(anyhow::anyhow!(
362 "Field '{field}' is not allowed to be altered on the fly for connection: {connection_name}"
363 )));
364 }
365 }
366 Ok(())
367}
368
369pub fn check_sink_allow_alter_on_fly_fields(
372 sink_name: &str,
373 fields: &[String],
374) -> crate::error::ConnectorResult<()> {
375 let allowed_fields = if sink_name == JdbcSink::SINK_NAME {
381 CONNECTION_ALLOW_ALTER_ON_FLY_FIELDS.get(JdbcSink::SINK_NAME)
382 } else {
383 let Some(type_name) = sink_properties::sink_name_to_config_type_name(sink_name) else {
385 return Err(ConnectorError::from(anyhow::anyhow!(
386 "Unknown sink connector: {sink_name}"
387 )));
388 };
389 SINK_ALLOW_ALTER_ON_FLY_FIELDS.get(type_name)
390 };
391 let Some(allowed_fields) = allowed_fields else {
392 return Err(ConnectorError::from(anyhow::anyhow!(
393 "No allow_alter_on_fly fields registered for sink: {sink_name}"
394 )));
395 };
396 for field in fields {
397 if !allowed_fields.contains(field) {
398 return Err(ConnectorError::from(anyhow::anyhow!(
399 "Field '{field}' is not allowed to be altered on the fly for sink: {sink_name}"
400 )));
401 }
402 }
403 Ok(())
404}