1#![rustfmt::skip]
26
27use std::collections::{HashMap, HashSet};
28use std::sync::LazyLock;
29use crate::error::ConnectorError;
30use crate::sink::remote::JdbcSink;
31use crate::sink::Sink;
32
/// Callback macro for `for_all_sources!`: for each `{variant, PropType, Split}`
/// triple in the source list it emits a `pub(super) use` of the property type,
/// so the enclosing module's children can refer to every source's property
/// struct by its bare name (consumed below via `use source_properties::*`).
macro_rules! use_source_properties {
    ({ $({ $variant_name:ident, $prop_name:ty, $split:ty }),* }) => {
        $(
            // Some property types may be otherwise unused in this module.
            #[allow(unused_imports)]
            pub(super) use $prop_name;
        )*
    };
}
41
/// Brings every source's property type into scope and generates
/// `source_name_to_prop_type_name`, which maps a source connector name
/// (e.g. `KafkaProperties::SOURCE_NAME`) to the `std::any::type_name` of its
/// property struct — the key format used by `SOURCE_ALLOW_ALTER_ON_FLY_FIELDS`.
mod source_properties {
    use crate::for_all_sources;
    // Needed for the `SOURCE_NAME` associated constant used in the match arms.
    use crate::source::base::SourceProperties;

    // Re-export every property type via the `use_source_properties!` callback.
    for_all_sources!(use_source_properties);

    // Inner callback: expanded by `for_all_sources!` with the full source list,
    // producing one match arm per source.
    macro_rules! impl_source_name_to_prop_type_name_inner {
        ({ $({$variant:ident, $prop_name:ty, $split:ty}),* }) => {
            // Returns the property-struct type name for a known source name,
            // or `None` for unknown names.
            pub fn source_name_to_prop_type_name(source_name: &str) -> Option<&'static str> {
                match source_name {
                    $(
                        <$prop_name>::SOURCE_NAME => Some(std::any::type_name::<$prop_name>()),
                    )*
                    _ => None,
                }
            }
        };
    }

    // Wrapper so the inner callback is driven by the canonical source list.
    macro_rules! impl_source_name_to_prop_type_name {
        () => {
            $crate::for_all_sources! { impl_source_name_to_prop_type_name_inner }
        };
    }

    impl_source_name_to_prop_type_name!();
}
74
/// Brings every sink's config type into scope and generates
/// `sink_name_to_config_type_name`, which maps a sink connector name
/// (`<Sink>::SINK_NAME`) to the `std::any::type_name` of its config struct —
/// the key format used by `SINK_ALLOW_ALTER_ON_FLY_FIELDS`.
mod sink_properties {
    use crate::use_all_sink_configs;
    // Needed for the `SINK_NAME` associated constant used in the match arms.
    use crate::sink::Sink;
    // NOTE(review): presumably referenced by the `use_all_sink_configs!`
    // expansion — confirm before removing.
    use crate::sink::file_sink::fs::FsSink;

    // Re-export every sink config type.
    use_all_sink_configs!();

    // Inner callback: expanded by `for_all_sinks!` with the full sink list,
    // producing one match arm per sink.
    macro_rules! impl_sink_name_to_config_type_name_inner {
        ({ $({ $variant_name:ident, $sink_type:ty, $config_type:ty }),* }) => {
            // Returns the config-struct type name for a known sink name,
            // or `None` for unknown names.
            pub fn sink_name_to_config_type_name(sink_name: &str) -> Option<&'static str> {
                match sink_name {
                    $(
                        <$sink_type>::SINK_NAME => Some(std::any::type_name::<$config_type>()),
                    )*
                    _ => None,
                }
            }
        };
    }

    // Wrapper so the inner callback is driven by the canonical sink list.
    macro_rules! impl_sink_name_to_config_type_name {
        () => {
            $crate::for_all_sinks! { impl_sink_name_to_config_type_name_inner }
        };
    }

    impl_sink_name_to_config_type_name!();
}
103
104pub static SOURCE_ALLOW_ALTER_ON_FLY_FIELDS: LazyLock<HashMap<String, HashSet<String>>> = LazyLock::new(|| {
106 use source_properties::*;
107 let mut map = HashMap::new();
108 map.try_insert(
110 std::any::type_name::<MysqlCdcProperties>().to_owned(),
111 [
112 "cdc.source.wait.streaming.start.timeout".to_owned(),
113 "debezium.max.queue.size".to_owned(),
114 "debezium.queue.memory.ratio".to_owned(),
115 ].into_iter().collect(),
116 ).unwrap();
117 map.try_insert(
118 std::any::type_name::<PostgresCdcProperties>().to_owned(),
119 [
120 "cdc.source.wait.streaming.start.timeout".to_owned(),
121 "debezium.max.queue.size".to_owned(),
122 "debezium.queue.memory.ratio".to_owned(),
123 ].into_iter().collect(),
124 ).unwrap();
125 map.try_insert(
126 std::any::type_name::<SqlServerCdcProperties>().to_owned(),
127 [
128 "cdc.source.wait.streaming.start.timeout".to_owned(),
129 "debezium.max.queue.size".to_owned(),
130 "debezium.queue.memory.ratio".to_owned(),
131
132 ].into_iter().collect(),
133 ).unwrap();
134
135 map.try_insert(
136 std::any::type_name::<MongodbCdcProperties>().to_owned(),
137 [
138 "cdc.source.wait.streaming.start.timeout".to_owned(),
139 "debezium.max.queue.size".to_owned(),
140 "debezium.queue.memory.ratio".to_owned(),
141 ].into_iter().collect(),
142 ).unwrap();
143 map.try_insert(
145 std::any::type_name::<KafkaProperties>().to_owned(),
146 [
147 "group.id.prefix".to_owned(),
148 "properties.sync.call.timeout".to_owned(),
149 "properties.security.protocol".to_owned(),
150 "properties.ssl.endpoint.identification.algorithm".to_owned(),
151 "properties.sasl.mechanism".to_owned(),
152 "properties.sasl.username".to_owned(),
153 "properties.sasl.password".to_owned(),
154 "properties.message.max.bytes".to_owned(),
155 "properties.receive.message.max.bytes".to_owned(),
156 "properties.statistics.interval.ms".to_owned(),
157 "properties.client.id".to_owned(),
158 "properties.enable.ssl.certificate.verification".to_owned(),
159 "properties.queued.min.messages".to_owned(),
160 "properties.queued.max.messages.kbytes".to_owned(),
161 "properties.fetch.wait.max.ms".to_owned(),
162 "properties.fetch.queue.backoff.ms".to_owned(),
163 "properties.fetch.max.bytes".to_owned(),
164 "properties.enable.auto.commit".to_owned(),
165 ].into_iter().collect(),
166 ).unwrap();
167 map.try_insert(
169 std::any::type_name::<PubsubProperties>().to_owned(),
170 [
171 "pubsub.ack_deadline_seconds".to_owned(),
172 ].into_iter().collect(),
173 ).unwrap();
174 map
175});
176
177pub static SINK_ALLOW_ALTER_ON_FLY_FIELDS: LazyLock<HashMap<String, HashSet<String>>> = LazyLock::new(|| {
179 use sink_properties::*;
180 let mut map = HashMap::new();
181 map.try_insert(
183 std::any::type_name::<ClickHouseConfig>().to_owned(),
184 [
185 "commit_checkpoint_interval".to_owned(),
186 ].into_iter().collect(),
187 ).unwrap();
188 map.try_insert(
190 std::any::type_name::<DeltaLakeConfig>().to_owned(),
191 [
192 "commit_checkpoint_interval".to_owned(),
193 ].into_iter().collect(),
194 ).unwrap();
195 map.try_insert(
197 std::any::type_name::<DorisConfig>().to_owned(),
198 [
199 "doris.stream_load.http.timeout.ms".to_owned(),
200 ].into_iter().collect(),
201 ).unwrap();
202 map.try_insert(
204 std::any::type_name::<IcebergConfig>().to_owned(),
205 [
206 "commit_checkpoint_interval".to_owned(),
207 "commit_checkpoint_size_threshold_mb".to_owned(),
208 "enable_compaction".to_owned(),
209 "compaction_interval_sec".to_owned(),
210 "enable_snapshot_expiration".to_owned(),
211 "snapshot_expiration_max_age_millis".to_owned(),
212 "snapshot_expiration_retain_last".to_owned(),
213 "snapshot_expiration_clear_expired_files".to_owned(),
214 "snapshot_expiration_clear_expired_meta_data".to_owned(),
215 "compaction.max_snapshots_num".to_owned(),
216 "compaction.small_files_threshold_mb".to_owned(),
217 "compaction.delete_files_count_threshold".to_owned(),
218 "compaction.trigger_snapshot_count".to_owned(),
219 "compaction.target_file_size_mb".to_owned(),
220 "compaction.type".to_owned(),
221 "compaction.write_parquet_compression".to_owned(),
222 "compaction.write_parquet_max_row_group_rows".to_owned(),
223 ].into_iter().collect(),
224 ).unwrap();
225 map.try_insert(
227 std::any::type_name::<KafkaConfig>().to_owned(),
228 [
229 "properties.sync.call.timeout".to_owned(),
230 "properties.security.protocol".to_owned(),
231 "properties.ssl.endpoint.identification.algorithm".to_owned(),
232 "properties.sasl.mechanism".to_owned(),
233 "properties.sasl.username".to_owned(),
234 "properties.sasl.password".to_owned(),
235 "properties.message.max.bytes".to_owned(),
236 "properties.receive.message.max.bytes".to_owned(),
237 "properties.statistics.interval.ms".to_owned(),
238 "properties.client.id".to_owned(),
239 "properties.enable.ssl.certificate.verification".to_owned(),
240 "properties.allow.auto.create.topics".to_owned(),
241 "properties.queue.buffering.max.messages".to_owned(),
242 "properties.queue.buffering.max.kbytes".to_owned(),
243 "properties.queue.buffering.max.ms".to_owned(),
244 "properties.enable.idempotence".to_owned(),
245 "properties.message.send.max.retries".to_owned(),
246 "properties.retry.backoff.ms".to_owned(),
247 "properties.batch.num.messages".to_owned(),
248 "properties.batch.size".to_owned(),
249 "properties.message.timeout.ms".to_owned(),
250 "properties.max.in.flight.requests.per.connection".to_owned(),
251 "properties.request.required.acks".to_owned(),
252 ].into_iter().collect(),
253 ).unwrap();
254 map.try_insert(
256 std::any::type_name::<SnowflakeV2Config>().to_owned(),
257 [
258 "commit_checkpoint_interval".to_owned(),
259 ].into_iter().collect(),
260 ).unwrap();
261 map.try_insert(
263 std::any::type_name::<StarrocksConfig>().to_owned(),
264 [
265 "starrocks.stream_load.http.timeout.ms".to_owned(),
266 "commit_checkpoint_interval".to_owned(),
267 ].into_iter().collect(),
268 ).unwrap();
269 map.try_insert(
271 JdbcSink::SINK_NAME.to_owned(),
272 [
273 "jdbc.url".to_owned(),
274 "user".to_owned(),
275 "password".to_owned(),
276 ].into_iter().collect(),
277 ).unwrap();
278 map
279});
280
281pub static CONNECTION_ALLOW_ALTER_ON_FLY_FIELDS: LazyLock<HashMap<String, HashSet<String>>> = LazyLock::new(|| {
283 use crate::connector_common::*;
284 let mut map = HashMap::new();
285 map.try_insert(
287 std::any::type_name::<KafkaConnection>().to_owned(),
288 [
289 "properties.security.protocol".to_owned(),
290 "properties.ssl.endpoint.identification.algorithm".to_owned(),
291 "properties.sasl.mechanism".to_owned(),
292 "properties.sasl.username".to_owned(),
293 "properties.sasl.password".to_owned(),
294 ].into_iter().collect(),
295 ).unwrap();
296 map.try_insert(
298 JdbcSink::SINK_NAME.to_owned(),
299 [
300 "jdbc.url".to_owned(),
301 "user".to_owned(),
302 "password".to_owned(),
303 ].into_iter().collect(),
304 ).unwrap();
305 map
306});
307
308pub fn get_source_connectors_with_allow_alter_on_fly_fields() -> Vec<&'static str> {
310 SOURCE_ALLOW_ALTER_ON_FLY_FIELDS.keys().map(|s| s.as_str()).collect()
311}
312
313pub fn get_sink_connectors_with_allow_alter_on_fly_fields() -> Vec<&'static str> {
315 SINK_ALLOW_ALTER_ON_FLY_FIELDS.keys().map(|s| s.as_str()).collect()
316}
317
318pub fn get_connection_names_with_allow_alter_on_fly_fields() -> Vec<&'static str> {
320 CONNECTION_ALLOW_ALTER_ON_FLY_FIELDS.keys().map(|s| s.as_str()).collect()
321}
322
323pub fn check_source_allow_alter_on_fly_fields(
326 connector_name: &str,
327 fields: &[String],
328) -> crate::error::ConnectorResult<()> {
329 let Some(type_name) = source_properties::source_name_to_prop_type_name(connector_name) else {
331 return Err(ConnectorError::from(anyhow::anyhow!(
332 "Unknown source connector: {connector_name}"
333 )));
334 };
335 let Some(allowed_fields) = SOURCE_ALLOW_ALTER_ON_FLY_FIELDS.get(type_name) else {
336 return Err(ConnectorError::from(anyhow::anyhow!(
337 "No allow_alter_on_fly fields registered for connector: {connector_name}"
338 )));
339 };
340 for field in fields {
341 if !allowed_fields.contains(field) {
342 return Err(ConnectorError::from(anyhow::anyhow!(
343 "Field '{field}' is not allowed to be altered on the fly for connector: {connector_name}"
344 )));
345 }
346 }
347 Ok(())
348}
349
350pub fn check_connection_allow_alter_on_fly_fields(
351 connection_name: &str,
352 fields: &[String],
353) -> crate::error::ConnectorResult<()> {
354 use crate::source::connection_name_to_prop_type_name;
355
356 let Some(type_name) = connection_name_to_prop_type_name(connection_name) else {
358 return Err(ConnectorError::from(anyhow::anyhow!(
359 "Unknown connection: {connection_name}"
360 )));
361 };
362 let Some(allowed_fields) = CONNECTION_ALLOW_ALTER_ON_FLY_FIELDS.get(type_name) else {
363 return Err(ConnectorError::from(anyhow::anyhow!(
364 "No allow_alter_on_fly fields registered for connection: {connection_name}"
365 )));
366 };
367 for field in fields {
368 if !allowed_fields.contains(field) {
369 return Err(ConnectorError::from(anyhow::anyhow!(
370 "Field '{field}' is not allowed to be altered on the fly for connection: {connection_name}"
371 )));
372 }
373 }
374 Ok(())
375}
376
377pub fn check_sink_allow_alter_on_fly_fields(
380 sink_name: &str,
381 fields: &[String],
382) -> crate::error::ConnectorResult<()> {
383 let allowed_fields = if sink_name == JdbcSink::SINK_NAME {
389 CONNECTION_ALLOW_ALTER_ON_FLY_FIELDS.get(JdbcSink::SINK_NAME)
390 } else {
391 let Some(type_name) = sink_properties::sink_name_to_config_type_name(sink_name) else {
393 return Err(ConnectorError::from(anyhow::anyhow!(
394 "Unknown sink connector: {sink_name}"
395 )));
396 };
397 SINK_ALLOW_ALTER_ON_FLY_FIELDS.get(type_name)
398 };
399 let Some(allowed_fields) = allowed_fields else {
400 return Err(ConnectorError::from(anyhow::anyhow!(
401 "No allow_alter_on_fly fields registered for sink: {sink_name}"
402 )));
403 };
404 for field in fields {
405 if !allowed_fields.contains(field) {
406 return Err(ConnectorError::from(anyhow::anyhow!(
407 "Field '{field}' is not allowed to be altered on the fly for sink: {sink_name}"
408 )));
409 }
410 }
411 Ok(())
412}