risingwave_connector/connector_common/iceberg/mod.rs

// Copyright 2025 RisingWave Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

pub mod compaction;
mod jni_catalog;
mod mock_catalog;
mod storage_catalog;

use std::collections::HashMap;
use std::sync::Arc;

use ::iceberg::io::{S3_ACCESS_KEY_ID, S3_ENDPOINT, S3_REGION, S3_SECRET_ACCESS_KEY};
use ::iceberg::table::Table;
use ::iceberg::{Catalog, TableIdent};
use anyhow::{Context, anyhow};
use iceberg::io::{
    AZBLOB_ACCOUNT_KEY, AZBLOB_ACCOUNT_NAME, AZBLOB_ENDPOINT, GCS_CREDENTIALS_JSON,
    GCS_DISABLE_CONFIG_LOAD, S3_DISABLE_CONFIG_LOAD, S3_PATH_STYLE_ACCESS,
};
use iceberg_catalog_glue::{AWS_ACCESS_KEY_ID, AWS_REGION_NAME, AWS_SECRET_ACCESS_KEY};
use phf::{Set, phf_set};
use risingwave_common::bail;
use risingwave_common::util::env_var::env_var_is_true;
use serde_derive::Deserialize;
use serde_with::serde_as;
use url::Url;
use with_options::WithOptions;

use crate::connector_common::common::DISABLE_DEFAULT_CREDENTIAL;
use crate::connector_common::iceberg::storage_catalog::StorageCatalogConfig;
use crate::deserialize_optional_bool_from_string;
use crate::enforce_secret::EnforceSecret;
use crate::error::ConnectorResult;

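/// Common Iceberg options shared by the Iceberg connectors in this crate, deserialized from the
/// user-provided `WITH` options. The `serde(rename = ...)` attributes below give the user-facing
/// option names (e.g. `catalog.type`, `warehouse.path`); the summary here is a descriptive note
/// added for readability, not an exhaustive specification.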
#[serde_as]
#[derive(Debug, Clone, PartialEq, Eq, Deserialize, WithOptions)]
pub struct IcebergCommon {
    // Catalog type supported by iceberg, such as "storage", "rest".
    // If not set, "storage" is used as the default.
    #[serde(rename = "catalog.type")]
    pub catalog_type: Option<String>,
    #[serde(rename = "s3.region")]
    pub region: Option<String>,
    #[serde(rename = "s3.endpoint")]
    pub endpoint: Option<String>,
    #[serde(rename = "s3.access.key")]
    pub access_key: Option<String>,
    #[serde(rename = "s3.secret.key")]
    pub secret_key: Option<String>,

    #[serde(rename = "gcs.credential")]
    pub gcs_credential: Option<String>,

    #[serde(rename = "azblob.account_name")]
    pub azblob_account_name: Option<String>,
    #[serde(rename = "azblob.account_key")]
    pub azblob_account_key: Option<String>,
    #[serde(rename = "azblob.endpoint_url")]
    pub azblob_endpoint_url: Option<String>,

    /// Path of the iceberg warehouse.
    #[serde(rename = "warehouse.path")]
    pub warehouse_path: Option<String>,
    /// AWS Glue catalog ID. Can be omitted for the storage catalog or when the
    /// caller's AWS account ID matches the glue ID.
    #[serde(rename = "glue.id")]
    pub glue_id: Option<String>,
    /// Catalog name; the default value is `risingwave`.
    #[serde(rename = "catalog.name")]
    pub catalog_name: Option<String>,
    /// URI of the iceberg catalog, only applicable in the rest catalog.
    #[serde(rename = "catalog.uri")]
    pub catalog_uri: Option<String>,
    #[serde(rename = "database.name")]
    pub database_name: Option<String>,
    /// Full name of the table, must include the schema name.
    #[serde(rename = "table.name")]
    pub table_name: String,
    /// Credential for accessing the iceberg catalog, only applicable in the rest catalog.
    /// A credential to exchange for a token in the `OAuth2` client credentials flow.
    #[serde(rename = "catalog.credential")]
    pub credential: Option<String>,
    /// Token for accessing the iceberg catalog, only applicable in the rest catalog.
    /// A Bearer token which will be used for interaction with the server.
    #[serde(rename = "catalog.token")]
    pub token: Option<String>,
    /// `oauth2_server_uri` for accessing the iceberg catalog, only applicable in the rest catalog.
    /// Token endpoint URI to fetch the token from if the rest catalog is not the authorization server.
    #[serde(rename = "catalog.oauth2_server_uri")]
    pub oauth2_server_uri: Option<String>,
    /// Scope for accessing the iceberg catalog, only applicable in the rest catalog.
    /// Additional scope for `OAuth2`.
    #[serde(rename = "catalog.scope")]
    pub scope: Option<String>,

    /// The signing region to use when signing requests to the REST catalog.
    #[serde(rename = "catalog.rest.signing_region")]
    pub rest_signing_region: Option<String>,

    /// The signing name to use when signing requests to the REST catalog.
    #[serde(rename = "catalog.rest.signing_name")]
    pub rest_signing_name: Option<String>,

    /// Whether to use `SigV4` for signing requests to the REST catalog.
    #[serde(
        rename = "catalog.rest.sigv4_enabled",
        default,
        deserialize_with = "deserialize_optional_bool_from_string"
    )]
    pub rest_sigv4_enabled: Option<bool>,

    #[serde(
        rename = "s3.path.style.access",
        default,
        deserialize_with = "deserialize_optional_bool_from_string"
    )]
    pub path_style_access: Option<bool>,
    /// Enable config load. When set to true, warehouse credentials are loaded from the
    /// environment. Only allowed in a self-hosted environment.
    #[serde(default, deserialize_with = "deserialize_optional_bool_from_string")]
    pub enable_config_load: Option<bool>,

    /// Only used by the iceberg engine to enable the hosted catalog.
    #[serde(
        rename = "hosted_catalog",
        default,
        deserialize_with = "deserialize_optional_bool_from_string"
    )]
    pub hosted_catalog: Option<bool>,

    /// The HTTP headers to be used in catalog requests.
    /// Example:
    /// `catalog.header = "key1=value1;key2=value2;key3=value3"`
    /// Format of the header:
    /// - Each header is a key-value pair, separated by an '='.
    /// - Multiple headers can be specified, separated by a ';'.
    #[serde(rename = "catalog.header")]
    pub header: Option<String>,
}

impl EnforceSecret for IcebergCommon {
    const ENFORCE_SECRET_PROPERTIES: Set<&'static str> = phf_set! {
        "s3.access.key",
        "s3.secret.key",
        "gcs.credential",
        "catalog.credential",
        "catalog.token",
        "catalog.oauth2_server_uri",
    };
}

impl IcebergCommon {
    pub fn catalog_type(&self) -> &str {
        self.catalog_type.as_deref().unwrap_or("storage")
    }

    pub fn catalog_name(&self) -> String {
        self.catalog_name
            .as_ref()
            .cloned()
            .unwrap_or_else(|| "risingwave".to_owned())
    }

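    /// Parse the `catalog.header` option into a header map.
    ///
    /// For example, `"key1=value1;key2=value2"` yields `{"key1": "value1", "key2": "value2"}`;
    /// a pair without an `=` is rejected, and an unset option yields an empty map.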
    pub fn headers(&self) -> ConnectorResult<HashMap<String, String>> {
        if let Some(header) = &self.header {
            let mut headers = HashMap::new();
            for pair in header.split(';') {
                let mut parts = pair.split('=');
                if let (Some(key), Some(value)) = (parts.next(), parts.next()) {
                    headers.insert(key.to_owned(), value.to_owned());
                } else {
                    bail!("Invalid header format: {}", pair);
                }
            }
            Ok(headers)
        } else {
            Ok(HashMap::new())
        }
    }

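    /// Whether credentials may be loaded from the environment (config load).
    ///
    /// The `DISABLE_DEFAULT_CREDENTIAL` environment variable takes precedence and force-disables
    /// config load (cloud environments); otherwise the user-supplied `enable_config_load` option
    /// is used, defaulting to `false`.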
    pub fn enable_config_load(&self) -> bool {
        // If the env var is set to true, we disable the default config load. (Cloud environment)
        if env_var_is_true(DISABLE_DEFAULT_CREDENTIAL) {
            return false;
        }
        self.enable_config_load.unwrap_or(false)
    }

    /// For both V1 and V2.
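    ///
    /// Returns `(file_io_props, java_catalog_props)`: the first map is used for the Rust-side
    /// file IO configuration, the second is forwarded to the Java catalog through JNI.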
    fn build_jni_catalog_configs(
        &self,
        java_catalog_props: &HashMap<String, String>,
    ) -> ConnectorResult<(HashMap<String, String>, HashMap<String, String>)> {
        let mut iceberg_configs = HashMap::new();
        let enable_config_load = self.enable_config_load();
        let file_io_props = {
            let catalog_type = self.catalog_type().to_owned();

            if let Some(region) = &self.region {
                // iceberg-rust
                iceberg_configs.insert(S3_REGION.to_owned(), region.clone());
            }

            if let Some(endpoint) = &self.endpoint {
                // iceberg-rust
                iceberg_configs.insert(S3_ENDPOINT.to_owned(), endpoint.clone());
            }

            // iceberg-rust
            if let Some(access_key) = &self.access_key {
                iceberg_configs.insert(S3_ACCESS_KEY_ID.to_owned(), access_key.clone());
            }
            if let Some(secret_key) = &self.secret_key {
                iceberg_configs.insert(S3_SECRET_ACCESS_KEY.to_owned(), secret_key.clone());
            }
            if let Some(gcs_credential) = &self.gcs_credential {
                iceberg_configs.insert(GCS_CREDENTIALS_JSON.to_owned(), gcs_credential.clone());
                if catalog_type != "rest" && catalog_type != "rest_rust" {
                    bail!("gcs unsupported in {} catalog", &catalog_type);
                }
            }

            if let (
                Some(azblob_account_name),
                Some(azblob_account_key),
                Some(azblob_endpoint_url),
            ) = (
                &self.azblob_account_name,
                &self.azblob_account_key,
                &self.azblob_endpoint_url,
            ) {
                iceberg_configs.insert(AZBLOB_ACCOUNT_NAME.to_owned(), azblob_account_name.clone());
                iceberg_configs.insert(AZBLOB_ACCOUNT_KEY.to_owned(), azblob_account_key.clone());
                iceberg_configs.insert(AZBLOB_ENDPOINT.to_owned(), azblob_endpoint_url.clone());

                if catalog_type != "rest" && catalog_type != "rest_rust" {
                    bail!("azblob unsupported in {} catalog", &catalog_type);
                }
            }

            match &self.warehouse_path {
                Some(warehouse_path) => {
                    let (bucket, _) = {
                        let is_s3_tables = warehouse_path.starts_with("arn:aws:s3tables");
                        let url = Url::parse(warehouse_path);
                        if (url.is_err() || is_s3_tables)
                            && (catalog_type == "rest" || catalog_type == "rest_rust")
                        {
                            // If the warehouse path is not a valid URL, it could be a warehouse name in a
                            // rest catalog, or an S3 Tables ARN, which is not a valid URL but is a valid
                            // warehouse path, so we allow it to pass here.
                            (None, None)
                        } else {
                            let url = url.with_context(|| {
                                format!("Invalid warehouse path: {}", warehouse_path)
                            })?;
                            let bucket = url
                                .host_str()
                                .with_context(|| {
                                    format!(
                                        "Invalid s3 path: {}, bucket is missing",
                                        warehouse_path
                                    )
                                })?
                                .to_owned();
                            let root = url.path().trim_start_matches('/').to_owned();
                            (Some(bucket), Some(root))
                        }
                    };

                    if let Some(bucket) = bucket {
                        iceberg_configs.insert("iceberg.table.io.bucket".to_owned(), bucket);
                    }
                }
                None => {
                    if catalog_type != "rest" && catalog_type != "rest_rust" {
                        bail!("`warehouse.path` must be set in {} catalog", &catalog_type);
                    }
                }
            }
            iceberg_configs.insert(
                S3_DISABLE_CONFIG_LOAD.to_owned(),
                (!enable_config_load).to_string(),
            );

            iceberg_configs.insert(
                GCS_DISABLE_CONFIG_LOAD.to_owned(),
                (!enable_config_load).to_string(),
            );

            if let Some(path_style_access) = self.path_style_access {
                iceberg_configs.insert(
                    S3_PATH_STYLE_ACCESS.to_owned(),
                    path_style_access.to_string(),
                );
            }

            iceberg_configs
        };

        // Prepare JNI configs. For details, see https://iceberg.apache.org/docs/latest/aws/
        let mut java_catalog_configs = HashMap::new();
        {
            if let Some(uri) = self.catalog_uri.as_deref() {
                java_catalog_configs.insert("uri".to_owned(), uri.to_owned());
            }

            if let Some(warehouse_path) = &self.warehouse_path {
                java_catalog_configs.insert("warehouse".to_owned(), warehouse_path.clone());
            }
            java_catalog_configs.extend(java_catalog_props.clone());

            // Currently only S3 is supported, so always use S3FileIO.
            java_catalog_configs.insert(
                "io-impl".to_owned(),
                "org.apache.iceberg.aws.s3.S3FileIO".to_owned(),
            );

            // Suppress S3FileIO logs like: "Unclosed S3FileIO instance created by..."
            java_catalog_configs.insert("init-creation-stacktrace".to_owned(), "false".to_owned());

            if let Some(region) = &self.region {
                java_catalog_configs.insert("client.region".to_owned(), region.clone());
            }
            if let Some(endpoint) = &self.endpoint {
                java_catalog_configs.insert("s3.endpoint".to_owned(), endpoint.clone());
            }

            if let Some(access_key) = &self.access_key {
                java_catalog_configs.insert("s3.access-key-id".to_owned(), access_key.clone());
            }
            if let Some(secret_key) = &self.secret_key {
                java_catalog_configs.insert("s3.secret-access-key".to_owned(), secret_key.clone());
            }

            if let Some(path_style_access) = &self.path_style_access {
                java_catalog_configs.insert(
                    "s3.path-style-access".to_owned(),
                    path_style_access.to_string(),
                );
            }

            let headers = self.headers()?;
            for (header_name, header_value) in headers {
                java_catalog_configs.insert(format!("header.{}", header_name), header_value);
            }

            match self.catalog_type.as_deref() {
                Some("rest") => {
                    if let Some(credential) = &self.credential {
                        java_catalog_configs.insert("credential".to_owned(), credential.clone());
                    }
                    if let Some(token) = &self.token {
                        java_catalog_configs.insert("token".to_owned(), token.clone());
                    }
                    if let Some(oauth2_server_uri) = &self.oauth2_server_uri {
                        java_catalog_configs
                            .insert("oauth2-server-uri".to_owned(), oauth2_server_uri.clone());
                    }
                    if let Some(scope) = &self.scope {
                        java_catalog_configs.insert("scope".to_owned(), scope.clone());
                    }
                    if let Some(rest_signing_region) = &self.rest_signing_region {
                        java_catalog_configs.insert(
                            "rest.signing-region".to_owned(),
                            rest_signing_region.clone(),
                        );
                    }
                    if let Some(rest_signing_name) = &self.rest_signing_name {
                        java_catalog_configs
                            .insert("rest.signing-name".to_owned(), rest_signing_name.clone());
                    }
                    if let Some(rest_sigv4_enabled) = self.rest_sigv4_enabled {
                        java_catalog_configs.insert(
                            "rest.sigv4-enabled".to_owned(),
                            rest_sigv4_enabled.to_string(),
                        );

                        if let Some(access_key) = &self.access_key {
                            java_catalog_configs
                                .insert("rest.access-key-id".to_owned(), access_key.clone());
                        }

                        if let Some(secret_key) = &self.secret_key {
                            java_catalog_configs
                                .insert("rest.secret-access-key".to_owned(), secret_key.clone());
                        }
                    }
                }
                Some("glue") => {
                    if !enable_config_load {
                        java_catalog_configs.insert(
                            "client.credentials-provider".to_owned(),
                            "com.risingwave.connector.catalog.GlueCredentialProvider".to_owned(),
                        );
                        // Use the S3 access key/secret key and region for Glue by default.
                        // TODO: support separate credentials and regions for S3 and Glue.
                        if let Some(access_key) = &self.access_key {
                            java_catalog_configs.insert(
                                "client.credentials-provider.glue.access-key-id".to_owned(),
                                access_key.clone(),
                            );
                        }
                        if let Some(secret_key) = &self.secret_key {
                            java_catalog_configs.insert(
                                "client.credentials-provider.glue.secret-access-key".to_owned(),
                                secret_key.clone(),
                            );
                        }
                    }

                    if let Some(region) = &self.region {
                        java_catalog_configs.insert("client.region".to_owned(), region.clone());
                        java_catalog_configs.insert(
                            "glue.endpoint".to_owned(),
                            format!("https://glue.{}.amazonaws.com", region),
                        );
                    }

                    if let Some(glue_id) = self.glue_id.as_deref() {
                        java_catalog_configs.insert("glue.id".to_owned(), glue_id.to_owned());
                    }
                }
                _ => {}
            }
        }

        Ok((file_io_props, java_catalog_configs))
    }
}

impl IcebergCommon {
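    /// Build the [`TableIdent`] from `database.name` and `table.name`; when `database.name` is
    /// unset, the identifier is constructed from `table.name` alone.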
    pub fn full_table_name(&self) -> ConnectorResult<TableIdent> {
        let ret = if let Some(database_name) = &self.database_name {
            TableIdent::from_strs(vec![database_name, &self.table_name])
        } else {
            TableIdent::from_strs(vec![&self.table_name])
        };

        Ok(ret.context("Failed to create table identifier")?)
    }

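    /// Create a catalog instance according to `catalog.type`.
    ///
    /// `storage`, `rest_rust`, `glue_rust`, and `mock` are implemented natively in Rust, while
    /// `hive`, `jdbc`, `snowflake`, `rest`, and `glue` are backed by Java catalogs via JNI.
    ///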
    /// TODO: remove the arguments and put them into `IcebergCommon`. Currently the handling in source and sink differs, so pass them separately to be safer.
    pub async fn create_catalog(
        &self,
        java_catalog_props: &HashMap<String, String>,
    ) -> ConnectorResult<Arc<dyn Catalog>> {
        match self.catalog_type() {
            "storage" => {
                let warehouse = self
                    .warehouse_path
                    .clone()
                    .ok_or_else(|| anyhow!("`warehouse.path` must be set in storage catalog"))?;
                let url = Url::parse(warehouse.as_ref())
                    .map_err(|_| anyhow!("Invalid warehouse path: {}", warehouse))?;

                let config = match url.scheme() {
                    "s3" | "s3a" => StorageCatalogConfig::S3(
                        storage_catalog::StorageCatalogS3Config::builder()
                            .warehouse(warehouse)
                            .access_key(self.access_key.clone())
                            .secret_key(self.secret_key.clone())
                            .region(self.region.clone())
                            .endpoint(self.endpoint.clone())
                            .path_style_access(self.path_style_access)
                            .enable_config_load(Some(self.enable_config_load()))
                            .build(),
                    ),
                    "gs" | "gcs" => StorageCatalogConfig::Gcs(
                        storage_catalog::StorageCatalogGcsConfig::builder()
                            .warehouse(warehouse)
                            .credential(self.gcs_credential.clone())
                            .enable_config_load(Some(self.enable_config_load()))
                            .build(),
                    ),
                    "azblob" => StorageCatalogConfig::Azblob(
                        storage_catalog::StorageCatalogAzblobConfig::builder()
                            .warehouse(warehouse)
                            .account_name(self.azblob_account_name.clone())
                            .account_key(self.azblob_account_key.clone())
                            .endpoint(self.azblob_endpoint_url.clone())
                            .build(),
                    ),
                    scheme => bail!("Unsupported warehouse scheme: {}", scheme),
                };

                let catalog = storage_catalog::StorageCatalog::new(config)?;
                Ok(Arc::new(catalog))
            }
            "rest_rust" => {
                let mut iceberg_configs = HashMap::new();

                // Check the GCS credential, or the S3 access key and secret key.
                if let Some(gcs_credential) = &self.gcs_credential {
                    iceberg_configs.insert(GCS_CREDENTIALS_JSON.to_owned(), gcs_credential.clone());
                } else {
                    if let Some(region) = &self.region {
                        iceberg_configs.insert(S3_REGION.to_owned(), region.clone());
                    }
                    if let Some(endpoint) = &self.endpoint {
                        iceberg_configs.insert(S3_ENDPOINT.to_owned(), endpoint.clone());
                    }
                    if let Some(access_key) = &self.access_key {
                        iceberg_configs.insert(S3_ACCESS_KEY_ID.to_owned(), access_key.clone());
                    }
                    if let Some(secret_key) = &self.secret_key {
                        iceberg_configs.insert(S3_SECRET_ACCESS_KEY.to_owned(), secret_key.clone());
                    }
                    if let Some(path_style_access) = &self.path_style_access {
                        iceberg_configs.insert(
                            S3_PATH_STYLE_ACCESS.to_owned(),
                            path_style_access.to_string(),
                        );
                    }
                };

                if let Some(credential) = &self.credential {
                    iceberg_configs.insert("credential".to_owned(), credential.clone());
                }
                if let Some(token) = &self.token {
                    iceberg_configs.insert("token".to_owned(), token.clone());
                }
                if let Some(oauth2_server_uri) = &self.oauth2_server_uri {
                    iceberg_configs
                        .insert("oauth2-server-uri".to_owned(), oauth2_server_uri.clone());
                }
                if let Some(scope) = &self.scope {
                    iceberg_configs.insert("scope".to_owned(), scope.clone());
                }

                let headers = self.headers()?;
                for (header_name, header_value) in headers {
                    iceberg_configs.insert(format!("header.{}", header_name), header_value);
                }

                let config_builder =
                    iceberg_catalog_rest::RestCatalogConfig::builder()
                        .uri(self.catalog_uri.clone().with_context(|| {
                            "`catalog.uri` must be set in rest catalog".to_owned()
                        })?)
                        .props(iceberg_configs);

                let config = match &self.warehouse_path {
                    Some(warehouse_path) => {
                        config_builder.warehouse(warehouse_path.clone()).build()
                    }
                    None => config_builder.build(),
                };
                let catalog = iceberg_catalog_rest::RestCatalog::new(config);
                Ok(Arc::new(catalog))
            }
            "glue_rust" => {
                let mut iceberg_configs = HashMap::new();
                // glue
                if let Some(region) = &self.region {
                    iceberg_configs.insert(AWS_REGION_NAME.to_owned(), region.clone());
                }
                if let Some(access_key) = &self.access_key {
                    iceberg_configs.insert(AWS_ACCESS_KEY_ID.to_owned(), access_key.clone());
                }
                if let Some(secret_key) = &self.secret_key {
                    iceberg_configs.insert(AWS_SECRET_ACCESS_KEY.to_owned(), secret_key.clone());
                }
                // s3
                if let Some(region) = &self.region {
                    iceberg_configs.insert(S3_REGION.to_owned(), region.clone());
                }
                if let Some(endpoint) = &self.endpoint {
                    iceberg_configs.insert(S3_ENDPOINT.to_owned(), endpoint.clone());
                }
                if let Some(access_key) = &self.access_key {
                    iceberg_configs.insert(S3_ACCESS_KEY_ID.to_owned(), access_key.clone());
                }
                if let Some(secret_key) = &self.secret_key {
                    iceberg_configs.insert(S3_SECRET_ACCESS_KEY.to_owned(), secret_key.clone());
                }
                if let Some(path_style_access) = &self.path_style_access {
                    iceberg_configs.insert(
                        S3_PATH_STYLE_ACCESS.to_owned(),
                        path_style_access.to_string(),
                    );
                }
                let config_builder =
                    iceberg_catalog_glue::GlueCatalogConfig::builder()
                        .warehouse(self.warehouse_path.clone().ok_or_else(|| {
                            anyhow!("`warehouse.path` must be set in glue catalog")
                        })?)
                        .props(iceberg_configs);
                let config = if let Some(uri) = self.catalog_uri.as_deref() {
                    config_builder.uri(uri.to_owned()).build()
                } else {
                    config_builder.build()
                };
                let catalog = iceberg_catalog_glue::GlueCatalog::new(config).await?;
                Ok(Arc::new(catalog))
            }
            catalog_type
                if catalog_type == "hive"
                    || catalog_type == "snowflake"
                    || catalog_type == "jdbc"
                    || catalog_type == "rest"
                    || catalog_type == "glue" =>
            {
                // Create a Java catalog via JNI.
                let (file_io_props, java_catalog_props) =
                    self.build_jni_catalog_configs(java_catalog_props)?;
                let catalog_impl = match catalog_type {
                    "hive" => "org.apache.iceberg.hive.HiveCatalog",
                    "jdbc" => "org.apache.iceberg.jdbc.JdbcCatalog",
                    "snowflake" => "org.apache.iceberg.snowflake.SnowflakeCatalog",
                    "rest" => "org.apache.iceberg.rest.RESTCatalog",
                    "glue" => "org.apache.iceberg.aws.glue.GlueCatalog",
                    _ => unreachable!(),
                };

                jni_catalog::JniCatalog::build_catalog(
                    file_io_props,
                    self.catalog_name(),
                    catalog_impl,
                    java_catalog_props,
                )
            }
            "mock" => Ok(Arc::new(mock_catalog::MockCatalog {})),
            _ => {
                bail!(
                    "Unsupported catalog type: {}, only support `storage`, `rest`, `hive`, `jdbc`, `glue`, `snowflake`",
                    self.catalog_type()
                )
            }
        }
    }

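    /// Resolve the configured catalog and load the Iceberg [`Table`] identified by
    /// `database.name` / `table.name`.
    ///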
    /// TODO: remove the arguments and put them into `IcebergCommon`. Currently the handling in source and sink differs, so pass them separately to be safer.
    pub async fn load_table(
        &self,
        java_catalog_props: &HashMap<String, String>,
    ) -> ConnectorResult<Table> {
        let catalog = self
            .create_catalog(java_catalog_props)
            .await
            .context("Unable to load iceberg catalog")?;

        let table_id = self
            .full_table_name()
            .context("Unable to parse table name")?;

        catalog.load_table(&table_id).await.map_err(Into::into)
    }
}
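
// A minimal sanity-check sketch (not part of the original module) exercising the pure helpers
// above: default `catalog.type` / `catalog.name`, `catalog.header` parsing, and table-identifier
// construction. All field values below are illustrative placeholders.
#[cfg(test)]
mod tests {
    use super::*;

    fn dummy_common() -> IcebergCommon {
        IcebergCommon {
            catalog_type: None,
            region: None,
            endpoint: None,
            access_key: None,
            secret_key: None,
            gcs_credential: None,
            azblob_account_name: None,
            azblob_account_key: None,
            azblob_endpoint_url: None,
            warehouse_path: None,
            glue_id: None,
            catalog_name: None,
            catalog_uri: None,
            database_name: Some("demo_db".to_owned()),
            table_name: "demo_table".to_owned(),
            credential: None,
            token: None,
            oauth2_server_uri: None,
            scope: None,
            rest_signing_region: None,
            rest_signing_name: None,
            rest_sigv4_enabled: None,
            path_style_access: None,
            enable_config_load: None,
            hosted_catalog: None,
            header: Some("x-tenant=demo;x-trace=1".to_owned()),
        }
    }

    #[test]
    fn test_defaults_and_header_parsing() {
        let common = dummy_common();
        // Unset options fall back to the documented defaults.
        assert_eq!(common.catalog_type(), "storage");
        assert_eq!(common.catalog_name(), "risingwave");
        // `catalog.header` is split on ';' into key-value pairs separated by '='.
        let headers = common.headers().unwrap();
        assert_eq!(headers.get("x-tenant").map(String::as_str), Some("demo"));
        assert_eq!(headers.get("x-trace").map(String::as_str), Some("1"));
        // `database.name` + `table.name` form a valid table identifier.
        assert!(common.full_table_name().is_ok());
    }
}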