// risingwave_connector/sink/dynamodb.rs

1// Copyright 2025 RisingWave Labs
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7//     http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15use std::collections::{BTreeMap, HashMap, HashSet};
16
17use anyhow::{Context, anyhow};
18use aws_sdk_dynamodb as dynamodb;
19use aws_sdk_dynamodb::client::Client;
20use aws_smithy_types::Blob;
21use dynamodb::types::{AttributeValue, TableStatus, WriteRequest};
22use futures::prelude::TryFuture;
23use futures::prelude::future::TryFutureExt;
24use risingwave_common::array::{Op, RowRef, StreamChunk};
25use risingwave_common::catalog::Schema;
26use risingwave_common::row::Row as _;
27use risingwave_common::types::{DataType, ScalarRefImpl, ToText};
28use risingwave_common::util::iter_util::ZipEqDebug;
29use serde_derive::Deserialize;
30use serde_with::{DisplayFromStr, serde_as};
31use with_options::WithOptions;
32use write_chunk_future::{DynamoDbPayloadWriter, WriteChunkFuture};
33
34use super::log_store::DeliveryFutureManagerAddFuture;
35use super::writer::{
36    AsyncTruncateLogSinkerOf, AsyncTruncateSinkWriter, AsyncTruncateSinkWriterExt,
37};
38use super::{DummySinkCommitCoordinator, Result, Sink, SinkError, SinkParam, SinkWriterParam};
39use crate::connector_common::AwsAuthProps;
40use crate::enforce_secret::EnforceSecret;
41use crate::error::ConnectorResult;
42
/// Connector name under which this sink is registered (`connector = 'dynamodb'`).
pub const DYNAMO_DB_SINK: &str = "dynamodb";
44
/// User-facing configuration of the DynamoDB sink, deserialized from the
/// sink's `WITH (...)` property map.
#[serde_as]
#[derive(Deserialize, Debug, Clone, WithOptions)]
pub struct DynamoDbConfig {
    /// Name of the target DynamoDB table.
    #[serde(rename = "table", alias = "dynamodb.table")]
    pub table: String,

    /// Legacy batching option. Deprecated and not read anywhere in this
    /// file; kept only so existing sink definitions still deserialize.
    #[serde(rename = "dynamodb.max_batch_rows", default = "default_max_batch_rows")]
    #[serde_as(as = "DisplayFromStr")]
    #[deprecated]
    pub max_batch_rows: usize,

    /// AWS credential/connection settings, flattened from the same
    /// property map.
    #[serde(flatten)]
    pub aws_auth_props: AwsAuthProps,

    /// Maximum number of write requests per `BatchWriteItem` call
    /// (default 25, which is also DynamoDB's per-call limit).
    #[serde(
        rename = "dynamodb.max_batch_item_nums",
        default = "default_max_batch_item_nums"
    )]
    #[serde_as(as = "DisplayFromStr")]
    pub max_batch_item_nums: usize,

    /// Maximum number of chunk-delivery futures allowed in flight
    /// (passed to `into_log_sinker`; default 256).
    #[serde(
        rename = "dynamodb.max_future_send_nums",
        default = "default_max_future_send_nums"
    )]
    #[serde_as(as = "DisplayFromStr")]
    pub max_future_send_nums: usize,
}
73
impl EnforceSecret for DynamoDbConfig {
    /// Delegate per-property secret enforcement to `AwsAuthProps`.
    fn enforce_one(prop: &str) -> crate::error::ConnectorResult<()> {
        AwsAuthProps::enforce_one(prop)
    }
}
79
/// Default for `dynamodb.max_batch_item_nums`: 25, matching DynamoDB's
/// `BatchWriteItem` hard limit of 25 requests per call.
fn default_max_batch_item_nums() -> usize {
    25
}
83
/// Default for `dynamodb.max_future_send_nums` (max in-flight delivery futures).
fn default_max_future_send_nums() -> usize {
    256
}
87
/// Default for the deprecated `dynamodb.max_batch_rows` option.
fn default_max_batch_rows() -> usize {
    1024
}
91
92impl DynamoDbConfig {
93    pub async fn build_client(&self) -> ConnectorResult<Client> {
94        let config = &self.aws_auth_props;
95        let aws_config = config.build_config().await?;
96
97        Ok(Client::new(&aws_config))
98    }
99
100    fn from_btreemap(values: BTreeMap<String, String>) -> Result<Self> {
101        serde_json::from_value::<DynamoDbConfig>(serde_json::to_value(values).unwrap())
102            .map_err(|e| SinkError::Config(anyhow!(e)))
103    }
104}
105
/// DynamoDB sink: writes stream changes into a DynamoDB table, addressing
/// rows by the sink's primary-key columns.
#[derive(Clone, Debug)]
pub struct DynamoDbSink {
    pub config: DynamoDbConfig,
    /// Schema of the rows this sink receives.
    schema: Schema,
    /// Indices into `schema` of the downstream primary-key columns.
    pk_indices: Vec<usize>,
}
112
113impl EnforceSecret for DynamoDbSink {
114    fn enforce_secret<'a>(
115        prop_iter: impl Iterator<Item = &'a str>,
116    ) -> crate::error::ConnectorResult<()> {
117        for prop in prop_iter {
118            DynamoDbConfig::enforce_one(prop)?;
119        }
120        Ok(())
121    }
122}
123
impl Sink for DynamoDbSink {
    type Coordinator = DummySinkCommitCoordinator;
    type LogSinker = AsyncTruncateLogSinkerOf<DynamoDbSinkWriter>;

    const SINK_NAME: &'static str = DYNAMO_DB_SINK;

    /// Validate the sink before use: the feature is licensed, the table
    /// exists and is ACTIVE, and every DynamoDB key attribute is covered by
    /// one of the sink's primary-key columns.
    async fn validate(&self) -> Result<()> {
        // The DynamoDB sink is gated behind a license feature check.
        risingwave_common::license::Feature::DynamoDbSink
            .check_available()
            .map_err(|e| anyhow::anyhow!(e))?;
        let client = (self.config.build_client().await)
            .context("validate DynamoDB sink error")
            .map_err(SinkError::DynamoDb)?;

        let table_name = &self.config.table;
        let output = client
            .describe_table()
            .table_name(table_name)
            .send()
            .await
            .map_err(|e| anyhow!(e))?;
        let Some(table) = output.table else {
            return Err(SinkError::DynamoDb(anyhow!(
                "table {} not found",
                table_name
            )));
        };
        // Writing to a table that is still CREATING/UPDATING/DELETING would fail.
        if !matches!(table.table_status(), Some(TableStatus::Active)) {
            return Err(SinkError::DynamoDb(anyhow!(
                "table {} is not active",
                table_name
            )));
        }
        // Names of the sink's primary-key columns, looked up by position.
        let pk_set: HashSet<String> = self
            .schema
            .fields()
            .iter()
            .enumerate()
            .filter(|(k, _)| self.pk_indices.contains(k))
            .map(|(_, v)| v.name.clone())
            .collect();
        let key_schema = table.key_schema();

        // Every DynamoDB key attribute must be one of the sink's pk columns;
        // extra pk columns on the sink side are tolerated.
        for key_element in key_schema.iter().map(|x| x.attribute_name()) {
            if !pk_set.contains(key_element) {
                return Err(SinkError::DynamoDb(anyhow!(
                    "table {} key field {} not found in schema or not primary key",
                    table_name,
                    key_element
                )));
            }
        }

        Ok(())
    }

    /// Build the writer and wrap it in a log sinker that permits up to
    /// `max_future_send_nums` deliveries in flight.
    async fn new_log_sinker(&self, _writer_param: SinkWriterParam) -> Result<Self::LogSinker> {
        Ok(
            DynamoDbSinkWriter::new(self.config.clone(), self.schema.clone())
                .await?
                .into_log_sinker(self.config.max_future_send_nums),
        )
    }
}
188
189impl TryFrom<SinkParam> for DynamoDbSink {
190    type Error = SinkError;
191
192    fn try_from(param: SinkParam) -> std::result::Result<Self, Self::Error> {
193        let schema = param.schema();
194        let config = DynamoDbConfig::from_btreemap(param.properties)?;
195
196        Ok(Self {
197            config,
198            schema,
199            pk_indices: param.downstream_pk,
200        })
201    }
202}
203
/// A single buffered write (put or delete) paired with the table's key
/// attribute names, so duplicate keys within a batch can be detected.
#[derive(Debug)]
struct DynamoDbRequest {
    /// The underlying DynamoDB put or delete request.
    inner: WriteRequest,
    /// Names of the table's key attributes.
    key_items: Vec<String>,
}
209
210impl DynamoDbRequest {
211    fn extract_pk_values(&self) -> Option<Vec<AttributeValue>> {
212        let key = match (&self.inner.put_request(), &self.inner.delete_request()) {
213            (Some(put_req), None) => &put_req.item,
214            (None, Some(del_req)) => &del_req.key,
215            _ => return None,
216        };
217        let vs = key
218            .iter()
219            .filter(|(k, _)| self.key_items.contains(k))
220            .map(|(_, v)| v.clone())
221            .collect();
222        Some(vs)
223    }
224}
225
/// Sink writer: formats incoming rows into DynamoDB items and hands them to
/// the payload writer for batched delivery.
pub struct DynamoDbSinkWriter {
    /// Issues the batched `BatchWriteItem` calls.
    payload_writer: DynamoDbPayloadWriter,
    /// Converts rows into DynamoDB attribute maps.
    formatter: DynamoDbFormatter,
}
230
231impl DynamoDbSinkWriter {
232    pub async fn new(config: DynamoDbConfig, schema: Schema) -> Result<Self> {
233        let client = config.build_client().await?;
234        let table_name = &config.table;
235        let output = client
236            .describe_table()
237            .table_name(table_name)
238            .send()
239            .await
240            .map_err(|e| anyhow!(e))?;
241        let Some(table) = output.table else {
242            return Err(SinkError::DynamoDb(anyhow!(
243                "table {} not found",
244                table_name
245            )));
246        };
247        let dynamodb_keys = table
248            .key_schema
249            .unwrap_or_default()
250            .into_iter()
251            .map(|k| k.attribute_name)
252            .collect();
253
254        let payload_writer = DynamoDbPayloadWriter {
255            client,
256            table: config.table.clone(),
257            dynamodb_keys,
258            max_batch_item_nums: config.max_batch_item_nums,
259        };
260
261        Ok(Self {
262            payload_writer,
263            formatter: DynamoDbFormatter { schema },
264        })
265    }
266
267    fn write_chunk_inner(&mut self, chunk: StreamChunk) -> Result<WriteChunkFuture> {
268        let mut request_items = Vec::new();
269        for (op, row) in chunk.rows() {
270            let items = self.formatter.format_row(row)?;
271            match op {
272                Op::Insert | Op::UpdateInsert => {
273                    self.payload_writer
274                        .write_one_insert(items, &mut request_items);
275                }
276                Op::Delete => {
277                    self.payload_writer
278                        .write_one_delete(items, &mut request_items);
279                }
280                Op::UpdateDelete => {}
281            }
282        }
283        Ok(self.payload_writer.write_chunk(request_items))
284    }
285}
286
/// Opaque per-chunk delivery future; resolves once every batched
/// `BatchWriteItem` call for the chunk has completed. Its concrete type is
/// fixed by the defining use in `AsyncTruncateSinkWriter::write_chunk`.
pub type DynamoDbSinkDeliveryFuture = impl TryFuture<Ok = (), Error = SinkError> + Unpin + 'static;
288
impl AsyncTruncateSinkWriter for DynamoDbSinkWriter {
    type DeliveryFuture = DynamoDbSinkDeliveryFuture;

    /// Kick off the batched writes for `chunk` and register the resulting
    /// future with the delivery manager (which may await if too many
    /// deliveries are already in flight, per `add_future_may_await`).
    async fn write_chunk<'a>(
        &'a mut self,
        chunk: StreamChunk,
        mut add_future: DeliveryFutureManagerAddFuture<'a, Self::DeliveryFuture>,
    ) -> Result<()> {
        let futures = self.write_chunk_inner(chunk)?;
        // `map_ok` discards the per-batch unit results; this expression is the
        // defining use of the opaque `DynamoDbSinkDeliveryFuture` type.
        add_future
            .add_future_may_await(futures.map_ok(|_: Vec<()>| ()))
            .await?;
        Ok(())
    }
}
304
/// Converts rows into DynamoDB attribute maps using the sink schema's
/// column names and types.
struct DynamoDbFormatter {
    schema: Schema,
}
308
309impl DynamoDbFormatter {
310    fn format_row(&self, row: RowRef<'_>) -> Result<HashMap<String, AttributeValue>> {
311        row.iter()
312            .zip_eq_debug((self.schema.clone()).into_fields())
313            .map(|(scalar, field)| {
314                map_data(scalar, &field.data_type()).map(|attr| (field.name, attr))
315            })
316            .collect()
317    }
318}
319
/// Convert a single RisingWave datum into a DynamoDB `AttributeValue`,
/// dispatching on the column's `DataType`.
///
/// SQL NULL becomes the DynamoDB `NULL` type regardless of column type.
/// Map-typed columns are rejected with an error.
fn map_data(scalar_ref: Option<ScalarRefImpl<'_>>, data_type: &DataType) -> Result<AttributeValue> {
    let Some(scalar_ref) = scalar_ref else {
        return Ok(AttributeValue::Null(true));
    };
    let attr = match data_type {
        // All numeric types use DynamoDB's string-encoded number type `N`.
        DataType::Int16
        | DataType::Int32
        | DataType::Int64
        | DataType::Int256
        | DataType::Float32
        | DataType::Float64
        | DataType::Decimal
        | DataType::Serial => AttributeValue::N(scalar_ref.to_text_with_type(data_type)),
        // TODO: jsonb as dynamic type (https://github.com/risingwavelabs/risingwave/issues/11699)
        // Textual and temporal types (and jsonb, for now) are sent as strings `S`.
        DataType::Varchar
        | DataType::Interval
        | DataType::Date
        | DataType::Time
        | DataType::Timestamp
        | DataType::Timestamptz
        | DataType::Jsonb => AttributeValue::S(scalar_ref.to_text_with_type(data_type)),
        DataType::Boolean => AttributeValue::Bool(scalar_ref.into_bool()),
        DataType::Bytea => AttributeValue::B(Blob::new(scalar_ref.into_bytea())),
        // Lists map recursively to DynamoDB lists `L`.
        DataType::List(datatype) => {
            let list_attr = scalar_ref
                .into_list()
                .iter()
                .map(|x| map_data(x, datatype))
                .collect::<Result<Vec<_>>>()?;
            AttributeValue::L(list_attr)
        }
        // Structs map recursively to DynamoDB maps `M`, keyed by field name.
        DataType::Struct(st) => {
            let mut map = HashMap::with_capacity(st.len());
            for (sub_datum_ref, (name, data_type)) in scalar_ref
                .into_struct()
                .iter_fields_ref()
                .zip_eq_debug(st.iter())
            {
                let attr = map_data(sub_datum_ref, data_type)?;
                map.insert(name.to_owned(), attr);
            }
            AttributeValue::M(map)
        }
        DataType::Map(_m) => {
            return Err(SinkError::DynamoDb(anyhow!("map is not supported yet")));
        }
    };
    Ok(attr)
}
369
370mod write_chunk_future {
371    use core::result;
372    use std::collections::HashMap;
373
374    use anyhow::anyhow;
375    use aws_sdk_dynamodb as dynamodb;
376    use aws_sdk_dynamodb::client::Client;
377    use aws_smithy_runtime_api::client::orchestrator::HttpResponse;
378    use dynamodb::error::SdkError;
379    use dynamodb::operation::batch_write_item::{BatchWriteItemError, BatchWriteItemOutput};
380    use dynamodb::types::{
381        AttributeValue, DeleteRequest, PutRequest, ReturnConsumedCapacity,
382        ReturnItemCollectionMetrics, WriteRequest,
383    };
384    use futures::future::{Map, TryJoinAll};
385    use futures::prelude::Future;
386    use futures::prelude::future::{FutureExt, try_join_all};
387    use itertools::Itertools;
388    use maplit::hashmap;
389
390    use super::{DynamoDbRequest, Result, SinkError};
391
392    pub type WriteChunkFuture = TryJoinAll<
393        Map<
394            impl Future<
395                Output = result::Result<
396                    BatchWriteItemOutput,
397                    SdkError<BatchWriteItemError, HttpResponse>,
398                >,
399            >,
400            impl FnOnce(
401                result::Result<BatchWriteItemOutput, SdkError<BatchWriteItemError, HttpResponse>>,
402            ) -> Result<()>,
403        >,
404    >;
405    pub struct DynamoDbPayloadWriter {
406        pub client: Client,
407        pub table: String,
408        pub dynamodb_keys: Vec<String>,
409        pub max_batch_item_nums: usize,
410    }
411
412    impl DynamoDbPayloadWriter {
413        pub fn write_one_insert(
414            &mut self,
415            item: HashMap<String, AttributeValue>,
416            request_items: &mut Vec<DynamoDbRequest>,
417        ) {
418            let put_req = PutRequest::builder().set_item(Some(item)).build().unwrap();
419            let req = WriteRequest::builder().put_request(put_req).build();
420            self.write_one_req(req, request_items);
421        }
422
423        pub fn write_one_delete(
424            &mut self,
425            key: HashMap<String, AttributeValue>,
426            request_items: &mut Vec<DynamoDbRequest>,
427        ) {
428            let key = key
429                .into_iter()
430                .filter(|(k, _)| self.dynamodb_keys.contains(k))
431                .collect();
432            let del_req = DeleteRequest::builder().set_key(Some(key)).build().unwrap();
433            let req = WriteRequest::builder().delete_request(del_req).build();
434            self.write_one_req(req, request_items);
435        }
436
437        pub fn write_one_req(
438            &mut self,
439            req: WriteRequest,
440            request_items: &mut Vec<DynamoDbRequest>,
441        ) {
442            let r_req = DynamoDbRequest {
443                inner: req,
444                key_items: self.dynamodb_keys.clone(),
445            };
446            if let Some(v) = r_req.extract_pk_values() {
447                request_items.retain(|item| {
448                    !item
449                        .extract_pk_values()
450                        .unwrap_or_default()
451                        .iter()
452                        .all(|x| v.contains(x))
453                });
454            }
455            request_items.push(r_req);
456        }
457
458        pub fn write_chunk(&mut self, request_items: Vec<DynamoDbRequest>) -> WriteChunkFuture {
459            let table = self.table.clone();
460            let chunks = request_items
461                .into_iter()
462                .map(|r| r.inner)
463                .chunks(self.max_batch_item_nums);
464            let futures = chunks.into_iter().map(|chunk| {
465                let req_items = chunk.collect();
466                let reqs = hashmap! {
467                    table.clone() => req_items,
468                };
469                self.client
470                    .batch_write_item()
471                    .set_request_items(Some(reqs))
472                    .return_consumed_capacity(ReturnConsumedCapacity::None)
473                    .return_item_collection_metrics(ReturnItemCollectionMetrics::None)
474                    .send()
475                    .map(|result| {
476                        result
477                            .map_err(|e| {
478                                SinkError::DynamoDb(
479                                    anyhow!(e).context("failed to delete item from DynamoDB sink"),
480                                )
481                            })
482                            .map(|_| ())
483                    })
484            });
485            try_join_all(futures)
486        }
487    }
488}