use core::str::FromStr;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};

use anyhow::Context as _;
use bytes::{Bytes, BytesMut};
use futures::Stream;
use itertools::Itertools;
use pgwire::pg_field_descriptor::PgFieldDescriptor;
use pgwire::pg_response::RowSetResult;
use pgwire::pg_server::BoxedError;
use pgwire::types::{Format, FormatIterator, Row};
use pin_project_lite::pin_project;
use risingwave_common::array::DataChunk;
use risingwave_common::catalog::Field;
use risingwave_common::row::Row as _;
use risingwave_common::types::{
    DataType, Interval, ScalarRefImpl, Timestamptz, write_date_time_tz,
};
use risingwave_common::util::epoch::Epoch;
use risingwave_common::util::iter_util::ZipEqFast;
use risingwave_connector::sink::elasticsearch_opensearch::elasticsearch::ES_SINK;
use risingwave_connector::source::KAFKA_CONNECTOR;
use risingwave_connector::source::iceberg::ICEBERG_CONNECTOR;
use risingwave_pb::catalog::connection_params::PbConnectionType;
use risingwave_sqlparser::ast::{
    CompatibleFormatEncode, FormatEncodeOptions, ObjectName, Query, Select, SelectItem, SetExpr,
    TableFactor, TableWithJoins,
};
use thiserror_ext::AsReport;
use tokio::select;
use tokio::time::{Duration, sleep};

use crate::catalog::root_catalog::SchemaPath;
use crate::error::ErrorCode::ProtocolError;
use crate::error::{ErrorCode, Result as RwResult, RwError};
use crate::session::{SessionImpl, current};
use crate::{Binder, HashSet, TableCatalog};

pin_project! {
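    /// Adapter that converts a stream of [`DataChunk`]s into a stream of pgwire row sets,
    /// formatting each value according to its column type, the requested output [`Format`]s,
    /// and the captured [`StaticSessionData`].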
    pub struct DataChunkToRowSetAdapter<VS>
    where
        VS: Stream<Item = Result<DataChunk, BoxedError>>,
    {
        #[pin]
        chunk_stream: VS,
        column_types: Vec<DataType>,
        pub formats: Vec<Format>,
        session_data: StaticSessionData,
    }
}

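/// A snapshot of the session configuration needed for formatting values (currently only the
/// time zone).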
pub struct StaticSessionData {
    pub timezone: String,
}

impl<VS> DataChunkToRowSetAdapter<VS>
where
    VS: Stream<Item = Result<DataChunk, BoxedError>>,
{
    pub fn new(
        chunk_stream: VS,
        column_types: Vec<DataType>,
        formats: Vec<Format>,
        session: Arc<SessionImpl>,
    ) -> Self {
        let session_data = StaticSessionData {
            timezone: session.config().timezone(),
        };
        Self {
            chunk_stream,
            column_types,
            formats,
            session_data,
        }
    }
}

impl<VS> Stream for DataChunkToRowSetAdapter<VS>
where
    VS: Stream<Item = Result<DataChunk, BoxedError>>,
{
    type Item = RowSetResult;

    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let mut this = self.project();
        match this.chunk_stream.as_mut().poll_next(cx) {
            Poll::Pending => Poll::Pending,
            Poll::Ready(chunk) => match chunk {
                Some(chunk_result) => match chunk_result {
                    Ok(chunk) => Poll::Ready(Some(
                        to_pg_rows(this.column_types, chunk, this.formats, this.session_data)
                            .map_err(|err| err.into()),
                    )),
                    Err(err) => Poll::Ready(Some(Err(err))),
                },
                None => Poll::Ready(None),
            },
        }
    }
}

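/// Formats a single scalar value for the Postgres wire protocol in either text or binary form.
/// In text form, `Timestamptz` values are rendered in the session's configured time zone.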
pub fn pg_value_format(
    data_type: &DataType,
    d: ScalarRefImpl<'_>,
    format: Format,
    session_data: &StaticSessionData,
) -> RwResult<Bytes> {
    match format {
        Format::Text => {
            if *data_type == DataType::Timestamptz {
                Ok(timestamptz_to_string_with_session_data(d, session_data))
            } else {
                Ok(d.text_format(data_type).into())
            }
        }
        Format::Binary => Ok(d
            .binary_format(data_type)
            .context("failed to format binary value")?),
    }
}

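/// Renders a `Timestamptz` scalar as text in the session's configured time zone.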
fn timestamptz_to_string_with_session_data(
    d: ScalarRefImpl<'_>,
    session_data: &StaticSessionData,
) -> Bytes {
    let tz = d.into_timestamptz();
    let time_zone = Timestamptz::lookup_time_zone(&session_data.timezone).unwrap();
    let instant_local = tz.to_datetime_in_zone(time_zone);
    let mut result_string = BytesMut::new();
    write_date_time_tz(instant_local, &mut result_string).unwrap();
    result_string.into()
}

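/// Converts a [`DataChunk`] into pgwire [`Row`]s, formatting each column according to `formats`
/// as interpreted by [`FormatIterator`] (e.g. an empty slice means text format for all columns).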
fn to_pg_rows(
    column_types: &[DataType],
    chunk: DataChunk,
    formats: &[Format],
    session_data: &StaticSessionData,
) -> RwResult<Vec<Row>> {
    assert_eq!(chunk.dimension(), column_types.len());
    if cfg!(debug_assertions) {
        let chunk_data_types = chunk.data_types();
        for (ty1, ty2) in chunk_data_types.iter().zip_eq_fast(column_types) {
            debug_assert!(
                ty1.equals_datatype(ty2),
                "chunk_data_types: {chunk_data_types:?}, column_types: {column_types:?}"
            )
        }
    }

    chunk
        .rows()
        .map(|r| {
            let format_iter = FormatIterator::new(formats, chunk.dimension())
                .map_err(ErrorCode::InternalError)?;
            let row = r
                .iter()
                .zip_eq_fast(column_types)
                .zip_eq_fast(format_iter)
                .map(|((data, t), format)| match data {
                    Some(data) => Some(pg_value_format(t, data, format, session_data)).transpose(),
                    None => Ok(None),
                })
                .try_collect()?;
            Ok(Row::new(row))
        })
        .try_collect()
}

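/// Converts a RisingWave [`Field`] into a pgwire [`PgFieldDescriptor`] (name, type OID and
/// type length).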
pub fn to_pg_field(f: &Field) -> PgFieldDescriptor {
    PgFieldDescriptor::new(
        f.name.clone(),
        f.data_type().to_oid(),
        f.data_type().type_len(),
    )
}

#[easy_ext::ext(SourceSchemaCompatExt)]
impl CompatibleFormatEncode {
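    /// Converts the legacy `ROW FORMAT` clause into the `FORMAT ... ENCODE ...` form, emitting a
    /// deprecation notice to the user when the legacy syntax is encountered.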
    pub fn into_v2_with_warning(self) -> FormatEncodeOptions {
        match self {
            CompatibleFormatEncode::RowFormat(inner) => {
                current::notice_to_user(
                    "RisingWave will stop supporting the \"ROW FORMAT\" syntax in future versions. Please use the \"FORMAT ... ENCODE ...\" syntax instead.",
                );
                inner.into_format_encode_v2()
            }
            CompatibleFormatEncode::V2(inner) => inner,
        }
    }
}

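/// Builds the AST of `SELECT * FROM <from_name>`.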
pub fn gen_query_from_table_name(from_name: ObjectName) -> Query {
    let table_factor = TableFactor::Table {
        name: from_name,
        alias: None,
        as_of: None,
    };
    let from = vec![TableWithJoins {
        relation: table_factor,
        joins: vec![],
    }];
    let select = Select {
        from,
        projection: vec![SelectItem::Wildcard(None)],
        ..Default::default()
    };
    let body = SetExpr::Select(Box::new(select));
    Query {
        with: None,
        body,
        order_by: vec![],
        limit: None,
        offset: None,
        fetch: None,
    }
}

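/// Converts a Unix timestamp in milliseconds into the [`Epoch`]-encoded `u64` used by the log
/// store.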
pub fn convert_unix_millis_to_logstore_u64(unix_millis: u64) -> u64 {
    Epoch::from_unix_millis(unix_millis).0
}

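/// Converts an [`Epoch`]-encoded log store `u64` back into a Unix timestamp in milliseconds.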
pub fn convert_logstore_u64_to_unix_millis(logstore_u64: u64) -> u64 {
    Epoch::from(logstore_u64).as_unix_millis()
}

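/// Parses an interval string and returns its length in whole seconds, truncating any fractional
/// part.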
pub fn convert_interval_to_u64_seconds(interval: &String) -> RwResult<u64> {
    let seconds = (Interval::from_str(interval)
        .map_err(|err| {
            ErrorCode::InternalError(format!(
                "failed to convert interval to seconds, please check the interval format: {:?}",
                err.to_report_string()
            ))
        })?
        .epoch_in_micros()
        / 1000000) as u64;
    Ok(seconds)
}

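/// Returns a [`ProtocolError`] if `connection_type` is not one of the `allowed_types`.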
pub fn ensure_connection_type_allowed(
    connection_type: PbConnectionType,
    allowed_types: &HashSet<PbConnectionType>,
) -> RwResult<()> {
    if !allowed_types.contains(&connection_type) {
        return Err(RwError::from(ProtocolError(format!(
            "connection type {:?} is not allowed, allowed types: {:?}",
            connection_type, allowed_types
        ))));
    }
    Ok(())
}

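/// Maps a connection type to the name of the connector it backs. Only connection types that have
/// a corresponding connector may be passed; any other variant is considered unreachable.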
fn connection_type_to_connector(connection_type: &PbConnectionType) -> &str {
    match connection_type {
        PbConnectionType::Kafka => KAFKA_CONNECTOR,
        PbConnectionType::Iceberg => ICEBERG_CONNECTOR,
        PbConnectionType::Elasticsearch => ES_SINK,
        _ => unreachable!(),
    }
}

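/// Returns a [`ProtocolError`] if the connector name does not match the connector implied by the
/// connection type.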
pub fn check_connector_match_connection_type(
    connector: &str,
    connection_type: &PbConnectionType,
) -> RwResult<()> {
    if !connector.eq(connection_type_to_connector(connection_type)) {
        return Err(RwError::from(ProtocolError(format!(
            "connector {} and connection type {:?} are not compatible",
            connector, connection_type
        ))));
    }
    Ok(())
}

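/// Resolves a (possibly schema-qualified) table name against the current database and search
/// path, returning the table catalog together with the name of the schema it was found in.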
pub fn get_table_catalog_by_table_name(
    session: &SessionImpl,
    table_name: &ObjectName,
) -> RwResult<(Arc<TableCatalog>, String)> {
    let db_name = &session.database();
    let (schema_name, real_table_name) =
        Binder::resolve_schema_qualified_name(db_name, table_name)?;
    let search_path = session.config().search_path();
    let user_name = &session.user_name();

    let schema_path = SchemaPath::new(schema_name.as_deref(), &search_path, user_name);
    let reader = session.env().catalog_reader().read_guard();
    let (table, schema_name) =
        reader.get_created_table_by_name(db_name, schema_path, &real_table_name)?;

    Ok((table.clone(), schema_name.to_owned()))
}

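/// Awaits `operation_fut`; if it has not completed within the session's
/// `slow_ddl_notification_secs`, sends a notice to the user suggesting `RECOVER` and then keeps
/// waiting for the result. A timeout of 0 disables the notification.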
pub async fn execute_with_long_running_notification<F, T>(
    operation_fut: F,
    session: &SessionImpl,
    operation_name: &str,
) -> RwResult<T>
where
    F: std::future::Future<Output = RwResult<T>>,
{
    let notify_timeout_secs = session.config().slow_ddl_notification_secs();

    if notify_timeout_secs == 0 {
        return operation_fut.await;
    }

    let notify_fut = sleep(Duration::from_secs(notify_timeout_secs as u64));
    tokio::pin!(operation_fut);

    select! {
        _ = notify_fut => {
            session.notice_to_user(format!(
                "{} has taken more than {} secs, likely due to high barrier latency.\n\
                You may trigger cluster recovery to let {} take effect immediately.\n\
                Run RECOVER in a separate session to trigger recovery.\n\
                See: https://docs.risingwave.com/sql/commands/sql-recover#recover",
                operation_name, notify_timeout_secs, operation_name
            ));
            operation_fut.await
        }
        result = &mut operation_fut => {
            result
        }
    }
}

#[cfg(test)]
mod tests {
    use postgres_types::{ToSql, Type};
    use risingwave_common::array::*;

    use super::*;

    #[test]
    fn test_to_pg_field() {
        let field = Field::with_name(DataType::Int32, "v1");
        let pg_field = to_pg_field(&field);
        assert_eq!(pg_field.get_name(), "v1");
        assert_eq!(pg_field.get_type_oid(), DataType::Int32.to_oid());
    }

    #[test]
    fn test_to_pg_rows() {
        let chunk = DataChunk::from_pretty(
            "i I f    T
             1 6 6.01 aaa
             2 . .    .
             3 7 7.01 vvv
             4 . .    . ",
        );
        let static_session = StaticSessionData {
            timezone: "UTC".into(),
        };
        let rows = to_pg_rows(
            &[
                DataType::Int32,
                DataType::Int64,
                DataType::Float32,
                DataType::Varchar,
            ],
            chunk,
            &[],
            &static_session,
        );
        let expected: Vec<Vec<Option<Bytes>>> = vec![
            vec![
                Some("1".into()),
                Some("6".into()),
                Some("6.01".into()),
                Some("aaa".into()),
            ],
            vec![Some("2".into()), None, None, None],
            vec![
                Some("3".into()),
                Some("7".into()),
                Some("7.01".into()),
                Some("vvv".into()),
            ],
            vec![Some("4".into()), None, None, None],
        ];
        let vec = rows
            .unwrap()
            .into_iter()
            .map(|r| r.values().iter().cloned().collect_vec())
            .collect_vec();

        assert_eq!(vec, expected);
    }

    #[test]
    fn test_to_pg_rows_mix_format() {
        let chunk = DataChunk::from_pretty(
            "i I f    T
             1 6 6.01 aaa
            ",
        );
        let static_session = StaticSessionData {
            timezone: "UTC".into(),
        };
        let rows = to_pg_rows(
            &[
                DataType::Int32,
                DataType::Int64,
                DataType::Float32,
                DataType::Varchar,
            ],
            chunk,
            &[Format::Binary, Format::Binary, Format::Binary, Format::Text],
            &static_session,
        );
        let mut raw_params = vec![BytesMut::new(); 3];
        1_i32.to_sql(&Type::ANY, &mut raw_params[0]).unwrap();
        6_i64.to_sql(&Type::ANY, &mut raw_params[1]).unwrap();
        6.01_f32.to_sql(&Type::ANY, &mut raw_params[2]).unwrap();
        let raw_params = raw_params
            .into_iter()
            .map(|b| b.freeze())
            .collect::<Vec<_>>();
        let expected: Vec<Vec<Option<Bytes>>> = vec![vec![
            Some(raw_params[0].clone()),
            Some(raw_params[1].clone()),
            Some(raw_params[2].clone()),
            Some("aaa".into()),
        ]];
        let vec = rows
            .unwrap()
            .into_iter()
            .map(|r| r.values().iter().cloned().collect_vec())
            .collect_vec();

        assert_eq!(vec, expected);
    }

    #[test]
    fn test_value_format() {
        use {DataType as T, ScalarRefImpl as S};
        let static_session = StaticSessionData {
            timezone: "UTC".into(),
        };

        let f = |t, d, f| pg_value_format(t, d, f, &static_session).unwrap();
        assert_eq!(&f(&T::Float32, S::Float32(1_f32.into()), Format::Text), "1");
        assert_eq!(
            &f(&T::Float32, S::Float32(f32::NAN.into()), Format::Text),
            "NaN"
        );
        assert_eq!(
            &f(&T::Float64, S::Float64(f64::NAN.into()), Format::Text),
            "NaN"
        );
        assert_eq!(
            &f(&T::Float32, S::Float32(f32::INFINITY.into()), Format::Text),
            "Infinity"
        );
        assert_eq!(
            &f(
                &T::Float32,
                S::Float32(f32::NEG_INFINITY.into()),
                Format::Text
            ),
            "-Infinity"
        );
        assert_eq!(
            &f(&T::Float64, S::Float64(f64::INFINITY.into()), Format::Text),
            "Infinity"
        );
        assert_eq!(
            &f(
                &T::Float64,
                S::Float64(f64::NEG_INFINITY.into()),
                Format::Text
            ),
            "-Infinity"
        );
        assert_eq!(&f(&T::Boolean, S::Bool(true), Format::Text), "t");
        assert_eq!(&f(&T::Boolean, S::Bool(false), Format::Text), "f");
        assert_eq!(
            &f(
                &T::Timestamptz,
                S::Timestamptz(Timestamptz::from_micros(-1)),
                Format::Text
            ),
            "1969-12-31 23:59:59.999999+00:00"
        );
    }
}