risingwave_frontend/handler/query.rs

// Copyright 2025 RisingWave Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashSet;
use std::sync::Arc;
use std::time::Instant;

use itertools::Itertools;
use pgwire::pg_field_descriptor::PgFieldDescriptor;
use pgwire::pg_response::{PgResponse, StatementType};
use pgwire::types::Format;
use risingwave_batch::worker_manager::worker_node_manager::WorkerNodeSelector;
use risingwave_common::bail_not_implemented;
use risingwave_common::catalog::{FunctionId, Schema};
use risingwave_common::id::ObjectId;
use risingwave_common::session_config::QueryMode;
use risingwave_common::types::{DataType, Datum};
use risingwave_sqlparser::ast::{SetExpr, Statement};

use super::extended_handle::{PortalResult, PrepareStatement, PreparedResult};
use super::{PgResponseStream, RwPgResponse, create_mv, declare_cursor};
use crate::binder::{Binder, BoundCreateView, BoundStatement};
#[cfg(feature = "datafusion")]
use crate::datafusion::DfBatchQueryPlanResult;
use crate::error::{ErrorCode, Result, RwError};
use crate::handler::HandlerArgs;
use crate::handler::flush::do_flush;
use crate::handler::util::{DataChunkToRowSetAdapter, to_pg_field};
use crate::optimizer::plan_node::{BatchPlanRef, Explain};
use crate::optimizer::{
    BatchPlanRoot, ExecutionModeDecider, OptimizerContext, OptimizerContextRef,
    RelationCollectorVisitor, SysTableVisitor,
};
use crate::planner::Planner;
use crate::scheduler::plan_fragmenter::Query;
use crate::scheduler::{
    BatchPlanFragmenter, DistributedQueryStream, ExecutionContext, ExecutionContextRef,
    LocalQueryExecution, LocalQueryStream,
};
use crate::session::SessionImpl;

/// Choice between running RisingWave's own batch executor (Rw) or a `DataFusion` (DF) logical plan.
pub enum BatchPlanChoice {
    Rw(RwBatchQueryPlanResult),
    #[cfg(feature = "datafusion")]
    Df(DfBatchQueryPlanResult),
}

impl BatchPlanChoice {
    pub fn unwrap_rw(self) -> Result<RwBatchQueryPlanResult> {
        match self {
            BatchPlanChoice::Rw(result) => Ok(result),
            #[cfg(feature = "datafusion")]
            BatchPlanChoice::Df(..) => {
                risingwave_common::bail!(
                    "Expected RisingWave plan in BatchPlanChoice, but got DataFusion plan"
                )
            }
        }
    }
}
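
// A minimal consumer sketch (hypothetical caller, not part of this module): call sites that
// can only run RisingWave plans use `unwrap_rw()`, while feature-aware call sites match both
// arms explicitly:
//
//     match gen_batch_plan_by_statement(&session, context, stmt)? {
//         BatchPlanChoice::Rw(rw_plan) => { /* fragment and schedule on RisingWave */ }
//         #[cfg(feature = "datafusion")]
//         BatchPlanChoice::Df(df_plan) => { /* hand off to the DataFusion runtime */ }
//     }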

pub async fn handle_query(
    handler_args: HandlerArgs,
    stmt: Statement,
    formats: Vec<Format>,
) -> Result<RwPgResponse> {
    let session = handler_args.session.clone();
    let context = OptimizerContext::from_handler_args(handler_args);

    #[cfg(feature = "datafusion")]
    {
        // We construct the future manually here to make sure this async function is `Send`:
        // `BatchPlanChoice` is non-`Send`, and Rust cannot prove that it is dropped before the
        // await point. See https://github.com/rust-lang/rust/issues/128095 for details.
        use futures::FutureExt;

        use crate::datafusion::execute_datafusion_plan;

        let future = match gen_batch_plan_by_statement(&session, context.into(), stmt)? {
            BatchPlanChoice::Rw(plan_result) => {
                let plan_fragmenter_result = risingwave_expr::expr_context::TIME_ZONE::sync_scope(
                    session.config().timezone(),
                    || gen_batch_plan_fragmenter(&session, plan_result),
                )?;
                execute_risingwave_plan(session, plan_fragmenter_result, formats).left_future()
            }
            BatchPlanChoice::Df(plan_result) => {
                execute_datafusion_plan(session, plan_result, formats).right_future()
            }
        };
        future.await
    }

    #[cfg(not(feature = "datafusion"))]
    {
        let future = match gen_batch_plan_by_statement(&session, context.into(), stmt)? {
            BatchPlanChoice::Rw(plan_result) => {
                let plan_fragmenter_result = risingwave_expr::expr_context::TIME_ZONE::sync_scope(
                    session.config().timezone(),
                    || gen_batch_plan_fragmenter(&session, plan_result),
                )?;
                execute_risingwave_plan(session, plan_fragmenter_result, formats)
            }
        };
        future.await
    }
}
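
// A self-contained sketch of the `left_future`/`right_future` pattern used above: the two
// branches produce futures of different concrete types, and `futures::FutureExt` wraps them
// into a single `Either` type so that one `.await` covers both. Illustrative only;
// `fast_path` and `slow_path` are made-up names.
//
//     use futures::FutureExt;
//
//     async fn fast_path() -> i32 { 1 }
//     async fn slow_path() -> i32 { 2 }
//
//     async fn dispatch(fast: bool) -> i32 {
//         let fut = if fast {
//             fast_path().left_future()
//         } else {
//             slow_path().right_future()
//         };
//         fut.await
//     }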

fn handle_parse_inner(binder: Binder, statement: Statement) -> Result<PrepareStatement> {
    let bound_result = gen_bound(binder, statement.clone())?;

    Ok(PrepareStatement::Prepared(PreparedResult {
        statement,
        bound_result,
    }))
}

pub fn handle_parse_for_batch(
    handler_args: HandlerArgs,
    statement: Statement,
    specified_param_types: Vec<Option<DataType>>,
) -> Result<PrepareStatement> {
    let binder = Binder::new_for_batch(&handler_args.session)
        .with_specified_params_types(specified_param_types);
    handle_parse_inner(binder, statement)
}

pub fn handle_parse_for_stream(
    handler_args: HandlerArgs,
    statement: Statement,
    specified_param_types: Vec<Option<DataType>>,
) -> Result<PrepareStatement> {
    let binder = Binder::new_for_stream(&handler_args.session)
        .with_specified_params_types(specified_param_types);
    handle_parse_inner(binder, statement)
}
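
// In the extended query protocol, these parse handlers pair with `handle_execute` below.
// A rough flow sketch (session setup and the Bind step elided; names are illustrative):
//
//     let prepared = handle_parse_for_batch(handler_args.clone(), stmt, param_types)?;
//     // ... the client's Bind message turns `prepared` into a `PortalResult` ...
//     let response = handle_execute(handler_args, portal).await?;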

/// Execute a "Portal", which is a prepared statement with bound parameters.
pub async fn handle_execute(
    handler_args: HandlerArgs,
    portal: PortalResult,
) -> Result<RwPgResponse> {
    let PortalResult {
        bound_result,
        result_formats,
        statement,
    } = portal;
    match statement {
        Statement::Query(_)
        | Statement::Insert { .. }
        | Statement::Delete { .. }
        | Statement::Update { .. } => {
            // Execute a batch query.
            let session = handler_args.session.clone();
            let plan_fragmenter_result = {
                let context = OptimizerContext::from_handler_args(handler_args);
                let plan_result =
                    gen_batch_query_plan(&session, context.into(), bound_result)?.unwrap_rw()?;
                // The time zone is used by Hummock time-travel queries.
                risingwave_expr::expr_context::TIME_ZONE::sync_scope(
                    session.config().timezone(),
                    || gen_batch_plan_fragmenter(&session, plan_result),
                )?
            };
            execute_risingwave_plan(session, plan_fragmenter_result, result_formats).await
        }
        Statement::CreateView { materialized, .. } if materialized => {
            // Execute a CREATE MATERIALIZED VIEW.
            let BoundResult {
                bound,
                dependent_relations,
                dependent_udfs,
                ..
            } = bound_result;
            let create_mv = if let BoundStatement::CreateView(create_mv) = bound {
                create_mv
            } else {
                unreachable!("expected a BoundStatement::CreateView")
            };
            let BoundCreateView {
                or_replace,
                materialized: _,
                if_not_exists,
                name,
                columns,
                query,
                emit_mode,
                with_options,
            } = *create_mv;
            if or_replace {
                bail_not_implemented!("CREATE OR REPLACE VIEW");
            }

            // Hack: replace the `with_options` with the bound ones.
            let handler_args = HandlerArgs {
                session: handler_args.session.clone(),
                sql: handler_args.sql.clone(),
                normalized_sql: handler_args.normalized_sql.clone(),
                with_options: crate::WithOptions::try_from(with_options.as_slice())?,
            };

            create_mv::handle_create_mv_bound(
                handler_args,
                if_not_exists,
                name,
                *query,
                dependent_relations,
                dependent_udfs,
                columns,
                emit_mode,
            )
            .await
        }
        Statement::DeclareCursor { stmt } => match stmt.declare_cursor {
            risingwave_sqlparser::ast::DeclareCursor::Query(_) => {
                let session = handler_args.session.clone();
                let plan_fragmenter_result = {
                    let context = OptimizerContext::from_handler_args(handler_args.clone());
                    let plan_result = gen_batch_query_plan(&session, context.into(), bound_result)?
                        .unwrap_rw()?;
                    gen_batch_plan_fragmenter(&session, plan_result)?
                };
                declare_cursor::handle_bound_declare_query_cursor(
                    handler_args,
                    stmt.cursor_name,
                    plan_fragmenter_result,
                )
                .await
            }
            risingwave_sqlparser::ast::DeclareCursor::Subscription(sub_name, rw_timestamp) => {
                declare_cursor::handle_declare_subscription_cursor(
                    handler_args,
                    sub_name,
                    stmt.cursor_name,
                    rw_timestamp,
                )
                .await
            }
        },
        _ => unreachable!(),
    }
}

pub fn gen_batch_plan_by_statement(
    session: &SessionImpl,
    context: OptimizerContextRef,
    stmt: Statement,
) -> Result<BatchPlanChoice> {
    let binder = Binder::new_for_batch(session);
    let bound_result = gen_bound(binder, stmt)?;
    gen_batch_query_plan(session, context, bound_result)
}

#[derive(Clone)]
pub struct BoundResult {
    pub(crate) stmt_type: StatementType,
    pub(crate) must_dist: bool,
    pub(crate) bound: BoundStatement,
    pub(crate) param_types: Vec<DataType>,
    pub(crate) parsed_params: Option<Vec<Datum>>,
    pub(crate) dependent_relations: HashSet<ObjectId>,
    /// TODO(rc): merge with `dependent_relations`
    pub(crate) dependent_udfs: HashSet<FunctionId>,
}

fn gen_bound(mut binder: Binder, stmt: Statement) -> Result<BoundResult> {
    let stmt_type = StatementType::infer_from_statement(&stmt)
        .map_err(|err| RwError::from(ErrorCode::InvalidInputSyntax(err)))?;
    let must_dist = must_run_in_distributed_mode(&stmt)?;

    let bound = binder.bind(stmt)?;

    Ok(BoundResult {
        stmt_type,
        must_dist,
        bound,
        param_types: binder.export_param_types()?,
        parsed_params: None,
        dependent_relations: binder.included_relations().clone(),
        dependent_udfs: binder.included_udfs().clone(),
    })
}

pub struct RwBatchQueryPlanResult {
    pub(crate) plan: BatchPlanRef,
    pub(crate) query_mode: QueryMode,
    pub(crate) schema: Schema,
    pub(crate) stmt_type: StatementType,
    // Note that these relations are only resolved in the binding phase, so they may be a
    // subset of the final dependencies, e.g. the final set may contain additional implicit
    // dependencies on indexes.
    pub(crate) dependent_relations: Vec<ObjectId>,
}

fn gen_batch_query_plan(
    session: &SessionImpl,
    context: OptimizerContextRef,
    bind_result: BoundResult,
) -> Result<BatchPlanChoice> {
    let BoundResult {
        stmt_type,
        must_dist,
        bound,
        dependent_relations,
        ..
    } = bind_result;

    let mut planner = if matches!(bound, BoundStatement::Query(_)) {
        Planner::new_for_batch_dql(context)
    } else {
        Planner::new_for_batch(context)
    };

    let logical = planner.plan(bound)?;
    let schema = logical.schema();
    let optimized_logical = logical.gen_optimized_logical_plan_for_batch()?;

    #[cfg(feature = "datafusion")]
    {
        use crate::optimizer::{LogicalIcebergScanExt, LogicalPlanToDataFusionExt};

        if session.config().enable_datafusion_engine()
            && optimized_logical.plan.contains_iceberg_scan()
        {
            let df_plan = optimized_logical.plan.to_datafusion_logical_plan()?;
            tracing::debug!(
                "Converted RisingWave logical plan to DataFusion plan:\nRisingWave Plan: {:?}\nDataFusion Plan: {:?}",
                optimized_logical,
                df_plan
            );
            return Ok(BatchPlanChoice::Df(DfBatchQueryPlanResult {
                plan: df_plan,
                schema,
                stmt_type,
            }));
        }
    }

    let batch_plan = optimized_logical.gen_batch_plan()?;

    let dependent_relations =
        RelationCollectorVisitor::collect_with(dependent_relations, batch_plan.plan.clone());

    let must_local = must_run_in_local_mode(&batch_plan);

    let query_mode = match (must_dist, must_local) {
        (true, true) => {
            return Err(ErrorCode::InternalError(
                "the query is forced to both local and distributed mode by the optimizer"
                    .to_owned(),
            )
            .into());
        }
        (true, false) => QueryMode::Distributed,
        (false, true) => QueryMode::Local,
        (false, false) => match session.config().query_mode() {
            QueryMode::Auto => determine_query_mode(&batch_plan),
            QueryMode::Local => QueryMode::Local,
            QueryMode::Distributed => QueryMode::Distributed,
        },
    };

    let physical = match query_mode {
        QueryMode::Auto => unreachable!(),
        QueryMode::Local => batch_plan.gen_batch_local_plan()?,
        QueryMode::Distributed => batch_plan.gen_batch_distributed_plan()?,
    };

    let result = RwBatchQueryPlanResult {
        plan: physical,
        query_mode,
        schema,
        stmt_type,
        dependent_relations: dependent_relations.into_iter().collect_vec(),
    };
    Ok(BatchPlanChoice::Rw(result))
}
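
// Informal recap of the planning pipeline above (descriptive comment, not code):
//   BoundStatement
//     -> logical plan -> optimized logical plan
//     -> DataFusion logical plan (only if the feature is enabled and the plan scans Iceberg)
//     -> batch plan -> local or distributed physical plan, per the query-mode decision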

fn must_run_in_distributed_mode(stmt: &Statement) -> Result<bool> {
    fn is_insert_using_select(stmt: &Statement) -> bool {
        fn has_select_query(set_expr: &SetExpr) -> bool {
            match set_expr {
                SetExpr::Select(_) => true,
                SetExpr::Query(query) => has_select_query(&query.body),
                SetExpr::SetOperation { left, right, .. } => {
                    has_select_query(left) || has_select_query(right)
                }
                SetExpr::Values(_) => false,
            }
        }

        matches!(
            stmt,
            Statement::Insert { source, .. } if has_select_query(&source.body)
        )
    }

    let stmt_type = StatementType::infer_from_statement(stmt)
        .map_err(|err| RwError::from(ErrorCode::InvalidInputSyntax(err)))?;

    Ok(matches!(
        stmt_type,
        StatementType::UPDATE
            | StatementType::DELETE
            | StatementType::UPDATE_RETURNING
            | StatementType::DELETE_RETURNING
    ) || is_insert_using_select(stmt))
}
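
// Illustrative SQL showing the distinction drawn above (examples, not exhaustive):
//
//     INSERT INTO t VALUES (1), (2);    -- pure VALUES source: not forced to distributed mode
//     INSERT INTO t SELECT v FROM s;    -- SELECT source: forced to distributed mode
//     UPDATE t SET v = v + 1;           -- UPDATE/DELETE (and *_RETURNING): forced to distributed mode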

fn must_run_in_local_mode(batch_plan: &BatchPlanRoot) -> bool {
    SysTableVisitor::has_sys_table(batch_plan)
}

fn determine_query_mode(batch_plan: &BatchPlanRoot) -> QueryMode {
    if ExecutionModeDecider::run_in_local_mode(batch_plan) {
        QueryMode::Local
    } else {
        QueryMode::Distributed
    }
}

pub struct BatchPlanFragmenterResult {
    pub(crate) plan_fragmenter: BatchPlanFragmenter,
    pub(crate) query_mode: QueryMode,
    pub(crate) schema: Schema,
    pub(crate) stmt_type: StatementType,
}

pub fn gen_batch_plan_fragmenter(
    session: &SessionImpl,
    plan_result: RwBatchQueryPlanResult,
) -> Result<BatchPlanFragmenterResult> {
    let RwBatchQueryPlanResult {
        plan,
        query_mode,
        schema,
        stmt_type,
        ..
    } = plan_result;

    tracing::trace!(
        "Generated query plan: {:?}, query_mode: {:?}",
        plan.explain_to_string(),
        query_mode
    );
    let worker_node_manager_reader = WorkerNodeSelector::new(
        session.env().worker_node_manager_ref(),
        session.is_barrier_read(),
    );
    let plan_fragmenter = BatchPlanFragmenter::new(
        worker_node_manager_reader,
        session.env().catalog_reader().clone(),
        session.config().batch_parallelism().0,
        plan,
    )?;

    Ok(BatchPlanFragmenterResult {
        plan_fragmenter,
        query_mode,
        schema,
        stmt_type,
    })
}

pub async fn create_stream(
    session: Arc<SessionImpl>,
    plan_fragmenter_result: BatchPlanFragmenterResult,
    formats: Vec<Format>,
) -> Result<(PgResponseStream, Vec<PgFieldDescriptor>)> {
    let BatchPlanFragmenterResult {
        plan_fragmenter,
        query_mode,
        schema,
        stmt_type,
        ..
    } = plan_fragmenter_result;

    let mut can_timeout_cancel = true;
    // Acquire the write guard for DML statements.
    match stmt_type {
        StatementType::INSERT
        | StatementType::INSERT_RETURNING
        | StatementType::DELETE
        | StatementType::DELETE_RETURNING
        | StatementType::UPDATE
        | StatementType::UPDATE_RETURNING => {
            session.txn_write_guard()?;
            can_timeout_cancel = false;
        }
        _ => {}
    }

    let query = plan_fragmenter.generate_complete_query().await?;
    tracing::trace!("Generated query after plan fragmenter: {:?}", &query);

    let pg_descs = schema
        .fields()
        .iter()
        .map(to_pg_field)
        .collect::<Vec<PgFieldDescriptor>>();
    let column_types = schema.fields().iter().map(|f| f.data_type()).collect_vec();

    let row_stream = match query_mode {
        QueryMode::Auto => unreachable!(),
        // Local mode does not support cancelling tasks.
        QueryMode::Local => PgResponseStream::LocalQuery(DataChunkToRowSetAdapter::new(
            local_execute(session.clone(), query, can_timeout_cancel).await?,
            column_types,
            formats,
            session.clone(),
        )),
        QueryMode::Distributed => {
            PgResponseStream::DistributedQuery(DataChunkToRowSetAdapter::new(
                distribute_execute(session.clone(), query, can_timeout_cancel).await?,
                column_types,
                formats,
                session.clone(),
            ))
        }
    };

    Ok((row_stream, pg_descs))
}

async fn execute_risingwave_plan(
    session: Arc<SessionImpl>,
    plan_fragmenter_result: BatchPlanFragmenterResult,
    formats: Vec<Format>,
) -> Result<RwPgResponse> {
    // Used for formatting the returned row count.
    let first_field_format = formats.first().copied().unwrap_or(Format::Text);
    let query_mode = plan_fragmenter_result.query_mode;
    let stmt_type = plan_fragmenter_result.stmt_type;

    let query_start_time = Instant::now();
    let (row_stream, pg_descs) =
        create_stream(session.clone(), plan_fragmenter_result, formats).await?;

    // We need to do some post work after the query is finished and before the `Complete`
    // response is sent. This is achieved by the `callback` in `PgResponse`.
    let callback = async move {
        // Implicitly flush the writes.
        if session.config().implicit_flush() && stmt_type.is_dml() {
            do_flush(&session).await?;
        }

        // Update metrics.
        match query_mode {
            QueryMode::Auto => unreachable!(),
            QueryMode::Local => {
                session
                    .env()
                    .frontend_metrics
                    .latency_local_execution
                    .observe(query_start_time.elapsed().as_secs_f64());

                session
                    .env()
                    .frontend_metrics
                    .query_counter_local_execution
                    .inc();
            }
            QueryMode::Distributed => {
                session
                    .env()
                    .query_manager()
                    .query_metrics
                    .query_latency
                    .observe(query_start_time.elapsed().as_secs_f64());

                session
                    .env()
                    .query_manager()
                    .query_metrics
                    .completed_query_counter
                    .inc();
            }
        }

        Ok(())
    };

    Ok(PgResponse::builder(stmt_type)
        .row_cnt_format_opt(Some(first_field_format))
        .values(row_stream, pg_descs)
        .callback(callback)
        .into())
}

pub async fn distribute_execute(
    session: Arc<SessionImpl>,
    query: Query,
    can_timeout_cancel: bool,
) -> Result<DistributedQueryStream> {
    let timeout = if cfg!(madsim) {
        None
    } else if can_timeout_cancel {
        Some(session.statement_timeout())
    } else {
        None
    };
    let execution_context: ExecutionContextRef =
        ExecutionContext::new(session.clone(), timeout).into();
    let query_manager = session.env().query_manager().clone();

    query_manager
        .schedule(execution_context, query)
        .await
        .map_err(|err| err.into())
}

pub async fn local_execute(
    session: Arc<SessionImpl>,
    mut query: Query,
    can_timeout_cancel: bool,
) -> Result<LocalQueryStream> {
    let timeout = if cfg!(madsim) {
        None
    } else if can_timeout_cancel {
        Some(session.statement_timeout())
    } else {
        None
    };
    let front_env = session.env();

    let snapshot = session.pinned_snapshot();

    snapshot.fill_batch_query_epoch(&mut query)?;

    let execution = LocalQueryExecution::new(
        query,
        front_env.clone(),
        snapshot.support_barrier_read(),
        session,
        timeout,
    );

    Ok(execution.stream_rows())
}
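
// The statement-timeout selection above is duplicated between `distribute_execute` and
// `local_execute`. A shared helper could look like this hypothetical sketch (not part of
// the current code; assumes `statement_timeout()` returns a `std::time::Duration`):
//
//     fn statement_timeout_for(
//         session: &SessionImpl,
//         can_timeout_cancel: bool,
//     ) -> Option<std::time::Duration> {
//         // Deterministic simulation (madsim) disables statement timeouts entirely, and
//         // statements holding the write guard must not be cancelled by timeout.
//         if cfg!(madsim) || !can_timeout_cancel {
//             None
//         } else {
//             Some(session.statement_timeout())
//         }
//     }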