use std::collections::BTreeMap;
use std::time::Duration;

use anyhow::{Context, anyhow};
use risingwave_common::bail;
use risingwave_common::hash::VnodeCount;
use risingwave_common::util::epoch::Epoch;
use risingwave_meta_model::{
    PrivateLinkService, connection, database, function, index, object, schema, secret, sink,
    source, subscription, table, view,
};
use risingwave_meta_model_migration::{MigrationStatus, Migrator, MigratorTrait};
use risingwave_pb::catalog::connection::PbInfo as PbConnectionInfo;
use risingwave_pb::catalog::table::{CdcTableType as PbCdcTableType, PbEngine, PbTableType};
use risingwave_pb::catalog::{
    PbConnection, PbCreateType, PbDatabase, PbFunction, PbHandleConflictBehavior, PbIndex,
    PbSchema, PbSecret, PbSink, PbSinkType, PbSource, PbStreamJobStatus, PbSubscription, PbTable,
    PbView,
};
use sea_orm::{ConnectOptions, DatabaseConnection, DbBackend, ModelTrait};

use crate::{MetaError, MetaResult, MetaStoreBackend};

pub mod catalog;
pub mod cluster;
pub mod fragment;
pub mod id;
pub mod rename;
pub mod scale;
pub mod session_params;
pub mod streaming_job;
pub mod system_param;
pub mod user;
pub mod utils;

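/// Convert a `sea_orm` error into [`MetaError`], surfacing the extracted SQL error
/// (e.g. constraint violations) when one is available.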
impl From<sea_orm::DbErr> for MetaError {
    fn from(err: sea_orm::DbErr) -> Self {
        if let Some(err) = err.sql_err() {
            return anyhow!(err).into();
        }
        anyhow!(err).into()
    }
}

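/// Meta store backed by a relational database (e.g. SQLite or PostgreSQL) through `sea-orm`,
/// holding the shared [`DatabaseConnection`] and the endpoint it was created from.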
#[derive(Clone)]
pub struct SqlMetaStore {
    pub conn: DatabaseConnection,
    pub endpoint: String,
}

impl SqlMetaStore {
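    /// Connect to the meta store backend and build a [`SqlMetaStore`].
    ///
    /// For [`MetaStoreBackend::Mem`] an in-memory SQLite database is used; for
    /// [`MetaStoreBackend::Sql`] a connection pool is created from the given endpoint and
    /// pool configuration. Migrations are not applied here; call [`Self::up`] afterwards.
    ///
    /// A minimal usage sketch (assuming an async context and error propagation with `?`):
    ///
    /// ```rust,ignore
    /// let store = SqlMetaStore::connect(MetaStoreBackend::Mem).await?;
    /// let first_launch = store.up().await?;
    /// ```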
    pub async fn connect(backend: MetaStoreBackend) -> MetaResult<Self> {
        // An effectively infinite duration, used below to disable various timeouts.
        const MAX_DURATION: Duration = Duration::new(u64::MAX / 4, 0);

        #[easy_ext::ext]
        impl ConnectOptions {
            fn sqlite_common(&mut self) -> &mut Self {
                self
                    // SQLite easily runs into "database is locked" errors under concurrent
                    // access, so restrict the pool to a single connection.
                    .min_connections(1)
                    .max_connections(1)
                    // Effectively disable the timeouts, so callers wait for the single
                    // connection instead of erroring out.
                    .acquire_timeout(MAX_DURATION)
                    .connect_timeout(MAX_DURATION)
            }
        }

        Ok(match backend {
            MetaStoreBackend::Mem => {
                const IN_MEMORY_STORE: &str = "sqlite::memory:";

                let mut options = ConnectOptions::new(IN_MEMORY_STORE);

                options
                    .sqlite_common()
                    // Never release the only connection to the in-memory SQLite database,
                    // as dropping it would wipe all data.
                    .idle_timeout(MAX_DURATION)
                    .max_lifetime(MAX_DURATION);

                let conn = sea_orm::Database::connect(options).await?;
                Self {
                    conn,
                    endpoint: IN_MEMORY_STORE.to_owned(),
                }
            }
            MetaStoreBackend::Sql { endpoint, config } => {
                let mut options = ConnectOptions::new(endpoint.clone());
                options
                    .max_connections(config.max_connections)
                    .min_connections(config.min_connections)
                    .connect_timeout(Duration::from_secs(config.connection_timeout_sec))
                    .idle_timeout(Duration::from_secs(config.idle_timeout_sec))
                    .acquire_timeout(Duration::from_secs(config.acquire_timeout_sec));

                if DbBackend::Sqlite.is_prefix_of(&endpoint) {
                    if endpoint.contains(":memory:") || endpoint.contains("mode=memory") {
                        bail!(
                            "use the `mem` backend instead of specifying a URL of in-memory SQLite"
                        );
                    }
                    options.sqlite_common();
                }

                let conn = sea_orm::Database::connect(options).await?;
                Self { conn, endpoint }
            }
        })
    }

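    /// Create an in-memory meta store with all migrations applied, for use in tests.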
    #[cfg(any(test, feature = "test"))]
    pub async fn for_test() -> Self {
        let this = Self::connect(MetaStoreBackend::Mem).await.unwrap();
        Migrator::up(&this.conn, None).await.unwrap();
        this
    }

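    /// Whether this is the first launch of the cluster, determined by checking whether the
    /// initial migration `m20230908_072257_init` has been applied yet.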
    async fn is_first_launch(&self) -> MetaResult<bool> {
        let migrations = Migrator::get_applied_migrations(&self.conn)
            .await
            .context("failed to get applied migrations")?;
        for migration in migrations {
            if migration.name() == "m20230908_072257_init"
                && migration.status() == MigrationStatus::Applied
            {
                return Ok(false);
            }
        }
        Ok(true)
    }

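    /// Apply all pending migrations to bring the meta store schema up to date.
    ///
    /// Returns whether the cluster is being launched for the first time, evaluated before
    /// the migrations are run.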
    pub async fn up(&self) -> MetaResult<bool> {
        let cluster_first_launch = self.is_first_launch().await?;
        Migrator::up(&self.conn, None)
            .await
            .context("failed to upgrade models in meta store")?;

        Ok(cluster_first_launch)
    }
}

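/// A catalog model (database, table, source, sink, ...) paired with the common
/// [`object::Model`] metadata (owner, database/schema, timestamps), from which the
/// protobuf catalog representation is built by the `From` impls below.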
pub struct ObjectModel<M: ModelTrait>(M, object::Model);

impl From<ObjectModel<database::Model>> for PbDatabase {
    fn from(value: ObjectModel<database::Model>) -> Self {
        Self {
            id: value.0.database_id,
            name: value.0.name,
            owner: value.1.owner_id as _,
            resource_group: value.0.resource_group.clone(),
            barrier_interval_ms: value.0.barrier_interval_ms.map(|v| v as u32),
            checkpoint_frequency: value.0.checkpoint_frequency.map(|v| v as u64),
        }
    }
}

impl From<ObjectModel<secret::Model>> for PbSecret {
    fn from(value: ObjectModel<secret::Model>) -> Self {
        Self {
            id: value.0.secret_id,
            name: value.0.name,
            database_id: value.1.database_id.unwrap(),
            value: value.0.value,
            owner: value.1.owner_id as _,
            schema_id: value.1.schema_id.unwrap(),
        }
    }
}

impl From<ObjectModel<schema::Model>> for PbSchema {
    fn from(value: ObjectModel<schema::Model>) -> Self {
        Self {
            id: value.0.schema_id,
            name: value.0.name,
            database_id: value.1.database_id.unwrap(),
            owner: value.1.owner_id as _,
        }
    }
}

impl From<ObjectModel<table::Model>> for PbTable {
    fn from(value: ObjectModel<table::Model>) -> Self {
        Self {
            id: value.0.table_id,
            schema_id: value.1.schema_id.unwrap(),
            database_id: value.1.database_id.unwrap(),
            name: value.0.name,
            columns: value.0.columns.to_protobuf(),
            pk: value.0.pk.to_protobuf(),
            table_type: PbTableType::from(value.0.table_type) as _,
            distribution_key: value.0.distribution_key.0,
            stream_key: value.0.stream_key.0,
            append_only: value.0.append_only,
            owner: value.1.owner_id as _,
            fragment_id: value.0.fragment_id.unwrap_or_default(),
            vnode_col_index: value.0.vnode_col_index.map(|index| index as _),
            row_id_index: value.0.row_id_index.map(|index| index as _),
            value_indices: value.0.value_indices.0,
            definition: value.0.definition,
            handle_pk_conflict_behavior: PbHandleConflictBehavior::from(
                value.0.handle_pk_conflict_behavior,
            ) as _,
            version_column_indices: value
                .0
                .version_column_indices
                .unwrap_or_default()
                .0
                .iter()
                .map(|&idx| idx as u32)
                .collect(),
            read_prefix_len_hint: value.0.read_prefix_len_hint as _,
            watermark_indices: value.0.watermark_indices.0,
            dist_key_in_pk: value.0.dist_key_in_pk.0,
            dml_fragment_id: value.0.dml_fragment_id,
            cardinality: value
                .0
                .cardinality
                .map(|cardinality| cardinality.to_protobuf()),
            initialized_at_epoch: Some(
                Epoch::from_unix_millis(value.1.initialized_at.and_utc().timestamp_millis() as _).0,
            ),
            created_at_epoch: Some(
                Epoch::from_unix_millis(value.1.created_at.and_utc().timestamp_millis() as _).0,
            ),
            cleaned_by_watermark: value.0.cleaned_by_watermark,
            stream_job_status: PbStreamJobStatus::Created as _,
            create_type: PbCreateType::Foreground as _,
            version: value.0.version.map(|v| v.to_protobuf()),
            optional_associated_source_id: value.0.optional_associated_source_id.map(Into::into),
            description: value.0.description,
            #[expect(deprecated)]
            incoming_sinks: vec![],
            initialized_at_cluster_version: value.1.initialized_at_cluster_version,
            created_at_cluster_version: value.1.created_at_cluster_version,
            retention_seconds: value.0.retention_seconds.map(|id| id as u32),
            cdc_table_id: value.0.cdc_table_id,
            maybe_vnode_count: VnodeCount::set(value.0.vnode_count).to_protobuf(),
            webhook_info: value.0.webhook_info.map(|info| info.to_protobuf()),
            job_id: value.0.belongs_to_job_id,
            engine: value.0.engine.map(|engine| PbEngine::from(engine) as i32),
            clean_watermark_index_in_pk: value.0.clean_watermark_index_in_pk,
            refreshable: value.0.refreshable,
            vector_index_info: value.0.vector_index_info.map(|index| index.to_protobuf()),
            cdc_table_type: value
                .0
                .cdc_table_type
                .map(|cdc_type| PbCdcTableType::from(cdc_type) as i32),
        }
    }
}

impl From<ObjectModel<source::Model>> for PbSource {
    fn from(value: ObjectModel<source::Model>) -> Self {
        let mut secret_ref_map = BTreeMap::new();
        if let Some(secret_ref) = value.0.secret_ref {
            secret_ref_map = secret_ref.to_protobuf();
        }
        Self {
            id: value.0.source_id as _,
            schema_id: value.1.schema_id.unwrap(),
            database_id: value.1.database_id.unwrap(),
            name: value.0.name,
            row_id_index: value.0.row_id_index.map(|id| id as _),
            columns: value.0.columns.to_protobuf(),
            pk_column_ids: value.0.pk_column_ids.0,
            with_properties: value.0.with_properties.0,
            owner: value.1.owner_id as _,
            info: value.0.source_info.map(|info| info.to_protobuf()),
            watermark_descs: value.0.watermark_descs.to_protobuf(),
            definition: value.0.definition,
            connection_id: value.0.connection_id,
            initialized_at_epoch: Some(
                Epoch::from_unix_millis(value.1.initialized_at.and_utc().timestamp_millis() as _).0,
            ),
            created_at_epoch: Some(
                Epoch::from_unix_millis(value.1.created_at.and_utc().timestamp_millis() as _).0,
            ),
            version: value.0.version as _,
            optional_associated_table_id: value.0.optional_associated_table_id.map(Into::into),
            initialized_at_cluster_version: value.1.initialized_at_cluster_version,
            created_at_cluster_version: value.1.created_at_cluster_version,
            secret_refs: secret_ref_map,
            rate_limit: value.0.rate_limit.map(|v| v as _),
            refresh_mode: value
                .0
                .refresh_mode
                .map(|refresh_mode| refresh_mode.to_protobuf()),
        }
    }
}

impl From<ObjectModel<sink::Model>> for PbSink {
    fn from(value: ObjectModel<sink::Model>) -> Self {
        let mut secret_ref_map = BTreeMap::new();
        if let Some(secret_ref) = value.0.secret_ref {
            secret_ref_map = secret_ref.to_protobuf();
        }
        Self {
            id: value.0.sink_id as _,
            schema_id: value.1.schema_id.unwrap(),
            database_id: value.1.database_id.unwrap(),
            name: value.0.name,
            columns: value.0.columns.to_protobuf(),
            plan_pk: value.0.plan_pk.to_protobuf(),
            distribution_key: value.0.distribution_key.0,
            downstream_pk: value.0.downstream_pk.0,
            sink_type: PbSinkType::from(value.0.sink_type) as _,
            owner: value.1.owner_id as _,
            properties: value.0.properties.0,
            definition: value.0.definition,
            connection_id: value.0.connection_id,
            initialized_at_epoch: Some(
                Epoch::from_unix_millis(value.1.initialized_at.and_utc().timestamp_millis() as _).0,
            ),
            created_at_epoch: Some(
                Epoch::from_unix_millis(value.1.created_at.and_utc().timestamp_millis() as _).0,
            ),
            db_name: value.0.db_name,
            sink_from_name: value.0.sink_from_name,
            stream_job_status: PbStreamJobStatus::Created as _,
            format_desc: value.0.sink_format_desc.map(|desc| desc.to_protobuf()),
            target_table: value.0.target_table,
            initialized_at_cluster_version: value.1.initialized_at_cluster_version,
            created_at_cluster_version: value.1.created_at_cluster_version,
            create_type: PbCreateType::Foreground as _,
            secret_refs: secret_ref_map,
            original_target_columns: value
                .0
                .original_target_columns
                .map(|cols| cols.to_protobuf())
                .unwrap_or_default(),
            auto_refresh_schema_from_table: value.0.auto_refresh_schema_from_table,
        }
    }
}

impl From<ObjectModel<subscription::Model>> for PbSubscription {
    fn from(value: ObjectModel<subscription::Model>) -> Self {
        Self {
            id: value.0.subscription_id as _,
            schema_id: value.1.schema_id.unwrap(),
            database_id: value.1.database_id.unwrap(),
            name: value.0.name,
            owner: value.1.owner_id as _,
            retention_seconds: value.0.retention_seconds as _,
            definition: value.0.definition,
            initialized_at_epoch: Some(
                Epoch::from_unix_millis(value.1.initialized_at.and_utc().timestamp_millis() as _).0,
            ),
            created_at_epoch: Some(
                Epoch::from_unix_millis(value.1.created_at.and_utc().timestamp_millis() as _).0,
            ),
            initialized_at_cluster_version: value.1.initialized_at_cluster_version,
            created_at_cluster_version: value.1.created_at_cluster_version,
            dependent_table_id: value.0.dependent_table_id,
            subscription_state: value.0.subscription_state as _,
        }
    }
}

impl From<ObjectModel<index::Model>> for PbIndex {
    fn from(value: ObjectModel<index::Model>) -> Self {
        Self {
            id: value.0.index_id as _,
            schema_id: value.1.schema_id.unwrap(),
            database_id: value.1.database_id.unwrap(),
            name: value.0.name,
            owner: value.1.owner_id as _,
            index_table_id: value.0.index_table_id,
            primary_table_id: value.0.primary_table_id,
            index_item: value.0.index_items.to_protobuf(),
            index_column_properties: value
                .0
                .index_column_properties
                .map(|p| p.to_protobuf())
                .unwrap_or_default(),
            index_columns_len: value.0.index_columns_len as _,
            initialized_at_epoch: Some(
                Epoch::from_unix_millis(value.1.initialized_at.and_utc().timestamp_millis() as _).0,
            ),
            created_at_epoch: Some(
                Epoch::from_unix_millis(value.1.created_at.and_utc().timestamp_millis() as _).0,
            ),
            stream_job_status: PbStreamJobStatus::Created as _,
            initialized_at_cluster_version: value.1.initialized_at_cluster_version,
            created_at_cluster_version: value.1.created_at_cluster_version,
            create_type: risingwave_pb::catalog::CreateType::Foreground.into(),
        }
    }
}

impl From<ObjectModel<view::Model>> for PbView {
    fn from(value: ObjectModel<view::Model>) -> Self {
        Self {
            id: value.0.view_id as _,
            schema_id: value.1.schema_id.unwrap(),
            database_id: value.1.database_id.unwrap(),
            name: value.0.name,
            owner: value.1.owner_id as _,
            properties: value.0.properties.0,
            sql: value.0.definition,
            columns: value.0.columns.to_protobuf(),
            created_at_epoch: Some(
                Epoch::from_unix_millis(value.1.created_at.and_utc().timestamp_millis() as _).0,
            ),
            created_at_cluster_version: value.1.created_at_cluster_version,
        }
    }
}

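// A connection stores either a private link service or general connection params; when the
// private link service is left as the default value, the connection params are used.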
impl From<ObjectModel<connection::Model>> for PbConnection {
    fn from(value: ObjectModel<connection::Model>) -> Self {
        let info: PbConnectionInfo = if value.0.info == PrivateLinkService::default() {
            PbConnectionInfo::ConnectionParams(value.0.params.to_protobuf())
        } else {
            PbConnectionInfo::PrivateLinkService(value.0.info.to_protobuf())
        };
        Self {
            id: value.1.oid.as_connection_id(),
            schema_id: value.1.schema_id.unwrap(),
            database_id: value.1.database_id.unwrap(),
            name: value.0.name,
            owner: value.1.owner_id as _,
            info: Some(info),
        }
    }
}

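// Function argument names are stored as a single comma-separated string, and the `async` /
// `batch` flags are read from the optional key-value options of the function model.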
impl From<ObjectModel<function::Model>> for PbFunction {
    fn from(value: ObjectModel<function::Model>) -> Self {
        Self {
            id: value.0.function_id as _,
            schema_id: value.1.schema_id.unwrap(),
            database_id: value.1.database_id.unwrap(),
            name: value.0.name,
            owner: value.1.owner_id as _,
            arg_names: value.0.arg_names.split(',').map(|s| s.to_owned()).collect(),
            arg_types: value.0.arg_types.to_protobuf(),
            return_type: Some(value.0.return_type.to_protobuf()),
            language: value.0.language,
            runtime: value.0.runtime,
            link: value.0.link,
            name_in_runtime: value.0.name_in_runtime,
            body: value.0.body,
            compressed_binary: value.0.compressed_binary,
            kind: Some(value.0.kind.into()),
            always_retry_on_network_error: value.0.always_retry_on_network_error,
            is_async: value
                .0
                .options
                .as_ref()
                .and_then(|o| o.0.get("async").map(|v| v == "true")),
            is_batched: value
                .0
                .options
                .as_ref()
                .and_then(|o| o.0.get("batch").map(|v| v == "true")),
            created_at_epoch: Some(
                Epoch::from_unix_millis(value.1.created_at.and_utc().timestamp_millis() as _).0,
            ),
            created_at_cluster_version: value.1.created_at_cluster_version,
        }
    }
}