pub struct CatalogController {
pub(crate) env: MetaSrvEnv,
pub(crate) inner: RwLock<CatalogControllerInner>,
}
CatalogController is the controller for catalog-related operations, including database, schema, table, view, etc.
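Example (a minimal sketch, assuming code inside the meta crate with an initialized CatalogController at hand; the helper function and the new name are hypothetical, and imports of crate-internal types are omitted):

// Rename a table through the controller and obtain the notification version
// that the frontend can wait on. Assumes `TableId: Into<ObjectId>`.
async fn rename_table_example(
    catalog: &CatalogController,
    table_id: TableId,
) -> MetaResult<NotificationVersion> {
    catalog
        .alter_name(ObjectType::Table, table_id, "orders_renamed")
        .await
}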
Fields
env: MetaSrvEnv
inner: RwLock<CatalogControllerInner>
Implementations
impl CatalogController
async fn alter_database_name( &self, database_id: DatabaseId, name: &str, ) -> MetaResult<NotificationVersion>
async fn alter_schema_name( &self, schema_id: SchemaId, name: &str, ) -> MetaResult<NotificationVersion>
pub async fn alter_name( &self, object_type: ObjectType, object_id: impl Into<ObjectId>, object_name: &str, ) -> MetaResult<NotificationVersion>
pub async fn alter_swap_rename( &self, object_type: ObjectType, object_id: ObjectId, dst_object_id: ObjectId, ) -> MetaResult<NotificationVersion>
pub async fn alter_owner( &self, object_type: ObjectType, object_id: ObjectId, new_owner: UserId, ) -> MetaResult<NotificationVersion>
pub async fn alter_schema( &self, object_type: ObjectType, object_id: ObjectId, new_schema: SchemaId, ) -> MetaResult<NotificationVersion>
pub async fn alter_secret( &self, pb_secret: PbSecret, secret_plain_payload: Vec<u8>, ) -> MetaResult<NotificationVersion>
pub async fn drop_table_associated_source( txn: &DatabaseTransaction, drop_table_connector_ctx: &DropTableConnectorContext, ) -> MetaResult<(Vec<PbUserInfo>, Vec<PartialObject>)>
pub async fn alter_database_param( &self, database_id: DatabaseId, param: AlterDatabaseParam, ) -> MetaResult<(NotificationVersion, Model)>
pub async fn alter_streaming_job_config( &self, job_id: JobId, entries_to_add: HashMap<String, String>, keys_to_remove: Vec<String>, ) -> MetaResult<NotificationVersion>
pub async fn ensure_refresh_job(&self, table_id: TableId) -> MetaResult<()>
pub async fn update_refresh_job_status( &self, table_id: TableId, status: RefreshState, trigger_time: Option<DateTime>, is_success: bool, ) -> MetaResult<()>
pub async fn reset_all_refresh_jobs_to_idle(&self) -> MetaResult<()>
pub async fn update_refresh_job_interval( &self, table_id: TableId, trigger_interval_secs: Option<i64>, ) -> MetaResult<()>
impl CatalogController
pub(crate) async fn create_object( txn: &DatabaseTransaction, obj_type: ObjectType, owner_id: UserId, database_id: Option<DatabaseId>, schema_id: Option<SchemaId>, ) -> MetaResult<Model>
pub async fn create_database( &self, db: PbDatabase, ) -> MetaResult<(NotificationVersion, Model)>
pub async fn create_schema( &self, schema: PbSchema, ) -> MetaResult<NotificationVersion>
pub async fn create_subscription_catalog( &self, pb_subscription: &mut PbSubscription, ) -> MetaResult<()>
pub async fn create_source( &self, pb_source: PbSource, ) -> MetaResult<(SourceId, NotificationVersion)>
pub async fn create_function( &self, pb_function: PbFunction, ) -> MetaResult<NotificationVersion>
pub async fn create_connection( &self, pb_connection: PbConnection, ) -> MetaResult<NotificationVersion>
pub async fn create_secret( &self, pb_secret: PbSecret, secret_plain_payload: Vec<u8>, ) -> MetaResult<NotificationVersion>
pub async fn create_view( &self, pb_view: PbView, dependencies: HashSet<ObjectId>, ) -> MetaResult<NotificationVersion>
pub async fn validate_cross_db_snapshot_backfill( &self, cross_db_snapshot_backfill_info: &SnapshotBackfillInfo, ) -> MetaResult<()>
impl CatalogController
pub async fn drop_object( &self, object_type: ObjectType, object_id: impl Into<ObjectId>, drop_mode: DropMode, ) -> MetaResult<(ReleaseContext, NotificationVersion)>
pub async fn try_abort_creating_subscription( &self, subscription_id: SubscriptionId, ) -> MetaResult<()>
impl CatalogController
pub async fn get_secret_by_id( &self, secret_id: SecretId, ) -> MetaResult<PbSecret>
pub async fn get_object_database_id( &self, object_id: impl Into<ObjectId>, ) -> MetaResult<DatabaseId>
pub async fn get_connection_by_id( &self, connection_id: ConnectionId, ) -> MetaResult<PbConnection>
pub async fn get_table_catalog_by_name( &self, database_id: DatabaseId, schema_id: SchemaId, name: &str, ) -> MetaResult<Option<PbTable>>
pub async fn get_table_by_name( &self, database_name: &str, table_name: &str, ) -> MetaResult<Option<PbTable>>
pub async fn get_table_associated_source_id( &self, table_id: TableId, ) -> MetaResult<Option<SourceId>>
pub async fn get_table_by_associate_source_id( &self, associated_source_id: SourceId, ) -> MetaResult<PbTable>
pub async fn get_table_by_id(&self, table_id: TableId) -> MetaResult<PbTable>
pub async fn get_user_created_table_by_ids( &self, job_ids: impl Iterator<Item = JobId>, ) -> MetaResult<Vec<PbTable>>
pub async fn get_user_created_table_by_ids_in_txn<C>( &self, txn: &C, job_ids: impl Iterator<Item = JobId>, ) -> MetaResult<Vec<PbTable>> where C: ConnectionTrait
pub async fn get_table_by_ids( &self, table_ids: Vec<TableId>, include_dropped_table: bool, ) -> MetaResult<Vec<PbTable>>
pub async fn get_table_columns( &self, id: TableId, ) -> MetaResult<Vec<ColumnCatalog>>
pub async fn get_sink_by_id( &self, sink_id: SinkId, ) -> MetaResult<Option<PbSink>>
pub async fn get_sink_auto_refresh_schema_from( &self, table_id: TableId, ) -> MetaResult<Vec<PbSink>>
pub async fn get_sink_state_table_ids( &self, sink_id: SinkId, ) -> MetaResult<Vec<TableId>>
pub async fn get_subscription_by_id( &self, subscription_id: SubscriptionId, ) -> MetaResult<PbSubscription>
pub async fn get_mv_depended_subscriptions( &self, database_id: Option<DatabaseId>, ) -> MetaResult<HashMap<TableId, HashMap<SubscriptionId, u64>>>
pub async fn get_all_table_options( &self, ) -> MetaResult<HashMap<TableId, TableOption>>
pub async fn get_all_streaming_parallelisms( &self, ) -> MetaResult<HashMap<ObjectId, StreamingParallelism>>
pub async fn get_table_name_type_mapping( &self, ) -> MetaResult<HashMap<TableId, (String, String)>>
pub async fn get_table_by_cdc_table_id( &self, cdc_table_id: &String, ) -> MetaResult<Vec<PbTable>>
pub async fn get_created_table_ids(&self) -> MetaResult<Vec<TableId>>
pub async fn get_versioned_table_schemas( &self, ) -> MetaResult<HashMap<TableId, Vec<i32>>>
Returns the column ids of versioned tables.
Being versioned implies the table uses ColumnAwareSerde.
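For example, a caller could look up the column ids of a single versioned table from the returned map (a sketch; crate-internal imports are assumed):

use std::collections::HashMap;

// Fetch the versioned-table schema map and pick out one table's column ids.
async fn versioned_columns_of(
    catalog: &CatalogController,
    table_id: TableId,
) -> MetaResult<Option<Vec<i32>>> {
    let schemas: HashMap<TableId, Vec<i32>> = catalog.get_versioned_table_schemas().await?;
    Ok(schemas.get(&table_id).cloned())
}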
pub async fn get_existing_job_resource_group( &self, streaming_job_id: JobId, ) -> MetaResult<String>
pub async fn get_database_resource_group( &self, database_id: DatabaseId, ) -> MetaResult<String>
pub async fn get_existing_job_resource_groups( &self, streaming_job_ids: Vec<JobId>, ) -> MetaResult<HashMap<JobId, String>>
pub async fn get_existing_job_database_resource_group( &self, streaming_job_id: JobId, ) -> MetaResult<String>
pub async fn get_job_streaming_parallelisms( &self, streaming_job_id: JobId, ) -> MetaResult<StreamingParallelism>
pub async fn get_job_parallelisms( &self, streaming_job_id: JobId, ) -> MetaResult<(StreamingParallelism, Option<StreamingParallelism>)>
pub async fn get_fragment_streaming_job_id( &self, fragment_id: FragmentId, ) -> MetaResult<JobId>
pub async fn list_streaming_job_with_database( &self, ) -> MetaResult<HashMap<DatabaseId, Vec<JobId>>>
pub async fn list_table_objects( &self, ) -> MetaResult<Vec<(TableId, String, String, String, String, TableType)>>
pub async fn list_source_objects( &self, ) -> MetaResult<Vec<(TableId, String, String, String, String)>>
pub async fn list_sink_objects( &self, ) -> MetaResult<Vec<(TableId, String, String, String, String)>>
pub async fn list_relation_objects_by_ids( &self, ids: &HashSet<ObjectId>, ) -> MetaResult<Vec<(ObjectId, String, String, String, String)>>
pub async fn get_streaming_job_status( &self, streaming_job_id: JobId, ) -> MetaResult<JobStatus>
pub async fn get_streaming_job_extra_info( &self, job_ids: Vec<JobId>, ) -> MetaResult<HashMap<JobId, StreamingJobExtraInfo>>
pub async fn get_streaming_job_extra_info_in_txn<C>( &self, txn: &C, job_ids: Vec<JobId>, ) -> MetaResult<HashMap<JobId, StreamingJobExtraInfo>> where C: ConnectionTrait
impl CatalogController
pub async fn list_time_travel_table_ids(&self) -> MetaResult<Vec<TableId>>
pub async fn list_refresh_jobs(&self) -> MetaResult<Vec<Model>>
pub async fn get_refresh_job_state_by_table_id( &self, table_id: TableId, ) -> MetaResult<RefreshState>
pub async fn list_refreshable_table_ids(&self) -> MetaResult<Vec<TableId>>
pub async fn list_stream_job_desc_for_telemetry( &self, ) -> MetaResult<Vec<MetaTelemetryJobDesc>>
pub async fn list_background_creating_jobs( &self, include_initial: bool, database_id: Option<DatabaseId>, ) -> MetaResult<HashSet<JobId>>
pub async fn list_creating_jobs( &self, include_initial: bool, include_foreground: bool, database_id: Option<DatabaseId>, ) -> MetaResult<Vec<(JobId, String, DateTime, CreateType, bool)>>
pub async fn list_databases(&self) -> MetaResult<Vec<PbDatabase>>
pub async fn list_all_object_dependencies( &self, ) -> MetaResult<Vec<PbObjectDependency>>
pub async fn list_created_object_dependencies( &self, ) -> MetaResult<Vec<PbObjectDependency>>
pub async fn list_schemas(&self) -> MetaResult<Vec<PbSchema>>
pub async fn list_all_state_tables(&self) -> MetaResult<Vec<PbTable>>
Same as Self::list_tables_by_type, but includes all table types.
pub async fn list_readonly_table_ids( &self, schema_id: SchemaId, ) -> MetaResult<Vec<TableId>>
pub async fn list_dml_table_ids( &self, schema_id: SchemaId, ) -> MetaResult<Vec<TableId>>
pub async fn list_view_ids( &self, schema_id: SchemaId, ) -> MetaResult<Vec<ViewId>>
pub async fn list_tables_by_type( &self, table_type: TableType, ) -> MetaResult<Vec<PbTable>>
Use Self::list_all_state_tables to get all types.
pub async fn list_sources(&self) -> MetaResult<Vec<PbSource>>
pub async fn list_connections(&self) -> MetaResult<Vec<PbConnection>>
pub async fn list_source_ids( &self, schema_id: SchemaId, ) -> MetaResult<Vec<SourceId>>
pub async fn list_indexes(&self) -> MetaResult<Vec<PbIndex>>
pub async fn list_sinks(&self) -> MetaResult<Vec<PbSink>>
pub async fn list_subscriptions(&self) -> MetaResult<Vec<PbSubscription>>
pub async fn list_views(&self) -> MetaResult<Vec<PbView>>
pub async fn list_users(&self) -> MetaResult<Vec<PbUserInfo>>
pub async fn list_functions(&self) -> MetaResult<Vec<PbFunction>>
pub async fn list_unmigrated_tables(&self) -> MetaResult<Vec<PbTable>>
"Unmigrated" refers to table fragments that have not yet been migrated to the new plan (for now, this means table fragments that do not use the UpstreamSinkUnion operator to receive multiple upstream sinks).
pub async fn list_sink_ids( &self, database_id: Option<DatabaseId>, ) -> MetaResult<Vec<SinkId>>
impl CatalogController
pub(crate) async fn init(&self) -> MetaResult<()>
pub(crate) async fn table_catalog_cdc_table_id_update(&self) -> MetaResult<()>
Fills in the cdc_table_id field for tables whose cdc_table_id is empty and that have a parent Source job.
Note: a Table with a parent Source job is assumed to be a CDC table.
pub(crate) async fn log_cleaned_dirty_jobs( &self, dirty_objs: &[PartialObject], txn: &DatabaseTransaction, ) -> MetaResult<()>
pub(crate) async fn clean_dirty_sink_downstreams( txn: &DatabaseTransaction, ) -> MetaResult<()>
pub async fn has_any_streaming_jobs(&self) -> MetaResult<bool>
pub async fn find_creating_streaming_job_ids( &self, infos: Vec<PbCreatingJobInfo>, ) -> MetaResult<Vec<ObjectId>>
impl CatalogController
pub async fn new(env: MetaSrvEnv) -> MetaResult<Self>
pub async fn get_inner_read_guard( &self, ) -> RwLockReadGuard<'_, CatalogControllerInner>
Used in NotificationService::subscribe.
Pay attention to the order in which locks are acquired, to prevent deadlocks.
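A minimal usage sketch of the intended pattern: take the guard, read what is needed, and release it before acquiring any other lock (the concrete lock-ordering rules of the surrounding services are not spelled out here):

// Hold the inner read guard only for the duration of the read.
async fn read_catalog_snapshot(catalog: &CatalogController) {
    let guard = catalog.get_inner_read_guard().await;
    // ... read catalog state through `guard` here ...
    drop(guard); // release before touching locks owned by other managers
}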
pub async fn get_inner_write_guard( &self, ) -> RwLockWriteGuard<'_, CatalogControllerInner>
impl CatalogController
pub(crate) async fn notify_frontend( &self, operation: NotificationOperation, info: NotificationInfo, ) -> NotificationVersion
pub(crate) async fn notify_frontend_relation_info( &self, operation: NotificationOperation, relation_info: PbObjectInfo, ) -> NotificationVersion
pub(crate) async fn notify_frontend_trivial(&self) -> NotificationVersion
Trivially advances the notification version, notifies the frontend, and returns the new version for the frontend to wait on.
We cannot simply return the current version: it may never have been sent to the frontend, which would then wait for it indefinitely until some frontend-related notification happens to be sent.
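As an illustration, a crate-internal handler whose operation changes nothing visible to the frontend can still hand back a version to wait on (a hedged sketch; the helper is hypothetical):

// Return a freshly advanced notification version even though no catalog
// object changed, so the caller's frontend has a valid version to await.
async fn complete_noop_ddl(catalog: &CatalogController) -> NotificationVersion {
    catalog.notify_frontend_trivial().await
}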
impl CatalogController
pub async fn finish_create_subscription_catalog( &self, subscription_id: SubscriptionId, ) -> MetaResult<()>
pub async fn notify_create_subscription( &self, subscription_id: SubscriptionId, ) -> MetaResult<NotificationVersion>
pub async fn get_connector_usage(&self) -> MetaResult<Value>
pub async fn clean_dirty_subscription( &self, database_id: Option<DatabaseId>, ) -> MetaResult<()>
pub async fn clean_dirty_creating_jobs( &self, database_id: Option<DatabaseId>, ) -> MetaResult<Vec<SourceId>>
clean_dirty_creating_jobs cleans up creating jobs that are in Foreground mode or still in Initial status.
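A sketch of invoking it for a single database (passing None instead would cover all databases); what the caller then does with the returned source ids is elided:

// Clean up dirty creating jobs of one database and report the affected sources.
async fn cleanup_dirty_jobs(
    catalog: &CatalogController,
    database_id: DatabaseId,
) -> MetaResult<Vec<SourceId>> {
    let source_ids = catalog.clean_dirty_creating_jobs(Some(database_id)).await?;
    Ok(source_ids)
}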
pub async fn comment_on( &self, comment: PbComment, ) -> MetaResult<NotificationVersion>
async fn notify_hummock_dropped_tables(&self, tables: Vec<PbTable>)
pub async fn complete_dropped_tables( &self, table_ids: impl IntoIterator<Item = TableId>, )
pub async fn cleanup_dropped_tables(&self)
pub async fn stats(&self) -> MetaResult<CatalogStats>
pub async fn fetch_sink_with_state_table_ids( &self, sink_ids: HashSet<SinkId>, ) -> MetaResult<HashMap<SinkId, Vec<TableId>>>
pub async fn list_all_pending_sinks( &self, database_id: Option<DatabaseId>, ) -> MetaResult<HashSet<SinkId>>
pub async fn abort_pending_sink_epochs( &self, sink_committed_epoch: HashMap<SinkId, u64>, ) -> MetaResult<()>
impl CatalogController
pub fn prepare_fragment_models_from_fragments( job_id: JobId, fragments: impl Iterator<Item = &Fragment>, ) -> MetaResult<Vec<Model>>
pub fn prepare_fragment_model_for_new_job( job_id: JobId, fragment: &Fragment, ) -> MetaResult<Model>
fn compose_table_fragments( job_id: JobId, state: PbState, ctx: StreamContext, fragments: Vec<(Model, Vec<ActorInfo>)>, parallelism: StreamingParallelism, max_parallelism: usize, job_definition: Option<String>, ) -> MetaResult<StreamJobFragments>
fn compose_fragment( fragment: Model, actors: Vec<ActorInfo>, job_definition: Option<String>, ) -> MetaResult<(Fragment, HashMap<ActorId, PbActorStatus>, HashMap<ActorId, PbConnectorSplits>)>
pub fn running_fragment_parallelisms( &self, id_filter: Option<HashSet<FragmentId>>, ) -> MetaResult<HashMap<FragmentId, FragmentParallelismInfo>>
pub async fn fragment_job_mapping( &self, ) -> MetaResult<HashMap<FragmentId, JobId>>
pub async fn get_fragment_job_id( &self, fragment_ids: Vec<FragmentId>, ) -> MetaResult<Vec<ObjectId>>
pub async fn get_fragment_desc_by_id( &self, fragment_id: FragmentId, ) -> MetaResult<Option<(FragmentDesc, Vec<FragmentId>)>>
pub async fn list_fragment_database_ids( &self, select_fragment_ids: Option<Vec<FragmentId>>, ) -> MetaResult<Vec<(FragmentId, DatabaseId)>>
pub async fn get_job_fragments_by_id( &self, job_id: JobId, ) -> MetaResult<StreamJobFragments>
pub async fn get_fragment_actor_dispatchers( &self, fragment_ids: Vec<FragmentId>, ) -> MetaResult<FragmentActorDispatchers>
pub async fn get_fragment_actor_dispatchers_txn( &self, c: &impl ConnectionTrait, fragment_ids: Vec<FragmentId>, ) -> MetaResult<FragmentActorDispatchers>
pub async fn get_fragment_downstream_relations( &self, fragment_ids: Vec<FragmentId>, ) -> MetaResult<FragmentDownstreamRelation>
pub async fn get_fragment_downstream_relations_in_txn<C>( &self, txn: &C, fragment_ids: Vec<FragmentId>, ) -> MetaResult<FragmentDownstreamRelation> where C: ConnectionTrait + StreamTrait + Send
pub async fn get_job_fragment_backfill_scan_type( &self, job_id: JobId, ) -> MetaResult<HashMap<FragmentId, PbStreamScanType>>
pub async fn count_streaming_jobs(&self) -> MetaResult<usize>
pub async fn list_streaming_job_infos( &self, ) -> MetaResult<Vec<StreamingJobInfo>>
pub async fn get_max_parallelism_by_id( &self, job_id: JobId, ) -> MetaResult<usize>
pub async fn get_job_internal_table_ids( &self, ) -> Option<Vec<(JobId, Vec<TableId>)>>
Tries to get the internal table ids of each streaming job; used for metrics collection.
pub async fn has_any_running_jobs(&self) -> MetaResult<bool>
pub fn worker_actor_count(&self) -> MetaResult<HashMap<WorkerId, usize>>
fn collect_fragment_actor_map( &self, fragment_ids: &[FragmentId], stream_context: StreamContext, ) -> MetaResult<HashMap<FragmentId, Vec<ActorInfo>>>
fn collect_fragment_actor_pairs( &self, fragments: Vec<Model>, stream_context: StreamContext, ) -> MetaResult<Vec<(Model, Vec<ActorInfo>)>>
pub async fn table_fragments( &self, ) -> MetaResult<BTreeMap<JobId, StreamJobFragments>>
pub async fn upstream_fragments( &self, fragment_ids: impl Iterator<Item = FragmentId>, ) -> MetaResult<HashMap<FragmentId, HashSet<FragmentId>>>
pub fn list_actor_locations(&self) -> MetaResult<Vec<PartialActorLocation>>
pub async fn list_actor_info( &self, ) -> MetaResult<Vec<(ActorId, FragmentId, ObjectId, SchemaId, ObjectType)>>
pub fn get_worker_slot_mappings(&self) -> Vec<PbFragmentWorkerSlotMapping>
pub async fn list_fragment_descs_with_node( &self, is_creating: bool, ) -> MetaResult<Vec<(FragmentDistribution, Vec<FragmentId>)>>
pub async fn list_fragment_descs_without_node( &self, is_creating: bool, ) -> MetaResult<Vec<(FragmentDistribution, Vec<FragmentId>)>>
fn build_fragment_query(is_creating: bool) -> Select<Entity>
async fn build_fragment_distributions( &self, txn: &DatabaseTransaction, rows: Vec<FragmentDescRow>, ) -> MetaResult<Vec<(FragmentDistribution, Vec<FragmentId>)>>
pub async fn list_sink_log_store_tables( &self, ) -> MetaResult<Vec<(SinkId, TableId)>>
fn collect_root_fragment_mapping( ensembles: Vec<NoShuffleEnsemble>, ) -> HashMap<FragmentId, Vec<FragmentId>>
Build a fragment-to-root lookup for all reported root fragment ensembles.
fn format_fragment_parallelism_policy( distribution_type: DistributionType, fragment_parallelism: Option<&StreamingParallelism>, job_parallelism: Option<&StreamingParallelism>, root_fragments: &[FragmentId], ) -> String
fn format_streaming_parallelism(parallelism: &StreamingParallelism) -> String
pub async fn list_sink_actor_mapping( &self, ) -> MetaResult<HashMap<SinkId, (String, Vec<ActorId>)>>
pub async fn list_fragment_state_tables( &self, ) -> MetaResult<Vec<PartialFragmentStateTables>>
pub async fn load_all_actors_dynamic( &self, database_id: Option<DatabaseId>, worker_nodes: &ActiveStreamingWorkerNodes, ) -> MetaResult<FragmentRenderMap>
Used in crate::barrier::GlobalBarrierManager; loads all running actors that need to be sent or collected.
pub async fn load_fragment_context( &self, database_id: Option<DatabaseId>, ) -> MetaResult<LoadedFragmentContext>
Async load stage: collects all metadata required for rendering actor assignments.
pub async fn load_fragment_context_in_txn<C>( &self, txn: &C, database_id: Option<DatabaseId>, ) -> MetaResult<LoadedFragmentContext> where C: ConnectionTrait
pub async fn fill_snapshot_backfill_epoch( &self, fragment_ids: impl Iterator<Item = FragmentId>, snapshot_backfill_info: Option<&SnapshotBackfillInfo>, cross_db_snapshot_backfill_info: &SnapshotBackfillInfo, ) -> MetaResult<()>
pub fn get_running_actors_of_fragment( &self, fragment_id: FragmentId, ) -> MetaResult<HashSet<ActorId>>
Gets the ids of actors with Running status in the fragment fragment_id.
pub async fn get_running_actors_for_source_backfill( &self, source_backfill_fragment_id: FragmentId, source_fragment_id: FragmentId, ) -> MetaResult<Vec<(ActorId, ActorId)>>
Gets the Running actor ids of the fragment source_backfill_fragment_id, paired with each actor's upstream source actor id from source_fragment_id.
Each element is (backfill_actor_id, upstream_source_actor_id).
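A small sketch of consuming the returned pairs, assuming ActorId can be used as a hash-map key:

use std::collections::HashMap;

// Map each running backfill actor to its upstream source actor.
async fn backfill_upstream_actor_map(
    catalog: &CatalogController,
    source_backfill_fragment_id: FragmentId,
    source_fragment_id: FragmentId,
) -> MetaResult<HashMap<ActorId, ActorId>> {
    let pairs = catalog
        .get_running_actors_for_source_backfill(source_backfill_fragment_id, source_fragment_id)
        .await?;
    // Each element is (backfill_actor_id, upstream_source_actor_id).
    Ok(pairs.into_iter().collect())
}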
pub async fn get_root_fragments( &self, job_ids: Vec<JobId>, ) -> MetaResult<(HashMap<JobId, (SharedFragmentInfo, PbStreamNode)>, HashMap<ActorId, WorkerId>)>
Get and filter the "root" fragments of the specified jobs.
The root fragment is the bottom-most fragment of its fragment graph, and can be an MView or a Source.
The root fragment connects to downstream jobs.
What can be the root fragment
- For a sink, it should have one Sink fragment.
- For an MV, it should have one MView fragment.
- For a table, it should have one MView fragment and one or two Source fragments. MView should be the root.
- For a source, it should have one Source fragment.
In other words, it is the MView or Sink fragment if one exists; otherwise it is the Source fragment.
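For example, a caller resolving where a new downstream job should attach could fetch the root fragments of its upstream jobs like this (a sketch; crate-internal imports are assumed):

// Fetch the root fragments of a set of upstream jobs and inspect them.
async fn inspect_root_fragments(
    catalog: &CatalogController,
    upstream_job_ids: Vec<JobId>,
) -> MetaResult<()> {
    let (root_fragments, actor_locations) = catalog.get_root_fragments(upstream_job_ids).await?;
    for (job_id, (fragment_info, _root_node)) in &root_fragments {
        // `fragment_info` describes the MView/Sink/Source root fragment of `job_id`,
        // and `actor_locations` maps each actor to the worker hosting it.
        let _ = (job_id, fragment_info);
    }
    let _ = actor_locations;
    Ok(())
}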
pub async fn get_root_fragment( &self, job_id: JobId, ) -> MetaResult<(SharedFragmentInfo, HashMap<ActorId, WorkerId>)>
pub async fn get_downstream_fragments( &self, job_id: JobId, ) -> MetaResult<(Vec<(DispatcherType, SharedFragmentInfo, PbStreamNode)>, HashMap<ActorId, WorkerId>)>
Get the downstream fragments connected to the specified job.
pub async fn load_source_fragment_ids( &self, ) -> MetaResult<HashMap<SourceId, BTreeSet<FragmentId>>>
pub async fn load_backfill_fragment_ids( &self, ) -> MetaResult<HashMap<SourceId, BTreeSet<(FragmentId, FragmentId)>>>
pub async fn get_all_upstream_sink_infos( &self, target_table: &PbTable, target_fragment_id: FragmentId, ) -> MetaResult<Vec<UpstreamSinkInfo>>
pub async fn get_all_upstream_sink_infos_in_txn<C>( &self, txn: &C, target_table: &PbTable, target_fragment_id: FragmentId, ) -> MetaResult<Vec<UpstreamSinkInfo>> where C: ConnectionTrait
pub async fn get_mview_fragment_by_id( &self, job_id: JobId, ) -> MetaResult<FragmentId>
pub async fn has_table_been_migrated( &self, table_id: TableId, ) -> MetaResult<bool>
pub async fn update_fragment_splits<C>( &self, txn: &C, fragment_splits: &HashMap<FragmentId, Vec<SplitImpl>>, ) -> MetaResult<()> where C: ConnectionTrait
impl CatalogController
pub async fn create_streaming_job_obj( txn: &DatabaseTransaction, obj_type: ObjectType, owner_id: UserId, database_id: Option<DatabaseId>, schema_id: Option<SchemaId>, create_type: PbCreateType, ctx: StreamContext, streaming_parallelism: StreamingParallelism, max_parallelism: usize, resource_type: ResourceType, backfill_parallelism: Option<StreamingParallelism>, ) -> MetaResult<JobId>
pub async fn create_job_catalog( &self, streaming_job: &mut StreamingJob, ctx: &StreamContext, parallelism: &Option<Parallelism>, max_parallelism: usize, dependencies: HashSet<ObjectId>, resource_type: ResourceType, backfill_parallelism: &Option<Parallelism>, ) -> MetaResult<()>
Creates catalogs for the streaming job, then notifies the frontend about them if the job is a materialized view.
Some of the fields in the given streaming job are placeholders; they will be updated later in prepare_streaming_job, and the frontend will be notified again in finish_streaming_job.
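Pieced together from the signatures on this page, the create-job flow roughly orders as sketched below. This is only a hedged sketch of that ordering: barrier scheduling, fragment building, and error handling are elided, the parallelism and max-parallelism values are illustrative, and the helper itself is hypothetical.

use std::collections::HashSet;

// 1. create_job_catalog           -- persist (placeholder) catalogs
// 2. prepare_stream_job_fragments -- insert fragments and actors into the meta store
// 3. finish_streaming_job         -- mark objects as Created and notify the frontend
async fn create_job_flow(
    catalog: &CatalogController,
    job_id: JobId,
    job: &mut StreamingJob,
    ctx: &StreamContext,
    fragments: &StreamJobFragmentsToCreate,
    resource_type: ResourceType,
) -> MetaResult<()> {
    catalog
        .create_job_catalog(
            job,
            ctx,
            &None,          // parallelism: not overridden here
            256,            // max_parallelism: illustrative value only
            HashSet::new(), // no extra dependencies
            resource_type,
            &None,          // backfill_parallelism: not overridden here
        )
        .await?;
    catalog
        .prepare_stream_job_fragments(fragments, &*job, /* for_replace */ false, None)
        .await?;
    catalog.finish_streaming_job(job_id).await?;
    Ok(())
}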
pub async fn create_internal_table_catalog( &self, job: &StreamingJob, incomplete_internal_tables: Vec<PbTable>, ) -> MetaResult<HashMap<TableId, TableId>>
Creates catalogs for internal tables, then notifies the frontend about them if the job is a materialized view.
Some of the fields in the given "incomplete" internal tables are placeholders; they will be updated later in prepare_streaming_job, and the frontend will be notified again in finish_streaming_job.
Returns a mapping from the temporary table id to the actual global table id.
pub async fn prepare_stream_job_fragments( &self, stream_job_fragments: &StreamJobFragmentsToCreate, streaming_job: &StreamingJob, for_replace: bool, backfill_orders: Option<BackfillOrders>, ) -> MetaResult<()>
pub async fn prepare_streaming_job<'a, I: Iterator<Item = &'a Fragment> + 'a>( &self, job_id: JobId, get_fragments: impl Fn() -> I + 'a, downstreams: &FragmentDownstreamRelation, for_replace: bool, creating_streaming_job: Option<&'a StreamingJob>, backfill_orders: Option<BackfillOrders>, ) -> MetaResult<()>
Inserts fragments and actors into the meta store. Used both for creating new jobs and for replacing existing jobs.
pub async fn build_cancel_command( &self, table_fragments: &StreamJobFragments, ) -> MetaResult<Command>
Builds a cancel command for the streaming job. If a sink (with a target table) needs to be dropped, additional information is required to build the barrier mutation.
pub async fn try_abort_creating_streaming_job( &self, job_id: JobId, is_cancelled: bool, ) -> MetaResult<(bool, Option<DatabaseId>)>
try_abort_creating_streaming_job aborts a job that is still in Initial status or created in FOREGROUND mode.
It returns (true, _) if the job is not found or has been aborted, and (_, Some(database_id)) if the database of job_id exists.
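A short sketch of interpreting the returned tuple (the helper is hypothetical; crate-internal imports assumed):

// Abort a creating job and react to what the controller reports back.
async fn abort_creating_job(
    catalog: &CatalogController,
    job_id: JobId,
) -> MetaResult<()> {
    let (aborted, database_id) = catalog
        .try_abort_creating_streaming_job(job_id, /* is_cancelled */ true)
        .await?;
    if aborted {
        // The job was not found or has been aborted.
    }
    if let Some(_database_id) = database_id {
        // The job's database exists; its id is reported back to the caller.
    }
    Ok(())
}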
pub async fn post_collect_job_fragments( &self, job_id: JobId, upstream_fragment_new_downstreams: &FragmentDownstreamRelation, new_sink_downstream: Option<FragmentDownstreamRelation>, split_assignment: Option<&SplitAssignment>, ) -> MetaResult<()>
pub async fn create_job_catalog_for_replace( &self, streaming_job: &StreamingJob, ctx: Option<&StreamContext>, specified_parallelism: Option<&NonZeroUsize>, expected_original_max_parallelism: Option<usize>, ) -> MetaResult<JobId>
pub async fn finish_streaming_job(&self, job_id: JobId) -> MetaResult<()>
finish_streaming_job marks job-related objects as Created and notifies the frontend.
pub async fn finish_streaming_job_inner( &self, txn: &DatabaseTransaction, job_id: JobId, ) -> MetaResult<(Operation, Vec<Object>, Vec<PbUserInfo>, Vec<PbObjectDependency>)>
finish_streaming_job marks job-related objects as Created and notifies the frontend.
pub async fn finish_replace_streaming_job( &self, tmp_id: JobId, streaming_job: StreamingJob, replace_upstream: FragmentReplaceUpstream, sink_into_table_context: SinkIntoTableContext, drop_table_connector_ctx: Option<&DropTableConnectorContext>, auto_refresh_schema_sinks: Option<Vec<FinishAutoRefreshSchemaSinkContext>>, ) -> MetaResult<NotificationVersion>
pub async fn finish_replace_streaming_job_inner( tmp_id: JobId, replace_upstream: FragmentReplaceUpstream, __arg2: SinkIntoTableContext, txn: &DatabaseTransaction, streaming_job: StreamingJob, drop_table_connector_ctx: Option<&DropTableConnectorContext>, auto_refresh_schema_sinks: Option<Vec<FinishAutoRefreshSchemaSinkContext>>, ) -> MetaResult<(Vec<PbObject>, Option<(Vec<PbUserInfo>, Vec<PartialObject>)>)>
pub async fn try_abort_replacing_streaming_job( &self, tmp_job_id: JobId, tmp_sink_ids: Option<Vec<ObjectId>>, ) -> MetaResult<()>
Abort the replacing streaming job by deleting the temporary job object.
pub async fn update_source_rate_limit_by_source_id( &self, source_id: SourceId, rate_limit: Option<u32>, ) -> MetaResult<(HashSet<JobId>, HashSet<FragmentId>)>
pub async fn mutate_fragments_by_job_id( &self, job_id: JobId, fragments_mutation_fn: impl FnMut(FragmentTypeMask, &mut PbStreamNode) -> MetaResult<bool>, err_msg: &'static str, ) -> MetaResult<HashSet<FragmentId>>
async fn mutate_fragment_by_fragment_id( &self, fragment_id: FragmentId, fragment_mutation_fn: impl FnMut(FragmentTypeMask, &mut PbStreamNode) -> bool, err_msg: &'static str, ) -> MetaResult<()>
pub async fn update_backfill_orders_by_job_id( &self, job_id: JobId, backfill_orders: Option<BackfillOrders>, ) -> MetaResult<()>
pub async fn update_backfill_rate_limit_by_job_id( &self, job_id: JobId, rate_limit: Option<u32>, ) -> MetaResult<HashSet<FragmentId>>
pub async fn update_sink_rate_limit_by_job_id( &self, sink_id: SinkId, rate_limit: Option<u32>, ) -> MetaResult<HashSet<FragmentId>>
pub async fn update_dml_rate_limit_by_job_id( &self, job_id: JobId, rate_limit: Option<u32>, ) -> MetaResult<HashSet<FragmentId>>
pub async fn update_source_props_by_source_id( &self, source_id: SourceId, alter_props: BTreeMap<String, String>, alter_secret_refs: BTreeMap<String, PbSecretRef>, skip_alter_on_fly_check: bool, ) -> MetaResult<WithOptionsSecResolved>
pub async fn update_sink_props_by_sink_id( &self, sink_id: SinkId, props: BTreeMap<String, String>, ) -> MetaResult<HashMap<String, String>>
pub async fn update_iceberg_table_props_by_table_id( &self, table_id: TableId, props: BTreeMap<String, String>, alter_iceberg_table_props: Option<PbExtraOptions>, ) -> MetaResult<(HashMap<String, String>, SinkId)>
pub async fn update_connection_and_dependent_objects_props( &self, connection_id: ConnectionId, alter_props: BTreeMap<String, String>, alter_secret_refs: BTreeMap<String, PbSecretRef>, ) -> MetaResult<(WithOptionsSecResolved, Vec<(SourceId, HashMap<String, String>)>, Vec<(SinkId, HashMap<String, String>)>)>
Updates connection properties and all dependent sources/sinks in a single transaction.
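A sketch of consuming the three-part result: the resolved connection options, plus the per-source and per-sink property maps that were rewritten in the same transaction (crate-internal imports assumed; the helper is hypothetical):

use std::collections::BTreeMap;

// Alter a connection's properties and count the dependent objects updated with it.
async fn alter_connection_props(
    catalog: &CatalogController,
    connection_id: ConnectionId,
    new_props: BTreeMap<String, String>,
) -> MetaResult<(usize, usize)> {
    let (_resolved_options, updated_sources, updated_sinks) = catalog
        .update_connection_and_dependent_objects_props(
            connection_id,
            new_props,
            BTreeMap::new(), // no secret references changed in this example
        )
        .await?;
    Ok((updated_sources.len(), updated_sinks.len()))
}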
pub async fn update_fragment_rate_limit_by_fragment_id( &self, fragment_id: FragmentId, rate_limit: Option<u32>, ) -> MetaResult<()>
pub async fn list_rate_limits(&self) -> MetaResult<Vec<RateLimitInfo>>
Note: FsFetch created in old versions is not included.
Since this is only used for debugging, that should be fine.
impl CatalogController
pub(crate) async fn notify_users_update( &self, user_infos: Vec<PbUserInfo>, ) -> NotificationVersion
pub async fn create_user( &self, pb_user: PbUserInfo, ) -> MetaResult<NotificationVersion>
pub async fn update_user( &self, update_user: PbUserInfo, update_fields: &[PbUpdateField], ) -> MetaResult<NotificationVersion>
pub async fn drop_user( &self, user_id: UserId, ) -> MetaResult<NotificationVersion>
pub async fn grant_privilege( &self, user_ids: Vec<UserId>, new_grant_privileges: &[PbGrantPrivilege], grantor: UserId, with_grant_option: bool, ) -> MetaResult<NotificationVersion>
pub async fn revoke_privilege( &self, user_ids: Vec<UserId>, revoke_grant_privileges: &[PbGrantPrivilege], granted_by: UserId, revoke_by: UserId, revoke_grant_option: bool, cascade: bool, ) -> MetaResult<NotificationVersion>
pub async fn grant_default_privileges( &self, user_ids: Vec<UserId>, database_id: DatabaseId, schema_ids: Vec<SchemaId>, grantor: UserId, actions: Vec<PbAction>, object_type: PbObjectType, grantees: Vec<UserId>, with_grant_option: bool, ) -> MetaResult<()>
pub async fn revoke_default_privileges( &self, user_ids: Vec<UserId>, database_id: DatabaseId, schema_ids: Vec<SchemaId>, actions: Vec<PbAction>, object_type: PbObjectType, grantees: Vec<UserId>, revoke_grant_option: bool, ) -> MetaResult<()>
Auto Trait Implementations
impl !Freeze for CatalogController
impl !RefUnwindSafe for CatalogController
impl Send for CatalogController
impl Sync for CatalogController
impl Unpin for CatalogController
impl !UnwindSafe for CatalogController
Blanket Implementations
impl<T> AsAny for T
impl<T> BorrowMut<T> for T where T: ?Sized
impl<T> Conv for T
impl<T> Downcast for T where T: AsAny + ?Sized
impl<T> FmtForward for T
impl<T> FutureExt for T
impl<T> Instrument for T
impl<T> Instrument for T
impl<T> IntoEither for T
impl<T> IntoRequest<T> for T
impl<T> IntoRequest<T> for T
impl<T> IntoResult<T> for T
impl<L> LayerExt<L> for L
impl<M> MetricVecRelabelExt for M
impl<T> Pipe for T where T: ?Sized
impl<T> Pointable for T
impl<T> PolicyExt for T where T: ?Sized
impl<T> Scope for T
impl<T> Tap for T