Auto merge of #108058 - Zoxc:query-ctxtx-byval, r=cjgillot

Pass `DepContext` and `QueryContext` by value when practical

This removes some indirections for a minor performance improvement.

| Benchmark | Time (before) | Time (after) | % change |
|:---|---:|---:|---:|
| 🟣 **clap**:check | 1.8294s | 1.8255s | -0.21% |
| 🟣 **hyper**:check | 0.2667s | 0.2669s | 0.07% |
| 🟣 **regex**:check | 1.0080s | 1.0063s | -0.17% |
| 🟣 **syn**:check | 1.6335s | 1.6295s | -0.24% |
| 🟣 **syntex_syntax**:check | 6.3633s | 6.3344s | -0.45% |
| Total | 11.1009s | 11.0627s | -0.34% |
| Summary | 1.0000s | 0.9980s | -0.20% |
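To illustrate the mechanics of the change: the dep-graph and query contexts are small `Copy` handles, so taking the receiver as `self` instead of `&self` passes the handle itself (one pointer) rather than a reference to it, and an extra dereference disappears from the method bodies. Below is a minimal sketch of that before/after pattern, using hypothetical stand-in types (`Ctxt`, `ByRef`, `ByValue`) rather than the compiler's own:

```rust
// Hypothetical stand-in for a context handle like TyCtxt: a small Copy
// wrapper around a reference to the real (interned) data.
#[derive(Clone, Copy)]
struct Ctxt<'a> {
    data: &'a u64,
}

// Before: the method takes `&self`, so callers pass a pointer to the handle
// (a pointer to a pointer) and the body needs an extra deref.
trait ByRef {
    fn value(&self) -> u64;
}

// After: the method takes `self` by value; the handle (one pointer wide)
// is passed directly.
trait ByValue: Copy {
    fn value(self) -> u64;
}

impl<'a> ByRef for Ctxt<'a> {
    fn value(&self) -> u64 {
        *(*self).data // double indirection: &Ctxt -> Ctxt -> u64
    }
}

impl<'a> ByValue for Ctxt<'a> {
    fn value(self) -> u64 {
        *self.data // single indirection: Ctxt -> u64
    }
}

fn main() {
    let x = 42u64;
    let cx = Ctxt { data: &x };
    // Both paths read the same value; only the calling convention differs.
    assert_eq!(ByRef::value(&cx), ByValue::value(cx));
}
```

In the diffs below, the same shift shows up as `&self` becoming `self` and `*self`/`**self` losing one level of indirection.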
bors 2023-02-17 08:23:53 +00:00
commit b5c8c329a7
4 changed files with 26 additions and 26 deletions


@@ -74,8 +74,8 @@ impl<'tcx> DepContext for TyCtxt<'tcx> {
     type DepKind = DepKind;
 
     #[inline]
-    fn with_stable_hashing_context<R>(&self, f: impl FnOnce(StableHashingContext<'_>) -> R) -> R {
-        TyCtxt::with_stable_hashing_context(*self, f)
+    fn with_stable_hashing_context<R>(self, f: impl FnOnce(StableHashingContext<'_>) -> R) -> R {
+        TyCtxt::with_stable_hashing_context(self, f)
     }
 
     #[inline]


@@ -53,7 +53,7 @@ impl<'tcx> HasDepContext for QueryCtxt<'tcx> {
 }
 
 impl QueryContext for QueryCtxt<'_> {
-    fn next_job_id(&self) -> QueryJobId {
+    fn next_job_id(self) -> QueryJobId {
         QueryJobId(
             NonZeroU64::new(
                 self.queries.jobs.fetch_add(1, rustc_data_structures::sync::Ordering::Relaxed),
@@ -62,31 +62,31 @@ impl QueryContext for QueryCtxt<'_> {
         )
     }
 
-    fn current_query_job(&self) -> Option<QueryJobId> {
-        tls::with_related_context(**self, |icx| icx.query)
+    fn current_query_job(self) -> Option<QueryJobId> {
+        tls::with_related_context(*self, |icx| icx.query)
     }
 
-    fn try_collect_active_jobs(&self) -> Option<QueryMap<DepKind>> {
-        self.queries.try_collect_active_jobs(**self)
+    fn try_collect_active_jobs(self) -> Option<QueryMap<DepKind>> {
+        self.queries.try_collect_active_jobs(*self)
     }
 
     // Interactions with on_disk_cache
-    fn load_side_effects(&self, prev_dep_node_index: SerializedDepNodeIndex) -> QuerySideEffects {
+    fn load_side_effects(self, prev_dep_node_index: SerializedDepNodeIndex) -> QuerySideEffects {
         self.queries
             .on_disk_cache
             .as_ref()
-            .map(|c| c.load_side_effects(**self, prev_dep_node_index))
+            .map(|c| c.load_side_effects(*self, prev_dep_node_index))
             .unwrap_or_default()
     }
 
-    fn store_side_effects(&self, dep_node_index: DepNodeIndex, side_effects: QuerySideEffects) {
+    fn store_side_effects(self, dep_node_index: DepNodeIndex, side_effects: QuerySideEffects) {
         if let Some(c) = self.queries.on_disk_cache.as_ref() {
             c.store_side_effects(dep_node_index, side_effects)
         }
     }
 
     fn store_side_effects_for_anon_node(
-        &self,
+        self,
         dep_node_index: DepNodeIndex,
         side_effects: QuerySideEffects,
     ) {
@@ -100,7 +100,7 @@ impl QueryContext for QueryCtxt<'_> {
     /// captured during execution and the actual result.
     #[inline(always)]
     fn start_query<R>(
-        &self,
+        self,
         token: QueryJobId,
         depth_limit: bool,
         diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
@@ -109,14 +109,14 @@ impl QueryContext for QueryCtxt<'_> {
         // The `TyCtxt` stored in TLS has the same global interner lifetime
        // as `self`, so we use `with_related_context` to relate the 'tcx lifetimes
         // when accessing the `ImplicitCtxt`.
-        tls::with_related_context(**self, move |current_icx| {
+        tls::with_related_context(*self, move |current_icx| {
             if depth_limit && !self.recursion_limit().value_within_limit(current_icx.query_depth) {
                 self.depth_limit_error(token);
             }
 
             // Update the `ImplicitCtxt` to point to our new query job.
             let new_icx = ImplicitCtxt {
-                tcx: **self,
+                tcx: *self,
                 query: Some(token),
                 diagnostics,
                 query_depth: current_icx.query_depth + depth_limit as usize,
@@ -130,7 +130,7 @@ impl QueryContext for QueryCtxt<'_> {
         })
     }
 
-    fn depth_limit_error(&self, job: QueryJobId) {
+    fn depth_limit_error(self, job: QueryJobId) {
         let mut span = None;
         let mut layout_of_depth = None;
         if let Some(map) = self.try_collect_active_jobs() {


@@ -23,7 +23,7 @@ pub trait DepContext: Copy {
     type DepKind: self::DepKind;
 
     /// Create a hashing context for hashing new results.
-    fn with_stable_hashing_context<R>(&self, f: impl FnOnce(StableHashingContext<'_>) -> R) -> R;
+    fn with_stable_hashing_context<R>(self, f: impl FnOnce(StableHashingContext<'_>) -> R) -> R;
 
     /// Access the DepGraph.
     fn dep_graph(&self) -> &DepGraph<Self::DepKind>;
@@ -37,7 +37,7 @@ pub trait DepContext: Copy {
     fn dep_kind_info(&self, dep_node: Self::DepKind) -> &DepKindStruct<Self>;
 
     #[inline(always)]
-    fn fingerprint_style(&self, kind: Self::DepKind) -> FingerprintStyle {
+    fn fingerprint_style(self, kind: Self::DepKind) -> FingerprintStyle {
         let data = self.dep_kind_info(kind);
         if data.is_anon {
             return FingerprintStyle::Opaque;
@@ -47,7 +47,7 @@ pub trait DepContext: Copy {
 
     #[inline(always)]
     /// Return whether this kind always require evaluation.
-    fn is_eval_always(&self, kind: Self::DepKind) -> bool {
+    fn is_eval_always(self, kind: Self::DepKind) -> bool {
         self.dep_kind_info(kind).is_eval_always
     }


@@ -101,22 +101,22 @@ impl QuerySideEffects {
 }
 
 pub trait QueryContext: HasDepContext {
-    fn next_job_id(&self) -> QueryJobId;
+    fn next_job_id(self) -> QueryJobId;
 
     /// Get the query information from the TLS context.
-    fn current_query_job(&self) -> Option<QueryJobId>;
+    fn current_query_job(self) -> Option<QueryJobId>;
 
-    fn try_collect_active_jobs(&self) -> Option<QueryMap<Self::DepKind>>;
+    fn try_collect_active_jobs(self) -> Option<QueryMap<Self::DepKind>>;
 
     /// Load side effects associated to the node in the previous session.
-    fn load_side_effects(&self, prev_dep_node_index: SerializedDepNodeIndex) -> QuerySideEffects;
+    fn load_side_effects(self, prev_dep_node_index: SerializedDepNodeIndex) -> QuerySideEffects;
 
     /// Register diagnostics for the given node, for use in next session.
-    fn store_side_effects(&self, dep_node_index: DepNodeIndex, side_effects: QuerySideEffects);
+    fn store_side_effects(self, dep_node_index: DepNodeIndex, side_effects: QuerySideEffects);
 
     /// Register diagnostics for the given node, for use in next session.
     fn store_side_effects_for_anon_node(
-        &self,
+        self,
         dep_node_index: DepNodeIndex,
         side_effects: QuerySideEffects,
     );
@@ -125,12 +125,12 @@ pub trait QueryContext: HasDepContext {
     /// new query job while it executes. It returns the diagnostics
     /// captured during execution and the actual result.
     fn start_query<R>(
-        &self,
+        self,
         token: QueryJobId,
         depth_limit: bool,
         diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
         compute: impl FnOnce() -> R,
     ) -> R;
 
-    fn depth_limit_error(&self, job: QueryJobId);
+    fn depth_limit_error(self, job: QueryJobId);
 }
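A side note on the `**self` → `*self` changes in the `QueryCtxt` hunks above: the wrapper dereferences to the inner `TyCtxt`, so with a `&self` receiver two dereferences were needed to produce a `TyCtxt` by value, while a by-value receiver needs only one. A rough sketch of that shape, using hypothetical stand-ins (`Wrapper` for `QueryCtxt`, `Inner` for `TyCtxt`) rather than the real compiler types:

```rust
use std::ops::Deref;

// `Inner` plays the role of the inner context; `Wrapper` is a Copy
// wrapper that Derefs to it, like the query context wrapping TyCtxt.
#[derive(Clone, Copy)]
struct Inner<'a> {
    limit: &'a usize,
}

#[derive(Clone, Copy)]
struct Wrapper<'a> {
    inner: Inner<'a>,
}

impl<'a> Deref for Wrapper<'a> {
    type Target = Inner<'a>;
    fn deref(&self) -> &Inner<'a> {
        &self.inner
    }
}

// Stands in for a function such as tls::with_related_context that wants
// the inner context by value.
fn takes_inner(cx: Inner<'_>) -> usize {
    *cx.limit
}

impl<'a> Wrapper<'a> {
    // Old shape: `self` is `&Wrapper`, so two derefs (`**self`) are needed
    // to reach an `Inner` by value.
    fn by_ref(&self) -> usize {
        takes_inner(**self)
    }

    // New shape: `self` is already a `Wrapper`, so one deref is enough.
    fn by_value(self) -> usize {
        takes_inner(*self)
    }
}

fn main() {
    let limit = 128;
    let cx = Wrapper { inner: Inner { limit: &limit } };
    assert_eq!(cx.by_ref(), cx.by_value());
}
```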