Revert "Auto merge of #113166 - moulins:ref-niches-initial, r=oli-obk"

This reverts commit 557359f925, reversing
changes made to 1e6c09a803.
Author: David Tolnay, 2023-07-21 22:35:57 -07:00
parent a5e2eca40e
commit 5bbf0a8306
48 changed files with 296 additions and 1067 deletions


@ -49,14 +49,6 @@ bitflags! {
}
}
/// Which niches (beyond the `null` niche) are available on references.
#[derive(Default, Copy, Clone, Hash, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
pub struct ReferenceNichePolicy {
pub size: bool,
pub align: bool,
}
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
pub enum IntegerType {
@ -354,33 +346,6 @@ impl TargetDataLayout {
}
}
#[inline]
pub fn target_usize_max(&self) -> u64 {
self.pointer_size.unsigned_int_max().try_into().unwrap()
}
#[inline]
pub fn target_isize_min(&self) -> i64 {
self.pointer_size.signed_int_min().try_into().unwrap()
}
#[inline]
pub fn target_isize_max(&self) -> i64 {
self.pointer_size.signed_int_max().try_into().unwrap()
}
/// Returns the (inclusive) range of possible addresses for an allocation with
/// the given size and alignment.
///
/// Note that this doesn't take into account target-specific limitations.
#[inline]
pub fn address_range_for(&self, size: Size, align: Align) -> (u64, u64) {
let end = Size::from_bytes(self.target_usize_max());
let min = align.bytes();
let max = (end - size).align_down_to(align).bytes();
(min, max)
}
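
A standalone sketch (not from this diff) of the arithmetic `address_range_for` performs, reduced to plain `u64` values; the free function and the numbers in `main` are illustrative assumptions, not the real `Size`/`Align` API.

fn address_range_for(usize_max: u64, size: u64, align: u64) -> (u64, u64) {
    // Lowest possible start address: the alignment itself (null is excluded).
    let min = align;
    // Highest possible start address: the largest aligned value such that
    // `size` bytes still fit below `usize_max`.
    let max = (usize_max - size) & !(align - 1);
    (min, max)
}

fn main() {
    // E.g. a 13-byte allocation aligned to 8 bytes on a 64-bit target.
    let (min, max) = address_range_for(u64::MAX, 13, 8);
    assert_eq!(min, 8);
    assert_eq!(max, 0xFFFF_FFFF_FFFF_FFF0);
}
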
#[inline]
pub fn vector_align(&self, vec_size: Size) -> AbiAndPrefAlign {
for &(size, align) in &self.vector_align {
@ -508,12 +473,6 @@ impl Size {
Size::from_bytes((self.bytes() + mask) & !mask)
}
#[inline]
pub fn align_down_to(self, align: Align) -> Size {
let mask = align.bytes() - 1;
Size::from_bytes(self.bytes() & !mask)
}
#[inline]
pub fn is_aligned(self, align: Align) -> bool {
let mask = align.bytes() - 1;
@ -1008,43 +967,6 @@ impl WrappingRange {
}
}
/// Returns `true` if `range` is contained in `self`.
#[inline(always)]
pub fn contains_range<I: Into<u128> + Ord>(&self, range: RangeInclusive<I>) -> bool {
if range.is_empty() {
return true;
}
let (vmin, vmax) = range.into_inner();
let (vmin, vmax) = (vmin.into(), vmax.into());
if self.start <= self.end {
self.start <= vmin && vmax <= self.end
} else {
// The last check is needed to cover the following case:
// `vmin ... start, end ... vmax`. In this special case there is no gap
// between `start` and `end` so we must return true.
self.start <= vmin || vmax <= self.end || self.start == self.end + 1
}
}
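
A standalone sketch (not from this diff) of the containment check above, reduced to plain integers so the wrapping case is easy to see; the function name and the u8-sized example range are illustrative.

fn wrapping_contains_range(start: u128, end: u128, vmin: u128, vmax: u128) -> bool {
    if start <= end {
        start <= vmin && vmax <= end
    } else {
        // Wrapping case: the valid values are `start..=MAX` plus `0..=end`.
        start <= vmin || vmax <= end || start == end + 1
    }
}

fn main() {
    // A u8-sized wrapping range 250..=5:
    assert!(wrapping_contains_range(250, 5, 252, 255)); // inside the high part
    assert!(wrapping_contains_range(250, 5, 0, 3));     // inside the low part
    assert!(!wrapping_contains_range(250, 5, 10, 20));  // in the gap
}
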
/// Returns `true` if `range` has an overlap with `self`.
#[inline(always)]
pub fn overlaps_range<I: Into<u128> + Ord>(&self, range: RangeInclusive<I>) -> bool {
if range.is_empty() {
return false;
}
let (vmin, vmax) = range.into_inner();
let (vmin, vmax) = (vmin.into(), vmax.into());
if self.start <= self.end {
self.start <= vmax && vmin <= self.end
} else {
self.start <= vmax || vmin <= self.end
}
}
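
A matching standalone sketch (not from this diff) of the overlap check, again on plain integers with an illustrative u8-sized wrapping range.

fn wrapping_overlaps_range(start: u128, end: u128, vmin: u128, vmax: u128) -> bool {
    if start <= end {
        start <= vmax && vmin <= end
    } else {
        start <= vmax || vmin <= end
    }
}

fn main() {
    // The same u8-sized wrapping range 250..=5:
    assert!(wrapping_overlaps_range(250, 5, 200, 251)); // touches the high part
    assert!(wrapping_overlaps_range(250, 5, 4, 10));    // touches the low part
    assert!(!wrapping_overlaps_range(250, 5, 10, 100)); // entirely in the gap
}
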
/// Returns `self` with replaced `start`
#[inline(always)]
pub fn with_start(mut self, start: u128) -> Self {
@ -1062,15 +984,9 @@ impl WrappingRange {
/// Returns `true` if `size` completely fills the range.
#[inline]
pub fn is_full_for(&self, size: Size) -> bool {
debug_assert!(self.is_in_range_for(size));
self.start == (self.end.wrapping_add(1) & size.unsigned_int_max())
}
/// Returns `true` if the range is valid for `size`.
#[inline(always)]
pub fn is_in_range_for(&self, size: Size) -> bool {
let max_value = size.unsigned_int_max();
self.start <= max_value && self.end <= max_value
debug_assert!(self.start <= max_value && self.end <= max_value);
self.start == (self.end.wrapping_add(1) & max_value)
}
}
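
A standalone sketch (not from this diff) of what "full" means here: the range covers every value of the given size exactly when `end + 1` wraps around to `start`. The helper below is an assumption-laden stand-in for the `Size`-based method above.

fn is_full_for(start: u128, end: u128, size_bits: u32) -> bool {
    let max_value = if size_bits >= 128 { u128::MAX } else { (1u128 << size_bits) - 1 };
    // Full exactly when `end + 1` wraps around to `start`.
    start == (end.wrapping_add(1) & max_value)
}

fn main() {
    assert!(is_full_for(0, 255, 8));    // 0..=255 covers every u8 value
    assert!(is_full_for(128, 127, 8));  // a wrapping range can be full too
    assert!(!is_full_for(1, 255, 8));   // 1..=255 excludes 0
}
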
@ -1511,21 +1427,16 @@ impl Niche {
pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
assert!(count > 0);
if count > self.available(cx) {
return None;
}
let Self { value, valid_range: v, .. } = *self;
let max_value = value.size(cx).unsigned_int_max();
let distance_end_zero = max_value - v.end;
let size = value.size(cx);
assert!(size.bits() <= 128);
let max_value = size.unsigned_int_max();
// Null-pointer optimization. This is guaranteed by Rust (at least for `Option<_>`),
// and offers better codegen opportunities.
if count == 1 && matches!(value, Pointer(_)) && !v.contains(0) {
// Select which bound to move to minimize the number of lost niches.
let valid_range =
if v.start - 1 > distance_end_zero { v.with_end(0) } else { v.with_start(0) };
return Some((0, Scalar::Initialized { value, valid_range }));
let niche = v.end.wrapping_add(1)..v.start;
let available = niche.end.wrapping_sub(niche.start) & max_value;
if count > available {
return None;
}
// Extend the range of valid values being reserved by moving either `v.start` or `v.end` bound.
@ -1548,6 +1459,7 @@ impl Niche {
let end = v.end.wrapping_add(count) & max_value;
Some((start, Scalar::Initialized { value, valid_range: v.with_end(end) }))
};
let distance_end_zero = max_value - v.end;
if v.start > v.end {
// zero is unavailable because wrapping occurs
move_end(v)
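
The guarantee behind the null-pointer special case above is observable directly with `std::mem::size_of` (a standalone check, not part of this hunk): `Option<&T>` stores `None` in the reference's forbidden null value, so no extra discriminant is needed.

use std::mem::size_of;

fn main() {
    // `None` is encoded as the (otherwise forbidden) null value of the reference,
    // so the `Option` adds no extra discriminant byte...
    assert_eq!(size_of::<Option<&u64>>(), size_of::<&u64>());
    // ...whereas a payload without a niche needs a separate tag.
    assert!(size_of::<Option<u64>>() > size_of::<u64>());
}
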


@ -339,8 +339,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
return pointee;
}
let assume_valid_ptr = true;
let result = Ty::ty_and_layout_pointee_info_at(*self, cx, offset, assume_valid_ptr);
let result = Ty::ty_and_layout_pointee_info_at(*self, cx, offset);
cx.pointee_infos.borrow_mut().insert((self.ty, offset), result);
result


@ -411,8 +411,8 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
if let Some(&pointee) = cx.pointee_infos.borrow().get(&(self.ty, offset)) {
return pointee;
}
let assume_valid_ptr = true;
let result = Ty::ty_and_layout_pointee_info_at(*self, cx, offset, assume_valid_ptr);
let result = Ty::ty_and_layout_pointee_info_at(*self, cx, offset);
cx.pointee_infos.borrow_mut().insert((self.ty, offset), result);
result


@ -244,6 +244,7 @@ const_eval_not_enough_caller_args =
const_eval_null_box = {$front_matter}: encountered a null box
const_eval_null_fn_ptr = {$front_matter}: encountered a null function pointer
const_eval_null_ref = {$front_matter}: encountered a null reference
const_eval_nullable_ptr_out_of_range = {$front_matter}: encountered a potentially null pointer, but expected something that cannot possibly fail to be {$in_range}
const_eval_nullary_intrinsic_fail =
could not evaluate nullary intrinsic


@ -1,6 +1,7 @@
use rustc_hir::def::DefKind;
use rustc_hir::{LangItem, CRATE_HIR_ID};
use rustc_middle::mir;
use rustc_middle::mir::interpret::PointerArithmetic;
use rustc_middle::ty::layout::{FnAbiOf, TyAndLayout};
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_session::lint::builtin::INVALID_ALIGNMENT;
@ -16,7 +17,7 @@ use rustc_ast::Mutability;
use rustc_hir::def_id::DefId;
use rustc_middle::mir::AssertMessage;
use rustc_span::symbol::{sym, Symbol};
use rustc_target::abi::{Align, HasDataLayout as _, Size};
use rustc_target::abi::{Align, Size};
use rustc_target::spec::abi::Abi as CallAbi;
use crate::errors::{LongRunning, LongRunningWarn};
@ -303,8 +304,8 @@ impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
Ok(ControlFlow::Break(()))
} else {
// Not alignable in const, return `usize::MAX`.
let usize_max = self.data_layout().target_usize_max();
self.write_scalar(Scalar::from_target_usize(usize_max, self), dest)?;
let usize_max = Scalar::from_target_usize(self.target_usize_max(), self);
self.write_scalar(usize_max, dest)?;
self.return_to_block(ret)?;
Ok(ControlFlow::Break(()))
}
@ -332,7 +333,7 @@ impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
// Inequality with integers other than null can never be known for sure.
(Scalar::Int(int), ptr @ Scalar::Ptr(..))
| (ptr @ Scalar::Ptr(..), Scalar::Int(int))
if int.is_null() && !self.ptr_scalar_range(ptr)?.contains(&0) =>
if int.is_null() && !self.scalar_may_be_null(ptr)? =>
{
0
}


@ -617,6 +617,7 @@ impl<'tcx> ReportErrorExt for ValidationErrorInfo<'tcx> {
MutableRefInConst => const_eval_mutable_ref_in_const,
NullFnPtr => const_eval_null_fn_ptr,
NeverVal => const_eval_never_val,
NullablePtrOutOfRange { .. } => const_eval_nullable_ptr_out_of_range,
PtrOutOfRange { .. } => const_eval_ptr_out_of_range,
OutOfRange { .. } => const_eval_out_of_range,
UnsafeCell => const_eval_unsafe_cell,
@ -731,7 +732,9 @@ impl<'tcx> ReportErrorExt for ValidationErrorInfo<'tcx> {
| InvalidFnPtr { value } => {
err.set_arg("value", value);
}
PtrOutOfRange { range, max_value } => add_range_arg(range, max_value, handler, err),
NullablePtrOutOfRange { range, max_value } | PtrOutOfRange { range, max_value } => {
add_range_arg(range, max_value, handler, err)
}
OutOfRange { range, max_value, value } => {
err.set_arg("value", value);
add_range_arg(range, max_value, handler, err);


@ -2,7 +2,8 @@
use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt};
use rustc_middle::{mir, ty};
use rustc_target::abi::{self, TagEncoding, VariantIdx, Variants, WrappingRange};
use rustc_target::abi::{self, TagEncoding};
use rustc_target::abi::{VariantIdx, Variants};
use super::{ImmTy, InterpCx, InterpResult, Machine, OpTy, PlaceTy, Scalar};
@ -179,24 +180,19 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// discriminant (encoded in niche/tag) and variant index are the same.
let variants_start = niche_variants.start().as_u32();
let variants_end = niche_variants.end().as_u32();
let variants_len = u128::from(variants_end - variants_start);
let variant = match tag_val.try_to_int() {
Err(dbg_val) => {
// So this is a pointer then, and casting to an int failed.
// Can only happen during CTFE.
// The pointer and niches ranges must be disjoint, then we know
// this is the untagged variant (as the value is not in the niche).
// Everything else, we conservatively reject.
let range = self.ptr_scalar_range(tag_val)?;
let niches = WrappingRange {
start: niche_start,
end: niche_start.wrapping_add(variants_len),
};
if niches.overlaps_range(range) {
// If the niche is just 0 and the ptr is not null, then we know this is
// okay. Everything else, we conservatively reject.
let ptr_valid = niche_start == 0
&& variants_start == variants_end
&& !self.scalar_may_be_null(tag_val)?;
if !ptr_valid {
throw_ub!(InvalidTag(dbg_val))
} else {
untagged_variant
}
untagged_variant
}
Ok(tag_bits) => {
let tag_bits = tag_bits.assert_bits(tag_layout.size);
@ -209,7 +205,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let variant_index_relative =
variant_index_relative_val.to_scalar().assert_bits(tag_val.layout.size);
// Check if this is in the range that indicates an actual discriminant.
if variant_index_relative <= variants_len {
if variant_index_relative <= u128::from(variants_end - variants_start) {
let variant_index_relative = u32::try_from(variant_index_relative)
.expect("we checked that this fits into a u32");
// Then computing the absolute variant idx should not overflow any more.
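
A simplified standalone sketch (not from this diff) of the tag-to-variant decoding used above: the relative value `tag - niche_start` selects a niched variant when it falls inside the niche range, and the untagged variant otherwise. The function shape and the `Option<bool>`-like numbers are illustrative assumptions, not taken from the real layout code.

fn decode_variant(
    tag: u128,
    niche_start: u128,
    variants_start: u32,
    variants_len: u128,
    untagged_variant: u32,
) -> u32 {
    let relative = tag.wrapping_sub(niche_start);
    if relative <= variants_len {
        variants_start + relative as u32
    } else {
        untagged_variant
    }
}

fn main() {
    // Illustrative numbers for an `Option<bool>`-like layout: the single niched
    // variant (`None`, index 0) lives at tag value 2; anything else is `Some`.
    assert_eq!(decode_variant(2, 2, 0, 0, 1), 0); // the niched variant
    assert_eq!(decode_variant(0, 2, 0, 0, 1), 1); // the untagged variant
    assert_eq!(decode_variant(1, 2, 0, 0, 1), 1); // the untagged variant
}
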


@ -5,7 +5,9 @@
use rustc_hir::def_id::DefId;
use rustc_middle::mir::{
self,
interpret::{Allocation, ConstAllocation, ConstValue, GlobalId, InterpResult, Scalar},
interpret::{
Allocation, ConstAllocation, ConstValue, GlobalId, InterpResult, PointerArithmetic, Scalar,
},
BinOp, NonDivergingIntrinsic,
};
use rustc_middle::ty;
@ -13,7 +15,7 @@ use rustc_middle::ty::layout::{LayoutOf as _, ValidityRequirement};
use rustc_middle::ty::GenericArgsRef;
use rustc_middle::ty::{Ty, TyCtxt};
use rustc_span::symbol::{sym, Symbol};
use rustc_target::abi::{Abi, Align, HasDataLayout as _, Primitive, Size};
use rustc_target::abi::{Abi, Align, Primitive, Size};
use super::{
util::ensure_monomorphic_enough, CheckInAllocMsg, ImmTy, InterpCx, Machine, OpTy, PlaceTy,
@ -359,12 +361,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
)?;
// Perform division by size to compute return value.
let dl = self.data_layout();
let ret_layout = if intrinsic_name == sym::ptr_offset_from_unsigned {
assert!(0 <= dist && dist <= dl.target_isize_max());
assert!(0 <= dist && dist <= self.target_isize_max());
usize_layout
} else {
assert!(dl.target_isize_min() <= dist && dist <= dl.target_isize_max());
assert!(self.target_isize_min() <= dist && dist <= self.target_isize_max());
isize_layout
};
let pointee_layout = self.layout_of(instance_args.type_at(0))?;


@ -10,7 +10,6 @@ use std::assert_matches::assert_matches;
use std::borrow::Cow;
use std::collections::VecDeque;
use std::fmt;
use std::ops::RangeInclusive;
use std::ptr;
use rustc_ast::Mutability;
@ -1223,34 +1222,24 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// Machine pointer introspection.
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// Turn a pointer-sized scalar into a (non-empty) range of possible values.
/// Test if this value might be null.
/// If the machine does not support ptr-to-int casts, this is conservative.
pub fn ptr_scalar_range(
&self,
scalar: Scalar<M::Provenance>,
) -> InterpResult<'tcx, RangeInclusive<u64>> {
if let Ok(int) = scalar.to_target_usize(self) {
return Ok(int..=int);
}
let ptr = scalar.to_pointer(self)?;
// Can only happen during CTFE.
Ok(match self.ptr_try_get_alloc_id(ptr) {
Ok((alloc_id, offset, _)) => {
let offset = offset.bytes();
let (size, align, _) = self.get_alloc_info(alloc_id);
let dl = self.data_layout();
if offset > size.bytes() {
// If the pointer is out-of-bounds, we do not have a
// meaningful range to return.
0..=dl.target_usize_max()
} else {
let (min, max) = dl.address_range_for(size, align);
(min + offset)..=(max + offset)
pub fn scalar_may_be_null(&self, scalar: Scalar<M::Provenance>) -> InterpResult<'tcx, bool> {
Ok(match scalar.try_to_int() {
Ok(int) => int.is_null(),
Err(_) => {
// Can only happen during CTFE.
let ptr = scalar.to_pointer(self)?;
match self.ptr_try_get_alloc_id(ptr) {
Ok((alloc_id, offset, _)) => {
let (size, _align, _kind) = self.get_alloc_info(alloc_id);
// If the pointer is out-of-bounds, it may be null.
// Note that one-past-the-end (offset == size) is still inbounds, and never null.
offset > size
}
Err(_offset) => bug!("a non-int scalar is always a pointer"),
}
}
Err(_offset) => bug!("a non-int scalar is always a pointer"),
})
}
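
A simplified standalone sketch (not from this diff) of the conservative "might this scalar be null?" decision above; the `SimpleScalar` type is an assumption standing in for the interpreter's real `Scalar` and allocation machinery.

enum SimpleScalar {
    Int(u64),
    // A pointer into some allocation: (size of the allocation, offset within it).
    Ptr { alloc_size: u64, offset: u64 },
}

fn scalar_may_be_null(s: &SimpleScalar) -> bool {
    match s {
        SimpleScalar::Int(v) => *v == 0,
        // In-bounds pointers (including one-past-the-end) are never null; only an
        // out-of-bounds offset forces the conservative "might be null" answer.
        SimpleScalar::Ptr { alloc_size, offset } => offset > alloc_size,
    }
}

fn main() {
    assert!(scalar_may_be_null(&SimpleScalar::Int(0)));
    assert!(!scalar_may_be_null(&SimpleScalar::Int(42)));
    assert!(!scalar_may_be_null(&SimpleScalar::Ptr { alloc_size: 16, offset: 16 }));
    assert!(scalar_may_be_null(&SimpleScalar::Ptr { alloc_size: 16, offset: 20 }));
}
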


@ -19,7 +19,9 @@ use rustc_middle::mir::interpret::{
use rustc_middle::ty;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_span::symbol::{sym, Symbol};
use rustc_target::abi::{Abi, FieldIdx, Scalar as ScalarAbi, Size, VariantIdx, Variants};
use rustc_target::abi::{
Abi, FieldIdx, Scalar as ScalarAbi, Size, VariantIdx, Variants, WrappingRange,
};
use std::hash::Hash;
@ -552,7 +554,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
// FIXME: Check if the signature matches
} else {
// Otherwise (for standalone Miri), we have to still check it to be non-null.
if self.ecx.ptr_scalar_range(value)?.contains(&0) {
if self.ecx.scalar_may_be_null(value)? {
throw_validation_failure!(self.path, NullFnPtr);
}
}
@ -593,36 +595,46 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
) -> InterpResult<'tcx> {
let size = scalar_layout.size(self.ecx);
let valid_range = scalar_layout.valid_range(self.ecx);
let WrappingRange { start, end } = valid_range;
let max_value = size.unsigned_int_max();
assert!(valid_range.end <= max_value);
match scalar.try_to_int() {
Ok(int) => {
// We have an explicit int: check it against the valid range.
let bits = int.assert_bits(size);
if valid_range.contains(bits) {
Ok(())
} else {
throw_validation_failure!(
self.path,
OutOfRange { value: format!("{bits}"), range: valid_range, max_value }
)
}
}
assert!(end <= max_value);
let bits = match scalar.try_to_int() {
Ok(int) => int.assert_bits(size),
Err(_) => {
// So this is a pointer then, and casting to an int failed.
// Can only happen during CTFE.
// We check if the possible addresses are compatible with the valid range.
let range = self.ecx.ptr_scalar_range(scalar)?;
if valid_range.contains_range(range) {
Ok(())
// We support 2 kinds of ranges here: full range, and excluding zero.
if start == 1 && end == max_value {
// Only null is the niche. So make sure the ptr is NOT null.
if self.ecx.scalar_may_be_null(scalar)? {
throw_validation_failure!(
self.path,
NullablePtrOutOfRange { range: valid_range, max_value }
)
} else {
return Ok(());
}
} else if scalar_layout.is_always_valid(self.ecx) {
// Easy. (This is reachable if `enforce_number_validity` is set.)
return Ok(());
} else {
// Reject conservatively, because the pointer *could* have a bad value.
// Conservatively, we reject, because the pointer *could* have a bad
// value.
throw_validation_failure!(
self.path,
PtrOutOfRange { range: valid_range, max_value }
)
}
}
};
// Now compare.
if valid_range.contains(bits) {
Ok(())
} else {
throw_validation_failure!(
self.path,
OutOfRange { value: format!("{bits}"), range: valid_range, max_value }
)
}
}
}
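
The restored pointer branch above accepts exactly two range shapes; this toy predicate (a standalone sketch, not the actual validator) mirrors that decision under the assumption that the "always valid" case means a full range.

fn pointer_in_range_ok(start: u128, end: u128, max_value: u128, may_be_null: bool) -> bool {
    if start == 1 && end == max_value {
        // Only null is excluded: accept iff the pointer is known non-null.
        !may_be_null
    } else if start == 0 && end == max_value {
        // Full range: every value is valid, pointers included.
        true
    } else {
        // Any other range could exclude the pointer's (statically unknown) address.
        false
    }
}

fn main() {
    let max = u64::MAX as u128;
    assert!(pointer_in_range_ok(1, max, max, false)); // e.g. a reference
    assert!(!pointer_in_range_ok(1, max, max, true)); // might be null: reject
    assert!(!pointer_in_range_ok(0, 255, max, false)); // a niched range: reject
}
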


@ -28,7 +28,6 @@ use rustc_span::edition::{Edition, DEFAULT_EDITION};
use rustc_span::symbol::sym;
use rustc_span::FileName;
use rustc_span::SourceFileHashAlgorithm;
use rustc_target::abi::ReferenceNichePolicy;
use rustc_target::spec::{CodeModel, LinkerFlavorCli, MergeFunctions, PanicStrategy, RelocModel};
use rustc_target::spec::{RelroLevel, SanitizerSet, SplitDebuginfo, StackProtector, TlsModel};
@ -821,7 +820,6 @@ fn test_unstable_options_tracking_hash() {
tracked!(profile_emit, Some(PathBuf::from("abc")));
tracked!(profile_sample_use, Some(PathBuf::from("abc")));
tracked!(profiler_runtime, "abc".to_string());
tracked!(reference_niches, Some(ReferenceNichePolicy { size: true, align: false }));
tracked!(relax_elf_relocations, Some(true));
tracked!(relro_level, Some(RelroLevel::Full));
tracked!(remap_cwd_prefix, Some(PathBuf::from("abc")));


@ -301,7 +301,6 @@ provide! { tcx, def_id, other, cdata,
is_profiler_runtime => { cdata.root.profiler_runtime }
required_panic_strategy => { cdata.root.required_panic_strategy }
panic_in_drop_strategy => { cdata.root.panic_in_drop_strategy }
reference_niches_policy => { cdata.root.reference_niches_policy }
extern_crate => {
let r = *cdata.extern_crate.lock();
r.map(|c| &*tcx.arena.alloc(c))


@ -673,7 +673,6 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
stable_crate_id: tcx.def_path_hash(LOCAL_CRATE.as_def_id()).stable_crate_id(),
required_panic_strategy: tcx.required_panic_strategy(LOCAL_CRATE),
panic_in_drop_strategy: tcx.sess.opts.unstable_opts.panic_in_drop,
reference_niches_policy: tcx.reference_niches_policy(LOCAL_CRATE),
edition: tcx.sess.edition(),
has_global_allocator: tcx.has_global_allocator(LOCAL_CRATE),
has_alloc_error_handler: tcx.has_alloc_error_handler(LOCAL_CRATE),


@ -32,7 +32,7 @@ use rustc_span::edition::Edition;
use rustc_span::hygiene::{ExpnIndex, MacroKind};
use rustc_span::symbol::{Ident, Symbol};
use rustc_span::{self, ExpnData, ExpnHash, ExpnId, Span};
use rustc_target::abi::{FieldIdx, ReferenceNichePolicy, VariantIdx};
use rustc_target::abi::{FieldIdx, VariantIdx};
use rustc_target::spec::{PanicStrategy, TargetTriple};
use std::marker::PhantomData;
@ -251,7 +251,6 @@ pub(crate) struct CrateRoot {
stable_crate_id: StableCrateId,
required_panic_strategy: Option<PanicStrategy>,
panic_in_drop_strategy: PanicStrategy,
reference_niches_policy: ReferenceNichePolicy,
edition: Edition,
has_global_allocator: bool,
has_alloc_error_handler: bool,


@ -388,6 +388,7 @@ pub enum ValidationErrorKind<'tcx> {
MutableRefInConst,
NullFnPtr,
NeverVal,
NullablePtrOutOfRange { range: WrappingRange, max_value: u128 },
PtrOutOfRange { range: WrappingRange, max_value: u128 },
OutOfRange { value: String, range: WrappingRange, max_value: u128 },
UnsafeCell,


@ -19,19 +19,33 @@ pub trait PointerArithmetic: HasDataLayout {
#[inline(always)]
fn max_size_of_val(&self) -> Size {
Size::from_bytes(self.data_layout().target_isize_max())
Size::from_bytes(self.target_isize_max())
}
#[inline]
fn target_usize_max(&self) -> u64 {
self.pointer_size().unsigned_int_max().try_into().unwrap()
}
#[inline]
fn target_isize_min(&self) -> i64 {
self.pointer_size().signed_int_min().try_into().unwrap()
}
#[inline]
fn target_isize_max(&self) -> i64 {
self.pointer_size().signed_int_max().try_into().unwrap()
}
#[inline]
fn target_usize_to_isize(&self, val: u64) -> i64 {
let dl = self.data_layout();
let val = val as i64;
// Now wrap-around into the machine_isize range.
if val > dl.target_isize_max() {
if val > self.target_isize_max() {
// This can only happen if the ptr size is < 64, so we know max_usize_plus_1 fits into
// i64.
debug_assert!(dl.pointer_size.bits() < 64);
let max_usize_plus_1 = 1u128 << dl.pointer_size.bits();
debug_assert!(self.pointer_size().bits() < 64);
let max_usize_plus_1 = 1u128 << self.pointer_size().bits();
val - i64::try_from(max_usize_plus_1).unwrap()
} else {
val
@ -44,7 +58,7 @@ pub trait PointerArithmetic: HasDataLayout {
#[inline]
fn truncate_to_ptr(&self, (val, over): (u64, bool)) -> (u64, bool) {
let val = u128::from(val);
let max_ptr_plus_1 = 1u128 << self.data_layout().pointer_size.bits();
let max_ptr_plus_1 = 1u128 << self.pointer_size().bits();
(u64::try_from(val % max_ptr_plus_1).unwrap(), over || val >= max_ptr_plus_1)
}
@ -62,11 +76,11 @@ pub trait PointerArithmetic: HasDataLayout {
let n = i.unsigned_abs();
if i >= 0 {
let (val, over) = self.overflowing_offset(val, n);
(val, over || i > self.data_layout().target_isize_max())
(val, over || i > self.target_isize_max())
} else {
let res = val.overflowing_sub(n);
let (val, over) = self.truncate_to_ptr(res);
(val, over || i < self.data_layout().target_isize_min())
(val, over || i < self.target_isize_min())
}
}
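
A standalone sketch (not from this diff) of the usize-to-isize wrap-around performed by `target_usize_to_isize`, written for a hypothetical 16-bit pointer width.

fn target_usize_to_isize(val: u64, pointer_bits: u32) -> i64 {
    let isize_max = (1i64 << (pointer_bits - 1)) - 1;
    let val = val as i64;
    if val > isize_max {
        // Wrap into the negative half by subtracting 2^pointer_bits.
        val - (1i64 << pointer_bits)
    } else {
        val
    }
}

fn main() {
    assert_eq!(target_usize_to_isize(0xFFFF, 16), -1);
    assert_eq!(target_usize_to_isize(0x8000, 16), i16::MIN as i64);
    assert_eq!(target_usize_to_isize(0x7FFF, 16), i16::MAX as i64);
}
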


@ -111,11 +111,6 @@ impl EraseType
>()];
}
impl EraseType for Result<ty::layout::TyAndNaiveLayout<'_>, &ty::layout::LayoutError<'_>> {
type Result =
[u8; size_of::<Result<ty::layout::TyAndNaiveLayout<'_>, &ty::layout::LayoutError<'_>>>()];
}
impl EraseType for Result<ty::Const<'_>, mir::interpret::LitToConstError> {
type Result = [u8; size_of::<Result<ty::Const<'static>, mir::interpret::LitToConstError>>()];
}
@ -296,7 +291,6 @@ trivial! {
rustc_span::Symbol,
rustc_span::symbol::Ident,
rustc_target::spec::PanicStrategy,
rustc_target::abi::ReferenceNichePolicy,
rustc_type_ir::Variance,
u32,
usize,


@ -1394,18 +1394,6 @@ rustc_queries! {
desc { "computing layout of `{}`", key.value }
}
/// Computes the naive layout approximation of a type. Note that this implicitly
/// executes in "reveal all" mode, and will normalize the input type.
///
/// Unlike `layout_of`, this doesn't look past references (beyond the `Pointee::Metadata`
/// projection), and as such can be called on generic types like `Option<&T>`.
query naive_layout_of(
key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>
) -> Result<ty::layout::TyAndNaiveLayout<'tcx>, &'tcx ty::layout::LayoutError<'tcx>> {
depth_limit
desc { "computing layout (naive) of `{}`", key.value }
}
/// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
///
/// NB: this doesn't handle virtual calls - those should use `fn_abi_of_instance`
@ -1481,11 +1469,6 @@ rustc_queries! {
desc { "getting a crate's configured panic-in-drop strategy" }
separate_provide_extern
}
query reference_niches_policy(_: CrateNum) -> abi::ReferenceNichePolicy {
fatal_cycle
desc { "getting a crate's policy for size and alignment niches of references" }
separate_provide_extern
}
query is_no_builtins(_: CrateNum) -> bool {
fatal_cycle
desc { "getting whether a crate has `#![no_builtins]`" }


@ -313,16 +313,7 @@ impl<'tcx> SizeSkeleton<'tcx> {
) -> Result<SizeSkeleton<'tcx>, &'tcx LayoutError<'tcx>> {
debug_assert!(!ty.has_non_region_infer());
// First, try computing an exact naive layout (this covers simple types with generic
// references, where a full static layout would fail).
if let Ok(layout) = tcx.naive_layout_of(param_env.and(ty)) {
if layout.exact {
return Ok(SizeSkeleton::Known(layout.size));
}
}
// Second, try computing a full static layout (this covers cases when the naive layout
// wasn't smart enough, but cannot deal with generic references).
// First try computing a static layout.
let err = match tcx.layout_of(param_env.and(ty)) {
Ok(layout) => {
return Ok(SizeSkeleton::Known(layout.size));
@ -336,7 +327,6 @@ impl<'tcx> SizeSkeleton<'tcx> {
) => return Err(e),
};
// Third, fall back to ad-hoc cases.
match *ty.kind() {
ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
let non_zero = !ty.is_unsafe_ptr();
@ -631,219 +621,6 @@ impl<T, E> MaybeResult<T> for Result<T, E> {
pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
#[derive(Copy, Clone, Debug, HashStable)]
pub struct TyAndNaiveLayout<'tcx> {
pub ty: Ty<'tcx>,
pub layout: NaiveLayout,
}
impl std::ops::Deref for TyAndNaiveLayout<'_> {
type Target = NaiveLayout;
fn deref(&self) -> &Self::Target {
&self.layout
}
}
impl std::ops::DerefMut for TyAndNaiveLayout<'_> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.layout
}
}
/// Extremely simplified approximation of a type's layout returned by the
/// `naive_layout_of` query.
#[derive(Copy, Clone, Debug, HashStable)]
pub struct NaiveLayout {
pub abi: NaiveAbi,
/// Niche information, required for tracking non-null enum optimizations.
pub niches: NaiveNiches,
/// An underestimate of the layout's size.
pub size: Size,
/// An underestimate of the layout's required alignment.
pub align: Align,
/// If `true`, `size` and `align` must be exact values.
pub exact: bool,
}
#[derive(Copy, Clone, Debug, Eq, PartialEq, HashStable)]
pub enum NaiveNiches {
None,
Some,
Maybe,
}
#[derive(Copy, Clone, Debug, Eq, PartialEq, HashStable)]
pub enum NaiveAbi {
/// A scalar layout, always implies `exact` and a non-zero `size`.
Scalar(Primitive),
/// An uninhabited layout. (needed to properly track `Scalar` and niches)
Uninhabited,
/// An unsized aggregate. (needed to properly track `Scalar` and niches)
Unsized,
/// Any other sized layout.
Sized,
}
impl NaiveAbi {
#[inline]
pub fn as_aggregate(self) -> Self {
match self {
NaiveAbi::Scalar(_) => NaiveAbi::Sized,
_ => self,
}
}
}
impl NaiveLayout {
/// The layout of an empty aggregate, e.g. `()`.
pub const EMPTY: Self = Self {
size: Size::ZERO,
align: Align::ONE,
exact: true,
abi: NaiveAbi::Sized,
niches: NaiveNiches::None,
};
/// Returns whether `self` is a valid approximation of the given full `layout`.
///
/// This should always return `true` when both layouts are computed from the same type.
pub fn is_refined_by(&self, layout: Layout<'_>) -> bool {
if self.size > layout.size() || self.align > layout.align().abi {
return false;
}
if let NaiveAbi::Scalar(prim) = self.abi {
if !self.exact
|| self.size == Size::ZERO
|| !matches!(layout.abi(), Abi::Scalar(s) if s.primitive() == prim)
{
return false;
}
}
match (self.niches, layout.largest_niche()) {
(NaiveNiches::None, Some(_)) => return false,
(NaiveNiches::Some, None) => return false,
_ => (),
}
!self.exact || (self.size, self.align) == (layout.size(), layout.align().abi)
}
/// Returns if this layout is known to be pointer-like (`None` if uncertain)
///
/// See the corresponding `Layout::is_pointer_like` method.
pub fn is_pointer_like(&self, dl: &TargetDataLayout) -> Option<bool> {
match self.abi {
NaiveAbi::Scalar(_) => {
assert!(self.exact);
Some(self.size == dl.pointer_size && self.align == dl.pointer_align.abi)
}
NaiveAbi::Uninhabited | NaiveAbi::Unsized => Some(false),
NaiveAbi::Sized if self.exact => Some(false),
NaiveAbi::Sized => None,
}
}
/// Artificially lowers the alignment of this layout.
#[must_use]
#[inline]
pub fn packed(mut self, align: Align) -> Self {
if self.align > align {
self.align = align;
self.abi = self.abi.as_aggregate();
}
self
}
/// Artificially raises the alignment of this layout.
#[must_use]
#[inline]
pub fn align_to(mut self, align: Align) -> Self {
if align > self.align {
self.align = align;
self.abi = self.abi.as_aggregate();
}
self
}
/// Artificially makes this layout inexact.
#[must_use]
#[inline]
pub fn inexact(mut self) -> Self {
self.abi = self.abi.as_aggregate();
self.exact = false;
self
}
/// Pads this layout so that its size is a multiple of `align`.
#[must_use]
#[inline]
pub fn pad_to_align(mut self, align: Align) -> Self {
let new_size = self.size.align_to(align);
if new_size > self.size {
self.abi = self.abi.as_aggregate();
self.size = new_size;
}
self
}
/// Returns the layout of `self` immediately followed by `other`, without any
/// padding between them, as in a packed `struct` or tuple.
#[must_use]
#[inline]
pub fn concat(&self, other: &Self, dl: &TargetDataLayout) -> Option<Self> {
use NaiveAbi::*;
let size = self.size.checked_add(other.size, dl)?;
let align = cmp::max(self.align, other.align);
let exact = self.exact && other.exact;
let abi = match (self.abi, other.abi) {
// The uninhabited and unsized ABIs override everything.
(Uninhabited, _) | (_, Uninhabited) => Uninhabited,
(Unsized, _) | (_, Unsized) => Unsized,
// A scalar struct must have a single non ZST-field.
(_, s @ Scalar(_)) if exact && self.size == Size::ZERO => s,
(s @ Scalar(_), _) if exact && other.size == Size::ZERO => s,
// Default case.
(_, _) => Sized,
};
let niches = match (self.niches, other.niches) {
(NaiveNiches::Some, _) | (_, NaiveNiches::Some) => NaiveNiches::Some,
(NaiveNiches::None, NaiveNiches::None) => NaiveNiches::None,
(_, _) => NaiveNiches::Maybe,
};
Some(Self { abi, size, align, exact, niches })
}
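
A standalone sketch (not from this diff) of the size/align bookkeeping that `concat` and `pad_to_align` perform, worked through for a struct like `(u8, u32)` with plain integers standing in for `Size` and `Align`.

fn concat(a: (u64, u64), b: (u64, u64)) -> (u64, u64) {
    // (size, align): sizes add with no padding in between, alignments take the max.
    (a.0 + b.0, a.1.max(b.1))
}

fn pad_to_align((size, align): (u64, u64)) -> (u64, u64) {
    // Round the size up to a multiple of the alignment.
    ((size + align - 1) / align * align, align)
}

fn main() {
    let u8_layout = (1, 1);
    let u32_layout = (4, 4);
    // The packed concatenation may underestimate the real size (no inter-field padding)...
    assert_eq!(concat(u8_layout, u32_layout), (5, 4));
    // ...and padding to the alignment then rounds the size up.
    assert_eq!(pad_to_align(concat(u8_layout, u32_layout)), (8, 4));
}
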
/// Returns the layout of `self` superposed with `other`, as in an `enum`
/// or an `union`.
///
/// Note: This always ignores niche information from `other`.
#[must_use]
#[inline]
pub fn union(&self, other: &Self) -> Self {
use NaiveAbi::*;
let size = cmp::max(self.size, other.size);
let align = cmp::max(self.align, other.align);
let exact = self.exact && other.exact;
let abi = match (self.abi, other.abi) {
// The unsized ABI overrides everything.
(Unsized, _) | (_, Unsized) => Unsized,
// A scalar union must have a single non ZST-field...
(_, s @ Scalar(_)) if exact && self.size == Size::ZERO => s,
(s @ Scalar(_), _) if exact && other.size == Size::ZERO => s,
// ...or identical scalar fields.
(Scalar(s1), Scalar(s2)) if s1 == s2 => Scalar(s1),
// Default cases.
(Uninhabited, Uninhabited) => Uninhabited,
(_, _) => Sized,
};
Self { abi, size, align, exact, niches: self.niches }
}
}
/// Trait for contexts that want to be able to compute layouts of types.
/// This automatically gives access to `LayoutOf`, through a blanket `impl`.
pub trait LayoutOfHelpers<'tcx>: HasDataLayout + HasTyCtxt<'tcx> + HasParamEnv<'tcx> {
@ -896,19 +673,6 @@ pub trait LayoutOf<'tcx>: LayoutOfHelpers<'tcx> {
.map_err(|err| self.handle_layout_err(*err, span, ty)),
)
}
/// Computes the naive layout estimate of a type. Note that this implicitly
/// executes in "reveal all" mode, and will normalize the input type.
///
/// Unlike `layout_of`, this doesn't look past references (beyond the `Pointee::Metadata`
/// projection), and as such can be called on generic types like `Option<&T>`.
#[inline]
fn naive_layout_of(
&self,
ty: Ty<'tcx>,
) -> Result<TyAndNaiveLayout<'tcx>, &'tcx LayoutError<'tcx>> {
self.tcx().naive_layout_of(self.param_env().and(ty))
}
}
impl<'tcx, C: LayoutOfHelpers<'tcx>> LayoutOf<'tcx> for C {}
@ -1205,9 +969,6 @@ where
this: TyAndLayout<'tcx>,
cx: &C,
offset: Size,
// If true, assume that pointers are either null or valid (according to their type),
// enabling extra optimizations.
mut assume_valid_ptr: bool,
) -> Option<PointeeInfo> {
let tcx = cx.tcx();
let param_env = cx.param_env();
@ -1230,19 +991,19 @@ where
// Freeze/Unpin queries, and can save time in the codegen backend (noalias
// attributes in LLVM have compile-time cost even in unoptimized builds).
let optimize = tcx.sess.opts.optimize != OptLevel::No;
let safe = match (assume_valid_ptr, mt) {
(true, hir::Mutability::Not) => Some(PointerKind::SharedRef {
let kind = match mt {
hir::Mutability::Not => PointerKind::SharedRef {
frozen: optimize && ty.is_freeze(tcx, cx.param_env()),
}),
(true, hir::Mutability::Mut) => Some(PointerKind::MutableRef {
},
hir::Mutability::Mut => PointerKind::MutableRef {
unpin: optimize && ty.is_unpin(tcx, cx.param_env()),
}),
(false, _) => None,
},
};
tcx.layout_of(param_env.and(ty)).ok().map(|layout| PointeeInfo {
size: layout.size,
align: layout.align.abi,
safe,
safe: Some(kind),
})
}
@ -1251,21 +1012,19 @@ where
// Within the discriminant field, only the niche itself is
// always initialized, so we only check for a pointer at its
// offset.
//
// If the niche is a pointer, it's either valid (according
// to its type), or null (which the niche field's scalar
// validity range encodes). This allows using
// `dereferenceable_or_null` for e.g., `Option<&T>`, and
// this will continue to work as long as we don't start
// using more niches than just null (e.g., the first page of
// the address space, or unaligned pointers).
Variants::Multiple {
tag_encoding:
TagEncoding::Niche {
untagged_variant,
niche_variants: ref variants,
niche_start,
},
tag_encoding: TagEncoding::Niche { untagged_variant, .. },
tag_field,
..
} if this.fields.offset(tag_field) == offset => {
// We can only continue assuming pointer validity if the only possible
// discriminant value is null. The null special-case is permitted by LLVM's
// `dereferenceable_or_null`, and allow types like `Option<&T>` to benefit
// from optimizations.
assume_valid_ptr &= niche_start == 0 && variants.start() == variants.end();
Some(this.for_variant(cx, untagged_variant))
}
_ => Some(this),
@ -1291,12 +1050,9 @@ where
result = field.to_result().ok().and_then(|field| {
if ptr_end <= field_start + field.size {
// We found the right field, look inside it.
Self::ty_and_layout_pointee_info_at(
field,
cx,
offset - field_start,
assume_valid_ptr,
)
let field_info =
field.pointee_info_at(cx, offset - field_start);
field_info
} else {
None
}
@ -1311,7 +1067,7 @@ where
// FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
if let Some(ref mut pointee) = result {
if let ty::Adt(def, _) = this.ty.kind() {
if assume_valid_ptr && def.is_box() && offset.bytes() == 0 {
if def.is_box() && offset.bytes() == 0 {
let optimize = tcx.sess.opts.optimize != OptLevel::No;
pointee.safe = Some(PointerKind::Box {
unpin: optimize && this.ty.boxed_ty().is_unpin(tcx, cx.param_env()),


@ -176,8 +176,7 @@ impl QueryJobId {
while let Some(id) = current_id {
let info = query_map.get(&id).unwrap();
// FIXME: This string comparison should probably not be done.
let query_name = format!("{:?}", info.query.dep_kind);
if query_name == "layout_of" || query_name == "naive_layout_of" {
if format!("{:?}", info.query.dep_kind) == "layout_of" {
depth += 1;
last_layout = Some((info.clone(), depth));
}


@ -3117,7 +3117,6 @@ pub(crate) mod dep_tracking {
use rustc_feature::UnstableFeatures;
use rustc_span::edition::Edition;
use rustc_span::RealFileName;
use rustc_target::abi::ReferenceNichePolicy;
use rustc_target::spec::{CodeModel, MergeFunctions, PanicStrategy, RelocModel};
use rustc_target::spec::{
RelroLevel, SanitizerSet, SplitDebuginfo, StackProtector, TargetTriple, TlsModel,
@ -3213,7 +3212,6 @@ pub(crate) mod dep_tracking {
OomStrategy,
LanguageIdentifier,
TraitSolver,
ReferenceNichePolicy,
);
impl<T1, T2> DepTrackingHash for (T1, T2)


@ -6,7 +6,6 @@ use crate::{lint, EarlyErrorHandler};
use rustc_data_structures::profiling::TimePassesFormat;
use rustc_errors::ColorConfig;
use rustc_errors::{LanguageIdentifier, TerminalUrl};
use rustc_target::abi::ReferenceNichePolicy;
use rustc_target::spec::{CodeModel, LinkerFlavorCli, MergeFunctions, PanicStrategy, SanitizerSet};
use rustc_target::spec::{
RelocModel, RelroLevel, SplitDebuginfo, StackProtector, TargetTriple, TlsModel,
@ -422,8 +421,6 @@ mod desc {
pub const parse_proc_macro_execution_strategy: &str =
"one of supported execution strategies (`same-thread`, or `cross-thread`)";
pub const parse_dump_solver_proof_tree: &str = "one of: `always`, `on-request`, `on-error`";
pub const parse_opt_reference_niches: &str =
"`null`, or a `,` separated combination of `size` or `align`";
}
mod parse {
@ -1256,31 +1253,6 @@ mod parse {
};
true
}
pub(crate) fn parse_opt_reference_niches(
slot: &mut Option<ReferenceNichePolicy>,
v: Option<&str>,
) -> bool {
let Some(s) = v else {
return false;
};
let slot = slot.get_or_insert_default();
if s == "null" {
return true;
}
for opt in s.split(",") {
match opt {
"size" => slot.size = true,
"align" => slot.align = true,
_ => return false,
}
}
true
}
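
A standalone mirror (not from this diff) of the removed `-Z reference-niches` parser, with a local `Policy` struct standing in for `ReferenceNichePolicy`; accepted inputs are `null`, or a comma-separated combination of `size` and `align`.

#[derive(Debug, Default, PartialEq)]
struct Policy { size: bool, align: bool }

fn parse(s: &str) -> Option<Policy> {
    let mut policy = Policy::default();
    if s == "null" {
        return Some(policy); // only the null niche, no extra reference niches
    }
    for opt in s.split(',') {
        match opt {
            "size" => policy.size = true,
            "align" => policy.align = true,
            _ => return None,
        }
    }
    Some(policy)
}

fn main() {
    assert_eq!(parse("null"), Some(Policy { size: false, align: false }));
    assert_eq!(parse("size,align"), Some(Policy { size: true, align: true }));
    assert_eq!(parse("bogus"), None);
}
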
}
options! {
@ -1729,8 +1701,6 @@ options! {
"enable queries of the dependency graph for regression testing (default: no)"),
randomize_layout: bool = (false, parse_bool, [TRACKED],
"randomize the layout of types (default: no)"),
reference_niches: Option<ReferenceNichePolicy> = (None, parse_opt_reference_niches, [TRACKED],
"override the set of discriminant niches that may be exposed by references"),
relax_elf_relocations: Option<bool> = (None, parse_opt_bool, [TRACKED],
"whether ELF relocations can be relaxed"),
relro_level: Option<RelroLevel> = (None, parse_relro_level, [TRACKED],


@ -50,9 +50,6 @@ pub trait TyAbiInterface<'a, C>: Sized {
this: TyAndLayout<'a, Self>,
cx: &C,
offset: Size,
// If true, assume that pointers are either null or valid (according to their type),
// enabling extra optimizations.
assume_valid_ptr: bool,
) -> Option<PointeeInfo>;
fn is_adt(this: TyAndLayout<'a, Self>) -> bool;
fn is_never(this: TyAndLayout<'a, Self>) -> bool;
@ -79,8 +76,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
where
Ty: TyAbiInterface<'a, C>,
{
let assume_valid_ptr = true;
Ty::ty_and_layout_pointee_info_at(self, cx, offset, assume_valid_ptr)
Ty::ty_and_layout_pointee_info_at(self, cx, offset)
}
pub fn is_single_fp_element<C>(self, cx: &C) -> bool


@ -223,20 +223,9 @@ impl<'tcx> assembly::GoalKind<'tcx> for TraitPredicate<'tcx> {
return ecx.evaluate_added_goals_and_make_canonical_response(Certainty::AMBIGUOUS);
}
// First, try computing an exact naive layout in case the type is generic.
let is_pointer_like = if let Ok(layout) = tcx.naive_layout_of(key) {
layout.is_pointer_like(&tcx.data_layout).unwrap_or_else(|| {
// Second, we fall back to full layout computation.
tcx.layout_of(key)
.ok()
.filter(|l| l.layout.is_pointer_like(&tcx.data_layout))
.is_some()
})
} else {
false
};
if is_pointer_like {
if let Ok(layout) = tcx.layout_of(key)
&& layout.layout.is_pointer_like(&tcx.data_layout)
{
// FIXME: We could make this faster by making a no-constraints response
ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
} else {


@ -979,20 +979,9 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
return;
}
// First, try computing an exact naive layout in case the type is generic.
let is_pointer_like = if let Ok(layout) = tcx.naive_layout_of(key) {
layout.is_pointer_like(&tcx.data_layout).unwrap_or_else(|| {
// Second, we fall back to full layout computation.
tcx.layout_of(key)
.ok()
.filter(|l| l.layout.is_pointer_like(&tcx.data_layout))
.is_some()
})
} else {
false
};
if is_pointer_like {
if let Ok(layout) = tcx.layout_of(key)
&& layout.layout.is_pointer_like(&tcx.data_layout)
{
candidates.vec.push(BuiltinCandidate { has_nested: false });
}
}


@ -3,7 +3,7 @@ use rustc_hir as hir;
use rustc_index::bit_set::BitSet;
use rustc_index::{IndexSlice, IndexVec};
use rustc_middle::mir::{GeneratorLayout, GeneratorSavedLocal};
use rustc_middle::query::{LocalCrate, Providers};
use rustc_middle::query::Providers;
use rustc_middle::ty::layout::{
IntegerExt, LayoutCx, LayoutError, LayoutOf, TyAndLayout, MAX_SIMD_LANES,
};
@ -24,28 +24,32 @@ use crate::errors::{
use crate::layout_sanity_check::sanity_check_layout;
pub fn provide(providers: &mut Providers) {
*providers = Providers { layout_of, reference_niches_policy, ..*providers };
*providers = Providers { layout_of, ..*providers };
}
#[instrument(skip(tcx), level = "debug")]
fn reference_niches_policy<'tcx>(tcx: TyCtxt<'tcx>, _: LocalCrate) -> ReferenceNichePolicy {
tcx.sess.opts.unstable_opts.reference_niches.unwrap_or(DEFAULT_REF_NICHES)
}
/// The reference niche policy for builtin types, and for types in
/// crates not specifying `-Z reference-niches`.
const DEFAULT_REF_NICHES: ReferenceNichePolicy = ReferenceNichePolicy { size: false, align: false };
#[instrument(skip(tcx, query), level = "debug")]
fn layout_of<'tcx>(
tcx: TyCtxt<'tcx>,
query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<TyAndLayout<'tcx>, &'tcx LayoutError<'tcx>> {
let (param_env, unnormalized_ty) = query.into_parts();
let (param_env, ty) = query.into_parts();
debug!(?ty);
let param_env = param_env.with_reveal_all_normalized(tcx);
// `naive_layout_of` takes care of normalizing the type.
let naive = tcx.naive_layout_of(query)?;
let ty = naive.ty;
let unnormalized_ty = ty;
// FIXME: We might want to have two different versions of `layout_of`:
// One that can be called after typecheck has completed and can use
// `normalize_erasing_regions` here and another one that can be called
// before typecheck has completed and uses `try_normalize_erasing_regions`.
let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
Ok(t) => t,
Err(normalization_error) => {
return Err(tcx
.arena
.alloc(LayoutError::NormalizationFailure(ty, normalization_error)));
}
};
if ty != unnormalized_ty {
// Ensure this layout is also cached for the normalized type.
@ -53,11 +57,13 @@ fn layout_of<'tcx>(
}
let cx = LayoutCx { tcx, param_env };
let layout = layout_of_uncached(&cx, ty)?;
let layout = layout_of_uncached(&cx, ty)?;
let layout = TyAndLayout { ty, layout };
record_layout_for_printing(&cx, layout);
sanity_check_layout(&cx, &layout, &naive);
sanity_check_layout(&cx, &layout);
Ok(layout)
}
@ -77,10 +83,12 @@ fn univariant_uninterned<'tcx>(
kind: StructKind,
) -> Result<LayoutS, &'tcx LayoutError<'tcx>> {
let dl = cx.data_layout();
assert!(
!(repr.pack.is_some() && repr.align.is_some()),
"already rejected by `naive_layout_of`"
);
let pack = repr.pack;
if pack.is_some() && repr.align.is_some() {
cx.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
return Err(cx.tcx.arena.alloc(LayoutError::Unknown(ty)));
}
cx.univariant(dl, fields, repr, kind).ok_or_else(|| error(cx, LayoutError::SizeOverflow(ty)))
}
@ -138,35 +146,75 @@ fn layout_of_uncached<'tcx>(
ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
let mut data_ptr = scalar_unit(Pointer(AddressSpace::DATA));
if !ty.is_unsafe_ptr() {
// Calling `layout_of` here would cause a query cycle for recursive types;
// so use a conservative estimate that doesn't look past references.
let naive = cx.naive_layout_of(pointee)?.layout;
data_ptr.valid_range_mut().start = 1;
}
let niches = match *pointee.kind() {
ty::FnDef(def, ..)
| ty::Foreign(def)
| ty::Generator(def, ..)
| ty::Closure(def, ..) => tcx.reference_niches_policy(def.krate),
ty::Adt(def, _) => tcx.reference_niches_policy(def.did().krate),
_ => DEFAULT_REF_NICHES,
let pointee = tcx.normalize_erasing_regions(param_env, pointee);
if pointee.is_sized(tcx, param_env) {
return Ok(tcx.mk_layout(LayoutS::scalar(cx, data_ptr)));
}
let metadata = if let Some(metadata_def_id) = tcx.lang_items().metadata_type()
// Projection eagerly bails out when the pointee references errors,
// fall back to structurally deducing metadata.
&& !pointee.references_error()
{
let pointee_metadata = Ty::new_projection(tcx,metadata_def_id, [pointee]);
let metadata_ty = match tcx.try_normalize_erasing_regions(
param_env,
pointee_metadata,
) {
Ok(metadata_ty) => metadata_ty,
Err(mut err) => {
// Usually `<Ty as Pointee>::Metadata` can't be normalized because
// its struct tail cannot be normalized either, so try to get a
// more descriptive layout error here, which will lead to less confusing
// diagnostics.
match tcx.try_normalize_erasing_regions(
param_env,
tcx.struct_tail_without_normalization(pointee),
) {
Ok(_) => {},
Err(better_err) => {
err = better_err;
}
}
return Err(error(cx, LayoutError::NormalizationFailure(pointee, err)));
},
};
let (min_addr, max_addr) = dl.address_range_for(
if niches.size { naive.size } else { Size::ZERO },
if niches.align { naive.align } else { Align::ONE },
);
let metadata_layout = cx.layout_of(metadata_ty)?;
// If the metadata is a 1-zst, then the pointer is thin.
if metadata_layout.is_zst() && metadata_layout.align.abi.bytes() == 1 {
return Ok(tcx.mk_layout(LayoutS::scalar(cx, data_ptr)));
}
*data_ptr.valid_range_mut() =
WrappingRange { start: min_addr.into(), end: max_addr.into() };
}
let Abi::Scalar(metadata) = metadata_layout.abi else {
return Err(error(cx, LayoutError::Unknown(pointee)));
};
if let Some(metadata) = ptr_metadata_scalar(cx, pointee)? {
// Effectively a (ptr, meta) tuple.
tcx.mk_layout(cx.scalar_pair(data_ptr, metadata))
metadata
} else {
// No metadata, this is a thin pointer.
tcx.mk_layout(LayoutS::scalar(cx, data_ptr))
}
let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
match unsized_part.kind() {
ty::Foreign(..) => {
return Ok(tcx.mk_layout(LayoutS::scalar(cx, data_ptr)));
}
ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
ty::Dynamic(..) => {
let mut vtable = scalar_unit(Pointer(AddressSpace::DATA));
vtable.valid_range_mut().start = 1;
vtable
}
_ => {
return Err(error(cx, LayoutError::Unknown(pointee)));
}
}
};
// Effectively a (ptr, meta) tuple.
tcx.mk_layout(cx.scalar_pair(data_ptr, metadata))
}
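
The thin vs. wide pointer distinction made by the restored branch above can be checked directly with `size_of` (a standalone check, not part of this hunk): sized pointees have no metadata, slices carry a length, trait objects carry a vtable pointer.

use std::mem::size_of;

fn main() {
    let thin = size_of::<&u32>();
    assert_eq!(size_of::<&[u32]>(), 2 * thin);              // (data, length) pair
    assert_eq!(size_of::<&str>(), 2 * thin);                 // (data, length) pair
    assert_eq!(size_of::<&dyn std::fmt::Debug>(), 2 * thin); // (data, vtable) pair
}
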
ty::Dynamic(_, _, ty::DynStar) => {
@ -178,8 +226,16 @@ fn layout_of_uncached<'tcx>(
}
// Arrays and slices.
ty::Array(element, count) => {
let count = compute_array_count(cx, count)
ty::Array(element, mut count) => {
if count.has_projections() {
count = tcx.normalize_erasing_regions(param_env, count);
if count.has_projections() {
return Err(error(cx, LayoutError::Unknown(ty)));
}
}
let count = count
.try_eval_target_usize(tcx, param_env)
.ok_or_else(|| error(cx, LayoutError::Unknown(ty)))?;
let element = cx.layout_of(element)?;
let size = element
@ -502,106 +558,22 @@ fn layout_of_uncached<'tcx>(
}
// Types with no meaningful known layout.
ty::Alias(..)
| ty::Bound(..)
| ty::GeneratorWitness(..)
| ty::GeneratorWitnessMIR(..)
| ty::Infer(_)
| ty::Placeholder(..)
| ty::Param(_)
| ty::Error(_) => {
unreachable!("already rejected by `naive_layout_of`");
ty::Alias(..) => {
// NOTE(eddyb) `layout_of` query should've normalized these away,
// if that was possible, so there's no reason to try again here.
return Err(error(cx, LayoutError::Unknown(ty)));
}
ty::Bound(..) | ty::GeneratorWitness(..) | ty::GeneratorWitnessMIR(..) | ty::Infer(_) => {
bug!("Layout::compute: unexpected type `{}`", ty)
}
ty::Placeholder(..) | ty::Param(_) | ty::Error(_) => {
return Err(error(cx, LayoutError::Unknown(ty)));
}
})
}
pub(crate) fn compute_array_count<'tcx>(
cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
mut count: ty::Const<'tcx>,
) -> Option<u64> {
let LayoutCx { tcx, param_env } = *cx;
if count.has_projections() {
count = tcx.normalize_erasing_regions(param_env, count);
if count.has_projections() {
return None;
}
}
count.try_eval_target_usize(tcx, param_env)
}
pub(crate) fn ptr_metadata_scalar<'tcx>(
cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
pointee: Ty<'tcx>,
) -> Result<Option<Scalar>, &'tcx LayoutError<'tcx>> {
let dl = cx.data_layout();
let scalar_unit = |value: Primitive| {
let size = value.size(dl);
assert!(size.bits() <= 128);
Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
};
let LayoutCx { tcx, param_env } = *cx;
let pointee = tcx.normalize_erasing_regions(param_env, pointee);
if pointee.is_sized(tcx, param_env) {
return Ok(None);
}
if let Some(metadata_def_id) = tcx.lang_items().metadata_type()
// Projection eagerly bails out when the pointee references errors,
// fall back to structurally deducing metadata.
&& !pointee.references_error()
{
let pointee_metadata = Ty::new_projection(tcx,metadata_def_id, [pointee]);
let metadata_ty = match tcx.try_normalize_erasing_regions(
param_env,
pointee_metadata,
) {
Ok(metadata_ty) => metadata_ty,
Err(mut err) => {
// Usually `<Ty as Pointee>::Metadata` can't be normalized because
// its struct tail cannot be normalized either, so try to get a
// more descriptive layout error here, which will lead to less confusing
// diagnostics.
match tcx.try_normalize_erasing_regions(
param_env,
tcx.struct_tail_without_normalization(pointee),
) {
Ok(_) => {},
Err(better_err) => {
err = better_err;
}
}
return Err(error(cx, LayoutError::NormalizationFailure(pointee, err)));
},
};
let metadata_layout = cx.layout_of(metadata_ty)?;
if metadata_layout.is_zst() && metadata_layout.align.abi.bytes() == 1 {
Ok(None) // If the metadata is a 1-zst, then the pointer is thin.
} else if let Abi::Scalar(metadata) = metadata_layout.abi {
Ok(Some(metadata))
} else {
Err(error(cx, LayoutError::Unknown(pointee)))
}
} else {
let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
match unsized_part.kind() {
ty::Foreign(..) => Ok(None),
ty::Slice(_) | ty::Str => Ok(Some(scalar_unit(Int(dl.ptr_sized_integer(), false)))),
ty::Dynamic(..) => {
let mut vtable = scalar_unit(Pointer(AddressSpace::DATA));
vtable.valid_range_mut().start = 1;
Ok(Some(vtable))
}
_ => Err(error(cx, LayoutError::Unknown(pointee))),
}
}
}
/// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
#[derive(Clone, Debug, PartialEq)]
enum SavedLocalEligibility {


@ -1,322 +0,0 @@
use rustc_middle::query::Providers;
use rustc_middle::ty::layout::{
IntegerExt, LayoutCx, LayoutError, LayoutOf, NaiveAbi, NaiveLayout, NaiveNiches,
TyAndNaiveLayout,
};
use rustc_middle::ty::{self, ReprOptions, Ty, TyCtxt, TypeVisitableExt};
use rustc_span::DUMMY_SP;
use rustc_target::abi::*;
use std::ops::Bound;
use crate::layout::{compute_array_count, ptr_metadata_scalar};
pub fn provide(providers: &mut Providers) {
*providers = Providers { naive_layout_of, ..*providers };
}
#[instrument(skip(tcx, query), level = "debug")]
fn naive_layout_of<'tcx>(
tcx: TyCtxt<'tcx>,
query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<TyAndNaiveLayout<'tcx>, &'tcx LayoutError<'tcx>> {
let (param_env, ty) = query.into_parts();
debug!(?ty);
let param_env = param_env.with_reveal_all_normalized(tcx);
let unnormalized_ty = ty;
// FIXME: We might want to have two different versions of `layout_of`:
// One that can be called after typecheck has completed and can use
// `normalize_erasing_regions` here and another one that can be called
// before typecheck has completed and uses `try_normalize_erasing_regions`.
let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
Ok(t) => t,
Err(normalization_error) => {
return Err(tcx
.arena
.alloc(LayoutError::NormalizationFailure(ty, normalization_error)));
}
};
if ty != unnormalized_ty {
// Ensure this layout is also cached for the normalized type.
return tcx.naive_layout_of(param_env.and(ty));
}
let cx = LayoutCx { tcx, param_env };
let layout = naive_layout_of_uncached(&cx, ty)?;
Ok(TyAndNaiveLayout { ty, layout })
}
fn error<'tcx>(
cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
err: LayoutError<'tcx>,
) -> &'tcx LayoutError<'tcx> {
cx.tcx.arena.alloc(err)
}
fn naive_layout_of_uncached<'tcx>(
cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
ty: Ty<'tcx>,
) -> Result<NaiveLayout, &'tcx LayoutError<'tcx>> {
let tcx = cx.tcx;
let dl = cx.data_layout();
let scalar = |niched: bool, value: Primitive| NaiveLayout {
abi: NaiveAbi::Scalar(value),
niches: if niched { NaiveNiches::Some } else { NaiveNiches::None },
size: value.size(dl),
align: value.align(dl).abi,
exact: true,
};
let univariant = |fields: &mut dyn Iterator<Item = Ty<'tcx>>,
repr: &ReprOptions|
-> Result<NaiveLayout, &'tcx LayoutError<'tcx>> {
if repr.pack.is_some() && repr.align.is_some() {
cx.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
return Err(error(cx, LayoutError::Unknown(ty)));
}
let linear = repr.inhibit_struct_field_reordering_opt();
let pack = repr.pack.unwrap_or(Align::MAX);
let mut layout = NaiveLayout::EMPTY;
for field in fields {
let field = cx.naive_layout_of(field)?.packed(pack);
if linear {
layout = layout.pad_to_align(field.align);
}
layout = layout
.concat(&field, dl)
.ok_or_else(|| error(cx, LayoutError::SizeOverflow(ty)))?;
}
if let Some(align) = repr.align {
layout = layout.align_to(align);
}
if linear {
layout.abi = layout.abi.as_aggregate();
}
Ok(layout.pad_to_align(layout.align))
};
debug_assert!(!ty.has_non_region_infer());
Ok(match *ty.kind() {
// Basic scalars
ty::Bool => scalar(true, Int(I8, false)),
ty::Char => scalar(true, Int(I32, false)),
ty::Int(ity) => scalar(false, Int(Integer::from_int_ty(dl, ity), true)),
ty::Uint(ity) => scalar(false, Int(Integer::from_uint_ty(dl, ity), false)),
ty::Float(fty) => scalar(
false,
match fty {
ty::FloatTy::F32 => F32,
ty::FloatTy::F64 => F64,
},
),
ty::FnPtr(_) => scalar(true, Pointer(dl.instruction_address_space)),
// The never type.
ty::Never => NaiveLayout { abi: NaiveAbi::Uninhabited, ..NaiveLayout::EMPTY },
// Potentially-wide pointers.
ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
let data_ptr = scalar(!ty.is_unsafe_ptr(), Pointer(AddressSpace::DATA));
if let Some(metadata) = ptr_metadata_scalar(cx, pointee)? {
// Effectively a (ptr, meta) tuple.
let meta = scalar(!metadata.is_always_valid(dl), metadata.primitive());
let l = data_ptr
.concat(&meta, dl)
.ok_or_else(|| error(cx, LayoutError::SizeOverflow(ty)))?;
l.pad_to_align(l.align)
} else {
// No metadata, this is a thin pointer.
data_ptr
}
}
ty::Dynamic(_, _, ty::DynStar) => {
let ptr = scalar(false, Pointer(AddressSpace::DATA));
let vtable = scalar(true, Pointer(AddressSpace::DATA));
ptr.concat(&vtable, dl).ok_or_else(|| error(cx, LayoutError::SizeOverflow(ty)))?
}
// Arrays and slices.
ty::Array(element, count) => {
let count = compute_array_count(cx, count)
.ok_or_else(|| error(cx, LayoutError::Unknown(ty)))?;
let element = cx.naive_layout_of(element)?;
NaiveLayout {
abi: element.abi.as_aggregate(),
size: element
.size
.checked_mul(count, cx)
.ok_or_else(|| error(cx, LayoutError::SizeOverflow(ty)))?,
niches: if count == 0 { NaiveNiches::None } else { element.niches },
..*element
}
}
ty::Slice(element) => NaiveLayout {
abi: NaiveAbi::Unsized,
size: Size::ZERO,
niches: NaiveNiches::None,
..*cx.naive_layout_of(element)?
},
ty::FnDef(..) => NaiveLayout::EMPTY,
// Unsized types.
ty::Str | ty::Dynamic(_, _, ty::Dyn) | ty::Foreign(..) => {
NaiveLayout { abi: NaiveAbi::Unsized, ..NaiveLayout::EMPTY }
}
// FIXME(reference_niches): try to actually compute a reasonable layout estimate,
// without duplicating too much code from `generator_layout`.
ty::Generator(..) => {
NaiveLayout { exact: false, niches: NaiveNiches::Maybe, ..NaiveLayout::EMPTY }
}
ty::Closure(_, ref substs) => {
univariant(&mut substs.as_closure().upvar_tys(), &ReprOptions::default())?
}
ty::Tuple(tys) => univariant(&mut tys.iter(), &ReprOptions::default())?,
ty::Adt(def, substs) if def.is_union() => {
assert_eq!(def.variants().len(), 1, "union should have a single variant");
let repr = def.repr();
let pack = repr.pack.unwrap_or(Align::MAX);
if repr.pack.is_some() && repr.align.is_some() {
cx.tcx.sess.delay_span_bug(DUMMY_SP, "union cannot be packed and aligned");
return Err(error(cx, LayoutError::Unknown(ty)));
}
let mut layout = NaiveLayout {
// Unions never have niches.
niches: NaiveNiches::None,
..NaiveLayout::EMPTY
};
for f in &def.variants()[FIRST_VARIANT].fields {
let field = cx.naive_layout_of(f.ty(tcx, substs))?;
layout = layout.union(&field.packed(pack));
}
// Unions are always inhabited, and never scalar if `repr(C)`.
if !matches!(layout.abi, NaiveAbi::Scalar(_)) || repr.inhibit_enum_layout_opt() {
layout.abi = NaiveAbi::Sized;
}
if let Some(align) = repr.align {
layout = layout.align_to(align);
}
layout.pad_to_align(layout.align)
}
ty::Adt(def, substs) => {
let repr = def.repr();
let mut layout = NaiveLayout {
// An ADT with no inhabited variants should have an uninhabited ABI.
abi: NaiveAbi::Uninhabited,
..NaiveLayout::EMPTY
};
let mut empty_variants = 0;
for v in def.variants() {
let mut fields = v.fields.iter().map(|f| f.ty(tcx, substs));
let vlayout = univariant(&mut fields, &repr)?;
if vlayout.size == Size::ZERO && vlayout.exact {
empty_variants += 1;
} else {
// Remember the niches of the last seen variant.
layout.niches = vlayout.niches;
}
layout = layout.union(&vlayout);
}
if def.is_enum() {
let may_need_discr = match def.variants().len() {
0 | 1 => false,
// Simple Option-like niche optimization.
// Handling this special case allows enums like `Option<&T>`
// to be recognized as `PointerLike` and to be transmutable
// in generic contexts.
2 if empty_variants == 1 && layout.niches == NaiveNiches::Some => {
layout.niches = NaiveNiches::Maybe; // fill up the niche.
false
}
_ => true,
};
if may_need_discr || repr.inhibit_enum_layout_opt() {
                    // For simplicity, assume that the discriminant always gets niched.
// This will be wrong in many cases, which will cause the size (and
// sometimes the alignment) to be underestimated.
// FIXME(reference_niches): Be smarter here.
layout.niches = NaiveNiches::Maybe;
layout = layout.inexact();
}
} else {
assert_eq!(def.variants().len(), 1, "struct should have a single variant");
// We don't compute exact alignment for SIMD structs.
if repr.simd() {
layout = layout.inexact();
}
// `UnsafeCell` hides all niches.
if def.is_unsafe_cell() {
layout.niches = NaiveNiches::None;
}
}
let valid_range = tcx.layout_scalar_valid_range(def.did());
if valid_range != (Bound::Unbounded, Bound::Unbounded) {
let get = |bound, default| match bound {
Bound::Unbounded => default,
Bound::Included(v) => v,
Bound::Excluded(_) => bug!("exclusive `layout_scalar_valid_range` bound"),
};
let valid_range = WrappingRange {
start: get(valid_range.0, 0),
// FIXME: this is wrong for scalar-pair ABIs. Fortunately, the
                        // only type this could currently affect is `NonNull<T: !Sized>`,
// and the `NaiveNiches` result still ends up correct.
end: get(valid_range.1, layout.size.unsigned_int_max()),
};
assert!(
valid_range.is_in_range_for(layout.size),
"`layout_scalar_valid_range` values are out of bounds",
);
if !valid_range.is_full_for(layout.size) {
layout.niches = NaiveNiches::Some;
}
}
layout.pad_to_align(layout.align)
}
// Types with no meaningful known layout.
ty::Alias(..) => {
// NOTE(eddyb) `layout_of` query should've normalized these away,
// if that was possible, so there's no reason to try again here.
return Err(error(cx, LayoutError::Unknown(ty)));
}
ty::Bound(..) | ty::GeneratorWitness(..) | ty::GeneratorWitnessMIR(..) | ty::Infer(_) => {
bug!("Layout::compute: unexpected type `{}`", ty)
}
ty::Placeholder(..) | ty::Param(_) | ty::Error(_) => {
return Err(error(cx, LayoutError::Unknown(ty)));
}
})
}
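
A minimal illustration (not part of this patch) of the Option-like niche case special-cased in the enum arm above: `Option<&T>` can reuse the null niche of `&T`, so the niched enum stays exactly pointer-sized.

    use std::mem::size_of;

    fn main() {
        // Guaranteed by the null-pointer optimization: the None case occupies
        // the reference's forbidden null value, so no extra discriminant byte.
        assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());
        assert_eq!(size_of::<Option<&u8>>(), size_of::<usize>());
    }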

View File

@ -1,5 +1,5 @@
use rustc_middle::ty::{
layout::{LayoutCx, NaiveLayout, TyAndLayout},
layout::{LayoutCx, TyAndLayout},
TyCtxt,
};
use rustc_target::abi::*;
@ -10,7 +10,6 @@ use std::assert_matches::assert_matches;
pub(super) fn sanity_check_layout<'tcx>(
cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
layout: &TyAndLayout<'tcx>,
naive: &NaiveLayout,
) {
// Type-level uninhabitedness should always imply ABI uninhabitedness.
if layout.ty.is_privately_uninhabited(cx.tcx, cx.param_env) {
@ -21,10 +20,6 @@ pub(super) fn sanity_check_layout<'tcx>(
bug!("size is not a multiple of align, in the following layout:\n{layout:#?}");
}
if !naive.is_refined_by(layout.layout) {
bug!("the naive layout isn't refined by the actual layout:\n{:#?}\n{:#?}", naive, layout);
}
if !cfg!(debug_assertions) {
// Stop here, the rest is kind of expensive.
return;

View File

@ -31,7 +31,6 @@ mod errors;
mod implied_bounds;
pub mod instance;
mod layout;
mod layout_naive;
mod layout_sanity_check;
mod needs_drop;
mod opaque_types;
@ -48,7 +47,6 @@ pub fn provide(providers: &mut Providers) {
consts::provide(providers);
implied_bounds::provide(providers);
layout::provide(providers);
layout_naive::provide(providers);
needs_drop::provide(providers);
opaque_types::provide(providers);
representability::provide(providers);

View File

@ -207,7 +207,7 @@ impl<'mir, 'tcx> GlobalStateInner {
.checked_add(max(size.bytes(), 1))
.ok_or_else(|| err_exhaust!(AddressSpaceFull))?;
// Even if `Size` didn't overflow, we might still have filled up the address space.
if global_state.next_base_addr > ecx.data_layout().target_usize_max() {
if global_state.next_base_addr > ecx.target_usize_max() {
throw_exhaust!(AddressSpaceFull);
}
// Given that `next_base_addr` increases in each allocation, pushing the

View File

@ -21,7 +21,6 @@ use log::trace;
use rustc_middle::{mir, ty};
use rustc_target::spec::abi::Abi;
use rustc_target::abi::HasDataLayout as _;
use crate::*;
use helpers::check_arg_count;
@ -109,8 +108,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
}
// Return error result (usize::MAX), and jump to caller.
let usize_max = this.data_layout().target_usize_max();
this.write_scalar(Scalar::from_target_usize(usize_max, this), dest)?;
this.write_scalar(Scalar::from_target_usize(this.target_usize_max(), this), dest)?;
this.go_to_block(ret);
Ok(true)
}

View File

@ -12,7 +12,7 @@ use log::trace;
use rustc_data_structures::fx::FxHashMap;
use rustc_middle::ty::TyCtxt;
use rustc_target::abi::{Align, Size, HasDataLayout as _};
use rustc_target::abi::{Align, Size};
use crate::shims::os_str::bytes_to_os_str;
use crate::*;
@ -753,7 +753,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
// We cap the number of read bytes to the largest value that we are able to fit in both the
// host's and target's `isize`. This saves us from having to handle overflows later.
let count = count
.min(u64::try_from(this.data_layout().target_isize_max()).unwrap())
.min(u64::try_from(this.target_isize_max()).unwrap())
.min(u64::try_from(isize::MAX).unwrap());
let communicate = this.machine.communicate();
@ -807,7 +807,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
// We cap the number of written bytes to the largest value that we are able to fit in both the
// host's and target's `isize`. This saves us from having to handle overflows later.
let count = count
.min(u64::try_from(this.data_layout().target_isize_max()).unwrap())
.min(u64::try_from(this.target_isize_max()).unwrap())
.min(u64::try_from(isize::MAX).unwrap());
let communicate = this.machine.communicate();
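
A minimal sketch (helper name assumed, not Miri's API) of the double capping described in the read and write comments above: the requested length is clamped to both the target's and the host's `isize::MAX`, so the later size arithmetic cannot overflow on either side.

    fn cap_len(requested: u64, target_isize_max: i64) -> u64 {
        requested
            // Cap to the target's isize::MAX (always non-negative, so the
            // conversion cannot fail).
            .min(u64::try_from(target_isize_max).unwrap())
            // Cap to the host's isize::MAX as well.
            .min(u64::try_from(isize::MAX).unwrap())
    }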

View File

@ -1,5 +1,5 @@
//@error-in-other-file: a cycle occurred during layout computation
//~^ ERROR: cycle detected when computing layout (naive) of
//~^ ERROR: cycle detected when computing layout of
use std::mem;

View File

@ -1,8 +1,7 @@
error[E0391]: cycle detected when computing layout (naive) of `S<S<()>>`
error[E0391]: cycle detected when computing layout of `S<S<()>>`
|
= note: ...which requires computing layout (naive) of `<S<()> as Tr>::I`...
= note: ...which again requires computing layout (naive) of `S<S<()>>`, completing the cycle
= note: cycle used when computing layout of `S<S<()>>`
= note: ...which requires computing layout of `<S<()> as Tr>::I`...
= note: ...which again requires computing layout of `S<S<()>>`, completing the cycle
= note: see https://rustc-dev-guide.rust-lang.org/overview.html#queries and https://rustc-dev-guide.rust-lang.org/query.html for more information
error: post-monomorphization error: a cycle occurred during layout computation

View File

@ -15,8 +15,7 @@ note: ...which requires const-evaluating + checking `Foo::bytes::{constant#0}`..
LL | bytes: [u8; std::mem::size_of::<Foo>()]
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
= note: ...which requires computing layout of `Foo`...
= note: ...which requires computing layout (naive) of `Foo`...
= note: ...which requires computing layout (naive) of `[u8; std::mem::size_of::<Foo>()]`...
= note: ...which requires computing layout of `[u8; std::mem::size_of::<Foo>()]`...
= note: ...which requires normalizing `[u8; std::mem::size_of::<Foo>()]`...
= note: ...which again requires evaluating type-level constant, completing the cycle
note: cycle used when checking that `Foo` is well-formed

View File

@ -15,8 +15,7 @@ note: ...which requires const-evaluating + checking `Foo::bytes::{constant#0}`..
LL | bytes: [u8; unsafe { intrinsics::size_of::<Foo>() }],
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
= note: ...which requires computing layout of `Foo`...
= note: ...which requires computing layout (naive) of `Foo`...
= note: ...which requires computing layout (naive) of `[u8; unsafe { intrinsics::size_of::<Foo>() }]`...
= note: ...which requires computing layout of `[u8; unsafe { intrinsics::size_of::<Foo>() }]`...
= note: ...which requires normalizing `[u8; unsafe { intrinsics::size_of::<Foo>() }]`...
= note: ...which again requires evaluating type-level constant, completing the cycle
note: cycle used when checking that `Foo` is well-formed

View File

@ -9,7 +9,7 @@ note: ...which requires type-checking `make_dyn_star`...
|
LL | fn make_dyn_star<'a, T: PointerLike + Debug + 'a>(t: T) -> impl PointerLike + Debug + 'a {
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
= note: ...which requires computing layout (naive) of `make_dyn_star::{opaque#0}`...
= note: ...which requires computing layout of `make_dyn_star::{opaque#0}`...
= note: ...which requires normalizing `make_dyn_star::{opaque#0}`...
= note: ...which again requires computing type of `make_dyn_star::{opaque#0}`, completing the cycle
note: cycle used when checking item types in top-level module

View File

@ -1,6 +1,5 @@
// run-pass
#![allow(dead_code)]
#![recursion_limit = "129"]
// Making sure that no overflow occurs.

View File

@ -1,6 +1,6 @@
error: the compiler unexpectedly panicked. this is a bug.
query stack during panic:
#0 [naive_layout_of] computing layout (naive) of `Foo`
#1 [layout_of] computing layout of `Foo`
#0 [layout_of] computing layout of `Foo`
#1 [eval_to_allocation_raw] const-evaluating + checking `FOO`
end of query stack

View File

@ -34,7 +34,8 @@ LL | let _val: Wrap<&'static T> = mem::zeroed();
| this code causes undefined behavior when executed
| help: use `MaybeUninit<T>` instead, and only call `assume_init` after initialization is done
|
note: references must be non-null (in this struct field)
= note: `Wrap<&T>` must be non-null
note: because references must be non-null (in this struct field)
--> $DIR/invalid_value.rs:17:18
|
LL | struct Wrap<T> { wrapped: T }
@ -49,7 +50,8 @@ LL | let _val: Wrap<&'static T> = mem::uninitialized();
| this code causes undefined behavior when executed
| help: use `MaybeUninit<T>` instead, and only call `assume_init` after initialization is done
|
note: references must be non-null (in this struct field)
= note: `Wrap<&T>` must be non-null
note: because references must be non-null (in this struct field)
--> $DIR/invalid_value.rs:17:18
|
LL | struct Wrap<T> { wrapped: T }

View File

@ -1,9 +1,9 @@
//~ ERROR cycle detected when computing layout (naive) of `core::option::Option<S>`
//~ ERROR cycle detected when computing layout of `core::option::Option<S>`
//~| NOTE see https://rustc-dev-guide.rust-lang.org/overview.html#queries and https://rustc-dev-guide.rust-lang.org/query.html for more information
//~| NOTE ...which requires computing layout (naive) of `S`...
//~| NOTE ...which requires computing layout (naive) of `core::option::Option<<S as Mirror>::It>`...
//~| NOTE ...which again requires computing layout (naive) of `core::option::Option<S>`, completing the cycle
//~| NOTE cycle used when computing layout (naive) of `core::option::Option<<S as Mirror>::It>`
//~| NOTE ...which requires computing layout of `S`...
//~| NOTE ...which requires computing layout of `core::option::Option<<S as Mirror>::It>`...
//~| NOTE ...which again requires computing layout of `core::option::Option<S>`, completing the cycle
//~| NOTE cycle used when computing layout of `core::option::Option<<S as Mirror>::It>`
trait Mirror {
type It: ?Sized;
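
A sketch of how such a program can produce the cycle above (only the trait header appears in this hunk; the impl, struct, and `main` below are assumed for illustration): computing the layout of `Option<S>` needs the layout of `S`, whose field is `Option<<S as Mirror>::It>`, which normalizes back to `Option<S>`.

    trait Mirror { type It: ?Sized; }

    // Blanket impl: every type mirrors to itself.
    impl<T: ?Sized> Mirror for T { type It = Self; }

    // The field's type normalizes to `Option<S>`, closing the layout cycle.
    struct S(Option<<S as Mirror>::It>);

    fn main() {
        let _s = S(None);
    }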

View File

@ -1,9 +1,9 @@
error[E0391]: cycle detected when computing layout (naive) of `core::option::Option<S>`
error[E0391]: cycle detected when computing layout of `core::option::Option<S>`
|
= note: ...which requires computing layout (naive) of `S`...
= note: ...which requires computing layout (naive) of `core::option::Option<<S as Mirror>::It>`...
= note: ...which again requires computing layout (naive) of `core::option::Option<S>`, completing the cycle
= note: cycle used when computing layout (naive) of `core::option::Option<<S as Mirror>::It>`
= note: ...which requires computing layout of `S`...
= note: ...which requires computing layout of `core::option::Option<<S as Mirror>::It>`...
= note: ...which again requires computing layout of `core::option::Option<S>`, completing the cycle
= note: cycle used when computing layout of `core::option::Option<<S as Mirror>::It>`
= note: see https://rustc-dev-guide.rust-lang.org/overview.html#queries and https://rustc-dev-guide.rust-lang.org/query.html for more information
error: aborting due to previous error

View File

@ -1,4 +1,4 @@
//~ ERROR queries overflow the depth limit!
//~ ERROR overflow evaluating the requirement `&mut Self: DispatchFromDyn<&mut RustaceansAreAwesome>
//~| HELP consider increasing the recursion limit
// build-fail

View File

@ -1,7 +1,7 @@
error: queries overflow the depth limit!
error[E0275]: overflow evaluating the requirement `&mut Self: DispatchFromDyn<&mut RustaceansAreAwesome>`
|
= help: consider increasing the recursion limit by adding a `#![recursion_limit = "2"]` attribute to your crate (`zero_overflow`)
= note: query depth increased by 2 when computing layout of `()`
error: aborting due to previous error
For more information about this error, try `rustc --explain E0275`.

View File

@ -1,5 +1,5 @@
// build-fail
//~^ ERROR cycle detected when computing layout (naive) of `Foo<()>`
//~^ ERROR cycle detected when computing layout of `Foo<()>`
trait A { type Assoc: ?Sized; }

View File

@ -1,8 +1,12 @@
error[E0391]: cycle detected when computing layout (naive) of `Foo<()>`
error[E0391]: cycle detected when computing layout of `Foo<()>`
|
= note: ...which requires computing layout (naive) of `<() as A>::Assoc`...
= note: ...which again requires computing layout (naive) of `Foo<()>`, completing the cycle
= note: cycle used when computing layout of `Foo<()>`
= note: ...which requires computing layout of `<() as A>::Assoc`...
= note: ...which again requires computing layout of `Foo<()>`, completing the cycle
note: cycle used when elaborating drops for `main`
--> $DIR/recursive-type-2.rs:11:1
|
LL | fn main() {
| ^^^^^^^^^
= note: see https://rustc-dev-guide.rust-lang.org/overview.html#queries and https://rustc-dev-guide.rust-lang.org/query.html for more information
error: aborting due to previous error

View File

@ -30,16 +30,4 @@ fn f<T, U: ?Sized>(x: &T) -> &U {
unsafe { transmute(x) } //~ ERROR cannot transmute between types of different sizes
}
fn g<T, U>(x: &T) -> Option<&U> {
unsafe { transmute(x) }
}
fn h<T>(x: &[T]) -> Option<&dyn Send> {
unsafe { transmute(x) }
}
fn i<T>(x: [usize; 1]) -> Option<&'static T> {
unsafe { transmute(x) }
}
fn main() { }

View File

@ -9,7 +9,7 @@ note: ...which requires type-checking `CONST_BUG`...
|
LL | const CONST_BUG: Bug<u8, ()> = unsafe { std::mem::transmute(|_: u8| ()) };
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
= note: ...which requires computing layout (naive) of `Bug<u8, ()>`...
= note: ...which requires computing layout of `Bug<u8, ()>`...
= note: ...which requires normalizing `Bug<u8, ()>`...
= note: ...which again requires computing type of `Bug::{opaque#0}`, completing the cycle
note: cycle used when checking item types in top-level module