Auto merge of #94527 - oli-obk:undef_scalars, r=nagisa,erikdesjardin

Let CTFE handle partially uninitialized unions without marking the entire value as uninitialized.

follow up to #94411

To fix https://github.com/rust-lang/rust/issues/69488 and, by extension, fix https://github.com/rust-lang/rust/issues/94371, we should stop treating types like `MaybeUninit<usize>` as something that the `Scalar` type in the interpreter engine can represent. So we add a new field to `abi::Primitive` that records whether the primitive is nested in a union.

cc `@RalfJung`

r? `@ghost`
This commit is contained in:
bors 2022-04-05 16:46:13 +00:00
commit f262ca12aa
42 changed files with 466 additions and 333 deletions

View File

@ -21,7 +21,7 @@ pub(crate) fn pointer_ty(tcx: TyCtxt<'_>) -> types::Type {
} }
pub(crate) fn scalar_to_clif_type(tcx: TyCtxt<'_>, scalar: Scalar) -> Type { pub(crate) fn scalar_to_clif_type(tcx: TyCtxt<'_>, scalar: Scalar) -> Type {
match scalar.value { match scalar.primitive() {
Primitive::Int(int, _sign) => match int { Primitive::Int(int, _sign) => match int {
Integer::I8 => types::I8, Integer::I8 => types::I8,
Integer::I16 => types::I16, Integer::I16 => types::I16,

View File

@ -105,7 +105,7 @@ pub(crate) fn codegen_get_discriminant<'tcx>(
// Decode the discriminant (specifically if it's niche-encoded). // Decode the discriminant (specifically if it's niche-encoded).
match *tag_encoding { match *tag_encoding {
TagEncoding::Direct => { TagEncoding::Direct => {
let signed = match tag_scalar.value { let signed = match tag_scalar.primitive() {
Int(_, signed) => signed, Int(_, signed) => signed,
_ => false, _ => false,
}; };

View File

@ -50,7 +50,7 @@ fn codegen_field<'tcx>(
} }
fn scalar_pair_calculate_b_offset(tcx: TyCtxt<'_>, a_scalar: Scalar, b_scalar: Scalar) -> Offset32 { fn scalar_pair_calculate_b_offset(tcx: TyCtxt<'_>, a_scalar: Scalar, b_scalar: Scalar) -> Offset32 {
let b_offset = a_scalar.value.size(&tcx).align_to(b_scalar.value.align(&tcx).abi); let b_offset = a_scalar.size(&tcx).align_to(b_scalar.align(&tcx).abi);
Offset32::new(b_offset.bytes().try_into().unwrap()) Offset32::new(b_offset.bytes().try_into().unwrap())
} }

View File

@ -694,11 +694,11 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
} }
fn scalar_load_metadata<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, load: RValue<'gcc>, scalar: &abi::Scalar) { fn scalar_load_metadata<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, load: RValue<'gcc>, scalar: &abi::Scalar) {
let vr = scalar.valid_range.clone(); let vr = scalar.valid_range(bx);
match scalar.value { match scalar.primitive() {
abi::Int(..) => { abi::Int(..) => {
if !scalar.is_always_valid(bx) { if !scalar.is_always_valid(bx) {
bx.range_metadata(load, scalar.valid_range); bx.range_metadata(load, vr);
} }
} }
abi::Pointer if vr.start < vr.end && !vr.contains(0) => { abi::Pointer if vr.start < vr.end && !vr.contains(0) => {
@ -720,7 +720,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
OperandValue::Immediate(self.to_immediate(load, place.layout)) OperandValue::Immediate(self.to_immediate(load, place.layout))
} }
else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi { else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
let b_offset = a.value.size(self).align_to(b.value.align(self).abi); let b_offset = a.size(self).align_to(b.align(self).abi);
let pair_type = place.layout.gcc_type(self, false); let pair_type = place.layout.gcc_type(self, false);
let mut load = |i, scalar: &abi::Scalar, align| { let mut load = |i, scalar: &abi::Scalar, align| {

View File

@ -158,14 +158,14 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
} }
fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, ty: Type<'gcc>) -> RValue<'gcc> { fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, ty: Type<'gcc>) -> RValue<'gcc> {
let bitsize = if layout.is_bool() { 1 } else { layout.value.size(self).bits() }; let bitsize = if layout.is_bool() { 1 } else { layout.size(self).bits() };
match cv { match cv {
Scalar::Int(ScalarInt::ZST) => { Scalar::Int(ScalarInt::ZST) => {
assert_eq!(0, layout.value.size(self).bytes()); assert_eq!(0, layout.size(self).bytes());
self.const_undef(self.type_ix(0)) self.const_undef(self.type_ix(0))
} }
Scalar::Int(int) => { Scalar::Int(int) => {
let data = int.assert_bits(layout.value.size(self)); let data = int.assert_bits(layout.size(self));
// FIXME(antoyo): there's some issues with using the u128 code that follows, so hard-code // FIXME(antoyo): there's some issues with using the u128 code that follows, so hard-code
// the paths for floating-point values. // the paths for floating-point values.
@ -209,7 +209,7 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
let base_addr = self.const_bitcast(base_addr, self.usize_type); let base_addr = self.const_bitcast(base_addr, self.usize_type);
let offset = self.context.new_rvalue_from_long(self.usize_type, offset.bytes() as i64); let offset = self.context.new_rvalue_from_long(self.usize_type, offset.bytes() as i64);
let ptr = self.const_bitcast(base_addr + offset, ptr_type); let ptr = self.const_bitcast(base_addr + offset, ptr_type);
if layout.value != Pointer { if layout.primitive() != Pointer {
self.const_bitcast(ptr.dereference(None).to_rvalue(), ty) self.const_bitcast(ptr.dereference(None).to_rvalue(), ty)
} }
else { else {

View File

@ -328,7 +328,7 @@ pub fn const_alloc_to_gcc<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, alloc: ConstAl
interpret::Pointer::new(alloc_id, Size::from_bytes(ptr_offset)), interpret::Pointer::new(alloc_id, Size::from_bytes(ptr_offset)),
&cx.tcx, &cx.tcx,
), ),
abi::Scalar { value: Primitive::Pointer, valid_range: WrappingRange { start: 0, end: !0 } }, abi::Scalar::Initialized { value: Primitive::Pointer, valid_range: WrappingRange::full(dl.pointer_size) },
cx.type_i8p(), cx.type_i8p(),
)); ));
next_offset = offset + pointer_size; next_offset = offset + pointer_size;

View File

@ -224,7 +224,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
} }
fn scalar_gcc_type_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, scalar: &abi::Scalar, offset: Size) -> Type<'gcc> { fn scalar_gcc_type_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, scalar: &abi::Scalar, offset: Size) -> Type<'gcc> {
match scalar.value { match scalar.primitive() {
Int(i, true) => cx.type_from_integer(i), Int(i, true) => cx.type_from_integer(i),
Int(i, false) => cx.type_from_unsigned_integer(i), Int(i, false) => cx.type_from_unsigned_integer(i),
F32 => cx.type_f32(), F32 => cx.type_f32(),
@ -282,7 +282,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
Size::ZERO Size::ZERO
} }
else { else {
a.value.size(cx).align_to(b.value.align(cx).abi) a.size(cx).align_to(b.align(cx).abi)
}; };
self.scalar_gcc_type_at(cx, scalar, offset) self.scalar_gcc_type_at(cx, scalar, offset)
} }

View File

@ -510,9 +510,9 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
// If the value is a boolean, the range is 0..2 and that ultimately // If the value is a boolean, the range is 0..2 and that ultimately
// become 0..0 when the type becomes i1, which would be rejected // become 0..0 when the type becomes i1, which would be rejected
// by the LLVM verifier. // by the LLVM verifier.
if let Int(..) = scalar.value { if let Int(..) = scalar.primitive() {
if !scalar.is_bool() && !scalar.is_always_valid(bx) { if !scalar.is_bool() && !scalar.is_always_valid(bx) {
bx.range_metadata(callsite, scalar.valid_range); bx.range_metadata(callsite, scalar.valid_range(bx));
} }
} }
} }

View File

@ -753,7 +753,7 @@ fn dummy_output_type<'ll>(cx: &CodegenCx<'ll, '_>, reg: InlineAsmRegClass) -> &'
/// Helper function to get the LLVM type for a Scalar. Pointers are returned as /// Helper function to get the LLVM type for a Scalar. Pointers are returned as
/// the equivalent integer type. /// the equivalent integer type.
fn llvm_asm_scalar_type<'ll>(cx: &CodegenCx<'ll, '_>, scalar: Scalar) -> &'ll Type { fn llvm_asm_scalar_type<'ll>(cx: &CodegenCx<'ll, '_>, scalar: Scalar) -> &'ll Type {
match scalar.value { match scalar.primitive() {
Primitive::Int(Integer::I8, _) => cx.type_i8(), Primitive::Int(Integer::I8, _) => cx.type_i8(),
Primitive::Int(Integer::I16, _) => cx.type_i16(), Primitive::Int(Integer::I16, _) => cx.type_i16(),
Primitive::Int(Integer::I32, _) => cx.type_i32(), Primitive::Int(Integer::I32, _) => cx.type_i32(),
@ -774,7 +774,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
) -> &'ll Value { ) -> &'ll Value {
match (reg, layout.abi) { match (reg, layout.abi) {
(InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => { (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
if let Primitive::Int(Integer::I8, _) = s.value { if let Primitive::Int(Integer::I8, _) = s.primitive() {
let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8); let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8);
bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0)) bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
} else { } else {
@ -785,7 +785,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
let elem_ty = llvm_asm_scalar_type(bx.cx, s); let elem_ty = llvm_asm_scalar_type(bx.cx, s);
let count = 16 / layout.size.bytes(); let count = 16 / layout.size.bytes();
let vec_ty = bx.cx.type_vector(elem_ty, count); let vec_ty = bx.cx.type_vector(elem_ty, count);
if let Primitive::Pointer = s.value { if let Primitive::Pointer = s.primitive() {
value = bx.ptrtoint(value, bx.cx.type_isize()); value = bx.ptrtoint(value, bx.cx.type_isize());
} }
bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0)) bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
@ -800,7 +800,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices)) bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
} }
(InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s)) (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
if s.value == Primitive::F64 => if s.primitive() == Primitive::F64 =>
{ {
bx.bitcast(value, bx.cx.type_i64()) bx.bitcast(value, bx.cx.type_i64())
} }
@ -812,7 +812,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
Abi::Scalar(s), Abi::Scalar(s),
) => { ) => {
if let Primitive::Int(Integer::I32, _) = s.value { if let Primitive::Int(Integer::I32, _) = s.primitive() {
bx.bitcast(value, bx.cx.type_f32()) bx.bitcast(value, bx.cx.type_f32())
} else { } else {
value value
@ -826,19 +826,21 @@ fn llvm_fixup_input<'ll, 'tcx>(
), ),
Abi::Scalar(s), Abi::Scalar(s),
) => { ) => {
if let Primitive::Int(Integer::I64, _) = s.value { if let Primitive::Int(Integer::I64, _) = s.primitive() {
bx.bitcast(value, bx.cx.type_f64()) bx.bitcast(value, bx.cx.type_f64())
} else { } else {
value value
} }
} }
(InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value { (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
// MIPS only supports register-length arithmetics. match s.primitive() {
Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()), // MIPS only supports register-length arithmetics.
Primitive::F32 => bx.bitcast(value, bx.cx.type_i32()), Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
Primitive::F64 => bx.bitcast(value, bx.cx.type_i64()), Primitive::F32 => bx.bitcast(value, bx.cx.type_i32()),
_ => value, Primitive::F64 => bx.bitcast(value, bx.cx.type_i64()),
}, _ => value,
}
}
_ => value, _ => value,
} }
} }
@ -852,7 +854,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
) -> &'ll Value { ) -> &'ll Value {
match (reg, layout.abi) { match (reg, layout.abi) {
(InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => { (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
if let Primitive::Int(Integer::I8, _) = s.value { if let Primitive::Int(Integer::I8, _) = s.primitive() {
bx.extract_element(value, bx.const_i32(0)) bx.extract_element(value, bx.const_i32(0))
} else { } else {
value value
@ -860,7 +862,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
} }
(InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => { (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
value = bx.extract_element(value, bx.const_i32(0)); value = bx.extract_element(value, bx.const_i32(0));
if let Primitive::Pointer = s.value { if let Primitive::Pointer = s.primitive() {
value = bx.inttoptr(value, layout.llvm_type(bx.cx)); value = bx.inttoptr(value, layout.llvm_type(bx.cx));
} }
value value
@ -875,7 +877,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices)) bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
} }
(InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s)) (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
if s.value == Primitive::F64 => if s.primitive() == Primitive::F64 =>
{ {
bx.bitcast(value, bx.cx.type_f64()) bx.bitcast(value, bx.cx.type_f64())
} }
@ -887,7 +889,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
Abi::Scalar(s), Abi::Scalar(s),
) => { ) => {
if let Primitive::Int(Integer::I32, _) = s.value { if let Primitive::Int(Integer::I32, _) = s.primitive() {
bx.bitcast(value, bx.cx.type_i32()) bx.bitcast(value, bx.cx.type_i32())
} else { } else {
value value
@ -901,20 +903,22 @@ fn llvm_fixup_output<'ll, 'tcx>(
), ),
Abi::Scalar(s), Abi::Scalar(s),
) => { ) => {
if let Primitive::Int(Integer::I64, _) = s.value { if let Primitive::Int(Integer::I64, _) = s.primitive() {
bx.bitcast(value, bx.cx.type_i64()) bx.bitcast(value, bx.cx.type_i64())
} else { } else {
value value
} }
} }
(InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value { (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
// MIPS only supports register-length arithmetics. match s.primitive() {
Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()), // MIPS only supports register-length arithmetics.
Primitive::Int(Integer::I16, _) => bx.trunc(value, bx.cx.type_i16()), Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
Primitive::F32 => bx.bitcast(value, bx.cx.type_f32()), Primitive::Int(Integer::I16, _) => bx.trunc(value, bx.cx.type_i16()),
Primitive::F64 => bx.bitcast(value, bx.cx.type_f64()), Primitive::F32 => bx.bitcast(value, bx.cx.type_f32()),
_ => value, Primitive::F64 => bx.bitcast(value, bx.cx.type_f64()),
}, _ => value,
}
}
_ => value, _ => value,
} }
} }
@ -927,7 +931,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
) -> &'ll Type { ) -> &'ll Type {
match (reg, layout.abi) { match (reg, layout.abi) {
(InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => { (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
if let Primitive::Int(Integer::I8, _) = s.value { if let Primitive::Int(Integer::I8, _) = s.primitive() {
cx.type_vector(cx.type_i8(), 8) cx.type_vector(cx.type_i8(), 8)
} else { } else {
layout.llvm_type(cx) layout.llvm_type(cx)
@ -946,7 +950,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
cx.type_vector(elem_ty, count * 2) cx.type_vector(elem_ty, count * 2)
} }
(InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s)) (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
if s.value == Primitive::F64 => if s.primitive() == Primitive::F64 =>
{ {
cx.type_i64() cx.type_i64()
} }
@ -958,7 +962,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
Abi::Scalar(s), Abi::Scalar(s),
) => { ) => {
if let Primitive::Int(Integer::I32, _) = s.value { if let Primitive::Int(Integer::I32, _) = s.primitive() {
cx.type_f32() cx.type_f32()
} else { } else {
layout.llvm_type(cx) layout.llvm_type(cx)
@ -972,19 +976,21 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
), ),
Abi::Scalar(s), Abi::Scalar(s),
) => { ) => {
if let Primitive::Int(Integer::I64, _) = s.value { if let Primitive::Int(Integer::I64, _) = s.primitive() {
cx.type_f64() cx.type_f64()
} else { } else {
layout.llvm_type(cx) layout.llvm_type(cx)
} }
} }
(InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value { (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
// MIPS only supports register-length arithmetics. match s.primitive() {
Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(), // MIPS only supports register-length arithmetics.
Primitive::F32 => cx.type_i32(), Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
Primitive::F64 => cx.type_i64(), Primitive::F32 => cx.type_i32(),
_ => layout.llvm_type(cx), Primitive::F64 => cx.type_i64(),
}, _ => layout.llvm_type(cx),
}
}
_ => layout.llvm_type(cx), _ => layout.llvm_type(cx),
} }
} }

View File

@ -484,14 +484,14 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
bx.noundef_metadata(load); bx.noundef_metadata(load);
} }
match scalar.value { match scalar.primitive() {
abi::Int(..) => { abi::Int(..) => {
if !scalar.is_always_valid(bx) { if !scalar.is_always_valid(bx) {
bx.range_metadata(load, scalar.valid_range); bx.range_metadata(load, scalar.valid_range(bx));
} }
} }
abi::Pointer => { abi::Pointer => {
if !scalar.valid_range.contains(0) { if !scalar.valid_range(bx).contains(0) {
bx.nonnull_metadata(load); bx.nonnull_metadata(load);
} }
@ -525,7 +525,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
}); });
OperandValue::Immediate(self.to_immediate(llval, place.layout)) OperandValue::Immediate(self.to_immediate(llval, place.layout))
} else if let abi::Abi::ScalarPair(a, b) = place.layout.abi { } else if let abi::Abi::ScalarPair(a, b) = place.layout.abi {
let b_offset = a.value.size(self).align_to(b.value.align(self).abi); let b_offset = a.size(self).align_to(b.align(self).abi);
let pair_ty = place.layout.llvm_type(self); let pair_ty = place.layout.llvm_type(self);
let mut load = |i, scalar: abi::Scalar, layout, align, offset| { let mut load = |i, scalar: abi::Scalar, layout, align, offset| {

View File

@ -221,16 +221,16 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
} }
fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, llty: &'ll Type) -> &'ll Value { fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, llty: &'ll Type) -> &'ll Value {
let bitsize = if layout.is_bool() { 1 } else { layout.value.size(self).bits() }; let bitsize = if layout.is_bool() { 1 } else { layout.size(self).bits() };
match cv { match cv {
Scalar::Int(ScalarInt::ZST) => { Scalar::Int(ScalarInt::ZST) => {
assert_eq!(0, layout.value.size(self).bytes()); assert_eq!(0, layout.size(self).bytes());
self.const_undef(self.type_ix(0)) self.const_undef(self.type_ix(0))
} }
Scalar::Int(int) => { Scalar::Int(int) => {
let data = int.assert_bits(layout.value.size(self)); let data = int.assert_bits(layout.size(self));
let llval = self.const_uint_big(self.type_ix(bitsize), data); let llval = self.const_uint_big(self.type_ix(bitsize), data);
if layout.value == Pointer { if layout.primitive() == Pointer {
unsafe { llvm::LLVMConstIntToPtr(llval, llty) } unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
} else { } else {
self.const_bitcast(llval, llty) self.const_bitcast(llval, llty)
@ -269,7 +269,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
1, 1,
) )
}; };
if layout.value != Pointer { if layout.primitive() != Pointer {
unsafe { llvm::LLVMConstPtrToInt(llval, llty) } unsafe { llvm::LLVMConstPtrToInt(llval, llty) }
} else { } else {
self.const_bitcast(llval, llty) self.const_bitcast(llval, llty)

View File

@ -109,7 +109,10 @@ pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation<
Pointer::new(alloc_id, Size::from_bytes(ptr_offset)), Pointer::new(alloc_id, Size::from_bytes(ptr_offset)),
&cx.tcx, &cx.tcx,
), ),
Scalar { value: Primitive::Pointer, valid_range: WrappingRange { start: 0, end: !0 } }, Scalar::Initialized {
value: Primitive::Pointer,
valid_range: WrappingRange::full(dl.pointer_size),
},
cx.type_i8p_ext(address_space), cx.type_i8p_ext(address_space),
)); ));
next_offset = offset + pointer_size; next_offset = offset + pointer_size;

View File

@ -118,7 +118,7 @@ fn tag_base_type<'ll, 'tcx>(
Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, tag, .. } => { Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, tag, .. } => {
// Niche tags are always normalized to unsized integers of the correct size. // Niche tags are always normalized to unsized integers of the correct size.
match tag.value { match tag.primitive() {
Primitive::Int(t, _) => t, Primitive::Int(t, _) => t,
Primitive::F32 => Integer::I32, Primitive::F32 => Integer::I32,
Primitive::F64 => Integer::I64, Primitive::F64 => Integer::I64,
@ -136,7 +136,7 @@ fn tag_base_type<'ll, 'tcx>(
Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, .. } => { Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, .. } => {
// Direct tags preserve the sign. // Direct tags preserve the sign.
tag.value.to_ty(cx.tcx) tag.primitive().to_ty(cx.tcx)
} }
} }
} }
@ -425,7 +425,7 @@ fn compute_discriminant_value<'ll, 'tcx>(
let value = (variant_index.as_u32() as u128) let value = (variant_index.as_u32() as u128)
.wrapping_sub(niche_variants.start().as_u32() as u128) .wrapping_sub(niche_variants.start().as_u32() as u128)
.wrapping_add(niche_start); .wrapping_add(niche_start);
let value = tag.value.size(cx).truncate(value); let value = tag.size(cx).truncate(value);
// NOTE(eddyb) do *NOT* remove this assert, until // NOTE(eddyb) do *NOT* remove this assert, until
// we pass the full 128-bit value to LLVM, otherwise // we pass the full 128-bit value to LLVM, otherwise
// truncation will be silent and remain undetected. // truncation will be silent and remain undetected.

View File

@ -134,7 +134,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
sym::va_arg => { sym::va_arg => {
match fn_abi.ret.layout.abi { match fn_abi.ret.layout.abi {
abi::Abi::Scalar(scalar) => { abi::Abi::Scalar(scalar) => {
match scalar.value { match scalar.primitive() {
Primitive::Int(..) => { Primitive::Int(..) => {
if self.cx().size_of(ret_ty).bytes() < 4 { if self.cx().size_of(ret_ty).bytes() < 4 {
// `va_arg` should not be called on an integer type // `va_arg` should not be called on an integer type

View File

@ -309,7 +309,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
scalar: Scalar, scalar: Scalar,
offset: Size, offset: Size,
) -> &'a Type { ) -> &'a Type {
match scalar.value { match scalar.primitive() {
Int(i, _) => cx.type_from_integer(i), Int(i, _) => cx.type_from_integer(i),
F32 => cx.type_f32(), F32 => cx.type_f32(),
F64 => cx.type_f64(), F64 => cx.type_f64(),
@ -362,8 +362,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
return cx.type_i1(); return cx.type_i1();
} }
let offset = let offset = if index == 0 { Size::ZERO } else { a.size(cx).align_to(b.align(cx).abi) };
if index == 0 { Size::ZERO } else { a.value.size(cx).align_to(b.value.align(cx).abi) };
self.scalar_llvm_type_at(cx, scalar, offset) self.scalar_llvm_type_at(cx, scalar, offset)
} }

View File

@ -464,13 +464,13 @@ fn push_debuginfo_type_name<'tcx>(
// calculate the range of values for the dataful variant // calculate the range of values for the dataful variant
let dataful_discriminant_range = let dataful_discriminant_range =
dataful_variant_layout.largest_niche().unwrap().scalar.valid_range; dataful_variant_layout.largest_niche().unwrap().valid_range;
let min = dataful_discriminant_range.start; let min = dataful_discriminant_range.start;
let min = tag.value.size(&tcx).truncate(min); let min = tag.size(&tcx).truncate(min);
let max = dataful_discriminant_range.end; let max = dataful_discriminant_range.end;
let max = tag.value.size(&tcx).truncate(max); let max = tag.size(&tcx).truncate(max);
let dataful_variant_name = variant_name(*dataful_variant); let dataful_variant_name = variant_name(*dataful_variant);
write!(output, ", {}, {}, {}", min, max, dataful_variant_name).unwrap(); write!(output, ", {}, {}, {}", min, max, dataful_variant_name).unwrap();

View File

@ -1572,7 +1572,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
match (src.layout.abi, dst.layout.abi) { match (src.layout.abi, dst.layout.abi) {
(abi::Abi::Scalar(src_scalar), abi::Abi::Scalar(dst_scalar)) => { (abi::Abi::Scalar(src_scalar), abi::Abi::Scalar(dst_scalar)) => {
// HACK(eddyb) LLVM doesn't like `bitcast`s between pointers and non-pointers. // HACK(eddyb) LLVM doesn't like `bitcast`s between pointers and non-pointers.
if (src_scalar.value == abi::Pointer) == (dst_scalar.value == abi::Pointer) { if (src_scalar.primitive() == abi::Pointer)
== (dst_scalar.primitive() == abi::Pointer)
{
assert_eq!(src.layout.size, dst.layout.size); assert_eq!(src.layout.size, dst.layout.size);
// NOTE(eddyb) the `from_immediate` and `to_immediate_scalar` // NOTE(eddyb) the `from_immediate` and `to_immediate_scalar`

View File

@ -207,11 +207,11 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
// Extract a scalar component from a pair. // Extract a scalar component from a pair.
(OperandValue::Pair(a_llval, b_llval), Abi::ScalarPair(a, b)) => { (OperandValue::Pair(a_llval, b_llval), Abi::ScalarPair(a, b)) => {
if offset.bytes() == 0 { if offset.bytes() == 0 {
assert_eq!(field.size, a.value.size(bx.cx())); assert_eq!(field.size, a.size(bx.cx()));
OperandValue::Immediate(a_llval) OperandValue::Immediate(a_llval)
} else { } else {
assert_eq!(offset, a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi)); assert_eq!(offset, a.size(bx.cx()).align_to(b.align(bx.cx()).abi));
assert_eq!(field.size, b.value.size(bx.cx())); assert_eq!(field.size, b.size(bx.cx()));
OperandValue::Immediate(b_llval) OperandValue::Immediate(b_llval)
} }
} }
@ -316,7 +316,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
bug!("store_with_flags: invalid ScalarPair layout: {:#?}", dest.layout); bug!("store_with_flags: invalid ScalarPair layout: {:#?}", dest.layout);
}; };
let ty = bx.backend_type(dest.layout); let ty = bx.backend_type(dest.layout);
let b_offset = a_scalar.value.size(bx).align_to(b_scalar.value.align(bx).abi); let b_offset = a_scalar.size(bx).align_to(b_scalar.align(bx).abi);
let llptr = bx.struct_gep(ty, dest.llval, 0); let llptr = bx.struct_gep(ty, dest.llval, 0);
let val = bx.from_immediate(a); let val = bx.from_immediate(a);

View File

@ -100,7 +100,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
self.llval self.llval
} }
Abi::ScalarPair(a, b) Abi::ScalarPair(a, b)
if offset == a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi) => if offset == a.size(bx.cx()).align_to(b.align(bx.cx()).abi) =>
{ {
// Offset matches second field. // Offset matches second field.
let ty = bx.backend_type(self.layout); let ty = bx.backend_type(self.layout);
@ -234,7 +234,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
// Decode the discriminant (specifically if it's niche-encoded). // Decode the discriminant (specifically if it's niche-encoded).
match *tag_encoding { match *tag_encoding {
TagEncoding::Direct => { TagEncoding::Direct => {
let signed = match tag_scalar.value { let signed = match tag_scalar.primitive() {
// We use `i1` for bytes that are always `0` or `1`, // We use `i1` for bytes that are always `0` or `1`,
// e.g., `#[repr(i8)] enum E { A, B }`, but we can't // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
// let LLVM interpret the `i1` as signed, because // let LLVM interpret the `i1` as signed, because

View File

@ -299,7 +299,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let mut signed = false; let mut signed = false;
if let Abi::Scalar(scalar) = operand.layout.abi { if let Abi::Scalar(scalar) = operand.layout.abi {
if let Int(_, s) = scalar.value { if let Int(_, s) = scalar.primitive() {
// We use `i1` for bytes that are always `0` or `1`, // We use `i1` for bytes that are always `0` or `1`,
// e.g., `#[repr(i8)] enum E { A, B }`, but we can't // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
// let LLVM interpret the `i1` as signed, because // let LLVM interpret the `i1` as signed, because
@ -307,15 +307,17 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
signed = !scalar.is_bool() && s; signed = !scalar.is_bool() && s;
if !scalar.is_always_valid(bx.cx()) if !scalar.is_always_valid(bx.cx())
&& scalar.valid_range.end >= scalar.valid_range.start && scalar.valid_range(bx.cx()).end
>= scalar.valid_range(bx.cx()).start
{ {
// We want `table[e as usize ± k]` to not // We want `table[e as usize ± k]` to not
// have bound checks, and this is the most // have bound checks, and this is the most
// convenient place to put the `assume`s. // convenient place to put the `assume`s.
if scalar.valid_range.start > 0 { if scalar.valid_range(bx.cx()).start > 0 {
let enum_value_lower_bound = bx let enum_value_lower_bound = bx.cx().const_uint_big(
.cx() ll_t_in,
.const_uint_big(ll_t_in, scalar.valid_range.start); scalar.valid_range(bx.cx()).start,
);
let cmp_start = bx.icmp( let cmp_start = bx.icmp(
IntPredicate::IntUGE, IntPredicate::IntUGE,
llval, llval,
@ -324,8 +326,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bx.assume(cmp_start); bx.assume(cmp_start);
} }
let enum_value_upper_bound = let enum_value_upper_bound = bx
bx.cx().const_uint_big(ll_t_in, scalar.valid_range.end); .cx()
.const_uint_big(ll_t_in, scalar.valid_range(bx.cx()).end);
let cmp_end = bx.icmp( let cmp_end = bx.icmp(
IntPredicate::IntULE, IntPredicate::IntULE,
llval, llval,

View File

@ -15,7 +15,7 @@ use rustc_middle::ty::layout::LayoutOf;
use rustc_middle::ty::print::with_no_trimmed_paths; use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_middle::ty::{self, subst::Subst, TyCtxt}; use rustc_middle::ty::{self, subst::Subst, TyCtxt};
use rustc_span::source_map::Span; use rustc_span::source_map::Span;
use rustc_target::abi::Abi; use rustc_target::abi::{self, Abi};
use std::borrow::Cow; use std::borrow::Cow;
use std::convert::TryInto; use std::convert::TryInto;
@ -118,7 +118,7 @@ pub(super) fn op_to_const<'tcx>(
// the usual cases of extracting e.g. a `usize`, without there being a real use case for the // the usual cases of extracting e.g. a `usize`, without there being a real use case for the
// `Undef` situation. // `Undef` situation.
let try_as_immediate = match op.layout.abi { let try_as_immediate = match op.layout.abi {
Abi::Scalar(..) => true, Abi::Scalar(abi::Scalar::Initialized { .. }) => true,
Abi::ScalarPair(..) => match op.layout.ty.kind() { Abi::ScalarPair(..) => match op.layout.ty.kind() {
ty::Ref(_, inner, _) => match *inner.kind() { ty::Ref(_, inner, _) => match *inner.kind() {
ty::Slice(elem) => elem == ecx.tcx.types.u8, ty::Slice(elem) => elem == ecx.tcx.types.u8,

View File

@ -188,7 +188,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let val = self.read_scalar(&args[0])?.check_init()?; let val = self.read_scalar(&args[0])?.check_init()?;
let bits = val.to_bits(layout_of.size)?; let bits = val.to_bits(layout_of.size)?;
let kind = match layout_of.abi { let kind = match layout_of.abi {
Abi::Scalar(scalar) => scalar.value, Abi::Scalar(scalar) => scalar.primitive(),
_ => span_bug!( _ => span_bug!(
self.cur_span(), self.cur_span(),
"{} called on invalid type {:?}", "{} called on invalid type {:?}",

View File

@ -10,7 +10,7 @@ use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter, Printer}; use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter, Printer};
use rustc_middle::ty::{ConstInt, DelaySpanBugEmitted, Ty}; use rustc_middle::ty::{ConstInt, DelaySpanBugEmitted, Ty};
use rustc_middle::{mir, ty}; use rustc_middle::{mir, ty};
use rustc_target::abi::{Abi, HasDataLayout, Size, TagEncoding}; use rustc_target::abi::{self, Abi, HasDataLayout, Size, TagEncoding};
use rustc_target::abi::{VariantIdx, Variants}; use rustc_target::abi::{VariantIdx, Variants};
use super::{ use super::{
@ -265,16 +265,24 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
})); }));
}; };
// It may seem like all types with `Scalar` or `ScalarPair` ABI are fair game at this point.
// However, `MaybeUninit<u64>` is considered a `Scalar` as far as its layout is concerned --
// and yet cannot be represented by an interpreter `Scalar`, since we have to handle the
// case where some of the bytes are initialized and others are not. So, we need an extra
// check that walks over the type of `mplace` to make sure it is truly correct to treat this
// like a `Scalar` (or `ScalarPair`).
match mplace.layout.abi { match mplace.layout.abi {
Abi::Scalar(..) => { Abi::Scalar(abi::Scalar::Initialized { .. }) => {
let scalar = alloc.read_scalar(alloc_range(Size::ZERO, mplace.layout.size))?; let scalar = alloc.read_scalar(alloc_range(Size::ZERO, mplace.layout.size))?;
Ok(Some(ImmTy { imm: scalar.into(), layout: mplace.layout })) Ok(Some(ImmTy { imm: scalar.into(), layout: mplace.layout }))
} }
Abi::ScalarPair(a, b) => { Abi::ScalarPair(
abi::Scalar::Initialized { value: a, .. },
abi::Scalar::Initialized { value: b, .. },
) => {
// We checked `ptr_align` above, so all fields will have the alignment they need. // We checked `ptr_align` above, so all fields will have the alignment they need.
// We would anyway check against `ptr_align.restrict_for_offset(b_offset)`, // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
// which `ptr.offset(b_offset)` cannot possibly fail to satisfy. // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
let (a, b) = (a.value, b.value);
let (a_size, b_size) = (a.size(self), b.size(self)); let (a_size, b_size) = (a.size(self), b.size(self));
let b_offset = a_size.align_to(b.align(self).abi); let b_offset = a_size.align_to(b.align(self).abi);
assert!(b_offset.bytes() > 0); // we later use the offset to tell apart the fields assert!(b_offset.bytes() > 0); // we later use the offset to tell apart the fields
@ -676,7 +684,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// may be a pointer. This is `tag_val.layout`; we just use it for sanity checks. // may be a pointer. This is `tag_val.layout`; we just use it for sanity checks.
// Get layout for tag. // Get layout for tag.
let tag_layout = self.layout_of(tag_scalar_layout.value.to_int_ty(*self.tcx))?; let tag_layout = self.layout_of(tag_scalar_layout.primitive().to_int_ty(*self.tcx))?;
// Read tag and sanity-check `tag_layout`. // Read tag and sanity-check `tag_layout`.
let tag_val = self.read_immediate(&self.operand_field(op, tag_field)?)?; let tag_val = self.read_immediate(&self.operand_field(op, tag_field)?)?;

View File

@ -772,13 +772,11 @@ where
// We checked `ptr_align` above, so all fields will have the alignment they need. // We checked `ptr_align` above, so all fields will have the alignment they need.
// We would anyway check against `ptr_align.restrict_for_offset(b_offset)`, // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
// which `ptr.offset(b_offset)` cannot possibly fail to satisfy. // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
let (a, b) = match dest.layout.abi { let Abi::ScalarPair(a, b) = dest.layout.abi else { span_bug!(
Abi::ScalarPair(a, b) => (a.value, b.value),
_ => span_bug!(
self.cur_span(), self.cur_span(),
"write_immediate_to_mplace: invalid ScalarPair layout: {:#?}", "write_immediate_to_mplace: invalid ScalarPair layout: {:#?}",
dest.layout dest.layout
), )
}; };
let (a_size, b_size) = (a.size(&tcx), b.size(&tcx)); let (a_size, b_size) = (a.size(&tcx), b.size(&tcx));
let b_offset = a_size.align_to(b.align(&tcx).abi); let b_offset = a_size.align_to(b.align(&tcx).abi);
@ -1046,7 +1044,7 @@ where
// raw discriminants for enums are isize or bigger during // raw discriminants for enums are isize or bigger during
// their computation, but the in-memory tag is the smallest possible // their computation, but the in-memory tag is the smallest possible
// representation // representation
let size = tag_layout.value.size(self); let size = tag_layout.size(self);
let tag_val = size.truncate(discr_val); let tag_val = size.truncate(discr_val);
let tag_dest = self.place_field(dest, tag_field)?; let tag_dest = self.place_field(dest, tag_field)?;
@ -1070,7 +1068,7 @@ where
.expect("overflow computing relative variant idx"); .expect("overflow computing relative variant idx");
// We need to use machine arithmetic when taking into account `niche_start`: // We need to use machine arithmetic when taking into account `niche_start`:
// tag_val = variant_index_relative + niche_start_val // tag_val = variant_index_relative + niche_start_val
let tag_layout = self.layout_of(tag_layout.value.to_int_ty(*self.tcx))?; let tag_layout = self.layout_of(tag_layout.primitive().to_int_ty(*self.tcx))?;
let niche_start_val = ImmTy::from_uint(niche_start, tag_layout); let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
let variant_index_relative_val = let variant_index_relative_val =
ImmTy::from_uint(variant_index_relative, tag_layout); ImmTy::from_uint(variant_index_relative, tag_layout);

View File

@ -189,12 +189,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// that will take care to make it UB to leave the range, just // that will take care to make it UB to leave the range, just
// like for transmute). // like for transmute).
(abi::Abi::Scalar(caller), abi::Abi::Scalar(callee)) => { (abi::Abi::Scalar(caller), abi::Abi::Scalar(callee)) => {
caller.value == callee.value caller.primitive() == callee.primitive()
} }
( (
abi::Abi::ScalarPair(caller1, caller2), abi::Abi::ScalarPair(caller1, caller2),
abi::Abi::ScalarPair(callee1, callee2), abi::Abi::ScalarPair(callee1, callee2),
) => caller1.value == callee1.value && caller2.value == callee2.value, ) => {
caller1.primitive() == callee1.primitive()
&& caller2.primitive() == callee2.primitive()
}
// Be conservative // Be conservative
_ => false, _ => false,
} }

View File

@ -629,12 +629,12 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
op: &OpTy<'tcx, M::PointerTag>, op: &OpTy<'tcx, M::PointerTag>,
scalar_layout: ScalarAbi, scalar_layout: ScalarAbi,
) -> InterpResult<'tcx> { ) -> InterpResult<'tcx> {
if scalar_layout.valid_range.is_full_for(op.layout.size) { if scalar_layout.valid_range(self.ecx).is_full_for(op.layout.size) {
// Nothing to check // Nothing to check
return Ok(()); return Ok(());
} }
// At least one value is excluded. // At least one value is excluded.
let valid_range = scalar_layout.valid_range; let valid_range = scalar_layout.valid_range(self.ecx);
let WrappingRange { start, end } = valid_range; let WrappingRange { start, end } = valid_range;
let max_value = op.layout.size.unsigned_int_max(); let max_value = op.layout.size.unsigned_int_max();
assert!(end <= max_value); assert!(end <= max_value);

View File

@ -12,7 +12,7 @@ use rustc_middle::ty::{self, AdtKind, DefIdTree, Ty, TyCtxt, TypeFoldable};
use rustc_span::source_map; use rustc_span::source_map;
use rustc_span::symbol::sym; use rustc_span::symbol::sym;
use rustc_span::{Span, Symbol, DUMMY_SP}; use rustc_span::{Span, Symbol, DUMMY_SP};
use rustc_target::abi::Abi; use rustc_target::abi::{Abi, WrappingRange};
use rustc_target::abi::{Integer, TagEncoding, Variants}; use rustc_target::abi::{Integer, TagEncoding, Variants};
use rustc_target::spec::abi::Abi as SpecAbi; use rustc_target::spec::abi::Abi as SpecAbi;
@ -796,14 +796,18 @@ crate fn repr_nullable_ptr<'tcx>(
// Return the nullable type this Option-like enum can be safely represented with. // Return the nullable type this Option-like enum can be safely represented with.
let field_ty_abi = &cx.layout_of(field_ty).unwrap().abi; let field_ty_abi = &cx.layout_of(field_ty).unwrap().abi;
if let Abi::Scalar(field_ty_scalar) = field_ty_abi { if let Abi::Scalar(field_ty_scalar) = field_ty_abi {
match (field_ty_scalar.valid_range.start, field_ty_scalar.valid_range.end) { match field_ty_scalar.valid_range(cx) {
(0, x) if x == field_ty_scalar.value.size(&cx.tcx).unsigned_int_max() - 1 => { WrappingRange { start: 0, end }
if end == field_ty_scalar.size(&cx.tcx).unsigned_int_max() - 1 =>
{
return Some(get_nullable_type(cx, field_ty).unwrap()); return Some(get_nullable_type(cx, field_ty).unwrap());
} }
(1, _) => { WrappingRange { start: 1, .. } => {
return Some(get_nullable_type(cx, field_ty).unwrap()); return Some(get_nullable_type(cx, field_ty).unwrap());
} }
(start, end) => unreachable!("Unhandled start and end range: ({}, {})", start, end), WrappingRange { start, end } => {
unreachable!("Unhandled start and end range: ({}, {})", start, end)
}
}; };
} }
} }
@ -1342,7 +1346,7 @@ impl<'tcx> LateLintPass<'tcx> for VariantSizeDifferences {
return return
}; };
let tag_size = tag.value.size(&cx.tcx).bytes(); let tag_size = tag.size(&cx.tcx).bytes();
debug!( debug!(
"enum `{}` is {} bytes large with layout:\n{:#?}", "enum `{}` is {} bytes large with layout:\n{:#?}",

View File

@ -305,10 +305,10 @@ fn invert_mapping(map: &[u32]) -> Vec<u32> {
impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> { impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutS<'tcx> { fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutS<'tcx> {
let dl = self.data_layout(); let dl = self.data_layout();
let b_align = b.value.align(dl); let b_align = b.align(dl);
let align = a.value.align(dl).max(b_align).max(dl.aggregate_align); let align = a.align(dl).max(b_align).max(dl.aggregate_align);
let b_offset = a.value.size(dl).align_to(b_align.abi); let b_offset = a.size(dl).align_to(b_align.abi);
let size = (b_offset + b.value.size(dl)).align_to(align.abi); let size = (b_offset + b.size(dl)).align_to(align.abi);
// HACK(nox): We iter on `b` and then `a` because `max_by_key` // HACK(nox): We iter on `b` and then `a` because `max_by_key`
// returns the last maximum. // returns the last maximum.
@ -567,7 +567,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
let scalar_unit = |value: Primitive| { let scalar_unit = |value: Primitive| {
let size = value.size(dl); let size = value.size(dl);
assert!(size.bits() <= 128); assert!(size.bits() <= 128);
Scalar { value, valid_range: WrappingRange { start: 0, end: size.unsigned_int_max() } } Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
}; };
let scalar = let scalar =
|value: Primitive| tcx.intern_layout(LayoutS::scalar(self, scalar_unit(value))); |value: Primitive| tcx.intern_layout(LayoutS::scalar(self, scalar_unit(value)));
@ -581,11 +581,14 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
// Basic scalars. // Basic scalars.
ty::Bool => tcx.intern_layout(LayoutS::scalar( ty::Bool => tcx.intern_layout(LayoutS::scalar(
self, self,
Scalar { value: Int(I8, false), valid_range: WrappingRange { start: 0, end: 1 } }, Scalar::Initialized {
value: Int(I8, false),
valid_range: WrappingRange { start: 0, end: 1 },
},
)), )),
ty::Char => tcx.intern_layout(LayoutS::scalar( ty::Char => tcx.intern_layout(LayoutS::scalar(
self, self,
Scalar { Scalar::Initialized {
value: Int(I32, false), value: Int(I32, false),
valid_range: WrappingRange { start: 0, end: 0x10FFFF }, valid_range: WrappingRange { start: 0, end: 0x10FFFF },
}, },
@ -598,7 +601,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
}), }),
ty::FnPtr(_) => { ty::FnPtr(_) => {
let mut ptr = scalar_unit(Pointer); let mut ptr = scalar_unit(Pointer);
ptr.valid_range = ptr.valid_range.with_start(1); ptr.valid_range_mut().start = 1;
tcx.intern_layout(LayoutS::scalar(self, ptr)) tcx.intern_layout(LayoutS::scalar(self, ptr))
} }
@ -616,7 +619,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => { ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
let mut data_ptr = scalar_unit(Pointer); let mut data_ptr = scalar_unit(Pointer);
if !ty.is_unsafe_ptr() { if !ty.is_unsafe_ptr() {
data_ptr.valid_range = data_ptr.valid_range.with_start(1); data_ptr.valid_range_mut().start = 1;
} }
let pointee = tcx.normalize_erasing_regions(param_env, pointee); let pointee = tcx.normalize_erasing_regions(param_env, pointee);
@ -632,7 +635,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)), ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
ty::Dynamic(..) => { ty::Dynamic(..) => {
let mut vtable = scalar_unit(Pointer); let mut vtable = scalar_unit(Pointer);
vtable.valid_range = vtable.valid_range.with_start(1); vtable.valid_range_mut().start = 1;
vtable vtable
} }
_ => return Err(LayoutError::Unknown(unsized_part)), _ => return Err(LayoutError::Unknown(unsized_part)),
@ -889,14 +892,14 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
// If all non-ZST fields have the same ABI, forward this ABI // If all non-ZST fields have the same ABI, forward this ABI
if optimize && !field.is_zst() { if optimize && !field.is_zst() {
// Normalize scalar_unit to the maximal valid range // Discard valid range information and allow undef
let field_abi = match field.abi { let field_abi = match field.abi {
Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)), Abi::Scalar(x) => Abi::Scalar(x.to_union()),
Abi::ScalarPair(x, y) => { Abi::ScalarPair(x, y) => {
Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value)) Abi::ScalarPair(x.to_union(), y.to_union())
} }
Abi::Vector { element: x, count } => { Abi::Vector { element: x, count } => {
Abi::Vector { element: scalar_unit(x.value), count } Abi::Vector { element: x.to_union(), count }
} }
Abi::Uninhabited | Abi::Aggregate { .. } => { Abi::Uninhabited | Abi::Aggregate { .. } => {
Abi::Aggregate { sized: true } Abi::Aggregate { sized: true }
@ -1000,14 +1003,16 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
if let Bound::Included(start) = start { if let Bound::Included(start) = start {
// FIXME(eddyb) this might be incorrect - it doesn't // FIXME(eddyb) this might be incorrect - it doesn't
// account for wrap-around (end < start) ranges. // account for wrap-around (end < start) ranges.
assert!(scalar.valid_range.start <= start); let valid_range = scalar.valid_range_mut();
scalar.valid_range.start = start; assert!(valid_range.start <= start);
valid_range.start = start;
} }
if let Bound::Included(end) = end { if let Bound::Included(end) = end {
// FIXME(eddyb) this might be incorrect - it doesn't // FIXME(eddyb) this might be incorrect - it doesn't
// account for wrap-around (end < start) ranges. // account for wrap-around (end < start) ranges.
assert!(scalar.valid_range.end >= end); let valid_range = scalar.valid_range_mut();
scalar.valid_range.end = end; assert!(valid_range.end >= end);
valid_range.end = end;
} }
// Update `largest_niche` if we have introduced a larger niche. // Update `largest_niche` if we have introduced a larger niche.
@ -1133,9 +1138,15 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
// guaranteed to be initialised, not the // guaranteed to be initialised, not the
// other primitive. // other primitive.
if offset.bytes() == 0 { if offset.bytes() == 0 {
Abi::ScalarPair(niche_scalar, scalar_unit(second.value)) Abi::ScalarPair(
niche_scalar,
scalar_unit(second.primitive()),
)
} else { } else {
Abi::ScalarPair(scalar_unit(first.value), niche_scalar) Abi::ScalarPair(
scalar_unit(first.primitive()),
niche_scalar,
)
} }
} }
_ => Abi::Aggregate { sized: true }, _ => Abi::Aggregate { sized: true },
@ -1314,7 +1325,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
} }
let tag_mask = ity.size().unsigned_int_max(); let tag_mask = ity.size().unsigned_int_max();
let tag = Scalar { let tag = Scalar::Initialized {
value: Int(ity, signed), value: Int(ity, signed),
valid_range: WrappingRange { valid_range: WrappingRange {
start: (min as u128 & tag_mask), start: (min as u128 & tag_mask),
@ -1325,7 +1336,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
// Without latter check aligned enums with custom discriminant values // Without latter check aligned enums with custom discriminant values
// Would result in ICE see the issue #92464 for more info // Would result in ICE see the issue #92464 for more info
if tag.value.size(dl) == size || variants.iter().all(|layout| layout.is_empty()) { if tag.size(dl) == size || variants.iter().all(|layout| layout.is_empty()) {
abi = Abi::Scalar(tag); abi = Abi::Scalar(tag);
} else { } else {
// Try to use a ScalarPair for all tagged enums. // Try to use a ScalarPair for all tagged enums.
@ -1345,7 +1356,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
} }
}; };
let prim = match field.abi { let prim = match field.abi {
Abi::Scalar(scalar) => scalar.value, Abi::Scalar(scalar) => scalar.primitive(),
_ => { _ => {
common_prim = None; common_prim = None;
break; break;
@ -1599,7 +1610,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
let max_discr = (info.variant_fields.len() - 1) as u128; let max_discr = (info.variant_fields.len() - 1) as u128;
let discr_int = Integer::fit_unsigned(max_discr); let discr_int = Integer::fit_unsigned(max_discr);
let discr_int_ty = discr_int.to_ty(tcx, false); let discr_int_ty = discr_int.to_ty(tcx, false);
let tag = Scalar { let tag = Scalar::Initialized {
value: Primitive::Int(discr_int, false), value: Primitive::Int(discr_int, false),
valid_range: WrappingRange { start: 0, end: max_discr }, valid_range: WrappingRange { start: 0, end: max_discr },
}; };
@ -1898,7 +1909,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
adt_kind.into(), adt_kind.into(),
adt_packed, adt_packed,
match tag_encoding { match tag_encoding {
TagEncoding::Direct => Some(tag.value.size(self)), TagEncoding::Direct => Some(tag.size(self)),
_ => None, _ => None,
}, },
variant_infos, variant_infos,
@ -2304,7 +2315,7 @@ where
let tag_layout = |tag: Scalar| -> TyAndLayout<'tcx> { let tag_layout = |tag: Scalar| -> TyAndLayout<'tcx> {
TyAndLayout { TyAndLayout {
layout: tcx.intern_layout(LayoutS::scalar(cx, tag)), layout: tcx.intern_layout(LayoutS::scalar(cx, tag)),
ty: tag.value.to_ty(tcx), ty: tag.primitive().to_ty(tcx),
} }
}; };
@ -3079,11 +3090,9 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
} }
// Only pointer types handled below. // Only pointer types handled below.
if scalar.value != Pointer { let Scalar::Initialized { value: Pointer, valid_range} = scalar else { return };
return;
}
if !scalar.valid_range.contains(0) { if !valid_range.contains(0) {
attrs.set(ArgAttribute::NonNull); attrs.set(ArgAttribute::NonNull);
} }

View File

@ -6,7 +6,7 @@ use crate::abi::{self, HasDataLayout, Size, TyAbiInterface};
fn extend_integer_width_mips<Ty>(arg: &mut ArgAbi<'_, Ty>, bits: u64) { fn extend_integer_width_mips<Ty>(arg: &mut ArgAbi<'_, Ty>, bits: u64) {
// Always sign extend u32 values on 64-bit mips // Always sign extend u32 values on 64-bit mips
if let abi::Abi::Scalar(scalar) = arg.layout.abi { if let abi::Abi::Scalar(scalar) = arg.layout.abi {
if let abi::Int(i, signed) = scalar.value { if let abi::Int(i, signed) = scalar.primitive() {
if !signed && i.size().bits() == 32 { if !signed && i.size().bits() == 32 {
if let PassMode::Direct(ref mut attrs) = arg.mode { if let PassMode::Direct(ref mut attrs) = arg.mode {
attrs.ext(ArgExtension::Sext); attrs.ext(ArgExtension::Sext);
@ -25,7 +25,7 @@ where
C: HasDataLayout, C: HasDataLayout,
{ {
match ret.layout.field(cx, i).abi { match ret.layout.field(cx, i).abi {
abi::Abi::Scalar(scalar) => match scalar.value { abi::Abi::Scalar(scalar) => match scalar.primitive() {
abi::F32 => Some(Reg::f32()), abi::F32 => Some(Reg::f32()),
abi::F64 => Some(Reg::f64()), abi::F64 => Some(Reg::f64()),
_ => None, _ => None,
@ -110,7 +110,7 @@ where
// We only care about aligned doubles // We only care about aligned doubles
if let abi::Abi::Scalar(scalar) = field.abi { if let abi::Abi::Scalar(scalar) = field.abi {
if let abi::F64 = scalar.value { if let abi::F64 = scalar.primitive() {
if offset.is_aligned(dl.f64_align.abi) { if offset.is_aligned(dl.f64_align.abi) {
// Insert enough integers to cover [last_offset, offset) // Insert enough integers to cover [last_offset, offset)
assert!(last_offset.is_aligned(dl.f64_align.abi)); assert!(last_offset.is_aligned(dl.f64_align.abi));

View File

@ -348,7 +348,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
// The primitive for this algorithm. // The primitive for this algorithm.
Abi::Scalar(scalar) => { Abi::Scalar(scalar) => {
let kind = match scalar.value { let kind = match scalar.primitive() {
abi::Int(..) | abi::Pointer => RegKind::Integer, abi::Int(..) | abi::Pointer => RegKind::Integer,
abi::F32 | abi::F64 => RegKind::Float, abi::F32 | abi::F64 => RegKind::Float,
}; };
@ -482,7 +482,7 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
Abi::Scalar(scalar) => PassMode::Direct(scalar_attrs(&layout, scalar, Size::ZERO)), Abi::Scalar(scalar) => PassMode::Direct(scalar_attrs(&layout, scalar, Size::ZERO)),
Abi::ScalarPair(a, b) => PassMode::Pair( Abi::ScalarPair(a, b) => PassMode::Pair(
scalar_attrs(&layout, a, Size::ZERO), scalar_attrs(&layout, a, Size::ZERO),
scalar_attrs(&layout, b, a.value.size(cx).align_to(b.value.align(cx).abi)), scalar_attrs(&layout, b, a.size(cx).align_to(b.align(cx).abi)),
), ),
Abi::Vector { .. } => PassMode::Direct(ArgAttributes::new()), Abi::Vector { .. } => PassMode::Direct(ArgAttributes::new()),
Abi::Aggregate { .. } => PassMode::Direct(ArgAttributes::new()), Abi::Aggregate { .. } => PassMode::Direct(ArgAttributes::new()),
@ -534,7 +534,7 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
pub fn extend_integer_width_to(&mut self, bits: u64) { pub fn extend_integer_width_to(&mut self, bits: u64) {
// Only integers have signedness // Only integers have signedness
if let Abi::Scalar(scalar) = self.layout.abi { if let Abi::Scalar(scalar) = self.layout.abi {
if let abi::Int(i, signed) = scalar.value { if let abi::Int(i, signed) = scalar.primitive() {
if i.size().bits() < bits { if i.size().bits() < bits {
if let PassMode::Direct(ref mut attrs) = self.mode { if let PassMode::Direct(ref mut attrs) = self.mode {
if signed { if signed {

View File

@ -44,7 +44,7 @@ where
Ty: TyAbiInterface<'a, C> + Copy, Ty: TyAbiInterface<'a, C> + Copy,
{ {
match arg_layout.abi { match arg_layout.abi {
Abi::Scalar(scalar) => match scalar.value { Abi::Scalar(scalar) => match scalar.primitive() {
abi::Int(..) | abi::Pointer => { abi::Int(..) | abi::Pointer => {
if arg_layout.size.bits() > xlen { if arg_layout.size.bits() > xlen {
return Err(CannotUseFpConv); return Err(CannotUseFpConv);
@ -298,7 +298,7 @@ fn classify_arg<'a, Ty, C>(
fn extend_integer_width<'a, Ty>(arg: &mut ArgAbi<'a, Ty>, xlen: u64) { fn extend_integer_width<'a, Ty>(arg: &mut ArgAbi<'a, Ty>, xlen: u64) {
if let Abi::Scalar(scalar) = arg.layout.abi { if let Abi::Scalar(scalar) = arg.layout.abi {
if let abi::Int(i, _) = scalar.value { if let abi::Int(i, _) = scalar.primitive() {
// 32-bit integers are always sign-extended // 32-bit integers are always sign-extended
if i.size().bits() == 32 && xlen > 32 { if i.size().bits() == 32 && xlen > 32 {
if let PassMode::Direct(ref mut attrs) = arg.mode { if let PassMode::Direct(ref mut attrs) = arg.mode {

View File

@ -20,7 +20,7 @@ where
{ {
let dl = cx.data_layout(); let dl = cx.data_layout();
if scalar.value != abi::F32 && scalar.value != abi::F64 { if !scalar.primitive().is_float() {
return data; return data;
} }
@ -56,7 +56,7 @@ where
return data; return data;
} }
if scalar.value == abi::F32 { if scalar.primitive() == abi::F32 {
data.arg_attribute = ArgAttribute::InReg; data.arg_attribute = ArgAttribute::InReg;
data.prefix[data.prefix_index] = Some(Reg::f32()); data.prefix[data.prefix_index] = Some(Reg::f32());
data.last_offset = offset + Reg::f32().size; data.last_offset = offset + Reg::f32().size;
@ -79,17 +79,15 @@ where
C: HasDataLayout, C: HasDataLayout,
{ {
data = arg_scalar(cx, &scalar1, offset, data); data = arg_scalar(cx, &scalar1, offset, data);
if scalar1.value == abi::F32 { match (scalar1.primitive(), scalar2.primitive()) {
offset += Reg::f32().size; (abi::F32, _) => offset += Reg::f32().size,
} else if scalar2.value == abi::F64 { (_, abi::F64) => offset += Reg::f64().size,
offset += Reg::f64().size; (abi::Int(i, _signed), _) => offset += i.size(),
} else if let abi::Int(i, _signed) = scalar1.value { (abi::Pointer, _) => offset += Reg::i64().size,
offset += i.size(); _ => {}
} else if scalar1.value == abi::Pointer {
offset = offset + Reg::i64().size;
} }
if (offset.raw % 4) != 0 && (scalar2.value == abi::F32 || scalar2.value == abi::F64) { if (offset.raw % 4) != 0 && scalar2.primitive().is_float() {
offset.raw += 4 - (offset.raw % 4); offset.raw += 4 - (offset.raw % 4);
} }
data = arg_scalar(cx, &scalar2, offset, data); data = arg_scalar(cx, &scalar2, offset, data);

View File

@ -49,7 +49,7 @@ where
let mut c = match layout.abi { let mut c = match layout.abi {
Abi::Uninhabited => return Ok(()), Abi::Uninhabited => return Ok(()),
Abi::Scalar(scalar) => match scalar.value { Abi::Scalar(scalar) => match scalar.primitive() {
abi::Int(..) | abi::Pointer => Class::Int, abi::Int(..) | abi::Pointer => Class::Int,
abi::F32 | abi::F64 => Class::Sse, abi::F32 | abi::F64 => Class::Sse,
}, },

View File

@ -752,6 +752,10 @@ pub struct WrappingRange {
} }
impl WrappingRange { impl WrappingRange {
pub fn full(size: Size) -> Self {
Self { start: 0, end: size.unsigned_int_max() }
}
/// Returns `true` if `v` is contained in the range. /// Returns `true` if `v` is contained in the range.
#[inline(always)] #[inline(always)]
pub fn contains(&self, v: u128) -> bool { pub fn contains(&self, v: u128) -> bool {
@ -799,13 +803,23 @@ impl fmt::Debug for WrappingRange {
/// Information about one scalar component of a Rust type. /// Information about one scalar component of a Rust type.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[derive(HashStable_Generic)] #[derive(HashStable_Generic)]
pub struct Scalar { pub enum Scalar {
pub value: Primitive, Initialized {
value: Primitive,
// FIXME(eddyb) always use the shortest range, e.g., by finding // FIXME(eddyb) always use the shortest range, e.g., by finding
// the largest space between two consecutive valid values and // the largest space between two consecutive valid values and
// taking everything else as the (shortest) valid range. // taking everything else as the (shortest) valid range.
pub valid_range: WrappingRange, valid_range: WrappingRange,
},
Union {
/// Even for unions, we need to use the correct registers for the kind of
/// values inside the union, so we keep the `Primitive` type around. We
/// also use it to compute the size of the scalar.
/// However, unions never have niches and even allow undef,
/// so there is no `valid_range`.
value: Primitive,
},
} }
impl Scalar { impl Scalar {
@ -813,14 +827,58 @@ impl Scalar {
pub fn is_bool(&self) -> bool { pub fn is_bool(&self) -> bool {
matches!( matches!(
self, self,
Scalar { value: Int(I8, false), valid_range: WrappingRange { start: 0, end: 1 } } Scalar::Initialized {
value: Int(I8, false),
valid_range: WrappingRange { start: 0, end: 1 }
}
) )
} }
/// Get the primitive representation of this type, ignoring the valid range and whether the
/// value is allowed to be undefined (due to being a union).
pub fn primitive(&self) -> Primitive {
match *self {
Scalar::Initialized { value, .. } | Scalar::Union { value } => value,
}
}
pub fn align(self, cx: &impl HasDataLayout) -> AbiAndPrefAlign {
self.primitive().align(cx)
}
pub fn size(self, cx: &impl HasDataLayout) -> Size {
self.primitive().size(cx)
}
#[inline]
pub fn to_union(&self) -> Self {
Self::Union { value: self.primitive() }
}
#[inline]
pub fn valid_range(&self, cx: &impl HasDataLayout) -> WrappingRange {
match *self {
Scalar::Initialized { valid_range, .. } => valid_range,
Scalar::Union { value } => WrappingRange::full(value.size(cx)),
}
}
#[inline]
/// Allows the caller to mutate the valid range. This operation will panic if attempted on a union.
pub fn valid_range_mut(&mut self) -> &mut WrappingRange {
match self {
Scalar::Initialized { valid_range, .. } => valid_range,
Scalar::Union { .. } => panic!("cannot change the valid range of a union"),
}
}
/// Returns `true` if all possible numbers are valid, i.e `valid_range` covers the whole layout /// Returns `true` if all possible numbers are valid, i.e `valid_range` covers the whole layout
#[inline] #[inline]
pub fn is_always_valid<C: HasDataLayout>(&self, cx: &C) -> bool { pub fn is_always_valid<C: HasDataLayout>(&self, cx: &C) -> bool {
self.valid_range.is_full_for(self.value.size(cx)) match *self {
Scalar::Initialized { valid_range, .. } => valid_range.is_full_for(self.size(cx)),
Scalar::Union { .. } => true,
}
} }
} }
@ -988,7 +1046,7 @@ impl Abi {
#[inline] #[inline]
pub fn is_signed(&self) -> bool { pub fn is_signed(&self) -> bool {
match self { match self {
Abi::Scalar(scal) => match scal.value { Abi::Scalar(scal) => match scal.primitive() {
Primitive::Int(_, signed) => signed, Primitive::Int(_, signed) => signed,
_ => false, _ => false,
}, },
@ -1060,17 +1118,19 @@ pub enum TagEncoding {
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)] #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct Niche { pub struct Niche {
pub offset: Size, pub offset: Size,
pub scalar: Scalar, pub value: Primitive,
pub valid_range: WrappingRange,
} }
impl Niche { impl Niche {
pub fn from_scalar<C: HasDataLayout>(cx: &C, offset: Size, scalar: Scalar) -> Option<Self> { pub fn from_scalar<C: HasDataLayout>(cx: &C, offset: Size, scalar: Scalar) -> Option<Self> {
let niche = Niche { offset, scalar }; let Scalar::Initialized { value, valid_range } = scalar else { return None };
let niche = Niche { offset, value, valid_range };
if niche.available(cx) > 0 { Some(niche) } else { None } if niche.available(cx) > 0 { Some(niche) } else { None }
} }
pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 { pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 {
let Scalar { value, valid_range: v } = self.scalar; let Self { value, valid_range: v, .. } = *self;
let size = value.size(cx); let size = value.size(cx);
assert!(size.bits() <= 128); assert!(size.bits() <= 128);
let max_value = size.unsigned_int_max(); let max_value = size.unsigned_int_max();
@ -1083,7 +1143,7 @@ impl Niche {
pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> { pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
assert!(count > 0); assert!(count > 0);
let Scalar { value, valid_range: v } = self.scalar; let Self { value, valid_range: v, .. } = *self;
let size = value.size(cx); let size = value.size(cx);
assert!(size.bits() <= 128); assert!(size.bits() <= 128);
let max_value = size.unsigned_int_max(); let max_value = size.unsigned_int_max();
@ -1107,12 +1167,12 @@ impl Niche {
// If niche zero is already reserved, the selection of bounds are of little interest. // If niche zero is already reserved, the selection of bounds are of little interest.
let move_start = |v: WrappingRange| { let move_start = |v: WrappingRange| {
let start = v.start.wrapping_sub(count) & max_value; let start = v.start.wrapping_sub(count) & max_value;
Some((start, Scalar { value, valid_range: v.with_start(start) })) Some((start, Scalar::Initialized { value, valid_range: v.with_start(start) }))
}; };
let move_end = |v: WrappingRange| { let move_end = |v: WrappingRange| {
let start = v.end.wrapping_add(1) & max_value; let start = v.end.wrapping_add(1) & max_value;
let end = v.end.wrapping_add(count) & max_value; let end = v.end.wrapping_add(count) & max_value;
Some((start, Scalar { value, valid_range: v.with_end(end) })) Some((start, Scalar::Initialized { value, valid_range: v.with_end(end) }))
}; };
let distance_end_zero = max_value - v.end; let distance_end_zero = max_value - v.end;
if v.start > v.end { if v.start > v.end {
@ -1172,8 +1232,8 @@ pub struct LayoutS<'a> {
impl<'a> LayoutS<'a> { impl<'a> LayoutS<'a> {
pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self { pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar); let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar);
let size = scalar.value.size(cx); let size = scalar.size(cx);
let align = scalar.value.align(cx); let align = scalar.align(cx);
LayoutS { LayoutS {
variants: Variants::Single { index: VariantIdx::new(0) }, variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Primitive, fields: FieldsShape::Primitive,
@ -1325,7 +1385,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
C: HasDataLayout, C: HasDataLayout,
{ {
match self.abi { match self.abi {
Abi::Scalar(scalar) => scalar.value.is_float(), Abi::Scalar(scalar) => scalar.primitive().is_float(),
Abi::Aggregate { .. } => { Abi::Aggregate { .. } => {
if self.fields.count() == 1 && self.fields.offset(0).bytes() == 0 { if self.fields.count() == 1 && self.fields.offset(0).bytes() == 0 {
self.field(cx, 0).is_single_fp_element(cx) self.field(cx, 0).is_single_fp_element(cx)
@ -1371,7 +1431,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
let scalar_allows_raw_init = move |s: Scalar| -> bool { let scalar_allows_raw_init = move |s: Scalar| -> bool {
if zero { if zero {
// The range must contain 0. // The range must contain 0.
s.valid_range.contains(0) s.valid_range(cx).contains(0)
} else { } else {
// The range must include all values. // The range must include all values.
s.is_always_valid(cx) s.is_always_valid(cx)

View File

@ -1769,7 +1769,7 @@ fn document_type_layout(w: &mut Buffer, cx: &Context<'_>, ty_def_id: DefId) {
let tag_size = if let TagEncoding::Niche { .. } = tag_encoding { let tag_size = if let TagEncoding::Niche { .. } = tag_encoding {
0 0
} else if let Primitive::Int(i, _) = tag.value { } else if let Primitive::Int(i, _) = tag.primitive() {
i.size().bytes() i.size().bytes()
} else { } else {
span_bug!(tcx.def_span(ty_def_id), "tag is neither niche nor int") span_bug!(tcx.def_span(ty_def_id), "tag is neither niche nor int")

View File

@ -3,56 +3,70 @@
fn main() -> () { fn main() -> () {
let mut _0: (); // return place in scope 0 at $DIR/invalid_constant.rs:15:11: 15:11 let mut _0: (); // return place in scope 0 at $DIR/invalid_constant.rs:15:11: 15:11
let _1: main::InvalidChar; // in scope 0 at $DIR/invalid_constant.rs:21:9: 21:22 let _1: char; // in scope 0 at $DIR/invalid_constant.rs:21:9: 21:22
let mut _3: main::InvalidTag; // in scope 0 at $DIR/invalid_constant.rs:28:25: 28:46 let mut _2: main::InvalidChar; // in scope 0 at $DIR/invalid_constant.rs:21:34: 21:63
let mut _5: main::NoVariants; // in scope 0 at $DIR/invalid_constant.rs:35:35: 35:56 let mut _4: E; // in scope 0 at $DIR/invalid_constant.rs:28:25: 28:59
let mut _5: main::InvalidTag; // in scope 0 at $DIR/invalid_constant.rs:28:34: 28:55
let mut _7: Empty; // in scope 0 at $DIR/invalid_constant.rs:35:35: 35:73
let mut _8: main::NoVariants; // in scope 0 at $DIR/invalid_constant.rs:35:44: 35:65
scope 1 { scope 1 {
debug _invalid_char => _1; // in scope 1 at $DIR/invalid_constant.rs:21:9: 21:22 debug _invalid_char => _1; // in scope 1 at $DIR/invalid_constant.rs:21:9: 21:22
let _2: [main::InvalidTag; 1]; // in scope 1 at $DIR/invalid_constant.rs:28:9: 28:21 let _3: [E; 1]; // in scope 1 at $DIR/invalid_constant.rs:28:9: 28:21
scope 2 { scope 3 {
debug _invalid_tag => _2; // in scope 2 at $DIR/invalid_constant.rs:28:9: 28:21 debug _invalid_tag => _3; // in scope 3 at $DIR/invalid_constant.rs:28:9: 28:21
let _4: [main::NoVariants; 1]; // in scope 2 at $DIR/invalid_constant.rs:35:9: 35:31 let _6: [Empty; 1]; // in scope 3 at $DIR/invalid_constant.rs:35:9: 35:31
scope 3 { scope 5 {
debug _enum_without_variants => _4; // in scope 3 at $DIR/invalid_constant.rs:35:9: 35:31 debug _enum_without_variants => _6; // in scope 5 at $DIR/invalid_constant.rs:35:9: 35:31
let _6: main::Str<"<22><><EFBFBD>">; // in scope 3 at $DIR/invalid_constant.rs:39:9: 39:22 let _9: main::Str<"<22><><EFBFBD>">; // in scope 5 at $DIR/invalid_constant.rs:39:9: 39:22
scope 4 { scope 7 {
debug _non_utf8_str => _6; // in scope 4 at $DIR/invalid_constant.rs:39:9: 39:22 debug _non_utf8_str => _9; // in scope 7 at $DIR/invalid_constant.rs:39:9: 39:22
} }
} }
scope 6 {
}
} }
scope 4 {
}
}
scope 2 {
} }
bb0: { bb0: {
StorageLive(_1); // scope 0 at $DIR/invalid_constant.rs:21:9: 21:22 StorageLive(_1); // scope 0 at $DIR/invalid_constant.rs:21:9: 21:22
- _1 = const { InvalidChar { int: 0x110001 } }; // scope 0 at $DIR/invalid_constant.rs:21:25: 21:64 StorageLive(_2); // scope 2 at $DIR/invalid_constant.rs:21:34: 21:63
+ _1 = const InvalidChar { int: 1114113_u32, chr: {transmute(0x00110001): char} }; // scope 0 at $DIR/invalid_constant.rs:21:25: 21:64 (_2.0: u32) = const 1114113_u32; // scope 2 at $DIR/invalid_constant.rs:21:34: 21:63
// mir::Constant - _1 = (_2.1: char); // scope 2 at $DIR/invalid_constant.rs:21:34: 21:67
// + span: $DIR/invalid_constant.rs:21:25: 21:64 + _1 = const {transmute(0x00110001): char}; // scope 2 at $DIR/invalid_constant.rs:21:34: 21:67
- // + literal: Const { ty: InvalidChar, val: Unevaluated(main::{constant#0}, [main::InvalidChar], None) } StorageDead(_2); // scope 0 at $DIR/invalid_constant.rs:21:69: 21:70
+ // + literal: Const { ty: InvalidChar, val: Value(Scalar(0x00110001)) } StorageLive(_3); // scope 1 at $DIR/invalid_constant.rs:28:9: 28:21
StorageLive(_2); // scope 1 at $DIR/invalid_constant.rs:28:9: 28:21 StorageLive(_4); // scope 1 at $DIR/invalid_constant.rs:28:25: 28:59
StorageLive(_3); // scope 1 at $DIR/invalid_constant.rs:28:25: 28:46 StorageLive(_5); // scope 4 at $DIR/invalid_constant.rs:28:34: 28:55
(_3.0: u32) = const 4_u32; // scope 1 at $DIR/invalid_constant.rs:28:25: 28:46 (_5.0: u32) = const 4_u32; // scope 4 at $DIR/invalid_constant.rs:28:34: 28:55
- _2 = [move _3]; // scope 1 at $DIR/invalid_constant.rs:28:24: 28:47 - _4 = (_5.1: E); // scope 4 at $DIR/invalid_constant.rs:28:34: 28:57
+ _2 = [const InvalidTag { int: 4_u32, e: Scalar(0x00000004): E }]; // scope 1 at $DIR/invalid_constant.rs:28:24: 28:47 - _3 = [move _4]; // scope 1 at $DIR/invalid_constant.rs:28:24: 28:60
+ _4 = const Scalar(0x00000004): E; // scope 4 at $DIR/invalid_constant.rs:28:34: 28:57
+ // mir::Constant + // mir::Constant
+ // + span: $DIR/invalid_constant.rs:28:24: 28:47 + // + span: $DIR/invalid_constant.rs:28:34: 28:57
+ // + literal: Const { ty: InvalidTag, val: Value(Scalar(0x00000004)) } + // + literal: Const { ty: E, val: Value(Scalar(0x00000004)) }
StorageDead(_3); // scope 1 at $DIR/invalid_constant.rs:28:46: 28:47 + _3 = [const Scalar(0x00000004): E]; // scope 1 at $DIR/invalid_constant.rs:28:24: 28:60
StorageLive(_4); // scope 2 at $DIR/invalid_constant.rs:35:9: 35:31
StorageLive(_5); // scope 2 at $DIR/invalid_constant.rs:35:35: 35:56
(_5.0: u32) = const 0_u32; // scope 2 at $DIR/invalid_constant.rs:35:35: 35:56
- _4 = [move _5]; // scope 2 at $DIR/invalid_constant.rs:35:34: 35:57
+ _4 = [const NoVariants { int: 0_u32, empty: Scalar(<ZST>): Empty }]; // scope 2 at $DIR/invalid_constant.rs:35:34: 35:57
+ // mir::Constant + // mir::Constant
+ // + span: $DIR/invalid_constant.rs:35:34: 35:57 + // + span: $DIR/invalid_constant.rs:28:24: 28:60
+ // + literal: Const { ty: NoVariants, val: Value(Scalar(0x00000000)) } + // + literal: Const { ty: E, val: Value(Scalar(0x00000004)) }
StorageDead(_5); // scope 2 at $DIR/invalid_constant.rs:35:56: 35:57 StorageDead(_4); // scope 1 at $DIR/invalid_constant.rs:28:59: 28:60
StorageLive(_6); // scope 3 at $DIR/invalid_constant.rs:39:9: 39:22 StorageDead(_5); // scope 1 at $DIR/invalid_constant.rs:28:60: 28:61
StorageLive(_6); // scope 3 at $DIR/invalid_constant.rs:35:9: 35:31
StorageLive(_7); // scope 3 at $DIR/invalid_constant.rs:35:35: 35:73
StorageLive(_8); // scope 6 at $DIR/invalid_constant.rs:35:44: 35:65
(_8.0: u32) = const 0_u32; // scope 6 at $DIR/invalid_constant.rs:35:44: 35:65
nop; // scope 6 at $DIR/invalid_constant.rs:35:44: 35:71
nop; // scope 3 at $DIR/invalid_constant.rs:35:34: 35:74
StorageDead(_7); // scope 3 at $DIR/invalid_constant.rs:35:73: 35:74
StorageDead(_8); // scope 3 at $DIR/invalid_constant.rs:35:74: 35:75
StorageLive(_9); // scope 5 at $DIR/invalid_constant.rs:39:9: 39:22
nop; // scope 0 at $DIR/invalid_constant.rs:15:11: 42:2 nop; // scope 0 at $DIR/invalid_constant.rs:15:11: 42:2
StorageDead(_9); // scope 5 at $DIR/invalid_constant.rs:42:1: 42:2
StorageDead(_6); // scope 3 at $DIR/invalid_constant.rs:42:1: 42:2 StorageDead(_6); // scope 3 at $DIR/invalid_constant.rs:42:1: 42:2
StorageDead(_4); // scope 2 at $DIR/invalid_constant.rs:42:1: 42:2 StorageDead(_3); // scope 1 at $DIR/invalid_constant.rs:42:1: 42:2
StorageDead(_2); // scope 1 at $DIR/invalid_constant.rs:42:1: 42:2
StorageDead(_1); // scope 0 at $DIR/invalid_constant.rs:42:1: 42:2 StorageDead(_1); // scope 0 at $DIR/invalid_constant.rs:42:1: 42:2
return; // scope 0 at $DIR/invalid_constant.rs:42:2: 42:2 return; // scope 0 at $DIR/invalid_constant.rs:42:2: 42:2
} }

View File

@ -18,21 +18,21 @@ fn main() {
int: u32, int: u32,
chr: char, chr: char,
} }
let _invalid_char = const { InvalidChar { int: 0x110001 } }; let _invalid_char = unsafe { InvalidChar { int: 0x110001 }.chr };
// An enum with an invalid tag. Regression test for #93688. // An enum with an invalid tag. Regression test for #93688.
union InvalidTag { union InvalidTag {
int: u32, int: u32,
e: E, e: E,
} }
let _invalid_tag = [InvalidTag { int: 4 }]; let _invalid_tag = [unsafe { InvalidTag { int: 4 }.e }];
// An enum without variants. Regression test for #94073. // An enum without variants. Regression test for #94073.
union NoVariants { union NoVariants {
int: u32, int: u32,
empty: Empty, empty: Empty,
} }
let _enum_without_variants = [NoVariants { int: 0 }]; let _enum_without_variants = [unsafe { NoVariants { int: 0 }.empty }];
// A non-UTF-8 string slice. Regression test for #75763 and #78520. // A non-UTF-8 string slice. Regression test for #75763 and #78520.
struct Str<const S: &'static str>; struct Str<const S: &'static str>;

View File

@ -0,0 +1,34 @@
// run-pass
#![feature(const_ptr_write)]
#![feature(const_mut_refs)]
// Or, equivalently: `MaybeUninit`.
pub union BagOfBits<T: Copy> {
uninit: (),
_storage: T,
}
pub const fn make_1u8_bag<T: Copy>() -> BagOfBits<T> {
assert!(core::mem::size_of::<T>() >= 1);
let mut bag = BagOfBits { uninit: () };
unsafe { (&mut bag as *mut _ as *mut u8).write(1); };
bag
}
pub fn check_bag<T: Copy>(bag: &BagOfBits<T>) {
let val = unsafe { (bag as *const _ as *const u8).read() };
assert_eq!(val, 1);
}
fn main() {
check_bag(&make_1u8_bag::<[usize; 1]>()); // Fine
check_bag(&make_1u8_bag::<usize>()); // Fine
const CONST_ARRAY_BAG: BagOfBits<[usize; 1]> = make_1u8_bag();
check_bag(&CONST_ARRAY_BAG); // Fine.
const CONST_USIZE_BAG: BagOfBits<usize> = make_1u8_bag();
// Used to panic since CTFE would make the entire `BagOfBits<usize>` uninit
check_bag(&CONST_USIZE_BAG);
}

View File

@ -0,0 +1,16 @@
// check-pass
#![feature(const_swap)]
#![feature(const_mut_refs)]
#[repr(C)]
struct Demo(u64, bool, u64, u32, u64, u64, u64);
const C: (Demo, Demo) = {
let mut x = Demo(1, true, 3, 4, 5, 6, 7);
let mut y = Demo(10, false, 12, 13, 14, 15, 16);
std::mem::swap(&mut x, &mut y);
(x, y)
};
fn main() {}

View File

@ -10,7 +10,7 @@ error: layout_of(E) = Layout {
], ],
}, },
variants: Multiple { variants: Multiple {
tag: Scalar { tag: Initialized {
value: Int( value: Int(
I32, I32,
false, false,
@ -86,13 +86,11 @@ error: layout_of(E) = Layout {
offset: Size { offset: Size {
raw: 0, raw: 0,
}, },
scalar: Scalar { value: Int(
value: Int( I32,
I32, false,
false, ),
), valid_range: 0..=0,
valid_range: 0..=0,
},
}, },
), ),
align: AbiAndPrefAlign { align: AbiAndPrefAlign {
@ -133,14 +131,14 @@ error: layout_of(S) = Layout {
index: 0, index: 0,
}, },
abi: ScalarPair( abi: ScalarPair(
Scalar { Initialized {
value: Int( value: Int(
I32, I32,
true, true,
), ),
valid_range: 0..=4294967295, valid_range: 0..=4294967295,
}, },
Scalar { Initialized {
value: Int( value: Int(
I32, I32,
true, true,
@ -202,7 +200,7 @@ error: layout_of(std::result::Result<i32, i32>) = Layout {
], ],
}, },
variants: Multiple { variants: Multiple {
tag: Scalar { tag: Initialized {
value: Int( value: Int(
I32, I32,
false, false,
@ -271,14 +269,14 @@ error: layout_of(std::result::Result<i32, i32>) = Layout {
], ],
}, },
abi: ScalarPair( abi: ScalarPair(
Scalar { Initialized {
value: Int( value: Int(
I32, I32,
false, false,
), ),
valid_range: 0..=1, valid_range: 0..=1,
}, },
Scalar { Initialized {
value: Int( value: Int(
I32, I32,
true, true,
@ -291,13 +289,11 @@ error: layout_of(std::result::Result<i32, i32>) = Layout {
offset: Size { offset: Size {
raw: 0, raw: 0,
}, },
scalar: Scalar { value: Int(
value: Int( I32,
I32, false,
false, ),
), valid_range: 0..=1,
valid_range: 0..=1,
},
}, },
), ),
align: AbiAndPrefAlign { align: AbiAndPrefAlign {
@ -321,7 +317,7 @@ error: layout_of(i32) = Layout {
index: 0, index: 0,
}, },
abi: Scalar( abi: Scalar(
Scalar { Initialized {
value: Int( value: Int(
I32, I32,
true, true,

View File

@ -10,7 +10,7 @@ error: layout_of(A) = Layout {
], ],
}, },
variants: Multiple { variants: Multiple {
tag: Scalar { tag: Initialized {
value: Int( value: Int(
I8, I8,
false, false,
@ -47,7 +47,7 @@ error: layout_of(A) = Layout {
], ],
}, },
abi: Scalar( abi: Scalar(
Scalar { Initialized {
value: Int( value: Int(
I8, I8,
false, false,
@ -60,13 +60,11 @@ error: layout_of(A) = Layout {
offset: Size { offset: Size {
raw: 0, raw: 0,
}, },
scalar: Scalar { value: Int(
value: Int( I8,
I8, false,
false, ),
), valid_range: 0..=0,
valid_range: 0..=0,
},
}, },
), ),
align: AbiAndPrefAlign { align: AbiAndPrefAlign {
@ -98,7 +96,7 @@ error: layout_of(B) = Layout {
], ],
}, },
variants: Multiple { variants: Multiple {
tag: Scalar { tag: Initialized {
value: Int( value: Int(
I8, I8,
false, false,
@ -135,7 +133,7 @@ error: layout_of(B) = Layout {
], ],
}, },
abi: Scalar( abi: Scalar(
Scalar { Initialized {
value: Int( value: Int(
I8, I8,
false, false,
@ -148,13 +146,11 @@ error: layout_of(B) = Layout {
offset: Size { offset: Size {
raw: 0, raw: 0,
}, },
scalar: Scalar { value: Int(
value: Int( I8,
I8, false,
false, ),
), valid_range: 255..=255,
valid_range: 255..=255,
},
}, },
), ),
align: AbiAndPrefAlign { align: AbiAndPrefAlign {
@ -186,7 +182,7 @@ error: layout_of(C) = Layout {
], ],
}, },
variants: Multiple { variants: Multiple {
tag: Scalar { tag: Initialized {
value: Int( value: Int(
I16, I16,
false, false,
@ -223,7 +219,7 @@ error: layout_of(C) = Layout {
], ],
}, },
abi: Scalar( abi: Scalar(
Scalar { Initialized {
value: Int( value: Int(
I16, I16,
false, false,
@ -236,13 +232,11 @@ error: layout_of(C) = Layout {
offset: Size { offset: Size {
raw: 0, raw: 0,
}, },
scalar: Scalar { value: Int(
value: Int( I16,
I16, false,
false, ),
), valid_range: 256..=256,
valid_range: 256..=256,
},
}, },
), ),
align: AbiAndPrefAlign { align: AbiAndPrefAlign {
@ -274,7 +268,7 @@ error: layout_of(P) = Layout {
], ],
}, },
variants: Multiple { variants: Multiple {
tag: Scalar { tag: Initialized {
value: Int( value: Int(
I32, I32,
false, false,
@ -311,7 +305,7 @@ error: layout_of(P) = Layout {
], ],
}, },
abi: Scalar( abi: Scalar(
Scalar { Initialized {
value: Int( value: Int(
I32, I32,
false, false,
@ -324,13 +318,11 @@ error: layout_of(P) = Layout {
offset: Size { offset: Size {
raw: 0, raw: 0,
}, },
scalar: Scalar { value: Int(
value: Int( I32,
I32, false,
false, ),
), valid_range: 268435456..=268435456,
valid_range: 268435456..=268435456,
},
}, },
), ),
align: AbiAndPrefAlign { align: AbiAndPrefAlign {
@ -362,7 +354,7 @@ error: layout_of(T) = Layout {
], ],
}, },
variants: Multiple { variants: Multiple {
tag: Scalar { tag: Initialized {
value: Int( value: Int(
I32, I32,
true, true,
@ -399,7 +391,7 @@ error: layout_of(T) = Layout {
], ],
}, },
abi: Scalar( abi: Scalar(
Scalar { Initialized {
value: Int( value: Int(
I32, I32,
true, true,
@ -412,13 +404,11 @@ error: layout_of(T) = Layout {
offset: Size { offset: Size {
raw: 0, raw: 0,
}, },
scalar: Scalar { value: Int(
value: Int( I32,
I32, true,
true, ),
), valid_range: 2164260864..=2164260864,
valid_range: 2164260864..=2164260864,
},
}, },
), ),
align: AbiAndPrefAlign { align: AbiAndPrefAlign {

View File

@ -10,7 +10,7 @@ error: layout_of(A) = Layout {
], ],
}, },
variants: Multiple { variants: Multiple {
tag: Scalar { tag: Initialized {
value: Int( value: Int(
I8, I8,
false, false,
@ -47,7 +47,7 @@ error: layout_of(A) = Layout {
], ],
}, },
abi: Scalar( abi: Scalar(
Scalar { Initialized {
value: Int( value: Int(
I8, I8,
false, false,
@ -60,13 +60,11 @@ error: layout_of(A) = Layout {
offset: Size { offset: Size {
raw: 0, raw: 0,
}, },
scalar: Scalar { value: Int(
value: Int( I8,
I8, false,
false, ),
), valid_range: 0..=0,
valid_range: 0..=0,
},
}, },
), ),
align: AbiAndPrefAlign { align: AbiAndPrefAlign {
@ -98,7 +96,7 @@ error: layout_of(B) = Layout {
], ],
}, },
variants: Multiple { variants: Multiple {
tag: Scalar { tag: Initialized {
value: Int( value: Int(
I8, I8,
false, false,
@ -135,7 +133,7 @@ error: layout_of(B) = Layout {
], ],
}, },
abi: Scalar( abi: Scalar(
Scalar { Initialized {
value: Int( value: Int(
I8, I8,
false, false,
@ -148,13 +146,11 @@ error: layout_of(B) = Layout {
offset: Size { offset: Size {
raw: 0, raw: 0,
}, },
scalar: Scalar { value: Int(
value: Int( I8,
I8, false,
false, ),
), valid_range: 255..=255,
valid_range: 255..=255,
},
}, },
), ),
align: AbiAndPrefAlign { align: AbiAndPrefAlign {
@ -186,7 +182,7 @@ error: layout_of(C) = Layout {
], ],
}, },
variants: Multiple { variants: Multiple {
tag: Scalar { tag: Initialized {
value: Int( value: Int(
I16, I16,
false, false,
@ -223,7 +219,7 @@ error: layout_of(C) = Layout {
], ],
}, },
abi: Scalar( abi: Scalar(
Scalar { Initialized {
value: Int( value: Int(
I16, I16,
false, false,
@ -236,13 +232,11 @@ error: layout_of(C) = Layout {
offset: Size { offset: Size {
raw: 0, raw: 0,
}, },
scalar: Scalar { value: Int(
value: Int( I16,
I16, false,
false, ),
), valid_range: 256..=256,
valid_range: 256..=256,
},
}, },
), ),
align: AbiAndPrefAlign { align: AbiAndPrefAlign {
@ -274,7 +268,7 @@ error: layout_of(P) = Layout {
], ],
}, },
variants: Multiple { variants: Multiple {
tag: Scalar { tag: Initialized {
value: Int( value: Int(
I32, I32,
false, false,
@ -311,7 +305,7 @@ error: layout_of(P) = Layout {
], ],
}, },
abi: Scalar( abi: Scalar(
Scalar { Initialized {
value: Int( value: Int(
I32, I32,
false, false,
@ -324,13 +318,11 @@ error: layout_of(P) = Layout {
offset: Size { offset: Size {
raw: 0, raw: 0,
}, },
scalar: Scalar { value: Int(
value: Int( I32,
I32, false,
false, ),
), valid_range: 268435456..=268435456,
valid_range: 268435456..=268435456,
},
}, },
), ),
align: AbiAndPrefAlign { align: AbiAndPrefAlign {
@ -362,7 +354,7 @@ error: layout_of(T) = Layout {
], ],
}, },
variants: Multiple { variants: Multiple {
tag: Scalar { tag: Initialized {
value: Int( value: Int(
I32, I32,
true, true,
@ -399,7 +391,7 @@ error: layout_of(T) = Layout {
], ],
}, },
abi: Scalar( abi: Scalar(
Scalar { Initialized {
value: Int( value: Int(
I32, I32,
true, true,
@ -412,13 +404,11 @@ error: layout_of(T) = Layout {
offset: Size { offset: Size {
raw: 0, raw: 0,
}, },
scalar: Scalar { value: Int(
value: Int( I32,
I32, true,
true, ),
), valid_range: 2164260864..=2164260864,
valid_range: 2164260864..=2164260864,
},
}, },
), ),
align: AbiAndPrefAlign { align: AbiAndPrefAlign {