From 2f2031d2b22d64d015625db2dd11e44e7ab37091 Mon Sep 17 00:00:00 2001
From: Ralf Jung
Date: Sat, 8 Jun 2024 18:05:44 +0200
Subject: [PATCH] simd packed types: update outdated check, extend codegen test

---
 compiler/rustc_codegen_llvm/src/intrinsic.rs | 10 +++++----
 tests/codegen/simd/packed-simd.rs            | 23 ++++++++++++++++----
 2 files changed, 25 insertions(+), 8 deletions(-)

diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index 7b1038d5617..ad4e753fe9d 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -1109,10 +1109,12 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), callee_ty.fn_sig(tcx));
     let arg_tys = sig.inputs();
 
-    // Vectors must be immediates (non-power-of-2 #[repr(packed)] are not)
-    for (ty, arg) in arg_tys.iter().zip(args) {
-        if ty.is_simd() && !matches!(arg.val, OperandValue::Immediate(_)) {
-            return_error!(InvalidMonomorphization::SimdArgument { span, name, ty: *ty });
+    // Sanity-check: all vector arguments must be immediates.
+    if cfg!(debug_assertions) {
+        for (ty, arg) in arg_tys.iter().zip(args) {
+            if ty.is_simd() {
+                assert!(matches!(arg.val, OperandValue::Immediate(_)));
+            }
         }
     }
 
diff --git a/tests/codegen/simd/packed-simd.rs b/tests/codegen/simd/packed-simd.rs
index f0911b6e360..1df09c96e6c 100644
--- a/tests/codegen/simd/packed-simd.rs
+++ b/tests/codegen/simd/packed-simd.rs
@@ -9,10 +9,11 @@ use core::intrinsics::simd as intrinsics;
 use core::{mem, ptr};
 
 // Test codegen for not only "packed" but also "fully aligned" SIMD types, and conversion between
-// A repr(packed,simd) type with 3 elements can't exceed its element alignment,
-// whereas the same type as repr(simd) will instead have padding.
+// them. A repr(packed,simd) type with 3 elements can't exceed its element alignment, whereas the
+// same type as repr(simd) will instead have padding.
 
 #[repr(simd, packed)]
+#[derive(Copy, Clone)]
 pub struct Simd<T, const N: usize>([T; N]);
 
 #[repr(simd)]
@@ -28,11 +29,11 @@ fn load<T, const N: usize>(v: Simd<T, N>) -> FullSimd<T, N> {
     }
 }
 
-// CHECK-LABEL: square_packed
+// CHECK-LABEL: square_packed_full
 // CHECK-SAME: ptr{{[a-z_ ]*}} sret([[RET_TYPE:[^)]+]]) [[RET_ALIGN:align (8|16)]]{{[^%]*}} [[RET_VREG:%[_0-9]*]]
 // CHECK-SAME: ptr{{[a-z_ ]*}} align 4
 #[no_mangle]
-pub fn square_packed(x: Simd<f32, 3>) -> FullSimd<f32, 3> {
+pub fn square_packed_full(x: Simd<f32, 3>) -> FullSimd<f32, 3> {
     // CHECK-NEXT: start
     // noopt: alloca [[RET_TYPE]], [[RET_ALIGN]]
     // CHECK: load <3 x float>
@@ -42,3 +43,17 @@ pub fn square_packed(x: Simd<f32, 3>) -> FullSimd<f32, 3> {
     // CHECK-NEXT: ret void
     unsafe { intrinsics::simd_mul(x, x) }
 }
+
+// CHECK-LABEL: square_packed
+// CHECK-SAME: ptr{{[a-z_ ]*}} sret([[RET_TYPE:[^)]+]]) [[RET_ALIGN:align 4]]{{[^%]*}} [[RET_VREG:%[_0-9]*]]
+// CHECK-SAME: ptr{{[a-z_ ]*}} align 4
+#[no_mangle]
+pub fn square_packed(x: Simd<f32, 3>) -> Simd<f32, 3> {
+    // CHECK-NEXT: start
+    // CHECK-NEXT: load <3 x float>
+    // noopt-NEXT: load <3 x float>
+    // CHECK-NEXT: [[VREG:%[a-z0-9_]+]] = fmul <3 x float>
+    // CHECK-NEXT: store <3 x float> [[VREG]], ptr [[RET_VREG]], [[RET_ALIGN]]
+    // CHECK-NEXT: ret void
+    unsafe { intrinsics::simd_mul(x, x) }
+}
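
A quick layout sketch (not part of the patch) of what the comment in packed-simd.rs describes:
with three f32 lanes, the repr(simd, packed) type stays at the element alignment of 4 and has no
padding, while the plain repr(simd) type is padded and aligned for the target's vector registers,
which is target-dependent (the test accepts "align (8|16)" for its sret slot). The type names
below are illustrative, and the program assumes a nightly toolchain with the repr_simd feature:

    #![feature(repr_simd)]

    // repr(simd, packed): layout cannot exceed the element type's alignment.
    #[repr(simd, packed)]
    struct PackedF32x3([f32; 3]);

    // Plain repr(simd): padded and aligned for the target's vector registers.
    #[repr(simd)]
    struct FullF32x3([f32; 3]);

    fn main() {
        // Element alignment of f32, i.e. 4 on the targets this test runs on.
        assert_eq!(core::mem::align_of::<PackedF32x3>(), 4);
        // Size and alignment of the non-packed type depend on the target, so just print them.
        println!(
            "packed: size {} align {}, full: size {} align {}",
            core::mem::size_of::<PackedF32x3>(),
            core::mem::align_of::<PackedF32x3>(),
            core::mem::size_of::<FullF32x3>(),
            core::mem::align_of::<FullF32x3>(),
        );
    }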