diff --git a/src/intrinsics/llvm.rs b/src/intrinsics/llvm.rs
index be3704ca276..20f8699d12a 100644
--- a/src/intrinsics/llvm.rs
+++ b/src/intrinsics/llvm.rs
@@ -73,32 +73,30 @@ pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
                 kind => unreachable!("kind {:?}", kind),
             };
 
-            simd_pair_for_each_lane(fx, x, y, ret, |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
-                let res_lane = match lane_layout.ty.kind() {
+            simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, res_lane_ty, x_lane, y_lane| {
+                let res_lane = match lane_ty.kind() {
                     ty::Float(_) => fx.bcx.ins().fcmp(flt_cc, x_lane, y_lane),
-                    _ => unreachable!("{:?}", lane_layout.ty),
+                    _ => unreachable!("{:?}", lane_ty),
                 };
-                bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
+                bool_to_zero_or_max_uint(fx, res_lane_ty, res_lane)
             });
         };
         "llvm.x86.sse2.psrli.d", (c a, o imm8) {
            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8).expect("llvm.x86.sse2.psrli.d imm8 not const");
-            simd_for_each_lane(fx, a, ret, |fx, _lane_layout, res_lane_layout, lane| {
-                let res_lane = match imm8.try_to_bits(Size::from_bytes(4)).unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8)) {
+            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| {
+                match imm8.try_to_bits(Size::from_bytes(4)).unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8)) {
                     imm8 if imm8 < 32 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
                     _ => fx.bcx.ins().iconst(types::I32, 0),
-                };
-                CValue::by_val(res_lane, res_lane_layout)
+                }
             });
         };
         "llvm.x86.sse2.pslli.d", (c a, o imm8) {
            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8).expect("llvm.x86.sse2.psrli.d imm8 not const");
-            simd_for_each_lane(fx, a, ret, |fx, _lane_layout, res_lane_layout, lane| {
-                let res_lane = match imm8.try_to_bits(Size::from_bytes(4)).unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8)) {
+            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| {
+                match imm8.try_to_bits(Size::from_bytes(4)).unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8)) {
                     imm8 if imm8 < 32 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
                     _ => fx.bcx.ins().iconst(types::I32, 0),
-                };
-                CValue::by_val(res_lane, res_lane_layout)
+                }
             });
         };
         "llvm.x86.sse2.storeu.dq", (v mem_addr, c a) {
diff --git a/src/intrinsics/mod.rs b/src/intrinsics/mod.rs
index f4703b22ecb..1e384668fc7 100644
--- a/src/intrinsics/mod.rs
+++ b/src/intrinsics/mod.rs
@@ -9,7 +9,8 @@ pub(crate) use cpuid::codegen_cpuid_call;
 pub(crate) use llvm::codegen_llvm_intrinsic_call;
 
 use rustc_middle::ty::print::with_no_trimmed_paths;
-use rustc_span::symbol::{kw, sym};
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_span::symbol::{kw, sym, Symbol};
 
 use crate::prelude::*;
 use cranelift_codegen::ir::AtomicRmwOp;
@@ -41,19 +42,11 @@ macro intrinsic_arg {
     }
 }
 
-macro intrinsic_substs {
-    ($substs:expr, $index:expr,) => {},
-    ($substs:expr, $index:expr, $first:ident $(,$rest:ident)*) => {
-        let $first = $substs.type_at($index);
-        intrinsic_substs!($substs, $index+1, $($rest),*);
-    }
-}
-
 macro intrinsic_match {
     ($fx:expr, $intrinsic:expr, $substs:expr, $args:expr,
     _ => $unknown:block;
     $(
-        $($($name:tt).*)|+ $(if $cond:expr)?, $(<$($subst:ident),*>)? ($($a:ident $arg:ident),*) $content:block;
+        $($($name:tt).*)|+ $(if $cond:expr)?, ($($a:ident $arg:ident),*) $content:block;
     )*) => {
         let _ = $substs; // Silence warning when substs is unused.
         match $intrinsic {
@@ -61,9 +54,6 @@ macro intrinsic_match {
                 $(intrinsic_pat!($($name).*))|* $(if $cond)? => {
                    #[allow(unused_parens, non_snake_case)] {
-                        $(
-                            intrinsic_substs!($substs, 0, $($subst),*);
-                        )?
                         if let [$($arg),*] = $args {
                             let ($($arg,)*) = (
                                 $(intrinsic_arg!($a $fx, $arg),)*
@@ -83,38 +73,6 @@ macro intrinsic_match {
     }
 }
 
-macro call_intrinsic_match {
-    ($fx:expr, $intrinsic:expr, $substs:expr, $ret:expr, $destination:expr, $args:expr, $(
-        $name:ident($($arg:ident),*) -> $ty:ident => $func:ident,
-    )*) => {
-        match $intrinsic {
-            $(
-                sym::$name => {
-                    assert!($substs.is_noop());
-                    if let [$(ref $arg),*] = *$args {
-                        let ($($arg,)*) = (
-                            $(codegen_operand($fx, $arg),)*
-                        );
-                        let res = $fx.easy_call(stringify!($func), &[$($arg),*], $fx.tcx.types.$ty);
-                        $ret.write_cvalue($fx, res);
-
-                        if let Some((_, dest)) = $destination {
-                            let ret_block = $fx.get_block(dest);
-                            $fx.bcx.ins().jump(ret_block, &[]);
-                            return;
-                        } else {
-                            unreachable!();
-                        }
-                    } else {
-                        bug!("wrong number of args for intrinsic {:?}", $intrinsic);
-                    }
-                }
-            )*
-            _ => {}
-        }
-    }
-}
-
 macro validate_atomic_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
     match $ty.kind() {
         ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
@@ -133,15 +91,6 @@ macro validate_atomic_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
     }
 }
 
-macro validate_simd_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
-    if !$ty.is_simd() {
-        $fx.tcx.sess.span_err($span, &format!("invalid monomorphization of `{}` intrinsic: expected SIMD input type, found non-SIMD `{}`", $intrinsic, $ty));
-        // Prevent verifier error
-        crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
-        return;
-    }
-}
-
 pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
     let (element, count) = match layout.abi {
         Abi::Vector { element, count } => (element, count),
@@ -159,12 +108,7 @@ fn simd_for_each_lane<'tcx>(
     fx: &mut FunctionCx<'_, '_, 'tcx>,
     val: CValue<'tcx>,
     ret: CPlace<'tcx>,
-    f: impl Fn(
-        &mut FunctionCx<'_, '_, 'tcx>,
-        TyAndLayout<'tcx>,
-        TyAndLayout<'tcx>,
-        Value,
-    ) -> CValue<'tcx>,
+    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value) -> Value,
 ) {
     let layout = val.layout();
 
@@ -177,7 +121,8 @@
     for lane_idx in 0..lane_count {
         let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
 
-        let res_lane = f(fx, lane_layout, ret_lane_layout, lane);
+        let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, lane);
+        let res_lane = CValue::by_val(res_lane, ret_lane_layout);
 
         ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
     }
@@ -188,13 +133,7 @@ fn simd_pair_for_each_lane<'tcx>(
     x: CValue<'tcx>,
     y: CValue<'tcx>,
     ret: CPlace<'tcx>,
-    f: impl Fn(
-        &mut FunctionCx<'_, '_, 'tcx>,
-        TyAndLayout<'tcx>,
-        TyAndLayout<'tcx>,
-        Value,
-        Value,
-    ) -> CValue<'tcx>,
+    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value, Value) -> Value,
 ) {
     assert_eq!(x.layout(), y.layout());
     let layout = x.layout();
@@ -209,7 +148,8 @@
         let x_lane = x.value_lane(fx, lane_idx).load_scalar(fx);
         let y_lane = y.value_lane(fx, lane_idx).load_scalar(fx);
 
-        let res_lane = f(fx, lane_layout, ret_lane_layout, x_lane, y_lane);
+        let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, x_lane, y_lane);
+        let res_lane = CValue::by_val(res_lane, ret_lane_layout);
 
         ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
     }
@@ -220,7 +160,7 @@ fn simd_reduce<'tcx>(
     val: CValue<'tcx>,
     acc: Option<Value>,
     ret: CPlace<'tcx>,
-    f: impl Fn(&mut FunctionCx<'_, '_, 'tcx>, TyAndLayout<'tcx>, Value, Value) -> Value,
+    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Value, Value) -> Value,
 ) {
     let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
     let lane_layout = fx.layout_of(lane_ty);
@@ -230,7 +170,7 @@ fn simd_reduce<'tcx>(
         if let Some(acc) = acc { (acc, 0) } else { (val.value_lane(fx, 0).load_scalar(fx), 1) };
     for lane_idx in start_lane..lane_count {
         let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
-        res_val = f(fx, lane_layout, res_val, lane);
+        res_val = f(fx, lane_layout.ty, res_val, lane);
     }
     let res = CValue::by_val(res_val, lane_layout);
     ret.write_cvalue(fx, res);
@@ -241,7 +181,7 @@ fn simd_reduce_bool<'tcx>(
     fx: &mut FunctionCx<'_, '_, 'tcx>,
     val: CValue<'tcx>,
     ret: CPlace<'tcx>,
-    f: impl Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
+    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
 ) {
     let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
     assert!(ret.layout().ty.is_bool());
@@ -264,10 +204,10 @@ fn simd_reduce_bool<'tcx>(
 
 fn bool_to_zero_or_max_uint<'tcx>(
     fx: &mut FunctionCx<'_, '_, 'tcx>,
-    layout: TyAndLayout<'tcx>,
+    ty: Ty<'tcx>,
     val: Value,
-) -> CValue<'tcx> {
-    let ty = fx.clif_type(layout.ty).unwrap();
+) -> Value {
+    let ty = fx.clif_type(ty).unwrap();
 
     let int_ty = match ty {
         types::F32 => types::I32,
@@ -282,122 +222,7 @@ fn bool_to_zero_or_max_uint<'tcx>(
         res = fx.bcx.ins().bitcast(ty, res);
     }
 
-    CValue::by_val(res, layout)
-}
-
-macro simd_cmp {
-    ($fx:expr, $cc:ident|$cc_f:ident($x:ident, $y:ident) -> $ret:ident) => {
-        let vector_ty = clif_vector_type($fx.tcx, $x.layout());
-
-        if let Some(vector_ty) = vector_ty {
-            let x = $x.load_scalar($fx);
-            let y = $y.load_scalar($fx);
-            let val = if vector_ty.lane_type().is_float() {
-                $fx.bcx.ins().fcmp(FloatCC::$cc_f, x, y)
-            } else {
-                $fx.bcx.ins().icmp(IntCC::$cc, x, y)
-            };
-
-            // HACK This depends on the fact that icmp for vectors represents bools as 0 and !0, not 0 and 1.
-            let val = $fx.bcx.ins().raw_bitcast(vector_ty, val);
-
-            $ret.write_cvalue($fx, CValue::by_val(val, $ret.layout()));
-        } else {
-            simd_pair_for_each_lane(
-                $fx,
-                $x,
-                $y,
-                $ret,
-                |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
-                    let res_lane = match lane_layout.ty.kind() {
-                        ty::Uint(_) | ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc, x_lane, y_lane),
-                        ty::Float(_) => fx.bcx.ins().fcmp(FloatCC::$cc_f, x_lane, y_lane),
-                        _ => unreachable!("{:?}", lane_layout.ty),
-                    };
-                    bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
-                },
-            );
-        }
-    },
-    ($fx:expr, $cc_u:ident|$cc_s:ident|$cc_f:ident($x:ident, $y:ident) -> $ret:ident) => {
-        // FIXME use vector icmp when possible
-        simd_pair_for_each_lane(
-            $fx,
-            $x,
-            $y,
-            $ret,
-            |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
-                let res_lane = match lane_layout.ty.kind() {
-                    ty::Uint(_) => fx.bcx.ins().icmp(IntCC::$cc_u, x_lane, y_lane),
-                    ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc_s, x_lane, y_lane),
-                    ty::Float(_) => fx.bcx.ins().fcmp(FloatCC::$cc_f, x_lane, y_lane),
-                    _ => unreachable!("{:?}", lane_layout.ty),
-                };
-                bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
-            },
-        );
-    },
-}
-
-macro simd_int_binop {
-    ($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) => {
-        simd_int_binop!($fx, $op|$op($x, $y) -> $ret);
-    },
-    ($fx:expr, $op_u:ident|$op_s:ident($x:ident, $y:ident) -> $ret:ident) => {
-        simd_pair_for_each_lane(
-            $fx,
-            $x,
-            $y,
-            $ret,
-            |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
-                let res_lane = match lane_layout.ty.kind() {
-                    ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
-                    ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
-                    _ => unreachable!("{:?}", lane_layout.ty),
-                };
-                CValue::by_val(res_lane, ret_lane_layout)
-            },
-        );
-    },
-}
-
-macro simd_int_flt_binop {
-    ($fx:expr, $op:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
-        simd_int_flt_binop!($fx, $op|$op|$op_f($x, $y) -> $ret);
-    },
-    ($fx:expr, $op_u:ident|$op_s:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
-        simd_pair_for_each_lane(
-            $fx,
-            $x,
-            $y,
-            $ret,
-            |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
-                let res_lane = match lane_layout.ty.kind() {
-                    ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
-                    ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
-                    ty::Float(_) => fx.bcx.ins().$op_f(x_lane, y_lane),
-                    _ => unreachable!("{:?}", lane_layout.ty),
-                };
-                CValue::by_val(res_lane, ret_lane_layout)
-            },
-        );
-    },
-}
-
-macro simd_flt_binop($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) {
-    simd_pair_for_each_lane(
-        $fx,
-        $x,
-        $y,
-        $ret,
-        |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
-            let res_lane = match lane_layout.ty.kind() {
-                ty::Float(_) => fx.bcx.ins().$op(x_lane, y_lane),
-                _ => unreachable!("{:?}", lane_layout.ty),
-            };
-            CValue::by_val(res_lane, ret_lane_layout)
-        },
-    );
+    res
 }
 
 pub(crate) fn codegen_intrinsic_call<'tcx>(
@@ -428,57 +253,109 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
     };
 
     if intrinsic.as_str().starts_with("simd_") {
-        self::simd::codegen_simd_intrinsic_call(fx, instance, args, ret, span);
+        self::simd::codegen_simd_intrinsic_call(fx, intrinsic, substs, args, ret, span);
         let ret_block = fx.get_block(destination.expect("SIMD intrinsics don't diverge").1);
         fx.bcx.ins().jump(ret_block, &[]);
-        return;
+    } else if codegen_float_intrinsic_call(fx, intrinsic, args, ret) {
+        let ret_block = fx.get_block(destination.expect("Float intrinsics don't diverge").1);
+        fx.bcx.ins().jump(ret_block, &[]);
+    } else {
+        codegen_regular_intrinsic_call(
+            fx,
+            instance,
+            intrinsic,
+            substs,
+            args,
+            ret,
+            span,
+            destination,
+        );
+    }
+}
+
+fn codegen_float_intrinsic_call<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    intrinsic: Symbol,
+    args: &[mir::Operand<'tcx>],
+    ret: CPlace<'tcx>,
+) -> bool {
+    let (name, arg_count, ty) = match intrinsic {
+        sym::expf32 => ("expf", 1, fx.tcx.types.f32),
+        sym::expf64 => ("exp", 1, fx.tcx.types.f64),
+        sym::exp2f32 => ("exp2f", 1, fx.tcx.types.f32),
+        sym::exp2f64 => ("exp2", 1, fx.tcx.types.f64),
+        sym::sqrtf32 => ("sqrtf", 1, fx.tcx.types.f32),
+        sym::sqrtf64 => ("sqrt", 1, fx.tcx.types.f64),
+        sym::powif32 => ("__powisf2", 2, fx.tcx.types.f32), // compiler-builtins
+        sym::powif64 => ("__powidf2", 2, fx.tcx.types.f64), // compiler-builtins
+        sym::powf32 => ("powf", 2, fx.tcx.types.f32),
+        sym::powf64 => ("pow", 2, fx.tcx.types.f64),
+        sym::logf32 => ("logf", 1, fx.tcx.types.f32),
+        sym::logf64 => ("log", 1, fx.tcx.types.f64),
+        sym::log2f32 => ("log2f", 1, fx.tcx.types.f32),
+        sym::log2f64 => ("log2", 1, fx.tcx.types.f64),
+        sym::log10f32 => ("log10f", 1, fx.tcx.types.f32),
+        sym::log10f64 => ("log10", 1, fx.tcx.types.f64),
+        sym::fabsf32 => ("fabsf", 1, fx.tcx.types.f32),
+        sym::fabsf64 => ("fabs", 1, fx.tcx.types.f64),
+        sym::fmaf32 => ("fmaf", 3, fx.tcx.types.f32),
+        sym::fmaf64 => ("fma", 3, fx.tcx.types.f64),
+        sym::copysignf32 => ("copysignf", 2, fx.tcx.types.f32),
+        sym::copysignf64 => ("copysign", 2, fx.tcx.types.f64),
+        sym::floorf32 => ("floorf", 1, fx.tcx.types.f32),
+        sym::floorf64 => ("floor", 1, fx.tcx.types.f64),
+        sym::ceilf32 => ("ceilf", 1, fx.tcx.types.f32),
+        sym::ceilf64 => ("ceil", 1, fx.tcx.types.f64),
+        sym::truncf32 => ("truncf", 1, fx.tcx.types.f32),
+        sym::truncf64 => ("trunc", 1, fx.tcx.types.f64),
+        sym::roundf32 => ("roundf", 1, fx.tcx.types.f32),
+        sym::roundf64 => ("round", 1, fx.tcx.types.f64),
+        sym::sinf32 => ("sinf", 1, fx.tcx.types.f32),
+        sym::sinf64 => ("sin", 1, fx.tcx.types.f64),
+        sym::cosf32 => ("cosf", 1, fx.tcx.types.f32),
+        sym::cosf64 => ("cos", 1, fx.tcx.types.f64),
+        _ => return false,
+    };
+
+    if args.len() != arg_count {
+        bug!("wrong number of args for intrinsic {:?}", intrinsic);
+    }
+
+    let (a, b, c);
+    let args = match args {
+        [x] => {
+            a = [codegen_operand(fx, x)];
+            &a as &[_]
+        }
+        [x, y] => {
+            b = [codegen_operand(fx, x), codegen_operand(fx, y)];
+            &b
+        }
+        [x, y, z] => {
+            c = [codegen_operand(fx, x), codegen_operand(fx, y), codegen_operand(fx, z)];
+            &c
+        }
+        _ => unreachable!(),
+    };
+
+    let res = fx.easy_call(name, &args, ty);
+    ret.write_cvalue(fx, res);
+
+    true
+}
+
+fn codegen_regular_intrinsic_call<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    instance: Instance<'tcx>,
+    intrinsic: Symbol,
+    substs: SubstsRef<'tcx>,
+    args: &[mir::Operand<'tcx>],
+    ret: CPlace<'tcx>,
+    span: Span,
+    destination: Option<(CPlace<'tcx>, BasicBlock)>,
+) {
     let usize_layout = fx.layout_of(fx.tcx.types.usize);
 
-    call_intrinsic_match! {
-        fx, intrinsic, substs, ret, destination, args,
-        expf32(flt) -> f32 => expf,
-        expf64(flt) -> f64 => exp,
-        exp2f32(flt) -> f32 => exp2f,
-        exp2f64(flt) -> f64 => exp2,
-        sqrtf32(flt) -> f32 => sqrtf,
-        sqrtf64(flt) -> f64 => sqrt,
-        powif32(a, x) -> f32 => __powisf2, // compiler-builtins
-        powif64(a, x) -> f64 => __powidf2, // compiler-builtins
-        powf32(a, x) -> f32 => powf,
-        powf64(a, x) -> f64 => pow,
-        logf32(flt) -> f32 => logf,
-        logf64(flt) -> f64 => log,
-        log2f32(flt) -> f32 => log2f,
-        log2f64(flt) -> f64 => log2,
-        log10f32(flt) -> f32 => log10f,
-        log10f64(flt) -> f64 => log10,
-        fabsf32(flt) -> f32 => fabsf,
-        fabsf64(flt) -> f64 => fabs,
-        fmaf32(x, y, z) -> f32 => fmaf,
-        fmaf64(x, y, z) -> f64 => fma,
-        copysignf32(x, y) -> f32 => copysignf,
-        copysignf64(x, y) -> f64 => copysign,
-
-        // rounding variants
-        // FIXME use clif insts
-        floorf32(flt) -> f32 => floorf,
-        floorf64(flt) -> f64 => floor,
-        ceilf32(flt) -> f32 => ceilf,
-        ceilf64(flt) -> f64 => ceil,
-        truncf32(flt) -> f32 => truncf,
-        truncf64(flt) -> f64 => trunc,
-        roundf32(flt) -> f32 => roundf,
-        roundf64(flt) -> f64 => round,
-
-        // trigonometry
-        sinf32(flt) -> f32 => sinf,
-        sinf64(flt) -> f64 => sin,
-        cosf32(flt) -> f32 => cosf,
-        cosf64(flt) -> f64 => cos,
-    }
-
     intrinsic_match! {
         fx, intrinsic, substs, args,
         _ => {
@@ -492,7 +369,8 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
         breakpoint, () {
            fx.bcx.ins().debugtrap();
        };
-        copy | copy_nonoverlapping, <elem_ty> (v src, v dst, v count) {
+        copy | copy_nonoverlapping, (v src, v dst, v count) {
+            let elem_ty = substs.type_at(0);
             let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
             assert_eq!(args.len(), 3);
             let byte_amount = if elem_size != 1 {
@@ -510,7 +388,8 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
             }
         };
         // NOTE: the volatile variants have src and dst swapped
-        volatile_copy_memory | volatile_copy_nonoverlapping_memory, <elem_ty> (v dst, v src, v count) {
+        volatile_copy_memory | volatile_copy_nonoverlapping_memory, (v dst, v src, v count) {
+            let elem_ty = substs.type_at(0);
             let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
             assert_eq!(args.len(), 3);
             let byte_amount = if elem_size != 1 {
@@ -528,8 +407,8 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
                 fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
             }
         };
-        size_of_val, <T> (c ptr) {
-            let layout = fx.layout_of(T);
+        size_of_val, (c ptr) {
+            let layout = fx.layout_of(substs.type_at(0));
             let size = if layout.is_unsized() {
                 let (_ptr, info) = ptr.load_scalar_pair(fx);
                 let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
@@ -542,8 +421,8 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
             };
             ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
         };
-        min_align_of_val, <T> (c ptr) {
-            let layout = fx.layout_of(T);
+        min_align_of_val, (c ptr) {
+            let layout = fx.layout_of(substs.type_at(0));
             let align = if layout.is_unsized() {
                 let (_ptr, info) = ptr.load_scalar_pair(fx);
                 let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
@@ -589,7 +468,7 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
             );
             ret.write_cvalue(fx, res);
         };
-        saturating_add | saturating_sub, <T> (c lhs, c rhs) {
+        saturating_add | saturating_sub, (c lhs, c rhs) {
             assert_eq!(lhs.layout().ty, rhs.layout().ty);
             let bin_op = match intrinsic {
                 sym::saturating_add => BinOp::Add,
@@ -597,7 +476,7 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
                 _ => unreachable!(),
             };
 
-            let signed = type_sign(T);
+            let signed = type_sign(lhs.layout().ty);
 
             let checked_res = crate::num::codegen_checked_int_binop(
                 fx,
@@ -607,7 +486,7 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
             );
 
             let (val, has_overflow) = checked_res.load_scalar_pair(fx);
-            let clif_ty = fx.clif_type(T).unwrap();
+            let clif_ty = fx.clif_type(lhs.layout().ty).unwrap();
 
             let (min, max) = type_min_max_value(&mut fx.bcx, clif_ty, signed);
 
@@ -629,17 +508,19 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
                 _ => unreachable!(),
             };
 
-            let res = CValue::by_val(val, fx.layout_of(T));
+            let res = CValue::by_val(val, lhs.layout());
             ret.write_cvalue(fx, res);
         };
-        rotate_left, <T> (v x, v y) {
-            let layout = fx.layout_of(T);
+        rotate_left, (c x, v y) {
+            let layout = x.layout();
+            let x = x.load_scalar(fx);
             let res = fx.bcx.ins().rotl(x, y);
             ret.write_cvalue(fx, CValue::by_val(res, layout));
         };
-        rotate_right, <T> (v x, v y) {
-            let layout = fx.layout_of(T);
+        rotate_right, (c x, v y) {
+            let layout = x.layout();
+            let x = x.load_scalar(fx);
             let res = fx.bcx.ins().rotr(x, y);
             ret.write_cvalue(fx, CValue::by_val(res, layout));
         };
@@ -675,29 +556,33 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
             // FIXME use emit_small_memset
             fx.bcx.call_memset(fx.target_config, dst_ptr, val, count);
         };
-        ctlz | ctlz_nonzero, <T> (v arg) {
+        ctlz | ctlz_nonzero, (c arg) {
+            let val = arg.load_scalar(fx);
             // FIXME trap on `ctlz_nonzero` with zero arg.
-            let res = fx.bcx.ins().clz(arg);
-            let res = CValue::by_val(res, fx.layout_of(T));
+            let res = fx.bcx.ins().clz(val);
+            let res = CValue::by_val(res, arg.layout());
             ret.write_cvalue(fx, res);
         };
-        cttz | cttz_nonzero, <T> (v arg) {
+        cttz | cttz_nonzero, (c arg) {
+            let val = arg.load_scalar(fx);
             // FIXME trap on `cttz_nonzero` with zero arg.
-            let res = fx.bcx.ins().ctz(arg);
-            let res = CValue::by_val(res, fx.layout_of(T));
+            let res = fx.bcx.ins().ctz(val);
+            let res = CValue::by_val(res, arg.layout());
             ret.write_cvalue(fx, res);
         };
-        ctpop, <T> (v arg) {
-            let res = fx.bcx.ins().popcnt(arg);
-            let res = CValue::by_val(res, fx.layout_of(T));
+        ctpop, (c arg) {
+            let val = arg.load_scalar(fx);
+            let res = fx.bcx.ins().popcnt(val);
+            let res = CValue::by_val(res, arg.layout());
             ret.write_cvalue(fx, res);
         };
-        bitreverse, <T> (v arg) {
-            let res = fx.bcx.ins().bitrev(arg);
-            let res = CValue::by_val(res, fx.layout_of(T));
+        bitreverse, (c arg) {
+            let val = arg.load_scalar(fx);
+            let res = fx.bcx.ins().bitrev(val);
+            let res = CValue::by_val(res, arg.layout());
             ret.write_cvalue(fx, res);
         };
-        bswap, <T> (v arg) {
+        bswap, (c arg) {
             // FIXME(CraneStation/cranelift#794) add bswap instruction to cranelift
             fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
                 match bcx.func.dfg.value_type(v) {
@@ -773,15 +658,16 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
                     ty => unreachable!("bswap {}", ty),
                 }
             }
-            let res = CValue::by_val(swap(&mut fx.bcx, arg), fx.layout_of(T));
+            let val = arg.load_scalar(fx);
+            let res = CValue::by_val(swap(&mut fx.bcx, val), arg.layout());
             ret.write_cvalue(fx, res);
         };
-        assert_inhabited | assert_zero_valid | assert_uninit_valid, <T> () {
-            let layout = fx.layout_of(T);
+        assert_inhabited | assert_zero_valid | assert_uninit_valid, () {
+            let layout = fx.layout_of(substs.type_at(0));
             if layout.abi.is_uninhabited() {
                 with_no_trimmed_paths(|| crate::base::codegen_panic(
                     fx,
-                    &format!("attempted to instantiate uninhabited type `{}`", T),
+                    &format!("attempted to instantiate uninhabited type `{}`", layout.ty),
                     span,
                 ));
                 return;
@@ -790,7 +676,7 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
             if intrinsic == sym::assert_zero_valid && !layout.might_permit_raw_init(fx, /*zero:*/ true) {
                 with_no_trimmed_paths(|| crate::base::codegen_panic(
                     fx,
-                    &format!("attempted to zero-initialize type `{}`, which is invalid", T),
+                    &format!("attempted to zero-initialize type `{}`, which is invalid", layout.ty),
                     span,
                 ));
                 return;
@@ -799,7 +685,7 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
             if intrinsic == sym::assert_uninit_valid && !layout.might_permit_raw_init(fx, /*zero:*/ false) {
                 with_no_trimmed_paths(|| crate::base::codegen_panic(
                     fx,
-                    &format!("attempted to leave type `{}` uninitialized, which is invalid", T),
+                    &format!("attempted to leave type `{}` uninitialized, which is invalid", layout.ty),
                     span,
                 ));
                 return;
@@ -832,10 +718,11 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
 
             ret.write_cvalue(fx, val);
         };
-        ptr_offset_from, <T> (v ptr, v base) {
+        ptr_offset_from, (v ptr, v base) {
+            let ty = substs.type_at(0);
             let isize_layout = fx.layout_of(fx.tcx.types.isize);
 
-            let pointee_size: u64 = fx.layout_of(T).size.bytes();
+            let pointee_size: u64 = fx.layout_of(ty).size.bytes();
             let diff = fx.bcx.ins().isub(ptr, base);
             // FIXME this can be an exact division.
             let val = CValue::by_val(fx.bcx.ins().sdiv_imm(diff, pointee_size as i64), isize_layout);
@@ -864,13 +751,14 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
             // FIXME use a compiler fence once Cranelift supports it
             fx.bcx.ins().fence();
         };
-        _ if intrinsic.as_str().starts_with("atomic_load"), <T> (v ptr) {
-            validate_atomic_type!(fx, intrinsic, span, T);
-            let ty = fx.clif_type(T).unwrap();
+        _ if intrinsic.as_str().starts_with("atomic_load"), (v ptr) {
+            let ty = substs.type_at(0);
+            validate_atomic_type!(fx, intrinsic, span, ty);
+            let clif_ty = fx.clif_type(ty).unwrap();
 
-            let val = fx.bcx.ins().atomic_load(ty, MemFlags::trusted(), ptr);
+            let val = fx.bcx.ins().atomic_load(clif_ty, MemFlags::trusted(), ptr);
 
-            let val = CValue::by_val(val, fx.layout_of(T));
+            let val = CValue::by_val(val, fx.layout_of(ty));
             ret.write_cvalue(fx, val);
         };
         _ if intrinsic.as_str().starts_with("atomic_store"), (v ptr, c val) {
@@ -1101,18 +989,14 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
             ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
         };
 
-        raw_eq, <T> (v lhs_ref, v rhs_ref) {
-            fn type_by_size(size: Size) -> Option<Type> {
-                Type::int(size.bits().try_into().ok()?)
-            }
-
-            let size = fx.layout_of(T).layout.size;
+        raw_eq, (v lhs_ref, v rhs_ref) {
+            let size = fx.layout_of(substs.type_at(0)).layout.size;
             // FIXME add and use emit_small_memcmp
             let is_eq_value = if size == Size::ZERO {
                 // No bytes means they're trivially equal
                 fx.bcx.ins().iconst(types::I8, 1)
-            } else if let Some(clty) = type_by_size(size) {
+            } else if let Some(clty) = size.bits().try_into().ok().and_then(Type::int) {
                 // Can't use `trusted` for these loads; they could be unaligned.
                 let mut flags = MemFlags::new();
                 flags.set_notrap();
diff --git a/src/intrinsics/simd.rs b/src/intrinsics/simd.rs
index 6c0631d9ecb..106a190096d 100644
--- a/src/intrinsics/simd.rs
+++ b/src/intrinsics/simd.rs
@@ -1,20 +1,78 @@
 //! Codegen `extern "platform-intrinsic"` intrinsics.
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_span::Symbol;
+
 use super::*;
 use crate::prelude::*;
 
+fn validate_simd_type(fx: &mut FunctionCx<'_, '_, '_>, intrinsic: Symbol, span: Span, ty: Ty<'_>) {
+    if !ty.is_simd() {
+        fx.tcx.sess.span_err(span, &format!("invalid monomorphization of `{}` intrinsic: expected SIMD input type, found non-SIMD `{}`", intrinsic, ty));
+        // Prevent verifier error
+        crate::trap::trap_unreachable(fx, "compilation should not have succeeded");
+        return;
+    }
+}
+
+macro simd_cmp($fx:expr, $cc_u:ident|$cc_s:ident|$cc_f:ident($x:ident, $y:ident) -> $ret:ident) {
+    // FIXME use vector instructions when possible
+    simd_pair_for_each_lane($fx, $x, $y, $ret, &|fx, lane_ty, res_lane_ty, x_lane, y_lane| {
+        let res_lane = match lane_ty.kind() {
+            ty::Uint(_) => fx.bcx.ins().icmp(IntCC::$cc_u, x_lane, y_lane),
+            ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc_s, x_lane, y_lane),
+            ty::Float(_) => fx.bcx.ins().fcmp(FloatCC::$cc_f, x_lane, y_lane),
+            _ => unreachable!("{:?}", lane_ty),
+        };
+
+        let ty = fx.clif_type(res_lane_ty).unwrap();
+
+        let res_lane = fx.bcx.ins().bint(ty, res_lane);
+        fx.bcx.ins().ineg(res_lane)
+    });
+}
+
+macro simd_int_binop($fx:expr, $op_u:ident|$op_s:ident($x:ident, $y:ident) -> $ret:ident) {
+    // FIXME use vector instructions when possible
+    simd_pair_for_each_lane($fx, $x, $y, $ret, &|fx, lane_ty, _ret_lane_ty, x_lane, y_lane| {
+        match lane_ty.kind() {
+            ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
+            ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
+            _ => unreachable!("{:?}", lane_ty),
+        }
+    });
+}
+
+macro simd_int_flt_binop($fx:expr, $op_u:ident|$op_s:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) {
+    // FIXME use vector instructions when possible
+    simd_pair_for_each_lane($fx, $x, $y, $ret, &|fx, lane_ty, _ret_lane_ty, x_lane, y_lane| {
+        match lane_ty.kind() {
+            ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
+            ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
+            ty::Float(_) => fx.bcx.ins().$op_f(x_lane, y_lane),
+            _ => unreachable!("{:?}", lane_ty),
+        }
+    });
+}
+
+macro simd_flt_binop($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) {
+    // FIXME use vector instructions when possible
+    simd_pair_for_each_lane($fx, $x, $y, $ret, &|fx, lane_ty, _ret_lane_ty, x_lane, y_lane| {
+        match lane_ty.kind() {
+            ty::Float(_) => fx.bcx.ins().$op(x_lane, y_lane),
+            _ => unreachable!("{:?}", lane_ty),
+        }
+    });
+}
+
 pub(super) fn codegen_simd_intrinsic_call<'tcx>(
     fx: &mut FunctionCx<'_, '_, 'tcx>,
-    instance: Instance<'tcx>,
+    intrinsic: Symbol,
+    substs: SubstsRef<'tcx>,
     args: &[mir::Operand<'tcx>],
     ret: CPlace<'tcx>,
     span: Span,
 ) {
-    let def_id = instance.def_id();
-    let substs = instance.substs;
-
-    let intrinsic = fx.tcx.item_name(def_id);
-
     intrinsic_match! {
         fx, intrinsic, substs, args,
         _ => {
@@ -22,40 +80,39 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
         };
 
         simd_cast, (c a) {
-            validate_simd_type!(fx, intrinsic, span, a.layout().ty);
-            simd_for_each_lane(fx, a, ret, |fx, lane_layout, ret_lane_layout, lane| {
-                let ret_lane_ty = fx.clif_type(ret_lane_layout.ty).unwrap();
+            validate_simd_type(fx, intrinsic, span, a.layout().ty);
+            simd_for_each_lane(fx, a, ret, &|fx, lane_ty, ret_lane_ty, lane| {
+                let ret_lane_clif_ty = fx.clif_type(ret_lane_ty).unwrap();
 
-                let from_signed = type_sign(lane_layout.ty);
-                let to_signed = type_sign(ret_lane_layout.ty);
+                let from_signed = type_sign(lane_ty);
+                let to_signed = type_sign(ret_lane_ty);
 
-                let ret_lane = clif_int_or_float_cast(fx, lane, from_signed, ret_lane_ty, to_signed);
-                CValue::by_val(ret_lane, ret_lane_layout)
+                clif_int_or_float_cast(fx, lane, from_signed, ret_lane_clif_ty, to_signed)
             });
         };
 
         simd_eq, (c x, c y) {
-            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
-            simd_cmp!(fx, Equal|Equal(x, y) -> ret);
+            validate_simd_type(fx, intrinsic, span, x.layout().ty);
+            simd_cmp!(fx, Equal|Equal|Equal(x, y) -> ret);
         };
         simd_ne, (c x, c y) {
-            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
-            simd_cmp!(fx, NotEqual|NotEqual(x, y) -> ret);
+            validate_simd_type(fx, intrinsic, span, x.layout().ty);
+            simd_cmp!(fx, NotEqual|NotEqual|NotEqual(x, y) -> ret);
         };
         simd_lt, (c x, c y) {
-            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            validate_simd_type(fx, intrinsic, span, x.layout().ty);
             simd_cmp!(fx, UnsignedLessThan|SignedLessThan|LessThan(x, y) -> ret);
         };
         simd_le, (c x, c y) {
-            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            validate_simd_type(fx, intrinsic, span, x.layout().ty);
             simd_cmp!(fx, UnsignedLessThanOrEqual|SignedLessThanOrEqual|LessThanOrEqual(x, y) -> ret);
         };
         simd_gt, (c x, c y) {
-            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            validate_simd_type(fx, intrinsic, span, x.layout().ty);
             simd_cmp!(fx, UnsignedGreaterThan|SignedGreaterThan|GreaterThan(x, y) -> ret);
         };
         simd_ge, (c x, c y) {
-            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            validate_simd_type(fx, intrinsic, span, x.layout().ty);
             simd_cmp!(
                 fx,
                 UnsignedGreaterThanOrEqual|SignedGreaterThanOrEqual|GreaterThanOrEqual
@@ -65,7 +122,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
 
         // simd_shuffle32<T, U>(x: T, y: T, idx: [u32; 32]) -> U
         _ if intrinsic.as_str().starts_with("simd_shuffle"), (c x, c y, o idx) {
-            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            validate_simd_type(fx, intrinsic, span, x.layout().ty);
 
             // If this intrinsic is the older "simd_shuffleN" form, simply parse the integer.
             // If there is no suffix, use the index array length.
@@ -167,7 +224,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
         };
 
         simd_extract, (c v, o idx) {
-            validate_simd_type!(fx, intrinsic, span, v.layout().ty);
+            validate_simd_type(fx, intrinsic, span, v.layout().ty);
             let idx_const = if let Some(idx_const) = crate::constant::mir_operand_get_const_val(fx, idx) {
                 idx_const
             } else {
@@ -195,53 +252,50 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
         };
 
         simd_neg, (c a) {
-            validate_simd_type!(fx, intrinsic, span, a.layout().ty);
-            simd_for_each_lane(fx, a, ret, |fx, lane_layout, ret_lane_layout, lane| {
-                let ret_lane = match lane_layout.ty.kind() {
+            validate_simd_type(fx, intrinsic, span, a.layout().ty);
+            simd_for_each_lane(fx, a, ret, &|fx, lane_ty, _ret_lane_ty, lane| {
+                match lane_ty.kind() {
                     ty::Int(_) => fx.bcx.ins().ineg(lane),
                     ty::Float(_) => fx.bcx.ins().fneg(lane),
                     _ => unreachable!(),
-                };
-                CValue::by_val(ret_lane, ret_lane_layout)
+                }
             });
         };
 
         simd_fabs, (c a) {
-            validate_simd_type!(fx, intrinsic, span, a.layout().ty);
-            simd_for_each_lane(fx, a, ret, |fx, _lane_layout, ret_lane_layout, lane| {
-                let ret_lane = fx.bcx.ins().fabs(lane);
-                CValue::by_val(ret_lane, ret_lane_layout)
+            validate_simd_type(fx, intrinsic, span, a.layout().ty);
+            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _ret_lane_ty, lane| {
+                fx.bcx.ins().fabs(lane)
             });
         };
 
         simd_fsqrt, (c a) {
-            validate_simd_type!(fx, intrinsic, span, a.layout().ty);
-            simd_for_each_lane(fx, a, ret, |fx, _lane_layout, ret_lane_layout, lane| {
-                let ret_lane = fx.bcx.ins().sqrt(lane);
-                CValue::by_val(ret_lane, ret_lane_layout)
+            validate_simd_type(fx, intrinsic, span, a.layout().ty);
+            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _ret_lane_ty, lane| {
+                fx.bcx.ins().sqrt(lane)
             });
         };
 
         simd_add, (c x, c y) {
-            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
-            simd_int_flt_binop!(fx, iadd|fadd(x, y) -> ret);
+            validate_simd_type(fx, intrinsic, span, x.layout().ty);
+            simd_int_flt_binop!(fx, iadd|iadd|fadd(x, y) -> ret);
        };
         simd_sub, (c x, c y) {
-            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
-            simd_int_flt_binop!(fx, isub|fsub(x, y) -> ret);
+            validate_simd_type(fx, intrinsic, span, x.layout().ty);
+            simd_int_flt_binop!(fx, isub|isub|fsub(x, y) -> ret);
        };
         simd_mul, (c x, c y) {
-            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
-            simd_int_flt_binop!(fx, imul|fmul(x, y) -> ret);
+            validate_simd_type(fx, intrinsic, span, x.layout().ty);
+            simd_int_flt_binop!(fx, imul|imul|fmul(x, y) -> ret);
        };
         simd_div, (c x, c y) {
-            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            validate_simd_type(fx, intrinsic, span, x.layout().ty);
             simd_int_flt_binop!(fx, udiv|sdiv|fdiv(x, y) -> ret);
         };
         simd_rem, (c x, c y) {
-            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
-            simd_pair_for_each_lane(fx, x, y, ret, |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
-                let res_lane = match lane_layout.ty.kind() {
+            validate_simd_type(fx, intrinsic, span, x.layout().ty);
+            simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, _ret_lane_ty, x_lane, y_lane| {
+                match lane_ty.kind() {
                     ty::Uint(_) => fx.bcx.ins().urem(x_lane, y_lane),
                     ty::Int(_) => fx.bcx.ins().srem(x_lane, y_lane),
                     ty::Float(FloatTy::F32) => fx.lib_call(
                         "fmodf",
                         vec![AbiParam::new(types::F32), AbiParam::new(types::F32)],
                         vec![AbiParam::new(types::F32)],
                         &[x_lane, y_lane],
                     )[0],
                     ty::Float(FloatTy::F64) => fx.lib_call(
                         "fmod",
                         vec![AbiParam::new(types::F64), AbiParam::new(types::F64)],
@@ -256,34 +310,33 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
                         vec![AbiParam::new(types::F64)],
                         &[x_lane, y_lane],
                     )[0],
-                    _ => unreachable!("{:?}", lane_layout.ty),
-                };
-                CValue::by_val(res_lane, ret_lane_layout)
+                    _ => unreachable!("{:?}", lane_ty),
+                }
             });
         };
         simd_shl, (c x, c y) {
-            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
-            simd_int_binop!(fx, ishl(x, y) -> ret);
+            validate_simd_type(fx, intrinsic, span, x.layout().ty);
+            simd_int_binop!(fx, ishl|ishl(x, y) -> ret);
         };
         simd_shr, (c x, c y) {
-            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            validate_simd_type(fx, intrinsic, span, x.layout().ty);
             simd_int_binop!(fx, ushr|sshr(x, y) -> ret);
         };
         simd_and, (c x, c y) {
-            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
-            simd_int_binop!(fx, band(x, y) -> ret);
+            validate_simd_type(fx, intrinsic, span, x.layout().ty);
+            simd_int_binop!(fx, band|band(x, y) -> ret);
        };
         simd_or, (c x, c y) {
-            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
-            simd_int_binop!(fx, bor(x, y) -> ret);
+            validate_simd_type(fx, intrinsic, span, x.layout().ty);
+            simd_int_binop!(fx, bor|bor(x, y) -> ret);
        };
         simd_xor, (c x, c y) {
-            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
-            simd_int_binop!(fx, bxor(x, y) -> ret);
+            validate_simd_type(fx, intrinsic, span, x.layout().ty);
+            simd_int_binop!(fx, bxor|bxor(x, y) -> ret);
        };
 
         simd_fma, (c a, c b, c c) {
-            validate_simd_type!(fx, intrinsic, span, a.layout().ty);
+            validate_simd_type(fx, intrinsic, span, a.layout().ty);
             assert_eq!(a.layout(), b.layout());
             assert_eq!(a.layout(), c.layout());
             let layout = a.layout();
@@ -306,18 +359,18 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
         };
 
         simd_fmin, (c x, c y) {
-            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            validate_simd_type(fx, intrinsic, span, x.layout().ty);
             simd_flt_binop!(fx, fmin(x, y) -> ret);
         };
         simd_fmax, (c x, c y) {
-            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            validate_simd_type(fx, intrinsic, span, x.layout().ty);
             simd_flt_binop!(fx, fmax(x, y) -> ret);
         };
 
         simd_round, (c a) {
-            validate_simd_type!(fx, intrinsic, span, a.layout().ty);
-            simd_for_each_lane(fx, a, ret, |fx, lane_layout, ret_lane_layout, lane| {
-                let res_lane = match lane_layout.ty.kind() {
+            validate_simd_type(fx, intrinsic, span, a.layout().ty);
+            simd_for_each_lane(fx, a, ret, &|fx, lane_ty, _ret_lane_ty, lane| {
+                match lane_ty.kind() {
                     ty::Float(FloatTy::F32) => fx.lib_call(
                         "roundf",
                         vec![AbiParam::new(types::F32)],
                         vec![AbiParam::new(types::F32)],
                         &[lane],
                     )[0],
                     ty::Float(FloatTy::F64) => fx.lib_call(
                         "round",
                         vec![AbiParam::new(types::F64)],
@@ -330,37 +383,33 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
                         vec![AbiParam::new(types::F64)],
                         &[lane],
                     )[0],
-                    _ => unreachable!("{:?}", lane_layout.ty),
-                };
-                CValue::by_val(res_lane, ret_lane_layout)
+                    _ => unreachable!("{:?}", lane_ty),
+                }
             });
         };
         simd_ceil, (c a) {
-            validate_simd_type!(fx, intrinsic, span, a.layout().ty);
-            simd_for_each_lane(fx, a, ret, |fx, _lane_layout, ret_lane_layout, lane| {
-                let ret_lane = fx.bcx.ins().ceil(lane);
-                CValue::by_val(ret_lane, ret_lane_layout)
+            validate_simd_type(fx, intrinsic, span, a.layout().ty);
+            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _ret_lane_ty, lane| {
+                fx.bcx.ins().ceil(lane)
             });
         };
         simd_floor, (c a) {
-            validate_simd_type!(fx, intrinsic, span, a.layout().ty);
-            simd_for_each_lane(fx, a, ret, |fx, _lane_layout, ret_lane_layout, lane| {
-                let ret_lane = fx.bcx.ins().floor(lane);
-                CValue::by_val(ret_lane, ret_lane_layout)
+            validate_simd_type(fx, intrinsic, span, a.layout().ty);
+            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _ret_lane_ty, lane| {
+                fx.bcx.ins().floor(lane)
            });
         };
         simd_trunc, (c a) {
-            validate_simd_type!(fx, intrinsic, span, a.layout().ty);
-            simd_for_each_lane(fx, a, ret, |fx, _lane_layout, ret_lane_layout, lane| {
-                let ret_lane = fx.bcx.ins().trunc(lane);
-                CValue::by_val(ret_lane, ret_lane_layout)
+            validate_simd_type(fx, intrinsic, span, a.layout().ty);
+            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _ret_lane_ty, lane| {
+                fx.bcx.ins().trunc(lane)
             });
         };
 
         simd_reduce_add_ordered | simd_reduce_add_unordered, (c v, v acc) {
-            validate_simd_type!(fx, intrinsic, span, v.layout().ty);
-            simd_reduce(fx, v, Some(acc), ret, |fx, lane_layout, a, b| {
-                if lane_layout.ty.is_floating_point() {
+            validate_simd_type(fx, intrinsic, span, v.layout().ty);
+            simd_reduce(fx, v, Some(acc), ret, &|fx, lane_ty, a, b| {
+                if lane_ty.is_floating_point() {
                     fx.bcx.ins().fadd(a, b)
                 } else {
                     fx.bcx.ins().iadd(a, b)
@@ -369,9 +418,9 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
         };
 
         simd_reduce_mul_ordered | simd_reduce_mul_unordered, (c v, v acc) {
-            validate_simd_type!(fx, intrinsic, span, v.layout().ty);
-            simd_reduce(fx, v, Some(acc), ret, |fx, lane_layout, a, b| {
-                if lane_layout.ty.is_floating_point() {
+            validate_simd_type(fx, intrinsic, span, v.layout().ty);
+            simd_reduce(fx, v, Some(acc), ret, &|fx, lane_ty, a, b| {
+                if lane_ty.is_floating_point() {
                     fx.bcx.ins().fmul(a, b)
                 } else {
                     fx.bcx.ins().imul(a, b)
@@ -380,34 +429,34 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
         };
 
         simd_reduce_all, (c v) {
-            validate_simd_type!(fx, intrinsic, span, v.layout().ty);
-            simd_reduce_bool(fx, v, ret, |fx, a, b| fx.bcx.ins().band(a, b));
+            validate_simd_type(fx, intrinsic, span, v.layout().ty);
+            simd_reduce_bool(fx, v, ret, &|fx, a, b| fx.bcx.ins().band(a, b));
        };
         simd_reduce_any, (c v) {
-            validate_simd_type!(fx, intrinsic, span, v.layout().ty);
-            simd_reduce_bool(fx, v, ret, |fx, a, b| fx.bcx.ins().bor(a, b));
+            validate_simd_type(fx, intrinsic, span, v.layout().ty);
+            simd_reduce_bool(fx, v, ret, &|fx, a, b| fx.bcx.ins().bor(a, b));
        };
 
         simd_reduce_and, (c v) {
-            validate_simd_type!(fx, intrinsic, span, v.layout().ty);
-            simd_reduce(fx, v, None, ret, |fx, _layout, a, b| fx.bcx.ins().band(a, b));
+            validate_simd_type(fx, intrinsic, span, v.layout().ty);
+            simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| fx.bcx.ins().band(a, b));
        };
         simd_reduce_or, (c v) {
-            validate_simd_type!(fx, intrinsic, span, v.layout().ty);
-            simd_reduce(fx, v, None, ret, |fx, _layout, a, b| fx.bcx.ins().bor(a, b));
+            validate_simd_type(fx, intrinsic, span, v.layout().ty);
+            simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| fx.bcx.ins().bor(a, b));
        };
         simd_reduce_xor, (c v) {
-            validate_simd_type!(fx, intrinsic, span, v.layout().ty);
-            simd_reduce(fx, v, None, ret, |fx, _layout, a, b| fx.bcx.ins().bxor(a, b));
+            validate_simd_type(fx, intrinsic, span, v.layout().ty);
+            simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| fx.bcx.ins().bxor(a, b));
        };
 
         simd_reduce_min, (c v) {
-            validate_simd_type!(fx, intrinsic, span, v.layout().ty);
-            simd_reduce(fx, v, None, ret, |fx, layout, a, b| {
-                let lt = match layout.ty.kind() {
+            validate_simd_type(fx, intrinsic, span, v.layout().ty);
+            simd_reduce(fx, v, None, ret, &|fx, ty, a, b| {
+                let lt = match ty.kind() {
                     ty::Int(_) => fx.bcx.ins().icmp(IntCC::SignedLessThan, a, b),
                     ty::Uint(_) => fx.bcx.ins().icmp(IntCC::UnsignedLessThan, a, b),
                     ty::Float(_) => fx.bcx.ins().fcmp(FloatCC::LessThan, a, b),
@@ -418,9 +467,9 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
         };
 
         simd_reduce_max, (c v) {
-            validate_simd_type!(fx, intrinsic, span, v.layout().ty);
-            simd_reduce(fx, v, None, ret, |fx, layout, a, b| {
-                let gt = match layout.ty.kind() {
+            validate_simd_type(fx, intrinsic, span, v.layout().ty);
+            simd_reduce(fx, v, None, ret, &|fx, ty, a, b| {
+                let gt = match ty.kind() {
                     ty::Int(_) => fx.bcx.ins().icmp(IntCC::SignedGreaterThan, a, b),
                     ty::Uint(_) => fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, a, b),
                    ty::Float(_) => fx.bcx.ins().fcmp(FloatCC::GreaterThan, a, b),
@@ -431,8 +480,8 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
         };
 
         simd_select, (c m, c a, c b) {
-            validate_simd_type!(fx, intrinsic, span, m.layout().ty);
-            validate_simd_type!(fx, intrinsic, span, a.layout().ty);
+            validate_simd_type(fx, intrinsic, span, m.layout().ty);
+            validate_simd_type(fx, intrinsic, span, a.layout().ty);
             assert_eq!(a.layout(), b.layout());
 
             let (lane_count, lane_ty) = a.layout().ty.simd_size_and_type(fx.tcx);
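
---

The recurring pattern in this patch — `impl Fn` parameters becoming `&dyn Fn`, and per-lane callbacks returning a bare `Value` that the shared helper wraps — is easier to see in isolation. The sketch below is not part of the patch: the `Lane`/`Value` types and both helper names are made up for illustration. It shows the trade the patch makes: every distinct closure type forces a fresh monomorphized copy of a generic helper, while the `&dyn Fn` version is compiled once and dispatched through a vtable.

```rust
// Standalone sketch (hypothetical types and names, not the patch's API).

#[derive(Clone, Copy, Debug)]
struct Lane(i64);
#[derive(Clone, Copy, Debug)]
struct Value(i64);

// Generic version: one copy of this function is codegenned per closure type.
fn for_each_lane_generic(lanes: &[Lane], f: impl Fn(Lane) -> Value) -> Vec<Value> {
    lanes.iter().map(|&l| f(l)).collect()
}

// Dynamic version: a single copy, dispatched through a vtable at each call.
fn for_each_lane_dyn(lanes: &[Lane], f: &dyn Fn(Lane) -> Value) -> Vec<Value> {
    lanes.iter().map(|&l| f(l)).collect()
}

fn main() {
    let lanes = [Lane(1), Lane(2), Lane(3)];
    // Each distinct closure below would instantiate a new copy of the
    // generic helper, but reuses the same machine code for the dyn helper.
    let neg = for_each_lane_dyn(&lanes, &|l| Value(-l.0));
    let dbl = for_each_lane_dyn(&lanes, &|l| Value(l.0 * 2));
    let gen = for_each_lane_generic(&lanes, |l| Value(l.0 + 1));
    println!("{:?} {:?} {:?}", neg, dbl, gen);
}
```

The patch applies the same idea to `simd_for_each_lane` and friends, and additionally hoists the `CValue::by_val` wrapping out of every closure into the one shared loop body, which is why the callbacks now take and return plain `Ty`/`Value` instead of `TyAndLayout`/`CValue`.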
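The other structural change is that `codegen_float_intrinsic_call` replaces the `call_intrinsic_match!` macro with a plain data table: each float intrinsic maps to a libcall name plus an arity, and a single code path binds the operands and emits the call. Here is a minimal standalone sketch of that table-driven dispatch; the `float_intrinsic_entry` helper, the string keys, and the tiny selection of entries are illustrative only.

```rust
// Sketch of table-driven intrinsic lowering (hypothetical helper, not the
// patch's code): map an intrinsic name to (libcall symbol, argument count),
// or None when the caller should fall through to the general path.
fn float_intrinsic_entry(name: &str) -> Option<(&'static str, usize)> {
    Some(match name {
        "sqrtf32" => ("sqrtf", 1),
        "sqrtf64" => ("sqrt", 1),
        "powf32" => ("powf", 2),
        "powif32" => ("__powisf2", 2), // provided by compiler-builtins
        "fmaf64" => ("fma", 3),
        _ => return None,
    })
}

fn main() {
    for intr in ["sqrtf32", "fmaf64", "ctpop"] {
        match float_intrinsic_entry(intr) {
            Some((sym, arity)) => println!("{}: emit call to {} with {} arg(s)", intr, sym, arity),
            None => println!("{}: not a float intrinsic, handled elsewhere", intr),
        }
    }
}
```

Compared with the macro it replaces, the table keeps one copy of the argument-binding and `easy_call` logic, and adding an intrinsic becomes a one-line table entry rather than a new macro arm.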