[RISCV] Special case sign-extended scalars when type legalizing nxvXi64 .vx intrinsics on RV32.

On RV32, we need to type legalize i64 scalar arguments to intrinsics.
We usually do this by splatting the value into a vector separately.
If the scalar happens to be sign extended, we can instead truncate it
to i32 and continue using a .vx intrinsic, relying on the instruction
to sign-extend it since SEW > XLEN.

We already special cased sign-extended constants; this extends it
to any value known to be sign extended.
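
As a sanity check on the sign-bit condition, here is a standalone C++
sketch (not LLVM code; numSignBits is a hypothetical stand-in for
DAG.ComputeNumSignBits): a value with more than 32 sign bits survives an
i32 truncate plus sign-extend round trip, while one with exactly 32 does not.

#include <cassert>
#include <cstdint>

// Count the leading bits of V that are copies of the sign bit, including
// the sign bit itself (the quantity ComputeNumSignBits reports).
static int numSignBits(int64_t V) {
  uint64_t U = static_cast<uint64_t>(V);
  uint64_t Sign = U >> 63;
  int N = 1;
  while (N < 64 && ((U >> (63 - N)) & 1) == Sign)
    ++N;
  return N;
}

int main() {
  // > 32 sign bits: truncating to i32 and letting the instruction
  // sign-extend (SEW > XLEN) reproduces the original i64 value.
  for (int64_t V : {int64_t(0), int64_t(-1), int64_t(INT32_MIN), int64_t(42)}) {
    assert(numSignBits(V) > 32);
    assert(int64_t(int32_t(V)) == V);
  }
  // Exactly 32 sign bits: bit 31 differs from bit 32, so truncation
  // would change the value and the splat fallback is still required.
  int64_t V = 0xFFFFFFFF; // 2^32 - 1
  assert(numSignBits(V) == 32);
  assert(int64_t(int32_t(V)) != V);
}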

I've only added tests for vadd (an explicit sext and a sign-extending
load); most intrinsics go through the same check.

Reviewed By: khchen

Differential Revision: https://reviews.llvm.org/D122186
Craig Topper, 2022-03-22 10:16:53 -07:00
commit 51940d69cb (parent c3fe8ddd83)
2 changed files with 48 additions and 9 deletions

@@ -4621,13 +4621,11 @@ static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
   assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
          VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
-  // If this is a sign-extended 32-bit constant, we can truncate it and rely
-  // on the instruction to sign-extend since SEW>XLEN.
-  if (auto *CVal = dyn_cast<ConstantSDNode>(ScalarOp)) {
-    if (isInt<32>(CVal->getSExtValue())) {
-      ScalarOp = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
-      return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
-    }
-  }
+  // If this is a sign-extended 32-bit value, we can truncate it and rely on the
+  // instruction to sign-extend since SEW>XLEN.
+  if (DAG.ComputeNumSignBits(ScalarOp) > 32) {
+    ScalarOp = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, ScalarOp);
+    return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
+  }

   switch (IntNo) {
@@ -4749,8 +4747,6 @@ static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
   }

   // We need to convert the scalar to a splat vector.
-  // FIXME: Can we implicitly truncate the scalar if it is known to
-  // be sign extended?
   SDValue VL = getVLOperand(Op);
   assert(VL.getValueType() == XLenVT);
   ScalarOp = splatSplitI64WithVL(DL, VT, SDValue(), ScalarOp, VL, DAG);
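
For contrast, this splatSplitI64WithVL fallback is what the new check
avoids: the i64 scalar is materialized through the stack and the
instruction degrades to a .vv form. On RV32 that sequence looks roughly
like the following (a sketch reconstructed from the neighboring vadd
tests; exact registers and stack offsets are illustrative, not part of
this diff):

addi sp, sp, -16
sw a1, 12(sp)
sw a0, 8(sp)
addi a0, sp, 8
vsetvli zero, a2, e64, m1, ta, mu
vlse64.v v9, (a0), zero
vadd.vv v8, v8, v9
addi sp, sp, 16
ret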

@@ -1881,6 +1881,49 @@ entry:
   ret <vscale x 1 x i64> %a
 }

+define <vscale x 1 x i64> @intrinsic_vadd_vx_sext_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vadd_vx_sext_nxv1i64_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV32-NEXT:    vadd.vx v8, v8, a0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vadd_vx_sext_nxv1i64_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    sext.w a0, a0
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT:    vadd.vx v8, v8, a0
+; RV64-NEXT:    ret
+entry:
+  %ext = sext i32 %1 to i64
+  %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64(
+    <vscale x 1 x i64> undef,
+    <vscale x 1 x i64> %0,
+    i64 %ext,
+    iXLen %2)
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vadd_vx_sextload_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i32* %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vx_sextload_nxv1i64_nxv1i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lw a0, 0(a0)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT:    vadd.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %load = load i32, i32* %1
+  %ext = sext i32 %load to i64
+  %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64(
+    <vscale x 1 x i64> undef,
+    <vscale x 1 x i64> %0,
+    i64 %ext,
+    iXLen %2)
+  ret <vscale x 1 x i64> %a
+}
+
 declare <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,