[ARM] Add instruction selection patterns for vmin/vmax

Summary:
The mid-end was generating vector smin/smax/umin/umax nodes, but
we were using vbsl to generate the code. This adds the vmin/vmax
patterns and a test to check that we are now generating vmin/vmax
instructions.

Reviewers: rengolin, jmolloy

Subscribers: aemerson, rengolin, llvm-commits

Differential Revision: http://reviews.llvm.org/D12105

llvm-svn: 245439
This commit is contained in:
Silviu Baranga 2015-08-19 14:11:27 +00:00
parent 746da5fe2a
commit ad1b19fcb7
4 changed files with 224 additions and 14 deletions

View File

@ -147,6 +147,11 @@ void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT,
setOperationAction(ISD::SABSDIFF, VT, Legal); setOperationAction(ISD::SABSDIFF, VT, Legal);
setOperationAction(ISD::UABSDIFF, VT, Legal); setOperationAction(ISD::UABSDIFF, VT, Legal);
} }
if (!VT.isFloatingPoint() &&
VT != MVT::v2i64 && VT != MVT::v1i64)
for (unsigned Opcode : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
setOperationAction(Opcode, VT, Legal);
} }
void ARMTargetLowering::addDRTypeForNEON(MVT VT) { void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
@ -2821,11 +2826,24 @@ ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
Op.getOperand(1), Op.getOperand(2)); Op.getOperand(1), Op.getOperand(2));
} }
case Intrinsic::arm_neon_vminu:
case Intrinsic::arm_neon_vmaxu: {
if (Op.getValueType().isFloatingPoint())
return SDValue();
unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminu)
? ISD::UMIN : ISD::UMAX;
return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
Op.getOperand(1), Op.getOperand(2));
}
case Intrinsic::arm_neon_vmins: case Intrinsic::arm_neon_vmins:
case Intrinsic::arm_neon_vmaxs: { case Intrinsic::arm_neon_vmaxs: {
// v{min,max}s is overloaded between signed integers and floats. // v{min,max}s is overloaded between signed integers and floats.
if (!Op.getValueType().isFloatingPoint()) if (!Op.getValueType().isFloatingPoint()) {
return SDValue(); unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins)
? ISD::SMIN : ISD::SMAX;
return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
Op.getOperand(1), Op.getOperand(2));
}
unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins) unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins)
? ISD::FMINNAN : ISD::FMAXNAN; ? ISD::FMINNAN : ISD::FMAXNAN;
return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),

View File

@ -5026,10 +5026,10 @@ defm VABALu : N3VLIntExtOp_QHS<1,1,0b0101,0, IIC_VABAD,
// VMAX : Vector Maximum // VMAX : Vector Maximum
defm VMAXs : N3VInt_QHS<0, 0, 0b0110, 0, N3RegFrm, defm VMAXs : N3VInt_QHS<0, 0, 0b0110, 0, N3RegFrm,
IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q, IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q,
"vmax", "s", int_arm_neon_vmaxs, 1>; "vmax", "s", smax, 1>;
defm VMAXu : N3VInt_QHS<1, 0, 0b0110, 0, N3RegFrm, defm VMAXu : N3VInt_QHS<1, 0, 0b0110, 0, N3RegFrm,
IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q, IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q,
"vmax", "u", int_arm_neon_vmaxu, 1>; "vmax", "u", umax, 1>;
def VMAXfd : N3VDInt<0, 0, 0b00, 0b1111, 0, N3RegFrm, IIC_VBIND, def VMAXfd : N3VDInt<0, 0, 0b00, 0b1111, 0, N3RegFrm, IIC_VBIND,
"vmax", "f32", "vmax", "f32",
v2f32, v2f32, fmaxnan, 1>; v2f32, v2f32, fmaxnan, 1>;
@ -5052,10 +5052,10 @@ let PostEncoderMethod = "NEONThumb2V8PostEncoder", DecoderNamespace = "v8NEON" i
// VMIN : Vector Minimum // VMIN : Vector Minimum
defm VMINs : N3VInt_QHS<0, 0, 0b0110, 1, N3RegFrm, defm VMINs : N3VInt_QHS<0, 0, 0b0110, 1, N3RegFrm,
IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q, IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q,
"vmin", "s", int_arm_neon_vmins, 1>; "vmin", "s", smin, 1>;
defm VMINu : N3VInt_QHS<1, 0, 0b0110, 1, N3RegFrm, defm VMINu : N3VInt_QHS<1, 0, 0b0110, 1, N3RegFrm,
IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q, IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q,
"vmin", "u", int_arm_neon_vminu, 1>; "vmin", "u", umin, 1>;
def VMINfd : N3VDInt<0, 0, 0b10, 0b1111, 0, N3RegFrm, IIC_VBIND, def VMINfd : N3VDInt<0, 0, 0b10, 0b1111, 0, N3RegFrm, IIC_VBIND,
"vmin", "f32", "vmin", "f32",
v2f32, v2f32, fminnan, 1>; v2f32, v2f32, fminnan, 1>;

View File

@ -0,0 +1,193 @@
; RUN: llc < %s -mtriple=armv8-linux-gnu -mattr=+neon | FileCheck %s
; CHECK-LABEL: t1
; CHECK: vmax.s32 {{q[0-9]+}}, {{q[0-9]+}}, {{q[0-9]+}}
define <4 x i32> @t1(<4 x i32> %a, <4 x i32> %b) {
  %cmp = icmp sgt <4 x i32> %a, %b
  %sel = select <4 x i1> %cmp, <4 x i32> %a, <4 x i32> %b
  ret <4 x i32> %sel
}
; CHECK-LABEL: t2
; CHECK: vmin.s32 {{q[0-9]+}}, {{q[0-9]+}}, {{q[0-9]+}}
define <4 x i32> @t2(<4 x i32> %a, <4 x i32> %b) {
  %cmp = icmp slt <4 x i32> %a, %b
  %sel = select <4 x i1> %cmp, <4 x i32> %a, <4 x i32> %b
  ret <4 x i32> %sel
}
; CHECK-LABEL: t3
; CHECK: vmax.u32 {{q[0-9]+}}, {{q[0-9]+}}, {{q[0-9]+}}
define <4 x i32> @t3(<4 x i32> %a, <4 x i32> %b) {
  %cmp = icmp ugt <4 x i32> %a, %b
  %sel = select <4 x i1> %cmp, <4 x i32> %a, <4 x i32> %b
  ret <4 x i32> %sel
}
; CHECK-LABEL: t4
; CHECK: vmin.u32 {{q[0-9]+}}, {{q[0-9]+}}, {{q[0-9]+}}
define <4 x i32> @t4(<4 x i32> %a, <4 x i32> %b) {
  %cmp = icmp ult <4 x i32> %a, %b
  %sel = select <4 x i1> %cmp, <4 x i32> %a, <4 x i32> %b
  ret <4 x i32> %sel
}
; CHECK-LABEL: t5
; CHECK: vmax.s32 {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
define <2 x i32> @t5(<2 x i32> %a, <2 x i32> %b) {
  %cmp = icmp sgt <2 x i32> %a, %b
  %sel = select <2 x i1> %cmp, <2 x i32> %a, <2 x i32> %b
  ret <2 x i32> %sel
}
; CHECK-LABEL: t6
; CHECK: vmin.s32 {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
define <2 x i32> @t6(<2 x i32> %a, <2 x i32> %b) {
  %cmp = icmp slt <2 x i32> %a, %b
  %sel = select <2 x i1> %cmp, <2 x i32> %a, <2 x i32> %b
  ret <2 x i32> %sel
}
; CHECK-LABEL: t7
; CHECK: vmax.u32 {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
define <2 x i32> @t7(<2 x i32> %a, <2 x i32> %b) {
  %cmp = icmp ugt <2 x i32> %a, %b
  %sel = select <2 x i1> %cmp, <2 x i32> %a, <2 x i32> %b
  ret <2 x i32> %sel
}
; CHECK-LABEL: t8
; CHECK: vmin.u32 {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
define <2 x i32> @t8(<2 x i32> %a, <2 x i32> %b) {
  %cmp = icmp ult <2 x i32> %a, %b
  %sel = select <2 x i1> %cmp, <2 x i32> %a, <2 x i32> %b
  ret <2 x i32> %sel
}
; CHECK-LABEL: t9
; CHECK: vmax.s16 {{q[0-9]+}}, {{q[0-9]+}}, {{q[0-9]+}}
define <8 x i16> @t9(<8 x i16> %a, <8 x i16> %b) {
  %cmp = icmp sgt <8 x i16> %a, %b
  %sel = select <8 x i1> %cmp, <8 x i16> %a, <8 x i16> %b
  ret <8 x i16> %sel
}
; CHECK-LABEL: t10
; CHECK: vmin.s16 {{q[0-9]+}}, {{q[0-9]+}}, {{q[0-9]+}}
define <8 x i16> @t10(<8 x i16> %a, <8 x i16> %b) {
  %cmp = icmp slt <8 x i16> %a, %b
  %sel = select <8 x i1> %cmp, <8 x i16> %a, <8 x i16> %b
  ret <8 x i16> %sel
}
; CHECK-LABEL: t11
; CHECK: vmax.u16 {{q[0-9]+}}, {{q[0-9]+}}, {{q[0-9]+}}
define <8 x i16> @t11(<8 x i16> %a, <8 x i16> %b) {
  %cmp = icmp ugt <8 x i16> %a, %b
  %sel = select <8 x i1> %cmp, <8 x i16> %a, <8 x i16> %b
  ret <8 x i16> %sel
}
; CHECK-LABEL: t12
; CHECK: vmin.u16 {{q[0-9]+}}, {{q[0-9]+}}, {{q[0-9]+}}
define <8 x i16> @t12(<8 x i16> %a, <8 x i16> %b) {
  %cmp = icmp ult <8 x i16> %a, %b
  %sel = select <8 x i1> %cmp, <8 x i16> %a, <8 x i16> %b
  ret <8 x i16> %sel
}
; CHECK-LABEL: t13
; Tightened to match the d-register operand form, consistent with t14-t16;
; a bare "vmax.s16" would also match the q-register encoding.
; CHECK: vmax.s16 {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
define <4 x i16> @t13(<4 x i16> %a, <4 x i16> %b) {
  %t1 = icmp sgt <4 x i16> %a, %b
  %t2 = select <4 x i1> %t1, <4 x i16> %a, <4 x i16> %b
  ret <4 x i16> %t2
}
; CHECK-LABEL: t14
; CHECK: vmin.s16 {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
define <4 x i16> @t14(<4 x i16> %a, <4 x i16> %b) {
  %cmp = icmp slt <4 x i16> %a, %b
  %sel = select <4 x i1> %cmp, <4 x i16> %a, <4 x i16> %b
  ret <4 x i16> %sel
}
; CHECK-LABEL: t15
; CHECK: vmax.u16 {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
define <4 x i16> @t15(<4 x i16> %a, <4 x i16> %b) {
  %cmp = icmp ugt <4 x i16> %a, %b
  %sel = select <4 x i1> %cmp, <4 x i16> %a, <4 x i16> %b
  ret <4 x i16> %sel
}
; CHECK-LABEL: t16
; CHECK: vmin.u16 {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
define <4 x i16> @t16(<4 x i16> %a, <4 x i16> %b) {
  %cmp = icmp ult <4 x i16> %a, %b
  %sel = select <4 x i1> %cmp, <4 x i16> %a, <4 x i16> %b
  ret <4 x i16> %sel
}
; CHECK-LABEL: t17
; CHECK: vmax.s8 {{q[0-9]+}}, {{q[0-9]+}}, {{q[0-9]+}}
define <16 x i8> @t17(<16 x i8> %a, <16 x i8> %b) {
  %cmp = icmp sgt <16 x i8> %a, %b
  %sel = select <16 x i1> %cmp, <16 x i8> %a, <16 x i8> %b
  ret <16 x i8> %sel
}
; CHECK-LABEL: t18
; CHECK: vmin.s8 {{q[0-9]+}}, {{q[0-9]+}}, {{q[0-9]+}}
define <16 x i8> @t18(<16 x i8> %a, <16 x i8> %b) {
  %cmp = icmp slt <16 x i8> %a, %b
  %sel = select <16 x i1> %cmp, <16 x i8> %a, <16 x i8> %b
  ret <16 x i8> %sel
}
; CHECK-LABEL: t19
; CHECK: vmax.u8 {{q[0-9]+}}, {{q[0-9]+}}, {{q[0-9]+}}
define <16 x i8> @t19(<16 x i8> %a, <16 x i8> %b) {
  %cmp = icmp ugt <16 x i8> %a, %b
  %sel = select <16 x i1> %cmp, <16 x i8> %a, <16 x i8> %b
  ret <16 x i8> %sel
}
; CHECK-LABEL: t20
; CHECK: vmin.u8 {{q[0-9]+}}, {{q[0-9]+}}, {{q[0-9]+}}
define <16 x i8> @t20(<16 x i8> %a, <16 x i8> %b) {
  %cmp = icmp ult <16 x i8> %a, %b
  %sel = select <16 x i1> %cmp, <16 x i8> %a, <16 x i8> %b
  ret <16 x i8> %sel
}
; CHECK-LABEL: t21
; CHECK: vmax.s8 {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
define <8 x i8> @t21(<8 x i8> %a, <8 x i8> %b) {
  %cmp = icmp sgt <8 x i8> %a, %b
  %sel = select <8 x i1> %cmp, <8 x i8> %a, <8 x i8> %b
  ret <8 x i8> %sel
}
; CHECK-LABEL: t22
; CHECK: vmin.s8 {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
define <8 x i8> @t22(<8 x i8> %a, <8 x i8> %b) {
  %cmp = icmp slt <8 x i8> %a, %b
  %sel = select <8 x i1> %cmp, <8 x i8> %a, <8 x i8> %b
  ret <8 x i8> %sel
}
; CHECK-LABEL: t23
; CHECK: vmax.u8 {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
define <8 x i8> @t23(<8 x i8> %a, <8 x i8> %b) {
  %cmp = icmp ugt <8 x i8> %a, %b
  %sel = select <8 x i1> %cmp, <8 x i8> %a, <8 x i8> %b
  ret <8 x i8> %sel
}
; CHECK-LABEL: t24
; CHECK: vmin.u8 {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
define <8 x i8> @t24(<8 x i8> %a, <8 x i8> %b) {
  %cmp = icmp ult <8 x i8> %a, %b
  %sel = select <8 x i1> %cmp, <8 x i8> %a, <8 x i8> %b
  ret <8 x i8> %sel
}

View File

@ -3,8 +3,7 @@
; Make sure that ARM backend with NEON handles vselect. ; Make sure that ARM backend with NEON handles vselect.
define void @vmax_v4i32(<4 x i32>* %m, <4 x i32> %a, <4 x i32> %b) { define void @vmax_v4i32(<4 x i32>* %m, <4 x i32> %a, <4 x i32> %b) {
; CHECK: vcgt.s32 [[QR:q[0-9]+]], [[Q1:q[0-9]+]], [[Q2:q[0-9]+]] ; CHECK: vmax.s32 {{q[0-9]+}}, {{q[0-9]+}}, {{q[0-9]+}}
; CHECK: vbsl [[QR]], [[Q1]], [[Q2]]
%cmpres = icmp sgt <4 x i32> %a, %b %cmpres = icmp sgt <4 x i32> %a, %b
%maxres = select <4 x i1> %cmpres, <4 x i32> %a, <4 x i32> %b %maxres = select <4 x i1> %cmpres, <4 x i32> %a, <4 x i32> %b
store <4 x i32> %maxres, <4 x i32>* %m store <4 x i32> %maxres, <4 x i32>* %m
@ -21,8 +20,8 @@ define void @func_blend10(%T0_10* %loadaddr, %T0_10* %loadaddr2,
%v0 = load %T0_10, %T0_10* %loadaddr %v0 = load %T0_10, %T0_10* %loadaddr
%v1 = load %T0_10, %T0_10* %loadaddr2 %v1 = load %T0_10, %T0_10* %loadaddr2
%c = icmp slt %T0_10 %v0, %v1 %c = icmp slt %T0_10 %v0, %v1
; CHECK: vbsl ; CHECK: vmin.s16
; CHECK: vbsl ; CHECK: vmin.s16
; COST: func_blend10 ; COST: func_blend10
; COST: cost of 40 {{.*}} select ; COST: cost of 40 {{.*}} select
%r = select %T1_10 %c, %T0_10 %v0, %T0_10 %v1 %r = select %T1_10 %c, %T0_10 %v0, %T0_10 %v1
@ -37,8 +36,8 @@ define void @func_blend14(%T0_14* %loadaddr, %T0_14* %loadaddr2,
%v0 = load %T0_14, %T0_14* %loadaddr %v0 = load %T0_14, %T0_14* %loadaddr
%v1 = load %T0_14, %T0_14* %loadaddr2 %v1 = load %T0_14, %T0_14* %loadaddr2
%c = icmp slt %T0_14 %v0, %v1 %c = icmp slt %T0_14 %v0, %v1
; CHECK: vbsl ; CHECK: vmin.s32
; CHECK: vbsl ; CHECK: vmin.s32
; COST: func_blend14 ; COST: func_blend14
; COST: cost of 41 {{.*}} select ; COST: cost of 41 {{.*}} select
%r = select %T1_14 %c, %T0_14 %v0, %T0_14 %v1 %r = select %T1_14 %c, %T0_14 %v0, %T0_14 %v1
@ -50,8 +49,8 @@ define void @func_blend14(%T0_14* %loadaddr, %T0_14* %loadaddr2,
; CHECK-LABEL: func_blend15: ; CHECK-LABEL: func_blend15:
define void @func_blend15(%T0_15* %loadaddr, %T0_15* %loadaddr2, define void @func_blend15(%T0_15* %loadaddr, %T0_15* %loadaddr2,
%T1_15* %blend, %T0_15* %storeaddr) { %T1_15* %blend, %T0_15* %storeaddr) {
; CHECK: vbsl ; CHECK: vmin.s32
; CHECK: vbsl ; CHECK: vmin.s32
%v0 = load %T0_15, %T0_15* %loadaddr %v0 = load %T0_15, %T0_15* %loadaddr
%v1 = load %T0_15, %T0_15* %loadaddr2 %v1 = load %T0_15, %T0_15* %loadaddr2
%c = icmp slt %T0_15 %v0, %v1 %c = icmp slt %T0_15 %v0, %v1