[X86] Stop promoting vector ISD::SELECT to vXi64.

The additional patterns needed for this aren't overwhelming, and introducing extra bitcasts during lowering limits our ability to do computeNumSignBits. Not that I have a good example of that for select. I'm just becoming increasingly grumpy about promotion of AND/OR/XOR. SELECT was just a lot easier to fix.

llvm-svn: 343723
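
For illustration only (not part of the commit or its tests), a reduced example of the kind of select this change affects; the function name and the llc invocation are made up:

; A v4i32 select with a scalar condition. Compiled with something like
;   llc -mtriple=x86_64 -mattr=+sse2
; this is lowered to an X86ISD::CMOV of the vector value. Before this patch
; the v4i32 SELECT was promoted to v2i64, so both operands and the result were
; wrapped in bitcasts; now isel matches the v4i32 X86cmov directly through the
; CMOV_VR128 pseudo patterns added in the second file below.
define <4 x i32> @select_v4i32(i1 %c, <4 x i32> %a, <4 x i32> %b) {
  %r = select i1 %c, <4 x i32> %a, <4 x i32> %b
  ret <4 x i32> %r
}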
Craig Topper 2018-10-03 21:10:29 +00:00
parent f8c4f4e6e7
commit a65c2dbfd6
2 changed files with 41 additions and 3 deletions

lib/Target/X86/X86ISelLowering.cpp

@@ -876,12 +876,14 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationPromotedToType(ISD::OR, VT, MVT::v2i64);
setOperationPromotedToType(ISD::XOR, VT, MVT::v2i64);
setOperationPromotedToType(ISD::LOAD, VT, MVT::v2i64);
setOperationPromotedToType(ISD::SELECT, VT, MVT::v2i64);
}
// Custom lower v2i64 and v2f64 selects.
setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
setOperationAction(ISD::SELECT, MVT::v4i32, Custom);
setOperationAction(ISD::SELECT, MVT::v8i16, Custom);
setOperationAction(ISD::SELECT, MVT::v16i8, Custom);
setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
setOperationAction(ISD::FP_TO_SINT, MVT::v2i32, Custom);
@@ -1058,6 +1060,9 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
setOperationAction(ISD::SELECT, MVT::v8i32, Custom);
setOperationAction(ISD::SELECT, MVT::v16i16, Custom);
setOperationAction(ISD::SELECT, MVT::v32i8, Custom);
setOperationAction(ISD::SELECT, MVT::v8f32, Custom);
for (auto VT : { MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
@@ -1174,7 +1179,6 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationPromotedToType(ISD::OR, VT, MVT::v4i64);
setOperationPromotedToType(ISD::XOR, VT, MVT::v4i64);
setOperationPromotedToType(ISD::LOAD, VT, MVT::v4i64);
setOperationPromotedToType(ISD::SELECT, VT, MVT::v4i64);
}
if (HasInt256) {
@@ -1347,6 +1351,9 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::SELECT, MVT::v8f64, Custom);
setOperationAction(ISD::SELECT, MVT::v8i64, Custom);
setOperationAction(ISD::SELECT, MVT::v16i32, Custom);
setOperationAction(ISD::SELECT, MVT::v32i16, Custom);
setOperationAction(ISD::SELECT, MVT::v64i8, Custom);
setOperationAction(ISD::SELECT, MVT::v16f32, Custom);
for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
@@ -1421,7 +1428,6 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
}
for (auto VT : { MVT::v64i8, MVT::v32i16, MVT::v16i32 }) {
setOperationPromotedToType(ISD::LOAD, VT, MVT::v8i64);
setOperationPromotedToType(ISD::SELECT, VT, MVT::v8i64);
}
// Need to custom split v32i16/v64i8 bitcasts.

lib/Target/X86/X86InstrCompiler.td

@@ -611,26 +611,58 @@ def : Pat<(f128 (X86cmov VR128:$t, VR128:$f, imm:$cond, EFLAGS)),
(CMOV_VR128 VR128:$t, VR128:$f, imm:$cond)>;
let Predicates = [NoVLX] in {
def : Pat<(v16i8 (X86cmov VR128:$t, VR128:$f, imm:$cond, EFLAGS)),
(CMOV_VR128 VR128:$t, VR128:$f, imm:$cond)>;
def : Pat<(v8i16 (X86cmov VR128:$t, VR128:$f, imm:$cond, EFLAGS)),
(CMOV_VR128 VR128:$t, VR128:$f, imm:$cond)>;
def : Pat<(v4i32 (X86cmov VR128:$t, VR128:$f, imm:$cond, EFLAGS)),
(CMOV_VR128 VR128:$t, VR128:$f, imm:$cond)>;
def : Pat<(v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond, EFLAGS)),
(CMOV_VR128 VR128:$t, VR128:$f, imm:$cond)>;
def : Pat<(v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond, EFLAGS)),
(CMOV_VR128 VR128:$t, VR128:$f, imm:$cond)>;
def : Pat<(v32i8 (X86cmov VR256:$t, VR256:$f, imm:$cond, EFLAGS)),
(CMOV_VR256 VR256:$t, VR256:$f, imm:$cond)>;
def : Pat<(v16i16 (X86cmov VR256:$t, VR256:$f, imm:$cond, EFLAGS)),
(CMOV_VR256 VR256:$t, VR256:$f, imm:$cond)>;
def : Pat<(v8i32 (X86cmov VR256:$t, VR256:$f, imm:$cond, EFLAGS)),
(CMOV_VR256 VR256:$t, VR256:$f, imm:$cond)>;
def : Pat<(v8f32 (X86cmov VR256:$t, VR256:$f, imm:$cond, EFLAGS)),
(CMOV_VR256 VR256:$t, VR256:$f, imm:$cond)>;
def : Pat<(v4f64 (X86cmov VR256:$t, VR256:$f, imm:$cond, EFLAGS)),
(CMOV_VR256 VR256:$t, VR256:$f, imm:$cond)>;
}
let Predicates = [HasVLX] in {
def : Pat<(v16i8 (X86cmov VR128X:$t, VR128X:$f, imm:$cond, EFLAGS)),
(CMOV_VR128X VR128X:$t, VR128X:$f, imm:$cond)>;
def : Pat<(v8i16 (X86cmov VR128X:$t, VR128X:$f, imm:$cond, EFLAGS)),
(CMOV_VR128X VR128X:$t, VR128X:$f, imm:$cond)>;
def : Pat<(v4i32 (X86cmov VR128X:$t, VR128X:$f, imm:$cond, EFLAGS)),
(CMOV_VR128X VR128X:$t, VR128X:$f, imm:$cond)>;
def : Pat<(v4f32 (X86cmov VR128X:$t, VR128X:$f, imm:$cond, EFLAGS)),
(CMOV_VR128X VR128X:$t, VR128X:$f, imm:$cond)>;
def : Pat<(v2f64 (X86cmov VR128X:$t, VR128X:$f, imm:$cond, EFLAGS)),
(CMOV_VR128X VR128X:$t, VR128X:$f, imm:$cond)>;
def : Pat<(v32i8 (X86cmov VR256X:$t, VR256X:$f, imm:$cond, EFLAGS)),
(CMOV_VR256X VR256X:$t, VR256X:$f, imm:$cond)>;
def : Pat<(v16i16 (X86cmov VR256X:$t, VR256X:$f, imm:$cond, EFLAGS)),
(CMOV_VR256X VR256X:$t, VR256X:$f, imm:$cond)>;
def : Pat<(v8i32 (X86cmov VR256X:$t, VR256X:$f, imm:$cond, EFLAGS)),
(CMOV_VR256X VR256X:$t, VR256X:$f, imm:$cond)>;
def : Pat<(v8f32 (X86cmov VR256X:$t, VR256X:$f, imm:$cond, EFLAGS)),
(CMOV_VR256X VR256X:$t, VR256X:$f, imm:$cond)>;
def : Pat<(v4f64 (X86cmov VR256X:$t, VR256X:$f, imm:$cond, EFLAGS)),
(CMOV_VR256X VR256X:$t, VR256X:$f, imm:$cond)>;
}
def : Pat<(v64i8 (X86cmov VR512:$t, VR512:$f, imm:$cond, EFLAGS)),
(CMOV_VR512 VR512:$t, VR512:$f, imm:$cond)>;
def : Pat<(v32i16 (X86cmov VR512:$t, VR512:$f, imm:$cond, EFLAGS)),
(CMOV_VR512 VR512:$t, VR512:$f, imm:$cond)>;
def : Pat<(v16i32 (X86cmov VR512:$t, VR512:$f, imm:$cond, EFLAGS)),
(CMOV_VR512 VR512:$t, VR512:$f, imm:$cond)>;
def : Pat<(v16f32 (X86cmov VR512:$t, VR512:$f, imm:$cond, EFLAGS)),
(CMOV_VR512 VR512:$t, VR512:$f, imm:$cond)>;
def : Pat<(v8f64 (X86cmov VR512:$t, VR512:$f, imm:$cond, EFLAGS)),