[TargetLowering] Fix undef vector element issue with true/false result handling

Fixed an issue with how TargetLowering::isConstTrueVal / TargetLowering::isConstFalseVal match boolean results for vector types.

The comment said we shouldn't handle constant splat vectors with undef elements, but the actual code was returning false if the build vector contained no undef elements.

This patch now ignores the number of undefs (getConstantSplatNode will return null if the build vector is all undefs).
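
As an illustration only, here is a minimal standalone sketch in plain C++ (it does not use the LLVM API; BuildVector, getConstantSplat and hasUndef are invented for this example) of why the old condition rejected the common case of a fully-defined splat:

  #include <cassert>
  #include <optional>
  #include <vector>

  // Toy stand-in for a BUILD_VECTOR node: nullopt models an undef element.
  using BuildVector = std::vector<std::optional<int>>;

  // Returns the splat value if every defined element agrees (and at least one
  // element is defined) -- roughly what getConstantSplatNode reports.
  static std::optional<int> getConstantSplat(const BuildVector &BV) {
    std::optional<int> Splat;
    for (const auto &Elt : BV) {
      if (!Elt)
        continue;               // undef elements don't break the splat
      if (Splat && *Splat != *Elt)
        return std::nullopt;    // two different defined values: not a splat
      Splat = Elt;
    }
    return Splat;               // nullopt if all elements were undef
  }

  static bool hasUndef(const BuildVector &BV) {
    for (const auto &Elt : BV)
      if (!Elt)
        return true;
    return false;
  }

  int main() {
    BuildVector AllOnes = {-1, -1, -1, -1};                 // no undef elements
    BuildVector OnesWithUndef = {-1, std::nullopt, -1, -1};
    BuildVector AllUndef = {std::nullopt, std::nullopt};

    // Old condition: bail out when there is no splat *or* when there are no
    // undef elements -- which wrongly rejected the plain all-ones splat.
    bool OldAccepts = getConstantSplat(AllOnes).has_value() && hasUndef(AllOnes);
    assert(!OldAccepts && "old check would have rejected this fully-defined splat");

    // New condition: only require a constant splat; undef elements are fine,
    // and an all-undef vector simply has no splat value to report.
    assert(getConstantSplat(AllOnes).has_value());
    assert(getConstantSplat(OnesWithUndef).has_value());
    assert(!getConstantSplat(AllUndef).has_value());
    return 0;
  }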

The change has also unearthed a couple of missed opportunities in the AVX512 comparison code (the new vpcmpled/vpcmpleq + knotw sequences in the tests below should be single inverted compares) that will need to be addressed.

Differential Revision: https://reviews.llvm.org/D26031

llvm-svn: 286238
Simon Pilgrim 2016-11-08 15:07:01 +00:00
parent c0e47fbfb2
commit 778596bf59
4 changed files with 22 additions and 29 deletions


@@ -1302,11 +1302,11 @@ bool TargetLowering::isConstTrueVal(const SDNode *N) const {
     if (!BV)
       return false;
-    BitVector UndefElements;
-    CN = BV->getConstantSplatNode(&UndefElements);
-    // Only interested in constant splats, and we don't try to handle undef
-    // elements in identifying boolean constants.
-    if (!CN || UndefElements.none())
+    // Only interested in constant splats, we don't care about undef
+    // elements in identifying boolean constants and getConstantSplatNode
+    // returns NULL if all ops are undef;
+    CN = BV->getConstantSplatNode();
+    if (!CN)
       return false;
   }
@@ -1342,11 +1342,11 @@ bool TargetLowering::isConstFalseVal(const SDNode *N) const {
     if (!BV)
       return false;
-    BitVector UndefElements;
-    CN = BV->getConstantSplatNode(&UndefElements);
-    // Only interested in constant splats, and we don't try to handle undef
-    // elements in identifying boolean constants.
-    if (!CN || UndefElements.none())
+    // Only interested in constant splats, we don't care about undef
+    // elements in identifying boolean constants and getConstantSplatNode
+    // returns NULL if all ops are undef;
+    CN = BV->getConstantSplatNode();
+    if (!CN)
       return false;
   }


@@ -1400,19 +1400,14 @@ define i8 @trunc_8i16_to_8i1(<8 x i16> %a) {
 define <8 x i32> @sext_8i1_8i32(<8 x i32> %a1, <8 x i32> %a2) nounwind {
 ; KNL-LABEL: sext_8i1_8i32:
 ; KNL: ## BB#0:
-; KNL-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
-; KNL-NEXT: vpcmpgtd %zmm0, %zmm1, %k0
-; KNL-NEXT: knotw %k0, %k1
-; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: vpmovqd %zmm0, %ymm0
+; KNL-NEXT: vpcmpgtd %ymm0, %ymm1, %ymm0
+; KNL-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; KNL-NEXT: vpxor %ymm1, %ymm0, %ymm0
 ; KNL-NEXT: retq
 ;
 ; SKX-LABEL: sext_8i1_8i32:
 ; SKX: ## BB#0:
-; SKX-NEXT: vpcmpgtd %ymm0, %ymm1, %k0
-; SKX-NEXT: knotb %k0, %k0
+; SKX-NEXT: vpcmpled %ymm0, %ymm1, %k0
 ; SKX-NEXT: vpmovm2d %k0, %ymm0
 ; SKX-NEXT: retq
   %x = icmp slt <8 x i32> %a1, %a2


@@ -659,7 +659,8 @@ define <16 x i32> @test14(<16 x i32>%a, <16 x i32>%b) {
 ; CHECK-LABEL: test14:
 ; CHECK: ## BB#0:
 ; CHECK-NEXT: vpsubd %zmm1, %zmm0, %zmm1
-; CHECK-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
+; CHECK-NEXT: vpcmpled %zmm0, %zmm1, %k0
+; CHECK-NEXT: knotw %k0, %k1
 ; CHECK-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1} {z}
 ; CHECK-NEXT: retq
   %sub_r = sub <16 x i32> %a, %b
@@ -674,7 +675,8 @@ define <8 x i64> @test15(<8 x i64>%a, <8 x i64>%b) {
 ; CHECK-LABEL: test15:
 ; CHECK: ## BB#0:
 ; CHECK-NEXT: vpsubq %zmm1, %zmm0, %zmm1
-; CHECK-NEXT: vpcmpgtq %zmm0, %zmm1, %k1
+; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k0
+; CHECK-NEXT: knotw %k0, %k1
 ; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z}
 ; CHECK-NEXT: retq
   %sub_r = sub <8 x i64> %a, %b


@@ -48,10 +48,8 @@ define void @neg_masks(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwi
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT: vmovups (%ecx), %ymm0
-; X32-NEXT: vcmpltps (%eax), %ymm0, %ymm0
-; X32-NEXT: vmovaps {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1]
-; X32-NEXT: vxorps %ymm1, %ymm0, %ymm0
-; X32-NEXT: vandps %ymm1, %ymm0, %ymm0
+; X32-NEXT: vcmpnltps (%eax), %ymm0, %ymm0
+; X32-NEXT: vandps LCPI1_0, %ymm0, %ymm0
 ; X32-NEXT: vmovaps %ymm0, (%eax)
 ; X32-NEXT: vzeroupper
 ; X32-NEXT: retl
@@ -59,10 +57,8 @@ define void @neg_masks(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwi
 ; X64-LABEL: neg_masks:
 ; X64: ## BB#0:
 ; X64-NEXT: vmovups (%rsi), %ymm0
-; X64-NEXT: vcmpltps (%rdi), %ymm0, %ymm0
-; X64-NEXT: vmovaps {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1]
-; X64-NEXT: vxorps %ymm1, %ymm0, %ymm0
-; X64-NEXT: vandps %ymm1, %ymm0, %ymm0
+; X64-NEXT: vcmpnltps (%rdi), %ymm0, %ymm0
+; X64-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
 ; X64-NEXT: vmovaps %ymm0, (%rax)
 ; X64-NEXT: vzeroupper
 ; X64-NEXT: retq