[AVX512] Fix intrinsic cmp{sd|ss} lowering.

Differential Revision: http://reviews.llvm.org/D20615

llvm-svn: 270843
commit 8437bb70fd (parent dc6e951860)
Author: Igor Breger
Date:   2016-05-26 12:42:25 +00:00

2 changed files with 12 additions and 16 deletions
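
The scalar mask-compare intrinsics return their one-bit compare result widened to i8. Before this patch the widening was an ANY_EXTEND followed by SIGN_EXTEND_INREG from i1, which smears the mask bit into 0x00/0xFF; the patch switches to a plain ZERO_EXTEND, so the caller sees a 0/1 mask. The test updates below reflect this as a shlb $7/sarb $7 pair collapsing into a single andl $1. A minimal usage sketch of how C++ code reaches this lowering; it assumes clang's AVX-512 headers, with signatures as documented in Intel's intrinsics guide, and is illustrative rather than part of the patch:

// Hedged, illustrative example (not part of this patch): exercises the
// llvm.x86.avx512.mask.cmp.sd lowering changed below. Build with something
// like: clang++ -mavx512f cmp_sd_demo.cpp (file name hypothetical).
#include <immintrin.h>
#include <cstdio>

int main() {
  __m128d a = _mm_set_sd(2.0);
  __m128d b = _mm_set_sd(1.0);
  // Masked scalar compare with predicate 5 (_CMP_NLT_US), the same immediate
  // as the vcmpnltsd CHECK lines below. 2.0 is not-less-than 1.0, so the
  // mask bit is set.
  __mmask8 k = _mm_mask_cmp_sd_mask(0x1, a, b, _CMP_NLT_US);
  // After this fix the result is the zero-extended mask bit; the old
  // sign-extending lowering would have produced 0xFF here instead of 1.
  std::printf("k = %u\n", (unsigned)k);
  return 0;
}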

lib/Target/X86/X86ISelLowering.cpp

@@ -17455,9 +17455,7 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget &Subtarget
                                                                   MVT::i1),
                                             Subtarget, DAG);
-      return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::i8,
-                         DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i8, CmpMask),
-                         DAG.getValueType(MVT::i1));
+      return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i8, CmpMask);
    }
    case COMI: { // Comparison intrinsics
      ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
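
The hunk above is the whole functional change: the i1 compare mask was previously any-extended to i8 and then sign-extended in-register from bit 0, and is now simply zero-extended. A standalone sketch of the two extension semantics in plain C++ (function names invented for illustration); the first variant is exactly what the shlb $7/sarb $7 pair in the old CHECK lines computes, the second is the single andl $1 emitted after the fix:

#include <cassert>
#include <cstdint>

// Old lowering: ANY_EXTEND to i8, then SIGN_EXTEND_INREG from the i1 bit.
// On x86 this is shlb $7, %al; sarb $7, %al. A set mask bit becomes 0xFF.
uint8_t signExtendInRegI1(uint8_t anyExtended) {
  return static_cast<uint8_t>(static_cast<int8_t>(anyExtended << 7) >> 7);
}

// New lowering: ZERO_EXTEND of the i1 bit, i.e. andl $1, %eax.
// A set mask bit becomes 1.
uint8_t zeroExtendI1(uint8_t anyExtended) {
  return anyExtended & 1;
}

int main() {
  assert(signExtendInRegI1(1) == 0xFF); // old: bit smeared across the byte
  assert(zeroExtendI1(1) == 0x01);      // new: plain 0/1 mask
  assert(signExtendInRegI1(0) == 0x00 && zeroExtendI1(0) == 0x00);
  return 0;
}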

test/CodeGen/X86/avx512-intrinsics.ll

@@ -5659,8 +5659,7 @@ define i8@test_int_x86_avx512_mask_cmp_sd(<2 x double> %x0, <2 x double> %x1, i8
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vcmpnltsd {sae}, %xmm1, %xmm0, %k0 {%k1}
 ; CHECK-NEXT:    kmovw %k0, %eax
-; CHECK-NEXT:    shlb $7, %al
-; CHECK-NEXT:    sarb $7, %al
+; CHECK-NEXT:    andl $1, %eax
 ; CHECK-NEXT:    retq
   %res4 = call i8 @llvm.x86.avx512.mask.cmp.sd(<2 x double> %x0, <2 x double> %x1, i32 5, i8 %x3, i32 8)
@@ -5681,8 +5680,7 @@ define i8@test_int_x86_avx512_mask_cmp_sd_all(<2 x double> %x0, <2 x double> %x1
 ; CHECK-NEXT:    kandw %k2, %k1, %k1
 ; CHECK-NEXT:    korw %k1, %k0, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
-; CHECK-NEXT:    shlb $7, %al
-; CHECK-NEXT:    sarb $7, %al
+; CHECK-NEXT:    andl $1, %eax
 ; CHECK-NEXT:    retq
   %res1 = call i8 @llvm.x86.avx512.mask.cmp.sd(<2 x double> %x0, <2 x double> %x1, i32 2, i8 -1, i32 4)
@@ -5705,8 +5703,7 @@ define i8@test_int_x86_avx512_mask_cmp_ss(<4 x float> %x0, <4 x float> %x1, i8 %
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vcmpunordss %xmm1, %xmm0, %k0 {%k1}
 ; CHECK-NEXT:    kmovw %k0, %eax
-; CHECK-NEXT:    shlb $7, %al
-; CHECK-NEXT:    sarb $7, %al
+; CHECK-NEXT:    andl $1, %eax
 ; CHECK-NEXT:    retq
   %res2 = call i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float> %x0, <4 x float> %x1, i32 3, i8 %x3, i32 4)
@@ -5719,15 +5716,16 @@ define i8@test_int_x86_avx512_mask_cmp_ss_all(<4 x float> %x0, <4 x float> %x1,
 ; CHECK:       ## BB#0:
 ; CHECK-NEXT:    vcmpless %xmm1, %xmm0, %k1
 ; CHECK-NEXT:    vcmpunordss {sae}, %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT:    vcmpneqss %xmm1, %xmm0, %k1
-; CHECK-NEXT:    vcmpnltss {sae}, %xmm1, %xmm0, %k1 {%k1}
 ; CHECK-NEXT:    andl $1, %edi
-; CHECK-NEXT:    kmovw %edi, %k2
-; CHECK-NEXT:    kandw %k2, %k1, %k1
-; CHECK-NEXT:    kandw %k1, %k0, %k0
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vcmpneqss %xmm1, %xmm0, %k2 {%k1}
+; CHECK-NEXT:    kmovw %k2, %ecx
+; CHECK-NEXT:    vcmpnltss {sae}, %xmm1, %xmm0, %k1 {%k1}
+; CHECK-NEXT:    kmovw %k1, %edx
+; CHECK-NEXT:    andl $1, %edx
 ; CHECK-NEXT:    kmovw %k0, %eax
-; CHECK-NEXT:    shlb $7, %al
-; CHECK-NEXT:    sarb $7, %al
+; CHECK-NEXT:    andb %cl, %al
+; CHECK-NEXT:    andb %dl, %al
 ; CHECK-NEXT:    retq
   %res1 = call i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float> %x0, <4 x float> %x1, i32 2, i8 -1, i32 4)
   %res2 = call i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float> %x0, <4 x float> %x1, i32 3, i8 -1, i32 8)
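
For reference, the trailing i32 operand on these intrinsic calls is the SAE/rounding control: 4 is _MM_FROUND_CUR_DIRECTION and 8 is _MM_FROUND_NO_EXC, which shows up as {sae} in the CHECK lines. A hedged sketch of the unmasked, exception-suppressing form that %res2 uses (signature per Intel's intrinsics guide; illustrative only, not part of the patch):

#include <immintrin.h>
#include <cstdio>

int main() {
  __m128 a = _mm_set_ss(1.0f);
  __m128 b = _mm_set_ss(1.0f);
  // Mirrors %res2 above: predicate 3 = _CMP_UNORD_Q, mask i8 -1 = unmasked,
  // rounding operand 8 = _MM_FROUND_NO_EXC ({sae}).
  __mmask8 unord = _mm_cmp_round_ss_mask(a, b, _CMP_UNORD_Q, _MM_FROUND_NO_EXC);
  std::printf("unord = %u\n", (unsigned)unord); // 0: neither operand is NaN
  return 0;
}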