[X86] Replace cvt*2mask intrinsics with native IR using 'icmp slt X, zeroinitializer'.

llvm-svn: 322038
Craig Topper 2018-01-08 22:37:56 +00:00
parent 53bf4ba69d
commit de91dff5d4
5 changed files with 54 additions and 16 deletions
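
The vpmov*2m instructions copy each element's sign bit into a mask register, and a signed less-than-zero compare expresses exactly the same operation in generic IR, so the opaque target-specific intrinsics can be dropped in favor of IR the middle end understands. A hand-written before/after sketch for the 512-bit byte variant (illustration only; the function name is made up, compile with -mavx512bw):

#include <immintrin.h>

__mmask64 movepi8_example(__m512i A) {
  // Before this patch the frontend emitted an opaque intrinsic call:
  //   %res = call i64 @llvm.x86.avx512.cvtb2mask.512(<64 x i8> %A)
  // After it, plain IR that the backend still matches to vpmovb2m:
  //   %cmp = icmp slt <64 x i8> %A, zeroinitializer
  //   %res = bitcast <64 x i1> %cmp to i64
  return _mm512_movepi8_mask(A);
}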

clang/lib/CodeGen/CGBuiltin.cpp

@@ -7791,7 +7791,9 @@ static Value *EmitX86Select(CodeGenFunction &CGF,
 }
 
 static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC,
-                                   bool Signed, SmallVectorImpl<Value *> &Ops) {
+                                   bool Signed, ArrayRef<Value *> Ops) {
+  assert((Ops.size() == 2 || Ops.size() == 4) &&
+         "Unexpected number of arguments");
   unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
   Value *Cmp;
@@ -7815,9 +7817,11 @@ static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC,
     Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]);
   }
 
-  const auto *C = dyn_cast<Constant>(Ops.back());
-  if (!C || !C->isAllOnesValue())
-    Cmp = CGF.Builder.CreateAnd(Cmp, getMaskVecValue(CGF, Ops.back(), NumElts));
+  if (Ops.size() == 4) {
+    const auto *C = dyn_cast<Constant>(Ops[3]);
+    if (!C || !C->isAllOnesValue())
+      Cmp = CGF.Builder.CreateAnd(Cmp, getMaskVecValue(CGF, Ops[3], NumElts));
+  }
 
   if (NumElts < 8) {
     uint32_t Indices[8];
@@ -7833,6 +7837,11 @@ static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC,
                                            std::max(NumElts, 8U)));
 }
 
+static Value *EmitX86ConvertToMask(CodeGenFunction &CGF, Value *In) {
+  Value *Zero = Constant::getNullValue(In->getType());
+  return EmitX86MaskedCompare(CGF, 1, true, { In, Zero });
+}
+
 static Value *EmitX86Abs(CodeGenFunction &CGF, ArrayRef<Value *> Ops) {
 
   llvm::Type *Ty = Ops[0]->getType();
@@ -8179,6 +8188,20 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
   case X86::BI__builtin_ia32_cvtmask2q512:
     return EmitX86SExtMask(*this, Ops[0], ConvertType(E->getType()));
 
+  case X86::BI__builtin_ia32_cvtb2mask128:
+  case X86::BI__builtin_ia32_cvtb2mask256:
+  case X86::BI__builtin_ia32_cvtb2mask512:
+  case X86::BI__builtin_ia32_cvtw2mask128:
+  case X86::BI__builtin_ia32_cvtw2mask256:
+  case X86::BI__builtin_ia32_cvtw2mask512:
+  case X86::BI__builtin_ia32_cvtd2mask128:
+  case X86::BI__builtin_ia32_cvtd2mask256:
+  case X86::BI__builtin_ia32_cvtd2mask512:
+  case X86::BI__builtin_ia32_cvtq2mask128:
+  case X86::BI__builtin_ia32_cvtq2mask256:
+  case X86::BI__builtin_ia32_cvtq2mask512:
+    return EmitX86ConvertToMask(*this, Ops[0]);
+
   case X86::BI__builtin_ia32_movdqa32store128_mask:
   case X86::BI__builtin_ia32_movdqa64store128_mask:
   case X86::BI__builtin_ia32_storeaps128_mask:
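
Two details of the CGBuiltin.cpp changes are easy to miss. First, the mask test moved from Ops.back() to an explicit Ops.size() == 4 check because EmitX86MaskedCompare is now also reached with just two operands (the value and a zero vector) via EmitX86ConvertToMask, and in that form the last operand is not a mask. Second, CC 1 with Signed == true maps to ICmpInst::ICMP_SLT inside the helper, which is how EmitX86ConvertToMask produces exactly 'icmp slt X, zeroinitializer'. The four-operand path still ANDs the compare result with the caller's mask; a hand-written sketch of a builtin taking that path (illustration only, assuming the masked integer compares route through this helper; compile with -mavx512f):

#include <immintrin.h>

__mmask16 masked_cmp_example(__mmask16 U, __m512i A, __m512i B) {
  // Roughly, for the four-operand form (LHS, RHS, CC, Mask):
  //   %cmp = icmp sgt <16 x i32> %A, %B
  //   %and = and <16 x i1> %cmp, %mask   ; skipped when Mask is all-ones
  //   %res = bitcast <16 x i1> %and to i16
  return _mm512_mask_cmpgt_epi32_mask(U, A, B);
}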

clang/test/CodeGen/avx512bw-builtins.c

@@ -1743,7 +1743,8 @@ __mmask32 test_mm512_mask_testn_epi16_mask(__mmask32 __U, __m512i __A, __m512i _
 
 __mmask64 test_mm512_movepi8_mask(__m512i __A) {
   // CHECK-LABEL: @test_mm512_movepi8_mask
-  // CHECK: @llvm.x86.avx512.cvtb2mask.512
+  // CHECK: [[CMP:%.*]] = icmp slt <64 x i8> %{{.*}}, zeroinitializer
+  // CHECK: bitcast <64 x i1> [[CMP]] to i64
   return _mm512_movepi8_mask(__A);
 }
@@ -1941,7 +1942,8 @@ __m512i test_mm512_sad_epu8(__m512i __A, __m512i __B) {
 
 __mmask32 test_mm512_movepi16_mask(__m512i __A) {
   // CHECK-LABEL: @test_mm512_movepi16_mask
-  // CHECK: @llvm.x86.avx512.cvtw2mask.512
+  // CHECK: [[CMP:%.*]] = icmp slt <32 x i16> %{{.*}}, zeroinitializer
+  // CHECK: bitcast <32 x i1> [[CMP]] to i32
   return _mm512_movepi16_mask(__A);
 }

clang/test/CodeGen/avx512dq-builtins.c

@@ -923,7 +923,8 @@ __m128d test_mm_maskz_reduce_round_sd(__mmask8 __U, __m128d __A, __m128d __B) {
 
 __mmask16 test_mm512_movepi32_mask(__m512i __A) {
   // CHECK-LABEL: @test_mm512_movepi32_mask
-  // CHECK: @llvm.x86.avx512.cvtd2mask.512
+  // CHECK: [[CMP:%.*]] = icmp slt <16 x i32> %{{.*}}, zeroinitializer
+  // CHECK: bitcast <16 x i1> [[CMP]] to i16
   return _mm512_movepi32_mask(__A);
 }
@@ -943,7 +944,8 @@ __m512i test_mm512_movm_epi64(__mmask8 __A) {
 
 __mmask8 test_mm512_movepi64_mask(__m512i __A) {
   // CHECK-LABEL: @test_mm512_movepi64_mask
-  // CHECK: @llvm.x86.avx512.cvtq2mask.512
+  // CHECK: [[CMP:%.*]] = icmp slt <8 x i64> %{{.*}}, zeroinitializer
+  // CHECK: bitcast <8 x i1> [[CMP]] to i8
   return _mm512_movepi64_mask(__A);
 }

clang/test/CodeGen/avx512vlbw-builtins.c

@@ -2601,13 +2601,15 @@ __mmask16 test_mm256_mask_testn_epi16_mask(__mmask16 __U, __m256i __A, __m256i _
 
 __mmask16 test_mm_movepi8_mask(__m128i __A) {
   // CHECK-LABEL: @test_mm_movepi8_mask
-  // CHECK: @llvm.x86.avx512.cvtb2mask.128
+  // CHECK: [[CMP:%.*]] = icmp slt <16 x i8> %{{.*}}, zeroinitializer
+  // CHECK: bitcast <16 x i1> [[CMP]] to i16
   return _mm_movepi8_mask(__A);
 }
 
 __mmask32 test_mm256_movepi8_mask(__m256i __A) {
   // CHECK-LABEL: @test_mm256_movepi8_mask
-  // CHECK: @llvm.x86.avx512.cvtb2mask.256
+  // CHECK: [[CMP:%.*]] = icmp slt <32 x i8> %{{.*}}, zeroinitializer
+  // CHECK: bitcast <32 x i1> [[CMP]] to i32
   return _mm256_movepi8_mask(__A);
 }
@@ -2985,13 +2987,15 @@ __m256i test_mm256_maskz_dbsad_epu8(__mmask16 __U, __m256i __A, __m256i __B) {
 }
 
 __mmask8 test_mm_movepi16_mask(__m128i __A) {
   // CHECK-LABEL: @test_mm_movepi16_mask
-  // CHECK: @llvm.x86.avx512.cvtw2mask.128
+  // CHECK: [[CMP:%.*]] = icmp slt <8 x i16> %{{.*}}, zeroinitializer
+  // CHECK: bitcast <8 x i1> [[CMP]] to i8
   return _mm_movepi16_mask(__A);
 }
 
 __mmask16 test_mm256_movepi16_mask(__m256i __A) {
   // CHECK-LABEL: @test_mm256_movepi16_mask
-  // CHECK: @llvm.x86.avx512.cvtw2mask.256
+  // CHECK: [[CMP:%.*]] = icmp slt <16 x i16> %{{.*}}, zeroinitializer
+  // CHECK: bitcast <16 x i1> [[CMP]] to i16
   return _mm256_movepi16_mask(__A);
 }
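
Note that none of the byte/word tests above check for a widening shuffle: even the smallest variant, <8 x i16> in a 128-bit vector, already has eight lanes, so the <8 x i1> compare result can be bitcast to __mmask8 directly. A hand-written sketch (illustration only; the function name is made up, compile with -mavx512bw -mavx512vl):

#include <immintrin.h>

__mmask8 movepi16_example(__m128i A) {
  // Eight lanes already, so no widening shuffle is needed:
  //   %cmp = icmp slt <8 x i16> %A, zeroinitializer
  //   %res = bitcast <8 x i1> %cmp to i8
  return _mm_movepi16_mask(A);
}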

clang/test/CodeGen/avx512vldq-builtins.c

@@ -853,13 +853,16 @@ __m256 test_mm256_maskz_reduce_ps(__mmask8 __U, __m256 __A) {
 
 __mmask8 test_mm_movepi32_mask(__m128i __A) {
   // CHECK-LABEL: @test_mm_movepi32_mask
-  // CHECK: @llvm.x86.avx512.cvtd2mask.128
+  // CHECK: [[CMP:%.*]] = icmp slt <4 x i32> %{{.*}}, zeroinitializer
+  // CHECK: [[SHUF:%.*]] = shufflevector <4 x i1> [[CMP]], <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  // CHECK: bitcast <8 x i1> [[SHUF]] to i8
   return _mm_movepi32_mask(__A);
 }
 
 __mmask8 test_mm256_movepi32_mask(__m256i __A) {
   // CHECK-LABEL: @test_mm256_movepi32_mask
-  // CHECK: @llvm.x86.avx512.cvtd2mask.256
+  // CHECK: [[CMP:%.*]] = icmp slt <8 x i32> %{{.*}}, zeroinitializer
+  // CHECK: bitcast <8 x i1> [[CMP]] to i8
   return _mm256_movepi32_mask(__A);
 }
@@ -896,13 +899,17 @@ __m256i test_mm256_movm_epi64(__mmask8 __A) {
 
 __mmask8 test_mm_movepi64_mask(__m128i __A) {
   // CHECK-LABEL: @test_mm_movepi64_mask
-  // CHECK: @llvm.x86.avx512.cvtq2mask.128
+  // CHECK: [[CMP:%.*]] = icmp slt <2 x i64> %{{.*}}, zeroinitializer
+  // CHECK: [[SHUF:%.*]] = shufflevector <2 x i1> [[CMP]], <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+  // CHECK: bitcast <8 x i1> [[SHUF]] to i8
   return _mm_movepi64_mask(__A);
 }
 
 __mmask8 test_mm256_movepi64_mask(__m256i __A) {
   // CHECK-LABEL: @test_mm256_movepi64_mask
-  // CHECK: @llvm.x86.avx512.cvtq2mask.256
+  // CHECK: [[CMP:%.*]] = icmp slt <4 x i64> %{{.*}}, zeroinitializer
+  // CHECK: [[SHUF:%.*]] = shufflevector <4 x i1> [[CMP]], <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  // CHECK: bitcast <8 x i1> [[SHUF]] to i8
   return _mm256_movepi64_mask(__A);
 }
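
The extra shufflevector checks in this file fall out of the NumElts < 8 widening in EmitX86MaskedCompare: the narrowest x86 mask type is 8 bits, so a <2 x i1> or <4 x i1> compare result is padded with zero lanes up to <8 x i1> before the bitcast, while the wider variants bitcast directly. A hand-written sketch of the two-element case (illustration only; the function name is made up, compile with -mavx512dq -mavx512vl):

#include <immintrin.h>

__mmask8 movepi64_example(__m128i A) {
  //   %cmp = icmp slt <2 x i64> %A, zeroinitializer
  //   ; shuffle indices 2 and 3 address the zeroinitializer operand,
  //   ; so lanes 2-7 of the widened vector are false
  //   %pad = shufflevector <2 x i1> %cmp, <2 x i1> zeroinitializer,
  //                        <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
  //   %res = bitcast <8 x i1> %pad to i8
  return _mm_movepi64_mask(A);
}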