Revert mmx palignr to use an intrinsic, since mmx shuffle patterns are missing.

llvm-svn: 91269
commit 67dfd4236a (parent aaa6ac10a6)
Author: Nate Begeman
Date:   2009-12-14 05:15:02 +00:00

4 changed files with 10 additions and 3 deletions
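
PALIGNR concatenates its two source operands, first operand in the high
half, and extracts the 8-byte (64-bit form) or 16-byte (128-bit form)
window at a given byte offset; bytes taken from beyond the concatenation
are zero. As a reminder of the semantics the 64-bit builtin has to
match, here is a scalar reference model in C. This is an illustrative
sketch written for this note, not code from the patch, and it assumes
x86's little-endian byte order:

#include <stdint.h>
#include <string.h>

/* Reference model of 64-bit palignr: return the 8-byte window at byte
   offset n of the 16-byte concatenation a:b (b in the low half).
   Offsets of 16 or more yield zero. */
static uint64_t palignr64_ref(uint64_t a, uint64_t b, unsigned n) {
    unsigned char buf[24] = {0};   /* bytes 16..23 stay zero */
    memcpy(buf, &b, 8);            /* low half:  second operand */
    memcpy(buf + 8, &a, 8);        /* high half: first operand  */
    uint64_t r = 0;
    if (n < 16)
        memcpy(&r, buf + n, 8);    /* runs into the zero tail for n > 8 */
    return r;
}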

@@ -251,7 +251,7 @@ BUILTIN(__builtin_ia32_monitor, "vv*UiUi", "")
 BUILTIN(__builtin_ia32_mwait, "vUiUi", "")
 BUILTIN(__builtin_ia32_lddqu, "V16ccC*", "")
 BUILTIN(__builtin_ia32_palignr128, "V16cV16cV16cc", "")
-BUILTIN(__builtin_ia32_palignr, "V8cV8cV8cc", "")
+BUILTIN(__builtin_ia32_palignr, "V1LLiV1LLiV1LLic", "")
 BUILTIN(__builtin_ia32_insertps128, "V4fV4fV4fi", "")
 BUILTIN(__builtin_ia32_storelv4si, "vV2i*V2LLi", "")
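
The type-string change swaps the builtin's operands from vectors of
eight chars to single-element long long vectors, matching the <1 x i64>
operands of the llvm.x86.ssse3.palign.r intrinsic. Decoded into C
prototypes they read roughly as below; this is my reading of the
Builtins type letters, and the _old/_new names are purely illustrative:

typedef char v8qi __attribute__((vector_size(8)));
typedef long long v1di __attribute__((vector_size(8)));

v8qi palignr_old_sig(v8qi a, v8qi b, char n);  /* "V8cV8cV8cc"       */
v1di palignr_new_sig(v1di a, v1di b, char n);  /* "V1LLiV1LLiV1LLic" */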

@@ -805,8 +805,11 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
     Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
     return Builder.CreateStore(Ops[1], Ops[0]);
   }
-  case X86::BI__builtin_ia32_palignr128:
   case X86::BI__builtin_ia32_palignr: {
+    Function *F = CGM.getIntrinsic(Intrinsic::x86_ssse3_palign_r);
+    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size());
+  }
+  case X86::BI__builtin_ia32_palignr128: {
     unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
     // If palignr is shifting the pair of input vectors less than 17 bytes,
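
After this hunk the two builtins diverge: the 64-bit
__builtin_ia32_palignr is emitted as a direct call to the
llvm.x86.ssse3.palign.r intrinsic, while __builtin_ia32_palignr128 keeps
the shufflevector-based lowering that follows. A minimal translation
unit exercising both paths against a clang of this vintage might look
like the sketch below; the constant 56 follows the header change further
down, which passes the 64-bit shift count in bits:

typedef long long v1di __attribute__((vector_size(8)));
typedef char v16qi __attribute__((vector_size(16)));

v1di mmx_path(v1di a, v1di b) {
    /* Lowered to a call of the SSSE3 palign.r intrinsic; the shift
       operand is in bits on this path, so a 7-byte alignment is 56. */
    return __builtin_ia32_palignr(a, b, 56);
}

v16qi sse_path(v16qi a, v16qi b) {
    /* Still lowered through a shufflevector; the count is in bytes. */
    return __builtin_ia32_palignr128(a, b, 15);
}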

@@ -67,7 +67,7 @@ _mm_abs_epi32(__m128i a)
 }
 #define _mm_alignr_epi8(a, b, n) (__builtin_ia32_palignr128((a), (b), (n)))
-#define _mm_alignr_pi8(a, b, n) (__builtin_ia32_palignr((a), (b), (n)))
+#define _mm_alignr_pi8(a, b, n) (__builtin_ia32_palignr((a), (b), (n*8)))
 static inline __m128i __attribute__((__always_inline__, __nodebug__))
 _mm_hadd_epi16(__m128i a, __m128i b)
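
The n*8 in the rewritten macro is a unit conversion: _mm_alignr_pi8
takes its count in bytes, but the intrinsic the builtin now lowers to
expects bits. For example, with the post-patch header and SSSE3 enabled
(a usage sketch, not part of the patch):

#include <tmmintrin.h>

/* Selects bytes 7..14 of the concatenation hi:lo. The macro expands
   to __builtin_ia32_palignr(hi, lo, 7*8), i.e. a 56-bit shift. */
__m64 window7(__m64 hi, __m64 lo) {
    return _mm_alignr_pi8(hi, lo, 7);
}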

@@ -1,8 +1,12 @@
 // RUN: clang-cc %s -triple=i686-apple-darwin -target-feature +ssse3 -O1 -S -o - | FileCheck %s
 #define _mm_alignr_epi8(a, b, n) (__builtin_ia32_palignr128((a), (b), (n)))
+#define _mm_alignr_pi8(a, b, n) (__builtin_ia32_palignr((a), (b), (n*8)))
+typedef __attribute__((vector_size(8))) int int2;
 typedef __attribute__((vector_size(16))) int int4;
+// CHECK: palignr
+int2 mmx_align1(int2 a, int2 b) { return _mm_alignr_pi8(a, b, 7); }
 // CHECK: palignr
 int4 align1(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 15); }
 // CHECK: ret