[Clang][BuiltIn][avx512] Adding avx512 (shuf, sqrt{ss|sd}, rsqrt) builtins to clang

llvm-svn: 266048
This commit is contained in:
Michael Zuckerman 2016-04-12 07:59:39 +00:00
parent f59f2bb1b5
commit 04fb3bc682
5 changed files with 954 additions and 0 deletions

View File

@ -1942,6 +1942,26 @@ TARGET_BUILTIN(__builtin_ia32_pternlogq128_mask, "V2LLiV2LLiV2LLiV2LLiIiUc","","
TARGET_BUILTIN(__builtin_ia32_pternlogq128_maskz, "V2LLiV2LLiV2LLiV2LLiIiUc","","avx512vl")
TARGET_BUILTIN(__builtin_ia32_pternlogq256_mask, "V4LLiV4LLiV4LLiV4LLiIiUc","","avx512vl")
TARGET_BUILTIN(__builtin_ia32_pternlogq256_maskz, "V4LLiV4LLiV4LLiV4LLiIiUc","","avx512vl")
// 512-bit lane/element shuffles (AVX512F).  Type strings: result, two vector
// sources, Ii = immediate shuffle control, pass-through vector, mask.
TARGET_BUILTIN(__builtin_ia32_shuf_f32x4_mask, "V16fV16fV16fIiV16fUs","","avx512f")
TARGET_BUILTIN(__builtin_ia32_shuf_f64x2_mask, "V8dV8dV8dIiV8dUc","","avx512f")
TARGET_BUILTIN(__builtin_ia32_shuf_i32x4_mask, "V16iV16iV16iIiV16iUs","","avx512f")
TARGET_BUILTIN(__builtin_ia32_shuf_i64x2_mask, "V8LLiV8LLiV8LLiIiV8LLiUc","","avx512f")
TARGET_BUILTIN(__builtin_ia32_shufpd512_mask, "V8dV8dV8dIiV8dUc","","avx512f")
TARGET_BUILTIN(__builtin_ia32_shufps512_mask, "V16fV16fV16fIiV16fUs","","avx512f")
// 256/128-bit shuffle variants (AVX512VL).
TARGET_BUILTIN(__builtin_ia32_shuf_f32x4_256_mask, "V8fV8fV8fIiV8fUc","","avx512vl")
TARGET_BUILTIN(__builtin_ia32_shuf_f64x2_256_mask, "V4dV4dV4dIiV4dUc","","avx512vl")
TARGET_BUILTIN(__builtin_ia32_shuf_i32x4_256_mask, "V8iV8iV8iIiV8iUc","","avx512vl")
TARGET_BUILTIN(__builtin_ia32_shuf_i64x2_256_mask, "V4LLiV4LLiV4LLiIiV4LLiUc","","avx512vl")
TARGET_BUILTIN(__builtin_ia32_shufpd128_mask, "V2dV2dV2dIiV2dUc","","avx512vl")
TARGET_BUILTIN(__builtin_ia32_shufpd256_mask, "V4dV4dV4dIiV4dUc","","avx512vl")
TARGET_BUILTIN(__builtin_ia32_shufps128_mask, "V4fV4fV4fIiV4fUc","","avx512vl")
TARGET_BUILTIN(__builtin_ia32_shufps256_mask, "V8fV8fV8fIiV8fUc","","avx512vl")
// Masked scalar square root with explicit rounding (Ii = rounding mode).
TARGET_BUILTIN(__builtin_ia32_sqrtsd_round_mask, "V2dV2dV2dV2dUcIi","","avx512f")
TARGET_BUILTIN(__builtin_ia32_sqrtss_round_mask, "V4fV4fV4fV4fUcIi","","avx512f")
// RSQRT14 approximate reciprocal square root, 128/256-bit (AVX512VL).
TARGET_BUILTIN(__builtin_ia32_rsqrt14pd128_mask, "V2dV2dV2dUc","","avx512vl")
TARGET_BUILTIN(__builtin_ia32_rsqrt14pd256_mask, "V4dV4dV4dUc","","avx512vl")
TARGET_BUILTIN(__builtin_ia32_rsqrt14ps128_mask, "V4fV4fV4fUc","","avx512vl")
TARGET_BUILTIN(__builtin_ia32_rsqrt14ps256_mask, "V8fV8fV8fUc","","avx512vl")
#undef BUILTIN
#undef TARGET_BUILTIN

View File

@ -46,6 +46,96 @@ typedef unsigned short __mmask16;
#define _MM_FROUND_TO_ZERO 0x03
#define _MM_FROUND_CUR_DIRECTION 0x04
typedef enum
{
_MM_PERM_AAAA = 0x00, _MM_PERM_AAAB = 0x01, _MM_PERM_AAAC = 0x02,
_MM_PERM_AAAD = 0x03, _MM_PERM_AABA = 0x04, _MM_PERM_AABB = 0x05,
_MM_PERM_AABC = 0x06, _MM_PERM_AABD = 0x07, _MM_PERM_AACA = 0x08,
_MM_PERM_AACB = 0x09, _MM_PERM_AACC = 0x0A, _MM_PERM_AACD = 0x0B,
_MM_PERM_AADA = 0x0C, _MM_PERM_AADB = 0x0D, _MM_PERM_AADC = 0x0E,
_MM_PERM_AADD = 0x0F, _MM_PERM_ABAA = 0x10, _MM_PERM_ABAB = 0x11,
_MM_PERM_ABAC = 0x12, _MM_PERM_ABAD = 0x13, _MM_PERM_ABBA = 0x14,
_MM_PERM_ABBB = 0x15, _MM_PERM_ABBC = 0x16, _MM_PERM_ABBD = 0x17,
_MM_PERM_ABCA = 0x18, _MM_PERM_ABCB = 0x19, _MM_PERM_ABCC = 0x1A,
_MM_PERM_ABCD = 0x1B, _MM_PERM_ABDA = 0x1C, _MM_PERM_ABDB = 0x1D,
_MM_PERM_ABDC = 0x1E, _MM_PERM_ABDD = 0x1F, _MM_PERM_ACAA = 0x20,
_MM_PERM_ACAB = 0x21, _MM_PERM_ACAC = 0x22, _MM_PERM_ACAD = 0x23,
_MM_PERM_ACBA = 0x24, _MM_PERM_ACBB = 0x25, _MM_PERM_ACBC = 0x26,
_MM_PERM_ACBD = 0x27, _MM_PERM_ACCA = 0x28, _MM_PERM_ACCB = 0x29,
_MM_PERM_ACCC = 0x2A, _MM_PERM_ACCD = 0x2B, _MM_PERM_ACDA = 0x2C,
_MM_PERM_ACDB = 0x2D, _MM_PERM_ACDC = 0x2E, _MM_PERM_ACDD = 0x2F,
_MM_PERM_ADAA = 0x30, _MM_PERM_ADAB = 0x31, _MM_PERM_ADAC = 0x32,
_MM_PERM_ADAD = 0x33, _MM_PERM_ADBA = 0x34, _MM_PERM_ADBB = 0x35,
_MM_PERM_ADBC = 0x36, _MM_PERM_ADBD = 0x37, _MM_PERM_ADCA = 0x38,
_MM_PERM_ADCB = 0x39, _MM_PERM_ADCC = 0x3A, _MM_PERM_ADCD = 0x3B,
_MM_PERM_ADDA = 0x3C, _MM_PERM_ADDB = 0x3D, _MM_PERM_ADDC = 0x3E,
_MM_PERM_ADDD = 0x3F, _MM_PERM_BAAA = 0x40, _MM_PERM_BAAB = 0x41,
_MM_PERM_BAAC = 0x42, _MM_PERM_BAAD = 0x43, _MM_PERM_BABA = 0x44,
_MM_PERM_BABB = 0x45, _MM_PERM_BABC = 0x46, _MM_PERM_BABD = 0x47,
_MM_PERM_BACA = 0x48, _MM_PERM_BACB = 0x49, _MM_PERM_BACC = 0x4A,
_MM_PERM_BACD = 0x4B, _MM_PERM_BADA = 0x4C, _MM_PERM_BADB = 0x4D,
_MM_PERM_BADC = 0x4E, _MM_PERM_BADD = 0x4F, _MM_PERM_BBAA = 0x50,
_MM_PERM_BBAB = 0x51, _MM_PERM_BBAC = 0x52, _MM_PERM_BBAD = 0x53,
_MM_PERM_BBBA = 0x54, _MM_PERM_BBBB = 0x55, _MM_PERM_BBBC = 0x56,
_MM_PERM_BBBD = 0x57, _MM_PERM_BBCA = 0x58, _MM_PERM_BBCB = 0x59,
_MM_PERM_BBCC = 0x5A, _MM_PERM_BBCD = 0x5B, _MM_PERM_BBDA = 0x5C,
_MM_PERM_BBDB = 0x5D, _MM_PERM_BBDC = 0x5E, _MM_PERM_BBDD = 0x5F,
_MM_PERM_BCAA = 0x60, _MM_PERM_BCAB = 0x61, _MM_PERM_BCAC = 0x62,
_MM_PERM_BCAD = 0x63, _MM_PERM_BCBA = 0x64, _MM_PERM_BCBB = 0x65,
_MM_PERM_BCBC = 0x66, _MM_PERM_BCBD = 0x67, _MM_PERM_BCCA = 0x68,
_MM_PERM_BCCB = 0x69, _MM_PERM_BCCC = 0x6A, _MM_PERM_BCCD = 0x6B,
_MM_PERM_BCDA = 0x6C, _MM_PERM_BCDB = 0x6D, _MM_PERM_BCDC = 0x6E,
_MM_PERM_BCDD = 0x6F, _MM_PERM_BDAA = 0x70, _MM_PERM_BDAB = 0x71,
_MM_PERM_BDAC = 0x72, _MM_PERM_BDAD = 0x73, _MM_PERM_BDBA = 0x74,
_MM_PERM_BDBB = 0x75, _MM_PERM_BDBC = 0x76, _MM_PERM_BDBD = 0x77,
_MM_PERM_BDCA = 0x78, _MM_PERM_BDCB = 0x79, _MM_PERM_BDCC = 0x7A,
_MM_PERM_BDCD = 0x7B, _MM_PERM_BDDA = 0x7C, _MM_PERM_BDDB = 0x7D,
_MM_PERM_BDDC = 0x7E, _MM_PERM_BDDD = 0x7F, _MM_PERM_CAAA = 0x80,
_MM_PERM_CAAB = 0x81, _MM_PERM_CAAC = 0x82, _MM_PERM_CAAD = 0x83,
_MM_PERM_CABA = 0x84, _MM_PERM_CABB = 0x85, _MM_PERM_CABC = 0x86,
_MM_PERM_CABD = 0x87, _MM_PERM_CACA = 0x88, _MM_PERM_CACB = 0x89,
_MM_PERM_CACC = 0x8A, _MM_PERM_CACD = 0x8B, _MM_PERM_CADA = 0x8C,
_MM_PERM_CADB = 0x8D, _MM_PERM_CADC = 0x8E, _MM_PERM_CADD = 0x8F,
_MM_PERM_CBAA = 0x90, _MM_PERM_CBAB = 0x91, _MM_PERM_CBAC = 0x92,
_MM_PERM_CBAD = 0x93, _MM_PERM_CBBA = 0x94, _MM_PERM_CBBB = 0x95,
_MM_PERM_CBBC = 0x96, _MM_PERM_CBBD = 0x97, _MM_PERM_CBCA = 0x98,
_MM_PERM_CBCB = 0x99, _MM_PERM_CBCC = 0x9A, _MM_PERM_CBCD = 0x9B,
_MM_PERM_CBDA = 0x9C, _MM_PERM_CBDB = 0x9D, _MM_PERM_CBDC = 0x9E,
_MM_PERM_CBDD = 0x9F, _MM_PERM_CCAA = 0xA0, _MM_PERM_CCAB = 0xA1,
_MM_PERM_CCAC = 0xA2, _MM_PERM_CCAD = 0xA3, _MM_PERM_CCBA = 0xA4,
_MM_PERM_CCBB = 0xA5, _MM_PERM_CCBC = 0xA6, _MM_PERM_CCBD = 0xA7,
_MM_PERM_CCCA = 0xA8, _MM_PERM_CCCB = 0xA9, _MM_PERM_CCCC = 0xAA,
_MM_PERM_CCCD = 0xAB, _MM_PERM_CCDA = 0xAC, _MM_PERM_CCDB = 0xAD,
_MM_PERM_CCDC = 0xAE, _MM_PERM_CCDD = 0xAF, _MM_PERM_CDAA = 0xB0,
_MM_PERM_CDAB = 0xB1, _MM_PERM_CDAC = 0xB2, _MM_PERM_CDAD = 0xB3,
_MM_PERM_CDBA = 0xB4, _MM_PERM_CDBB = 0xB5, _MM_PERM_CDBC = 0xB6,
_MM_PERM_CDBD = 0xB7, _MM_PERM_CDCA = 0xB8, _MM_PERM_CDCB = 0xB9,
_MM_PERM_CDCC = 0xBA, _MM_PERM_CDCD = 0xBB, _MM_PERM_CDDA = 0xBC,
_MM_PERM_CDDB = 0xBD, _MM_PERM_CDDC = 0xBE, _MM_PERM_CDDD = 0xBF,
_MM_PERM_DAAA = 0xC0, _MM_PERM_DAAB = 0xC1, _MM_PERM_DAAC = 0xC2,
_MM_PERM_DAAD = 0xC3, _MM_PERM_DABA = 0xC4, _MM_PERM_DABB = 0xC5,
_MM_PERM_DABC = 0xC6, _MM_PERM_DABD = 0xC7, _MM_PERM_DACA = 0xC8,
_MM_PERM_DACB = 0xC9, _MM_PERM_DACC = 0xCA, _MM_PERM_DACD = 0xCB,
_MM_PERM_DADA = 0xCC, _MM_PERM_DADB = 0xCD, _MM_PERM_DADC = 0xCE,
_MM_PERM_DADD = 0xCF, _MM_PERM_DBAA = 0xD0, _MM_PERM_DBAB = 0xD1,
_MM_PERM_DBAC = 0xD2, _MM_PERM_DBAD = 0xD3, _MM_PERM_DBBA = 0xD4,
_MM_PERM_DBBB = 0xD5, _MM_PERM_DBBC = 0xD6, _MM_PERM_DBBD = 0xD7,
_MM_PERM_DBCA = 0xD8, _MM_PERM_DBCB = 0xD9, _MM_PERM_DBCC = 0xDA,
_MM_PERM_DBCD = 0xDB, _MM_PERM_DBDA = 0xDC, _MM_PERM_DBDB = 0xDD,
_MM_PERM_DBDC = 0xDE, _MM_PERM_DBDD = 0xDF, _MM_PERM_DCAA = 0xE0,
_MM_PERM_DCAB = 0xE1, _MM_PERM_DCAC = 0xE2, _MM_PERM_DCAD = 0xE3,
_MM_PERM_DCBA = 0xE4, _MM_PERM_DCBB = 0xE5, _MM_PERM_DCBC = 0xE6,
_MM_PERM_DCBD = 0xE7, _MM_PERM_DCCA = 0xE8, _MM_PERM_DCCB = 0xE9,
_MM_PERM_DCCC = 0xEA, _MM_PERM_DCCD = 0xEB, _MM_PERM_DCDA = 0xEC,
_MM_PERM_DCDB = 0xED, _MM_PERM_DCDC = 0xEE, _MM_PERM_DCDD = 0xEF,
_MM_PERM_DDAA = 0xF0, _MM_PERM_DDAB = 0xF1, _MM_PERM_DDAC = 0xF2,
_MM_PERM_DDAD = 0xF3, _MM_PERM_DDBA = 0xF4, _MM_PERM_DDBB = 0xF5,
_MM_PERM_DDBC = 0xF6, _MM_PERM_DDBD = 0xF7, _MM_PERM_DDCA = 0xF8,
_MM_PERM_DDCB = 0xF9, _MM_PERM_DDCC = 0xFA, _MM_PERM_DDCD = 0xFB,
_MM_PERM_DDDA = 0xFC, _MM_PERM_DDDB = 0xFD, _MM_PERM_DDDC = 0xFE,
_MM_PERM_DDDD = 0xFF
} _MM_PERM_ENUM;
typedef enum
{
_MM_MANT_NORM_1_2, /* interval [1, 2) */
@ -5306,6 +5396,228 @@ __builtin_ia32_psraqi512_mask ((__v8di)( __A),( __B),\
(__mmask8)( __U));\
})
#define _mm512_shuffle_f32x4( __A, __B, __imm) __extension__ ({ \
__builtin_ia32_shuf_f32x4_mask ((__v16sf)( __A),\
(__v16sf)( __B),( __imm),\
(__v16sf)\
_mm512_undefined_ps (),\
(__mmask16) -1);\
})
#define _mm512_mask_shuffle_f32x4( __W, __U, __A, __B, __imm) __extension__ ({ \
__builtin_ia32_shuf_f32x4_mask ((__v16sf)( __A),\
(__v16sf)( __B),( __imm),\
(__v16sf)( __W),\
(__mmask16)( __U));\
})
#define _mm512_maskz_shuffle_f32x4( __U, __A, __B, __imm) __extension__ ({ \
__builtin_ia32_shuf_f32x4_mask ((__v16sf)( __A),\
(__v16sf)( __B),( __imm),\
(__v16sf)\
_mm512_setzero_ps (),\
(__mmask16)( __U));\
})
#define _mm512_shuffle_f64x2( __A, __B, __imm) __extension__ ({ \
__builtin_ia32_shuf_f64x2_mask ((__v8df)( __A),\
(__v8df)( __B),( __imm),\
(__v8df)\
_mm512_undefined_pd (),\
(__mmask8) -1);\
})
#define _mm512_mask_shuffle_f64x2( __W, __U, __A, __B, __imm) __extension__ ({ \
__builtin_ia32_shuf_f64x2_mask ((__v8df)( __A),\
(__v8df)( __B),( __imm),\
(__v8df)( __W),\
(__mmask8)( __U));\
})
#define _mm512_maskz_shuffle_f64x2( __U, __A, __B, __imm) __extension__ ({ \
__builtin_ia32_shuf_f64x2_mask ((__v8df)( __A),\
(__v8df)( __B),( __imm),\
(__v8df)\
_mm512_setzero_pd (),\
(__mmask8)( __U));\
})
#define _mm512_shuffle_i32x4( __A, __B, __imm) __extension__ ({ \
__builtin_ia32_shuf_i32x4_mask ((__v16si)( __A),\
(__v16si)( __B),\
( __imm),\
(__v16si)\
_mm512_setzero_si512 (),\
(__mmask16) -1);\
})
#define _mm512_mask_shuffle_i32x4( __W, __U, __A, __B, __imm) __extension__ ({ \
__builtin_ia32_shuf_i32x4_mask ((__v16si)( __A),\
(__v16si)( __B),\
( __imm),\
(__v16si)( __W),\
(__mmask16)( __U));\
})
#define _mm512_maskz_shuffle_i32x4( __U, __A, __B, __imm) __extension__ ({ \
__builtin_ia32_shuf_i32x4_mask ((__v16si)( __A),\
(__v16si)( __B),\
( __imm),\
(__v16si)\
_mm512_setzero_si512 (),\
(__mmask16)( __U));\
})
#define _mm512_shuffle_i64x2( __A, __B, __imm) __extension__ ({ \
__builtin_ia32_shuf_i64x2_mask ((__v8di)( __A),\
(__v8di)( __B),( __imm),\
(__v8di)\
_mm512_setzero_si512 (),\
(__mmask8) -1);\
})
#define _mm512_mask_shuffle_i64x2( __W, __U, __A, __B, __imm) __extension__ ({ \
__builtin_ia32_shuf_i64x2_mask ((__v8di)( __A),\
(__v8di)( __B),( __imm),\
(__v8di)( __W),\
(__mmask8)( __U));\
})
#define _mm512_maskz_shuffle_i64x2( __U, __A, __B, __imm) __extension__ ({ \
__builtin_ia32_shuf_i64x2_mask ((__v8di)( __A),\
(__v8di)( __B),( __imm),\
(__v8di)\
_mm512_setzero_si512 (),\
(__mmask8)( __U));\
})
#define _mm512_shuffle_pd( __M, __V, __imm) __extension__ ({ \
__builtin_ia32_shufpd512_mask ((__v8df)( __M),\
(__v8df)( __V),( __imm),\
(__v8df)\
_mm512_undefined_pd (),\
(__mmask8) -1);\
})
#define _mm512_mask_shuffle_pd( __W, __U, __M, __V, __imm) __extension__ ({ \
__builtin_ia32_shufpd512_mask ((__v8df)( __M),\
(__v8df)( __V),( __imm),\
(__v8df)( __W),\
(__mmask8)( __U));\
})
#define _mm512_maskz_shuffle_pd( __U, __M, __V, __imm) __extension__ ({ \
__builtin_ia32_shufpd512_mask ((__v8df)( __M),\
(__v8df)( __V),( __imm),\
(__v8df)\
_mm512_setzero_pd (),\
(__mmask8)( __U));\
})
#define _mm512_shuffle_ps( __M, __V, __imm) __extension__ ({ \
__builtin_ia32_shufps512_mask ((__v16sf)( __M),\
(__v16sf)( __V),( __imm),\
(__v16sf)\
_mm512_undefined_ps (),\
(__mmask16) -1);\
})
#define _mm512_mask_shuffle_ps( __W, __U, __M, __V, __imm) __extension__ ({ \
__builtin_ia32_shufps512_mask ((__v16sf)( __M),\
(__v16sf)( __V),( __imm),\
(__v16sf)( __W),\
(__mmask16)( __U));\
})
#define _mm512_maskz_shuffle_ps( __U, __M, __V, __imm) __extension__ ({ \
__builtin_ia32_shufps512_mask ((__v16sf)( __M),\
(__v16sf)( __V),( __imm),\
(__v16sf)\
_mm512_setzero_ps (),\
(__mmask16)( __U));\
})
/* sqrt of the low double of __B, rounded per __R; __A is the pass-through
   source for the upper element.  Note the builtin takes (__B, __A, ...) —
   this operand order is shared by every sqrt ss/sd wrapper below.  */
#define _mm_sqrt_round_sd( __A, __B, __R) __extension__ ({ \
__builtin_ia32_sqrtsd_round_mask ((__v2df)( __B),\
(__v2df)( __A),(__v2df) _mm_setzero_pd(),\
(__mmask8) -1,\
( __R));\
})
/* Masked scalar sqrt: low result is sqrt(low of __B) when bit 0 of __U is
   set, otherwise taken from __W; uses the current rounding direction.  */
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_mask_sqrt_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
{
return (__m128d) __builtin_ia32_sqrtsd_round_mask ( (__v2df) __B,
(__v2df) __A,
(__v2df) __W,
(__mmask8) __U,
_MM_FROUND_CUR_DIRECTION);
}
/* Masked scalar sqrt with explicit rounding mode __R (see
   _mm_mask_sqrt_sd for the masking semantics).  */
#define _mm_mask_sqrt_round_sd( __W, __U, __A, __B, __R) __extension__ ({ \
__builtin_ia32_sqrtsd_round_mask ((__v2df)( __B),\
(__v2df)( __A),(__v2df) __W,\
(__mmask8) __U,\
( __R));\
})
/* Zero-masked scalar sqrt: low result is sqrt(low of __B) when bit 0 of
   __U is set, otherwise zero; uses the current rounding direction.  */
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_maskz_sqrt_sd (__mmask8 __U, __m128d __A, __m128d __B)
{
return (__m128d) __builtin_ia32_sqrtsd_round_mask ( (__v2df) __B,
(__v2df) __A,
(__v2df) _mm_setzero_pd (),
(__mmask8) __U,
_MM_FROUND_CUR_DIRECTION);
}
/* Zero-masked scalar sqrt with explicit rounding mode __R.  */
#define _mm_maskz_sqrt_round_sd( __U, __A, __B, __R) __extension__ ({ \
__builtin_ia32_sqrtsd_round_mask ((__v2df)( __B),\
(__v2df)( __A),(__v2df) _mm_setzero_pd(),\
(__mmask8) __U,\
( __R));\
})
/* sqrt of the low float of __B, rounded per __R; __A is the pass-through
   source for the upper elements (builtin operand order is (__B, __A, ...),
   matching the sd variants above).  */
#define _mm_sqrt_round_ss( __A, __B, __R) __extension__ ({ \
__builtin_ia32_sqrtss_round_mask ((__v4sf)( __B),\
(__v4sf)( __A),(__v4sf) _mm_setzero_ps(),\
(__mmask8) -1,\
( __R));\
})
/* Masked scalar sqrt (float): low result is sqrt(low of __B) when bit 0 of
   __U is set, otherwise taken from __W; current rounding direction.  */
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_mask_sqrt_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
{
return (__m128) __builtin_ia32_sqrtss_round_mask ( (__v4sf) __B,
(__v4sf) __A,
(__v4sf) __W,
(__mmask8) __U,
_MM_FROUND_CUR_DIRECTION);
}
/* Masked scalar sqrt (float) with explicit rounding mode __R.  */
#define _mm_mask_sqrt_round_ss( __W, __U, __A, __B, __R) __extension__ ({ \
__builtin_ia32_sqrtss_round_mask ((__v4sf)( __B),\
(__v4sf)( __A),(__v4sf) __W,\
(__mmask8) __U,\
( __R));\
})
/* Zero-masked scalar sqrt (float): low result is sqrt(low of __B) when bit
   0 of __U is set, otherwise zero; current rounding direction.
   FIX: the builtin's operands were swapped (__A, __B) here, unlike every
   other sqrt ss/sd wrapper in this file, which would take the square root
   of the wrong operand.  Pass (__B, __A) as the siblings do.  */
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_maskz_sqrt_ss (__mmask8 __U, __m128 __A, __m128 __B)
{
return (__m128) __builtin_ia32_sqrtss_round_mask ( (__v4sf) __B,
(__v4sf) __A,
(__v4sf) _mm_setzero_ps (),
(__mmask8) __U,
_MM_FROUND_CUR_DIRECTION);
}
/* Zero-masked scalar sqrt (float) with explicit rounding mode __R.
   FIX: parenthesize __R in the expansion like every sibling *_round_*
   macro, so a compound macro argument cannot change precedence.  */
#define _mm_maskz_sqrt_round_ss( __U, __A, __B, __R) __extension__ ({ \
__builtin_ia32_sqrtss_round_mask ((__v4sf)( __B),\
(__v4sf)( __A),(__v4sf) _mm_setzero_ps(),\
(__mmask8) __U,\
( __R));\
})
#undef __DEFAULT_FN_ATTRS

View File

@ -7496,6 +7496,275 @@ __builtin_ia32_pternlogq256_maskz ((__v4di)( __A),\
})
#define _mm256_shuffle_f32x4( __A, __B, __imm) __extension__ ({ \
__builtin_ia32_shuf_f32x4_256_mask ((__v8sf)( __A),\
(__v8sf)( __B),\
( __imm),\
(__v8sf)\
_mm256_setzero_ps (),\
(__mmask8) -1);\
})
#define _mm256_mask_shuffle_f32x4( __W, __U, __A, __B, __imm) __extension__ ({ \
__builtin_ia32_shuf_f32x4_256_mask ((__v8sf)( __A),\
(__v8sf)( __B),\
( __imm),\
(__v8sf)( __W),\
(__mmask8)( __U));\
})
#define _mm256_maskz_shuffle_f32x4( __U, __A, __B, __imm) __extension__ ({ \
__builtin_ia32_shuf_f32x4_256_mask ((__v8sf)( __A),\
(__v8sf)( __B),\
( __imm),\
(__v8sf)\
_mm256_setzero_ps (),\
(__mmask8)( __U));\
})
#define _mm256_shuffle_f64x2( __A, __B, __imm) __extension__ ({ \
__builtin_ia32_shuf_f64x2_256_mask ((__v4df)( __A),\
(__v4df)( __B),\
( __imm),\
(__v4df)\
_mm256_setzero_pd (),\
(__mmask8) -1);\
})
#define _mm256_mask_shuffle_f64x2( __W, __U, __A, __B, __imm) __extension__ ({ \
__builtin_ia32_shuf_f64x2_256_mask ((__v4df)( __A),\
(__v4df)( __B),\
( __imm),\
(__v4df)( __W),\
(__mmask8)( __U));\
})
#define _mm256_maskz_shuffle_f64x2( __U, __A, __B, __imm) __extension__ ({ \
__builtin_ia32_shuf_f64x2_256_mask ((__v4df)( __A),\
(__v4df)( __B),\
( __imm),\
(__v4df)\
_mm256_setzero_pd (),\
(__mmask8)( __U));\
})
#define _mm256_shuffle_i32x4( __A, __B, __imm) __extension__ ({ \
__builtin_ia32_shuf_i32x4_256_mask ((__v8si)( __A),\
(__v8si)( __B),\
( __imm),\
(__v8si)\
_mm256_setzero_si256 (),\
(__mmask8) -1);\
})
#define _mm256_mask_shuffle_i32x4( __W, __U, __A, __B, __imm) __extension__ ({ \
__builtin_ia32_shuf_i32x4_256_mask ((__v8si)( __A),\
(__v8si)( __B),\
( __imm),\
(__v8si)( __W),\
(__mmask8)( __U));\
})
#define _mm256_maskz_shuffle_i32x4( __U, __A, __B, __imm) __extension__ ({ \
__builtin_ia32_shuf_i32x4_256_mask ((__v8si)( __A),\
(__v8si)( __B),\
( __imm),\
(__v8si)\
_mm256_setzero_si256 (),\
(__mmask8)( __U));\
})
#define _mm256_shuffle_i64x2( __A, __B, __imm) __extension__ ({ \
__builtin_ia32_shuf_i64x2_256_mask ((__v4di)( __A),\
(__v4di)( __B),\
( __imm),\
(__v4di)\
_mm256_setzero_si256 (),\
(__mmask8) -1);\
})
#define _mm256_mask_shuffle_i64x2( __W, __U, __A, __B, __imm) __extension__ ({ \
__builtin_ia32_shuf_i64x2_256_mask ((__v4di)( __A),\
(__v4di)( __B),\
( __imm),\
(__v4di)( __W),\
(__mmask8)( __U));\
})
#define _mm256_maskz_shuffle_i64x2( __U, __A, __B, __imm) __extension__ ({ \
__builtin_ia32_shuf_i64x2_256_mask ((__v4di)( __A),\
(__v4di)( __B),\
( __imm),\
(__v4di)\
_mm256_setzero_si256 (),\
(__mmask8)( __U));\
})
#define _mm_mask_shuffle_pd( __W, __U, __A, __B, __imm) __extension__ ({ \
__builtin_ia32_shufpd128_mask ((__v2df)( __A),\
(__v2df)( __B),( __imm),\
(__v2df)( __W),\
(__mmask8)( __U));\
})
#define _mm_maskz_shuffle_pd( __U, __A, __B, __imm) __extension__ ({ \
__builtin_ia32_shufpd128_mask ((__v2df)( __A),\
(__v2df)( __B),( __imm),\
(__v2df)\
_mm_setzero_pd (),\
(__mmask8)( __U));\
})
#define _mm256_mask_shuffle_pd( __W, __U, __A, __B, __imm) __extension__ ({ \
__builtin_ia32_shufpd256_mask ((__v4df)( __A),\
(__v4df)( __B),( __imm),\
(__v4df)( __W),\
(__mmask8)( __U));\
})
#define _mm256_maskz_shuffle_pd( __U, __A, __B, __imm) __extension__ ({ \
__builtin_ia32_shufpd256_mask ((__v4df)( __A),\
(__v4df)( __B),( __imm),\
(__v4df)\
_mm256_setzero_pd (),\
(__mmask8)( __U));\
})
#define _mm_mask_shuffle_ps( __W, __U, __A, __B, __imm) __extension__ ({ \
__builtin_ia32_shufps128_mask ((__v4sf)( __A),\
(__v4sf)( __B),( __imm),\
(__v4sf)( __W),\
(__mmask8)( __U));\
})
#define _mm_maskz_shuffle_ps( __U, __A, __B, __imm) __extension__ ({ \
__builtin_ia32_shufps128_mask ((__v4sf)( __A),\
(__v4sf)( __B),( __imm),\
(__v4sf)\
_mm_setzero_ps (),\
(__mmask8)( __U));\
})
#define _mm256_mask_shuffle_ps( __W, __U, __A, __B, __imm) __extension__ ({ \
__builtin_ia32_shufps256_mask ((__v8sf)( __A),\
(__v8sf)( __B),( __imm),\
(__v8sf)( __W),\
(__mmask8)( __U));\
})
#define _mm256_maskz_shuffle_ps( __U, __A, __B, __imm) __extension__ ({ \
__builtin_ia32_shufps256_mask ((__v8sf)( __A),\
(__v8sf)( __B),( __imm),\
(__v8sf)\
_mm256_setzero_ps (),\
(__mmask8)( __U));\
})
/* Approximate reciprocal square root (RSQRT14) of each double in __A;
   unmasked (mask = all ones).  */
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_rsqrt14_pd (__m128d __A)
{
return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A,
(__v2df)
_mm_setzero_pd (),
(__mmask8) -1);
}
/* Masked RSQRT14: elements with a clear __U bit are taken from __W.  */
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_mask_rsqrt14_pd (__m128d __W, __mmask8 __U, __m128d __A)
{
return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A,
(__v2df) __W,
(__mmask8) __U);
}
/* Zero-masked RSQRT14: elements with a clear __U bit are zeroed.  */
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_maskz_rsqrt14_pd (__mmask8 __U, __m128d __A)
{
return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A,
(__v2df)
_mm_setzero_pd (),
(__mmask8) __U);
}
/* 256-bit approximate reciprocal square root (RSQRT14), unmasked.  */
static __inline__ __m256d __DEFAULT_FN_ATTRS
_mm256_rsqrt14_pd (__m256d __A)
{
return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A,
(__v4df)
_mm256_setzero_pd (),
(__mmask8) -1);
}
/* Masked 256-bit RSQRT14: unselected elements are taken from __W.  */
static __inline__ __m256d __DEFAULT_FN_ATTRS
_mm256_mask_rsqrt14_pd (__m256d __W, __mmask8 __U, __m256d __A)
{
return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A,
(__v4df) __W,
(__mmask8) __U);
}
/* Zero-masked 256-bit RSQRT14: unselected elements are zeroed.  */
static __inline__ __m256d __DEFAULT_FN_ATTRS
_mm256_maskz_rsqrt14_pd (__mmask8 __U, __m256d __A)
{
return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A,
(__v4df)
_mm256_setzero_pd (),
(__mmask8) __U);
}
/* Approximate reciprocal square root (RSQRT14) of each float, unmasked.  */
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_rsqrt14_ps (__m128 __A)
{
return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A,
(__v4sf)
_mm_setzero_ps (),
(__mmask8) -1);
}
/* Masked RSQRT14 (float): unselected elements are taken from __W.  */
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_mask_rsqrt14_ps (__m128 __W, __mmask8 __U, __m128 __A)
{
return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A,
(__v4sf) __W,
(__mmask8) __U);
}
/* Zero-masked RSQRT14 (float): unselected elements are zeroed.  */
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_maskz_rsqrt14_ps (__mmask8 __U, __m128 __A)
{
return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A,
(__v4sf)
_mm_setzero_ps (),
(__mmask8) __U);
}
/* 256-bit approximate reciprocal square root (RSQRT14, float), unmasked.  */
static __inline__ __m256 __DEFAULT_FN_ATTRS
_mm256_rsqrt14_ps (__m256 __A)
{
return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A,
(__v8sf)
_mm256_setzero_ps (),
(__mmask8) -1);
}
/* Masked 256-bit RSQRT14 (float): unselected elements come from __W.  */
static __inline__ __m256 __DEFAULT_FN_ATTRS
_mm256_mask_rsqrt14_ps (__m256 __W, __mmask8 __U, __m256 __A)
{
return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A,
(__v8sf) __W,
(__mmask8) __U);
}
/* Zero-masked 256-bit RSQRT14 (float): unselected elements are zeroed.  */
static __inline__ __m256 __DEFAULT_FN_ATTRS
_mm256_maskz_rsqrt14_ps (__mmask8 __U, __m256 __A)
{
return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A,
(__v8sf)
_mm256_setzero_ps (),
(__mmask8) __U);
}
#undef __DEFAULT_FN_ATTRS
#undef __DEFAULT_FN_ATTRS_BOTH

View File

@ -3539,3 +3539,165 @@ __m512i test_mm512_maskz_ternarylogic_epi64(__mmask8 __U, __m512i __A, __m512i _
// CHECK: @llvm.x86.avx512.maskz.pternlog.q.512
return _mm512_maskz_ternarylogic_epi64(__U, __A, __B, __C, 4);
}
__m512 test_mm512_shuffle_f32x4(__m512 __A, __m512 __B) {
// CHECK-LABEL: @test_mm512_shuffle_f32x4
// CHECK: @llvm.x86.avx512.mask.shuf.f32x4
return _mm512_shuffle_f32x4(__A, __B, 4);
}
__m512 test_mm512_mask_shuffle_f32x4(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
// CHECK-LABEL: @test_mm512_mask_shuffle_f32x4
// CHECK: @llvm.x86.avx512.mask.shuf.f32x4
return _mm512_mask_shuffle_f32x4(__W, __U, __A, __B, 4);
}
__m512 test_mm512_maskz_shuffle_f32x4(__mmask16 __U, __m512 __A, __m512 __B) {
// CHECK-LABEL: @test_mm512_maskz_shuffle_f32x4
// CHECK: @llvm.x86.avx512.mask.shuf.f32x4
return _mm512_maskz_shuffle_f32x4(__U, __A, __B, 4);
}
__m512d test_mm512_shuffle_f64x2(__m512d __A, __m512d __B) {
// CHECK-LABEL: @test_mm512_shuffle_f64x2
// CHECK: @llvm.x86.avx512.mask.shuf.f64x2
return _mm512_shuffle_f64x2(__A, __B, 4);
}
__m512d test_mm512_mask_shuffle_f64x2(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
// CHECK-LABEL: @test_mm512_mask_shuffle_f64x2
// CHECK: @llvm.x86.avx512.mask.shuf.f64x2
return _mm512_mask_shuffle_f64x2(__W, __U, __A, __B, 4);
}
__m512d test_mm512_maskz_shuffle_f64x2(__mmask8 __U, __m512d __A, __m512d __B) {
// CHECK-LABEL: @test_mm512_maskz_shuffle_f64x2
// CHECK: @llvm.x86.avx512.mask.shuf.f64x2
return _mm512_maskz_shuffle_f64x2(__U, __A, __B, 4);
}
__m512i test_mm512_shuffle_i32x4(__m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_shuffle_i32x4
// CHECK: @llvm.x86.avx512.mask.shuf.i32x4
return _mm512_shuffle_i32x4(__A, __B, 4);
}
__m512i test_mm512_mask_shuffle_i32x4(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_mask_shuffle_i32x4
// CHECK: @llvm.x86.avx512.mask.shuf.i32x4
return _mm512_mask_shuffle_i32x4(__W, __U, __A, __B, 4);
}
__m512i test_mm512_maskz_shuffle_i32x4(__mmask16 __U, __m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_maskz_shuffle_i32x4
// CHECK: @llvm.x86.avx512.mask.shuf.i32x4
return _mm512_maskz_shuffle_i32x4(__U, __A, __B, 4);
}
__m512i test_mm512_shuffle_i64x2(__m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_shuffle_i64x2
// CHECK: @llvm.x86.avx512.mask.shuf.i64x2
return _mm512_shuffle_i64x2(__A, __B, 4);
}
__m512i test_mm512_mask_shuffle_i64x2(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_mask_shuffle_i64x2
// CHECK: @llvm.x86.avx512.mask.shuf.i64x2
return _mm512_mask_shuffle_i64x2(__W, __U, __A, __B, 4);
}
__m512i test_mm512_maskz_shuffle_i64x2(__mmask8 __U, __m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_maskz_shuffle_i64x2
// CHECK: @llvm.x86.avx512.mask.shuf.i64x2
return _mm512_maskz_shuffle_i64x2(__U, __A, __B, 4);
}
__m512d test_mm512_shuffle_pd(__m512d __M, __m512d __V) {
// CHECK-LABEL: @test_mm512_shuffle_pd
// CHECK: @llvm.x86.avx512.mask.shuf.pd.512
return _mm512_shuffle_pd(__M, __V, 4);
}
__m512d test_mm512_mask_shuffle_pd(__m512d __W, __mmask8 __U, __m512d __M, __m512d __V) {
// CHECK-LABEL: @test_mm512_mask_shuffle_pd
// CHECK: @llvm.x86.avx512.mask.shuf.pd.512
return _mm512_mask_shuffle_pd(__W, __U, __M, __V, 4);
}
__m512d test_mm512_maskz_shuffle_pd(__mmask8 __U, __m512d __M, __m512d __V) {
// CHECK-LABEL: @test_mm512_maskz_shuffle_pd
// CHECK: @llvm.x86.avx512.mask.shuf.pd.512
return _mm512_maskz_shuffle_pd(__U, __M, __V, 4);
}
__m512 test_mm512_shuffle_ps(__m512 __M, __m512 __V) {
// CHECK-LABEL: @test_mm512_shuffle_ps
// CHECK: @llvm.x86.avx512.mask.shuf.ps.512
return _mm512_shuffle_ps(__M, __V, 4);
}
__m512 test_mm512_mask_shuffle_ps(__m512 __W, __mmask16 __U, __m512 __M, __m512 __V) {
// CHECK-LABEL: @test_mm512_mask_shuffle_ps
// CHECK: @llvm.x86.avx512.mask.shuf.ps.512
return _mm512_mask_shuffle_ps(__W, __U, __M, __V, 4);
}
__m512 test_mm512_maskz_shuffle_ps(__mmask16 __U, __m512 __M, __m512 __V) {
// CHECK-LABEL: @test_mm512_maskz_shuffle_ps
// CHECK: @llvm.x86.avx512.mask.shuf.ps.512
return _mm512_maskz_shuffle_ps(__U, __M, __V, 4);
}
__m128d test_mm_sqrt_round_sd(__m128d __A, __m128d __B) {
// CHECK-LABEL: @test_mm_sqrt_round_sd
// CHECK: @llvm.x86.avx512.mask.sqrt.sd
return _mm_sqrt_round_sd(__A, __B, 4);
}
__m128d test_mm_mask_sqrt_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B){
// CHECK-LABEL: @test_mm_mask_sqrt_sd
// CHECK: @llvm.x86.avx512.mask.sqrt.sd
return _mm_mask_sqrt_sd(__W,__U,__A,__B);
}
__m128d test_mm_mask_sqrt_round_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B){
// CHECK-LABEL: @test_mm_mask_sqrt_round_sd
// CHECK: @llvm.x86.avx512.mask.sqrt.sd
return _mm_mask_sqrt_round_sd(__W,__U,__A,__B,_MM_FROUND_CUR_DIRECTION);
}
__m128d test_mm_maskz_sqrt_sd(__mmask8 __U, __m128d __A, __m128d __B){
// CHECK-LABEL: @test_mm_maskz_sqrt_sd
// CHECK: @llvm.x86.avx512.mask.sqrt.sd
return _mm_maskz_sqrt_sd(__U,__A,__B);
}
__m128d test_mm_maskz_sqrt_round_sd(__mmask8 __U, __m128d __A, __m128d __B){
// CHECK-LABEL: @test_mm_maskz_sqrt_round_sd
// CHECK: @llvm.x86.avx512.mask.sqrt.sd
return _mm_maskz_sqrt_round_sd(__U,__A,__B,_MM_FROUND_CUR_DIRECTION);
}
__m128 test_mm_sqrt_round_ss(__m128 __A, __m128 __B) {
// CHECK-LABEL: @test_mm_sqrt_round_ss
// CHECK: @llvm.x86.avx512.mask.sqrt.ss
return _mm_sqrt_round_ss(__A, __B, 4);
}
__m128 test_mm_mask_sqrt_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B){
// CHECK-LABEL: @test_mm_mask_sqrt_ss
// CHECK: @llvm.x86.avx512.mask.sqrt.ss
return _mm_mask_sqrt_ss(__W,__U,__A,__B);
}
__m128 test_mm_mask_sqrt_round_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B){
// CHECK-LABEL: @test_mm_mask_sqrt_round_ss
// CHECK: @llvm.x86.avx512.mask.sqrt.ss
return _mm_mask_sqrt_round_ss(__W,__U,__A,__B,_MM_FROUND_CUR_DIRECTION);
}
__m128 test_mm_maskz_sqrt_ss(__mmask8 __U, __m128 __A, __m128 __B){
// CHECK-LABEL: @test_mm_maskz_sqrt_ss
// CHECK: @llvm.x86.avx512.mask.sqrt.ss
return _mm_maskz_sqrt_ss(__U,__A,__B);
}
__m128 test_mm_maskz_sqrt_round_ss(__mmask8 __U, __m128 __A, __m128 __B){
// CHECK-LABEL: @test_mm_maskz_sqrt_round_ss
// CHECK: @llvm.x86.avx512.mask.sqrt.ss
return _mm_maskz_sqrt_round_ss(__U,__A,__B,_MM_FROUND_CUR_DIRECTION);
}

View File

@ -5087,3 +5087,194 @@ __m256i test_mm256_maskz_ternarylogic_epi64(__mmask8 __U, __m256i __A, __m256i _
// CHECK: @llvm.x86.avx512.maskz.pternlog.q.256
return _mm256_maskz_ternarylogic_epi64(__U, __A, __B, __C, 4);
}
__m256 test_mm256_shuffle_f32x4(__m256 __A, __m256 __B) {
// CHECK-LABEL: @test_mm256_shuffle_f32x4
// CHECK: @llvm.x86.avx512.mask.shuf.f32x4
return _mm256_shuffle_f32x4(__A, __B, 4);
}
__m256 test_mm256_mask_shuffle_f32x4(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
// CHECK-LABEL: @test_mm256_mask_shuffle_f32x4
// CHECK: @llvm.x86.avx512.mask.shuf.f32x4
return _mm256_mask_shuffle_f32x4(__W, __U, __A, __B, 4);
}
__m256 test_mm256_maskz_shuffle_f32x4(__mmask8 __U, __m256 __A, __m256 __B) {
// CHECK-LABEL: @test_mm256_maskz_shuffle_f32x4
// CHECK: @llvm.x86.avx512.mask.shuf.f32x4
return _mm256_maskz_shuffle_f32x4(__U, __A, __B, 4);
}
__m256d test_mm256_shuffle_f64x2(__m256d __A, __m256d __B) {
// CHECK-LABEL: @test_mm256_shuffle_f64x2
// CHECK: @llvm.x86.avx512.mask.shuf.f64x2
return _mm256_shuffle_f64x2(__A, __B, 4);
}
__m256d test_mm256_mask_shuffle_f64x2(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
// CHECK-LABEL: @test_mm256_mask_shuffle_f64x2
// CHECK: @llvm.x86.avx512.mask.shuf.f64x2
return _mm256_mask_shuffle_f64x2(__W, __U, __A, __B, 4);
}
__m256d test_mm256_maskz_shuffle_f64x2(__mmask8 __U, __m256d __A, __m256d __B) {
// CHECK-LABEL: @test_mm256_maskz_shuffle_f64x2
// CHECK: @llvm.x86.avx512.mask.shuf.f64x2
return _mm256_maskz_shuffle_f64x2(__U, __A, __B, 4);
}
__m256i test_mm256_shuffle_i32x4(__m256i __A, __m256i __B) {
// CHECK-LABEL: @test_mm256_shuffle_i32x4
// CHECK: @llvm.x86.avx512.mask.shuf.i32x4
return _mm256_shuffle_i32x4(__A, __B, 4);
}
__m256i test_mm256_mask_shuffle_i32x4(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
// CHECK-LABEL: @test_mm256_mask_shuffle_i32x4
// CHECK: @llvm.x86.avx512.mask.shuf.i32x4
return _mm256_mask_shuffle_i32x4(__W, __U, __A, __B, 4);
}
__m256i test_mm256_maskz_shuffle_i32x4(__mmask8 __U, __m256i __A, __m256i __B) {
// CHECK-LABEL: @test_mm256_maskz_shuffle_i32x4
// CHECK: @llvm.x86.avx512.mask.shuf.i32x4
return _mm256_maskz_shuffle_i32x4(__U, __A, __B, 4);
}
__m256i test_mm256_shuffle_i64x2(__m256i __A, __m256i __B) {
// CHECK-LABEL: @test_mm256_shuffle_i64x2
// CHECK: @llvm.x86.avx512.mask.shuf.i64x2
return _mm256_shuffle_i64x2(__A, __B, 4);
}
__m256i test_mm256_mask_shuffle_i64x2(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
// CHECK-LABEL: @test_mm256_mask_shuffle_i64x2
// CHECK: @llvm.x86.avx512.mask.shuf.i64x2
return _mm256_mask_shuffle_i64x2(__W, __U, __A, __B, 4);
}
__m256i test_mm256_maskz_shuffle_i64x2(__mmask8 __U, __m256i __A, __m256i __B) {
// CHECK-LABEL: @test_mm256_maskz_shuffle_i64x2
// CHECK: @llvm.x86.avx512.mask.shuf.i64x2
return _mm256_maskz_shuffle_i64x2(__U, __A, __B, 4);
}
__m128d test_mm_mask_shuffle_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
// CHECK-LABEL: @test_mm_mask_shuffle_pd
// CHECK: @llvm.x86.avx512.mask.shuf.pd.128
return _mm_mask_shuffle_pd(__W, __U, __A, __B, 4);
}
__m128d test_mm_maskz_shuffle_pd(__mmask8 __U, __m128d __A, __m128d __B) {
// CHECK-LABEL: @test_mm_maskz_shuffle_pd
// CHECK: @llvm.x86.avx512.mask.shuf.pd.128
return _mm_maskz_shuffle_pd(__U, __A, __B, 4);
}
__m256d test_mm256_mask_shuffle_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
// CHECK-LABEL: @test_mm256_mask_shuffle_pd
// CHECK: @llvm.x86.avx512.mask.shuf.pd.256
return _mm256_mask_shuffle_pd(__W, __U, __A, __B, 4);
}
__m256d test_mm256_maskz_shuffle_pd(__mmask8 __U, __m256d __A, __m256d __B) {
// CHECK-LABEL: @test_mm256_maskz_shuffle_pd
// CHECK: @llvm.x86.avx512.mask.shuf.pd.256
return _mm256_maskz_shuffle_pd(__U, __A, __B, 4);
}
__m128 test_mm_mask_shuffle_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
// CHECK-LABEL: @test_mm_mask_shuffle_ps
// CHECK: @llvm.x86.avx512.mask.shuf.ps.128
return _mm_mask_shuffle_ps(__W, __U, __A, __B, 4);
}
__m128 test_mm_maskz_shuffle_ps(__mmask8 __U, __m128 __A, __m128 __B) {
// CHECK-LABEL: @test_mm_maskz_shuffle_ps
// CHECK: @llvm.x86.avx512.mask.shuf.ps.128
return _mm_maskz_shuffle_ps(__U, __A, __B, 4);
}
__m256 test_mm256_mask_shuffle_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
// CHECK-LABEL: @test_mm256_mask_shuffle_ps
// CHECK: @llvm.x86.avx512.mask.shuf.ps.256
return _mm256_mask_shuffle_ps(__W, __U, __A, __B, 4);
}
__m256 test_mm256_maskz_shuffle_ps(__mmask8 __U, __m256 __A, __m256 __B) {
// CHECK-LABEL: @test_mm256_maskz_shuffle_ps
// CHECK: @llvm.x86.avx512.mask.shuf.ps.256
return _mm256_maskz_shuffle_ps(__U, __A, __B, 4);
}
__m128d test_mm_rsqrt14_pd(__m128d __A) {
// CHECK-LABEL: @test_mm_rsqrt14_pd
// CHECK: @llvm.x86.avx512.rsqrt14.pd.128
return _mm_rsqrt14_pd(__A);
}
__m128d test_mm_mask_rsqrt14_pd(__m128d __W, __mmask8 __U, __m128d __A) {
// CHECK-LABEL: @test_mm_mask_rsqrt14_pd
// CHECK: @llvm.x86.avx512.rsqrt14.pd.128
return _mm_mask_rsqrt14_pd(__W, __U, __A);
}
__m128d test_mm_maskz_rsqrt14_pd(__mmask8 __U, __m128d __A) {
// CHECK-LABEL: @test_mm_maskz_rsqrt14_pd
// CHECK: @llvm.x86.avx512.rsqrt14.pd.128
return _mm_maskz_rsqrt14_pd(__U, __A);
}
__m256d test_mm256_rsqrt14_pd(__m256d __A) {
// CHECK-LABEL: @test_mm256_rsqrt14_pd
// CHECK: @llvm.x86.avx512.rsqrt14.pd.256
return _mm256_rsqrt14_pd(__A);
}
__m256d test_mm256_mask_rsqrt14_pd(__m256d __W, __mmask8 __U, __m256d __A) {
// CHECK-LABEL: @test_mm256_mask_rsqrt14_pd
// CHECK: @llvm.x86.avx512.rsqrt14.pd.256
return _mm256_mask_rsqrt14_pd(__W, __U, __A);
}
__m256d test_mm256_maskz_rsqrt14_pd(__mmask8 __U, __m256d __A) {
// CHECK-LABEL: @test_mm256_maskz_rsqrt14_pd
// CHECK: @llvm.x86.avx512.rsqrt14.pd.256
return _mm256_maskz_rsqrt14_pd(__U, __A);
}
__m128 test_mm_rsqrt14_ps(__m128 __A) {
// CHECK-LABEL: @test_mm_rsqrt14_ps
// CHECK: @llvm.x86.avx512.rsqrt14.ps.128
return _mm_rsqrt14_ps(__A);
}
__m128 test_mm_mask_rsqrt14_ps(__m128 __W, __mmask8 __U, __m128 __A) {
// CHECK-LABEL: @test_mm_mask_rsqrt14_ps
// CHECK: @llvm.x86.avx512.rsqrt14.ps.128
return _mm_mask_rsqrt14_ps(__W, __U, __A);
}
__m128 test_mm_maskz_rsqrt14_ps(__mmask8 __U, __m128 __A) {
// CHECK-LABEL: @test_mm_maskz_rsqrt14_ps
// CHECK: @llvm.x86.avx512.rsqrt14.ps.128
return _mm_maskz_rsqrt14_ps(__U, __A);
}
__m256 test_mm256_rsqrt14_ps(__m256 __A) {
// CHECK-LABEL: @test_mm256_rsqrt14_ps
// CHECK: @llvm.x86.avx512.rsqrt14.ps.256
return _mm256_rsqrt14_ps(__A);
}
__m256 test_mm256_mask_rsqrt14_ps(__m256 __W, __mmask8 __U, __m256 __A) {
// CHECK-LABEL: @test_mm256_mask_rsqrt14_ps
// CHECK: @llvm.x86.avx512.rsqrt14.ps.256
return _mm256_mask_rsqrt14_ps(__W, __U, __A);
}
__m256 test_mm256_maskz_rsqrt14_ps(__mmask8 __U, __m256 __A) {
// CHECK-LABEL: @test_mm256_maskz_rsqrt14_ps
// CHECK: @llvm.x86.avx512.rsqrt14.ps.256
return _mm256_maskz_rsqrt14_ps(__U, __A);
}