[PowerPC] Allow const pointers for load builtins in altivec.h

The load builtins in altivec.h do not have const in the signature
for the pointer parameter. This prevents using them to load from
pointers to const data. A notable user of such loads is Eigen.

This patch simply adds the missing const qualifiers.

Fixes: https://bugs.llvm.org/show_bug.cgi?id=47408
Nemanja Ivanovic 2020-09-04 13:54:21 -04:00
parent 00eb6fef08
commit 54205f0bd2
4 changed files with 238 additions and 202 deletions
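
For context, a minimal reproducer for the problem (hypothetical, not part of the commit): before this change, no vec_xl overload accepted a pointer to const, so the call below failed to compile unless the caller cast away const. Build with something like clang -target powerpc64le-unknown-linux-gnu -maltivec -mvsx -c.

// Hypothetical reproducer for the bug fixed here (PR47408).
#include <altivec.h>

vector float load_row(const float *row) {
  // Previously a compile error: no vec_xl overload took `const float *`.
  // With this patch the call resolves to the new const-qualified overload.
  return vec_xl(0, row);
}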


@@ -2702,67 +2702,67 @@ vec_insert_exp(vector unsigned int __a, vector unsigned int __b) {
}
#if defined(__powerpc64__)
static __inline__ vector signed char __ATTRS_o_ai vec_xl_len(signed char *__a,
static __inline__ vector signed char __ATTRS_o_ai vec_xl_len(const signed char *__a,
size_t __b) {
return (vector signed char)__builtin_vsx_lxvl(__a, (__b << 56));
}
static __inline__ vector unsigned char __ATTRS_o_ai
vec_xl_len(unsigned char *__a, size_t __b) {
vec_xl_len(const unsigned char *__a, size_t __b) {
return (vector unsigned char)__builtin_vsx_lxvl(__a, (__b << 56));
}
static __inline__ vector signed short __ATTRS_o_ai vec_xl_len(signed short *__a,
static __inline__ vector signed short __ATTRS_o_ai vec_xl_len(const signed short *__a,
size_t __b) {
return (vector signed short)__builtin_vsx_lxvl(__a, (__b << 56));
}
static __inline__ vector unsigned short __ATTRS_o_ai
vec_xl_len(unsigned short *__a, size_t __b) {
vec_xl_len(const unsigned short *__a, size_t __b) {
return (vector unsigned short)__builtin_vsx_lxvl(__a, (__b << 56));
}
static __inline__ vector signed int __ATTRS_o_ai vec_xl_len(signed int *__a,
static __inline__ vector signed int __ATTRS_o_ai vec_xl_len(const signed int *__a,
size_t __b) {
return (vector signed int)__builtin_vsx_lxvl(__a, (__b << 56));
}
static __inline__ vector unsigned int __ATTRS_o_ai vec_xl_len(unsigned int *__a,
static __inline__ vector unsigned int __ATTRS_o_ai vec_xl_len(const unsigned int *__a,
size_t __b) {
return (vector unsigned int)__builtin_vsx_lxvl(__a, (__b << 56));
}
static __inline__ vector float __ATTRS_o_ai vec_xl_len(float *__a, size_t __b) {
static __inline__ vector float __ATTRS_o_ai vec_xl_len(const float *__a, size_t __b) {
return (vector float)__builtin_vsx_lxvl(__a, (__b << 56));
}
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_xl_len(signed __int128 *__a, size_t __b) {
vec_xl_len(const signed __int128 *__a, size_t __b) {
return (vector signed __int128)__builtin_vsx_lxvl(__a, (__b << 56));
}
static __inline__ vector unsigned __int128 __ATTRS_o_ai
vec_xl_len(unsigned __int128 *__a, size_t __b) {
vec_xl_len(const unsigned __int128 *__a, size_t __b) {
return (vector unsigned __int128)__builtin_vsx_lxvl(__a, (__b << 56));
}
static __inline__ vector signed long long __ATTRS_o_ai
vec_xl_len(signed long long *__a, size_t __b) {
vec_xl_len(const signed long long *__a, size_t __b) {
return (vector signed long long)__builtin_vsx_lxvl(__a, (__b << 56));
}
static __inline__ vector unsigned long long __ATTRS_o_ai
vec_xl_len(unsigned long long *__a, size_t __b) {
vec_xl_len(const unsigned long long *__a, size_t __b) {
return (vector unsigned long long)__builtin_vsx_lxvl(__a, (__b << 56));
}
static __inline__ vector double __ATTRS_o_ai vec_xl_len(double *__a,
static __inline__ vector double __ATTRS_o_ai vec_xl_len(const double *__a,
size_t __b) {
return (vector double)__builtin_vsx_lxvl(__a, (__b << 56));
}
static __inline__ vector unsigned char __ATTRS_o_ai
vec_xl_len_r(unsigned char *__a, size_t __b) {
vec_xl_len_r(const unsigned char *__a, size_t __b) {
vector unsigned char __res =
(vector unsigned char)__builtin_vsx_lxvll(__a, (__b << 56));
#ifdef __LITTLE_ENDIAN__
@@ -16447,41 +16447,41 @@ typedef vector unsigned int unaligned_vec_uint __attribute__((aligned(1)));
typedef vector float unaligned_vec_float __attribute__((aligned(1)));
static inline __ATTRS_o_ai vector signed char vec_xl(signed long long __offset,
signed char *__ptr) {
const signed char *__ptr) {
return *(unaligned_vec_schar *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector unsigned char
vec_xl(signed long long __offset, unsigned char *__ptr) {
vec_xl(signed long long __offset, const unsigned char *__ptr) {
return *(unaligned_vec_uchar*)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector signed short vec_xl(signed long long __offset,
signed short *__ptr) {
const signed short *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_sshort *)__addr;
}
static inline __ATTRS_o_ai vector unsigned short
vec_xl(signed long long __offset, unsigned short *__ptr) {
vec_xl(signed long long __offset, const unsigned short *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_ushort *)__addr;
}
static inline __ATTRS_o_ai vector signed int vec_xl(signed long long __offset,
signed int *__ptr) {
const signed int *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_sint *)__addr;
}
static inline __ATTRS_o_ai vector unsigned int vec_xl(signed long long __offset,
unsigned int *__ptr) {
const unsigned int *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_uint *)__addr;
}
static inline __ATTRS_o_ai vector float vec_xl(signed long long __offset,
float *__ptr) {
const float *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_float *)__addr;
}
@@ -16492,19 +16492,19 @@ typedef vector unsigned long long unaligned_vec_ull __attribute__((aligned(1)));
typedef vector double unaligned_vec_double __attribute__((aligned(1)));
static inline __ATTRS_o_ai vector signed long long
vec_xl(signed long long __offset, signed long long *__ptr) {
vec_xl(signed long long __offset, const signed long long *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_sll *)__addr;
}
static inline __ATTRS_o_ai vector unsigned long long
vec_xl(signed long long __offset, unsigned long long *__ptr) {
vec_xl(signed long long __offset, const unsigned long long *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_ull *)__addr;
}
static inline __ATTRS_o_ai vector double vec_xl(signed long long __offset,
double *__ptr) {
const double *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_double *)__addr;
}
@@ -16515,13 +16515,13 @@ typedef vector signed __int128 unaligned_vec_si128 __attribute__((aligned(1)));
typedef vector unsigned __int128 unaligned_vec_ui128
__attribute__((aligned(1)));
static inline __ATTRS_o_ai vector signed __int128
vec_xl(signed long long __offset, signed __int128 *__ptr) {
vec_xl(signed long long __offset, const signed __int128 *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_si128 *)__addr;
}
static inline __ATTRS_o_ai vector unsigned __int128
vec_xl(signed long long __offset, unsigned __int128 *__ptr) {
vec_xl(signed long long __offset, const unsigned __int128 *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_ui128 *)__addr;
}
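
These overloads also illustrate vec_xl's semantics: the offset is in bytes, and there is no alignment requirement, because the header dereferences through vector typedefs declared with __attribute__((aligned(1))). A small sketch (my example, assuming <altivec.h> is included as above):

// Sketch: vec_xl loads 16 bytes starting at (char *)p + 3; unlike
// vec_ld, it does not mask off the low bits of the effective address.
vector signed short load_at_byte_3(const signed short *p) {
  return vec_xl(3, p);
}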
@@ -16531,71 +16531,71 @@ vec_xl(signed long long __offset, unsigned __int128 *__ptr) {
#ifdef __LITTLE_ENDIAN__
static __inline__ vector signed char __ATTRS_o_ai
vec_xl_be(signed long long __offset, signed char *__ptr) {
vec_xl_be(signed long long __offset, const signed char *__ptr) {
vector signed char __vec = (vector signed char)__builtin_vsx_lxvd2x_be(__offset, __ptr);
return __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
13, 12, 11, 10, 9, 8);
}
static __inline__ vector unsigned char __ATTRS_o_ai
vec_xl_be(signed long long __offset, unsigned char *__ptr) {
vec_xl_be(signed long long __offset, const unsigned char *__ptr) {
vector unsigned char __vec = (vector unsigned char)__builtin_vsx_lxvd2x_be(__offset, __ptr);
return __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
13, 12, 11, 10, 9, 8);
}
static __inline__ vector signed short __ATTRS_o_ai
vec_xl_be(signed long long __offset, signed short *__ptr) {
vec_xl_be(signed long long __offset, const signed short *__ptr) {
vector signed short __vec = (vector signed short)__builtin_vsx_lxvd2x_be(__offset, __ptr);
return __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
}
static __inline__ vector unsigned short __ATTRS_o_ai
vec_xl_be(signed long long __offset, unsigned short *__ptr) {
vec_xl_be(signed long long __offset, const unsigned short *__ptr) {
vector unsigned short __vec = (vector unsigned short)__builtin_vsx_lxvd2x_be(__offset, __ptr);
return __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
}
static __inline__ vector signed int __ATTRS_o_ai
vec_xl_be(signed long long __offset, signed int *__ptr) {
vec_xl_be(signed long long __offset, const signed int *__ptr) {
return (vector signed int)__builtin_vsx_lxvw4x_be(__offset, __ptr);
}
static __inline__ vector unsigned int __ATTRS_o_ai
vec_xl_be(signed long long __offset, unsigned int *__ptr) {
vec_xl_be(signed long long __offset, const unsigned int *__ptr) {
return (vector unsigned int)__builtin_vsx_lxvw4x_be(__offset, __ptr);
}
static __inline__ vector float __ATTRS_o_ai
vec_xl_be(signed long long __offset, float *__ptr) {
vec_xl_be(signed long long __offset, const float *__ptr) {
return (vector float)__builtin_vsx_lxvw4x_be(__offset, __ptr);
}
#ifdef __VSX__
static __inline__ vector signed long long __ATTRS_o_ai
vec_xl_be(signed long long __offset, signed long long *__ptr) {
vec_xl_be(signed long long __offset, const signed long long *__ptr) {
return (vector signed long long)__builtin_vsx_lxvd2x_be(__offset, __ptr);
}
static __inline__ vector unsigned long long __ATTRS_o_ai
vec_xl_be(signed long long __offset, unsigned long long *__ptr) {
vec_xl_be(signed long long __offset, const unsigned long long *__ptr) {
return (vector unsigned long long)__builtin_vsx_lxvd2x_be(__offset, __ptr);
}
static __inline__ vector double __ATTRS_o_ai
vec_xl_be(signed long long __offset, double *__ptr) {
vec_xl_be(signed long long __offset, const double *__ptr) {
return (vector double)__builtin_vsx_lxvd2x_be(__offset, __ptr);
}
#endif
#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_xl_be(signed long long __offset, signed __int128 *__ptr) {
vec_xl_be(signed long long __offset, const signed __int128 *__ptr) {
return vec_xl(__offset, __ptr);
}
static __inline__ vector unsigned __int128 __ATTRS_o_ai
vec_xl_be(signed long long __offset, unsigned __int128 *__ptr) {
vec_xl_be(signed long long __offset, const unsigned __int128 *__ptr) {
return vec_xl(__offset, __ptr);
}
#endif
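
The vec_xl_be overloads load elements in big-endian element order regardless of the target's endianness; on little-endian targets this goes through the *_be builtins plus a shuffle, as above. A minimal use (my example, assuming a VSX-enabled little-endian target and <altivec.h>):

// Sketch: the four words land in big-endian order even on an LE target.
vector unsigned int load_be_words(const unsigned int *p) {
  return vec_xl_be(0, p);
}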
@@ -16608,44 +16608,44 @@ vec_xl_be(signed long long __offset, unsigned __int128 *__ptr) {
/* vect_xl_sext */
static __inline__ vector unsigned __int128 __ATTRS_o_ai
vec_xl_sext(signed long long __offset, signed char *__pointer) {
vec_xl_sext(signed long long __offset, const signed char *__pointer) {
return (vector unsigned __int128)*(__pointer + __offset);
}
static __inline__ vector unsigned __int128 __ATTRS_o_ai
vec_xl_sext(signed long long __offset, signed short *__pointer) {
vec_xl_sext(signed long long __offset, const signed short *__pointer) {
return (vector unsigned __int128)*(__pointer + __offset);
}
static __inline__ vector unsigned __int128 __ATTRS_o_ai
vec_xl_sext(signed long long __offset, signed int *__pointer) {
vec_xl_sext(signed long long __offset, const signed int *__pointer) {
return (vector unsigned __int128)*(__pointer + __offset);
}
static __inline__ vector unsigned __int128 __ATTRS_o_ai
vec_xl_sext(signed long long __offset, signed long long *__pointer) {
vec_xl_sext(signed long long __offset, const signed long long *__pointer) {
return (vector unsigned __int128)*(__pointer + __offset);
}
/* vec_xl_zext */
static __inline__ vector unsigned __int128 __ATTRS_o_ai
vec_xl_zext(signed long long __offset, unsigned char *__pointer) {
vec_xl_zext(signed long long __offset, const unsigned char *__pointer) {
return (vector unsigned __int128)*(__pointer + __offset);
}
static __inline__ vector unsigned __int128 __ATTRS_o_ai
vec_xl_zext(signed long long __offset, unsigned short *__pointer) {
vec_xl_zext(signed long long __offset, const unsigned short *__pointer) {
return (vector unsigned __int128)*(__pointer + __offset);
}
static __inline__ vector unsigned __int128 __ATTRS_o_ai
vec_xl_zext(signed long long __offset, unsigned int *__pointer) {
vec_xl_zext(signed long long __offset, const unsigned int *__pointer) {
return (vector unsigned __int128)*(__pointer + __offset);
}
static __inline__ vector unsigned __int128 __ATTRS_o_ai
vec_xl_zext(signed long long __offset, unsigned long long *__pointer) {
vec_xl_zext(signed long long __offset, const unsigned long long *__pointer) {
return (vector unsigned __int128)*(__pointer + __offset);
}
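
vec_xl_sext and vec_xl_zext load a single scalar and widen it into a 128-bit lane; note that here the offset is applied to the typed pointer, so it counts elements rather than bytes. A sketch (my example; these are recent ISA additions, and to my understanding they require a Power10-class target, e.g. -mcpu=power10):

// Sketch: sign-extend p[0] into a 128-bit lane from const data.
// Per the header, both the sext and zext variants return
// vector unsigned __int128.
vector unsigned __int128 widen_word(const signed int *p) {
  return vec_xl_sext(0, p);
}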


@@ -38,6 +38,13 @@ vector float res_vf;
// CHECK-NOALTIVEC: error: unknown type name 'vector'
// CHECK-NOALTIVEC-NOT: '(error)'
const signed char *param_sc_ld;
const unsigned char *param_uc_ld;
const short *param_s_ld;
const unsigned short *param_us_ld;
const int *param_i_ld;
const unsigned int *param_ui_ld;
const float *param_f_ld;
signed char param_sc;
unsigned char param_uc;
@@ -1313,7 +1320,7 @@ void test6() {
// CHECK: @llvm.ppc.altivec.lvx
// CHECK-LE: @llvm.ppc.altivec.lvx
res_vsc = vec_ld(0, &param_sc);
res_vsc = vec_ld(0, param_sc_ld);
// CHECK: @llvm.ppc.altivec.lvx
// CHECK-LE: @llvm.ppc.altivec.lvx
@@ -1321,7 +1328,7 @@ void test6() {
// CHECK: @llvm.ppc.altivec.lvx
// CHECK-LE: @llvm.ppc.altivec.lvx
res_vuc = vec_ld(0, &param_uc);
res_vuc = vec_ld(0, param_uc_ld);
// CHECK: @llvm.ppc.altivec.lvx
// CHECK-LE: @llvm.ppc.altivec.lvx
@@ -1333,7 +1340,7 @@ void test6() {
// CHECK: @llvm.ppc.altivec.lvx
// CHECK-LE: @llvm.ppc.altivec.lvx
res_vs = vec_ld(0, &param_s);
res_vs = vec_ld(0, param_s_ld);
// CHECK: @llvm.ppc.altivec.lvx
// CHECK-LE: @llvm.ppc.altivec.lvx
@@ -1341,7 +1348,7 @@ void test6() {
// CHECK: @llvm.ppc.altivec.lvx
// CHECK-LE: @llvm.ppc.altivec.lvx
res_vus = vec_ld(0, &param_us);
res_vus = vec_ld(0, param_us_ld);
// CHECK: @llvm.ppc.altivec.lvx
// CHECK-LE: @llvm.ppc.altivec.lvx
@@ -1357,7 +1364,7 @@ void test6() {
// CHECK: @llvm.ppc.altivec.lvx
// CHECK-LE: @llvm.ppc.altivec.lvx
res_vi = vec_ld(0, &param_i);
res_vi = vec_ld(0, param_i_ld);
// CHECK: @llvm.ppc.altivec.lvx
// CHECK-LE: @llvm.ppc.altivec.lvx
@@ -1365,7 +1372,7 @@ void test6() {
// CHECK: @llvm.ppc.altivec.lvx
// CHECK-LE: @llvm.ppc.altivec.lvx
res_vui = vec_ld(0, &param_ui);
res_vui = vec_ld(0, param_ui_ld);
// CHECK: @llvm.ppc.altivec.lvx
// CHECK-LE: @llvm.ppc.altivec.lvx
@@ -1377,7 +1384,7 @@ void test6() {
// CHECK: @llvm.ppc.altivec.lvx
// CHECK-LE: @llvm.ppc.altivec.lvx
res_vf = vec_ld(0, &param_f);
res_vf = vec_ld(0, param_f_ld);
// CHECK: @llvm.ppc.altivec.lvx
// CHECK-LE: @llvm.ppc.altivec.lvx
@@ -1385,7 +1392,7 @@ void test6() {
// CHECK: @llvm.ppc.altivec.lvx
// CHECK-LE: @llvm.ppc.altivec.lvx
res_vsc = vec_lvx(0, &param_sc);
res_vsc = vec_lvx(0, param_sc_ld);
// CHECK: @llvm.ppc.altivec.lvx
// CHECK-LE: @llvm.ppc.altivec.lvx
@@ -1393,7 +1400,7 @@ void test6() {
// CHECK: @llvm.ppc.altivec.lvx
// CHECK-LE: @llvm.ppc.altivec.lvx
res_vuc = vec_lvx(0, &param_uc);
res_vuc = vec_lvx(0, param_uc_ld);
// CHECK: @llvm.ppc.altivec.lvx
// CHECK-LE: @llvm.ppc.altivec.lvx
@@ -1405,7 +1412,7 @@ void test6() {
// CHECK: @llvm.ppc.altivec.lvx
// CHECK-LE: @llvm.ppc.altivec.lvx
res_vs = vec_lvx(0, &param_s);
res_vs = vec_lvx(0, param_s_ld);
// CHECK: @llvm.ppc.altivec.lvx
// CHECK-LE: @llvm.ppc.altivec.lvx
@@ -1413,7 +1420,7 @@ void test6() {
// CHECK: @llvm.ppc.altivec.lvx
// CHECK-LE: @llvm.ppc.altivec.lvx
res_vus = vec_lvx(0, &param_us);
res_vus = vec_lvx(0, param_us_ld);
// CHECK: @llvm.ppc.altivec.lvx
// CHECK-LE: @llvm.ppc.altivec.lvx
@@ -1429,7 +1436,7 @@ void test6() {
// CHECK: @llvm.ppc.altivec.lvx
// CHECK-LE: @llvm.ppc.altivec.lvx
res_vi = vec_lvx(0, &param_i);
res_vi = vec_lvx(0, param_i_ld);
// CHECK: @llvm.ppc.altivec.lvx
// CHECK-LE: @llvm.ppc.altivec.lvx
@@ -1437,7 +1444,7 @@ void test6() {
// CHECK: @llvm.ppc.altivec.lvx
// CHECK-LE: @llvm.ppc.altivec.lvx
res_vui = vec_lvx(0, &param_ui);
res_vui = vec_lvx(0, param_ui_ld);
// CHECK: @llvm.ppc.altivec.lvx
// CHECK-LE: @llvm.ppc.altivec.lvx
@@ -1449,64 +1456,64 @@ void test6() {
// CHECK: @llvm.ppc.altivec.lvx
// CHECK-LE: @llvm.ppc.altivec.lvx
res_vf = vec_lvx(0, &param_f);
res_vf = vec_lvx(0, param_f_ld);
// CHECK: @llvm.ppc.altivec.lvx
// CHECK-LE: @llvm.ppc.altivec.lvx
/* vec_lde */
res_vsc = vec_lde(0, &param_sc);
res_vsc = vec_lde(0, param_sc_ld);
// CHECK: @llvm.ppc.altivec.lvebx
// CHECK-LE: @llvm.ppc.altivec.lvebx
res_vuc = vec_lde(0, &param_uc);
res_vuc = vec_lde(0, param_uc_ld);
// CHECK: @llvm.ppc.altivec.lvebx
// CHECK-LE: @llvm.ppc.altivec.lvebx
res_vs = vec_lde(0, &param_s);
res_vs = vec_lde(0, param_s_ld);
// CHECK: @llvm.ppc.altivec.lvehx
// CHECK-LE: @llvm.ppc.altivec.lvehx
res_vus = vec_lde(0, &param_us);
res_vus = vec_lde(0, param_us_ld);
// CHECK: @llvm.ppc.altivec.lvehx
// CHECK-LE: @llvm.ppc.altivec.lvehx
res_vi = vec_lde(0, &param_i);
res_vi = vec_lde(0, param_i_ld);
// CHECK: @llvm.ppc.altivec.lvewx
// CHECK-LE: @llvm.ppc.altivec.lvewx
res_vui = vec_lde(0, &param_ui);
res_vui = vec_lde(0, param_ui_ld);
// CHECK: @llvm.ppc.altivec.lvewx
// CHECK-LE: @llvm.ppc.altivec.lvewx
res_vf = vec_lde(0, &param_f);
res_vf = vec_lde(0, param_f_ld);
// CHECK: @llvm.ppc.altivec.lvewx
// CHECK-LE: @llvm.ppc.altivec.lvewx
res_vsc = vec_lvebx(0, &param_sc);
res_vsc = vec_lvebx(0, param_sc_ld);
// CHECK: @llvm.ppc.altivec.lvebx
// CHECK-LE: @llvm.ppc.altivec.lvebx
res_vuc = vec_lvebx(0, &param_uc);
res_vuc = vec_lvebx(0, param_uc_ld);
// CHECK: @llvm.ppc.altivec.lvebx
// CHECK-LE: @llvm.ppc.altivec.lvebx
res_vs = vec_lvehx(0, &param_s);
res_vs = vec_lvehx(0, param_s_ld);
// CHECK: @llvm.ppc.altivec.lvehx
// CHECK-LE: @llvm.ppc.altivec.lvehx
res_vus = vec_lvehx(0, &param_us);
res_vus = vec_lvehx(0, param_us_ld);
// CHECK: @llvm.ppc.altivec.lvehx
// CHECK-LE: @llvm.ppc.altivec.lvehx
res_vi = vec_lvewx(0, &param_i);
res_vi = vec_lvewx(0, param_i_ld);
// CHECK: @llvm.ppc.altivec.lvewx
// CHECK-LE: @llvm.ppc.altivec.lvewx
res_vui = vec_lvewx(0, &param_ui);
res_vui = vec_lvewx(0, param_ui_ld);
// CHECK: @llvm.ppc.altivec.lvewx
// CHECK-LE: @llvm.ppc.altivec.lvewx
res_vf = vec_lvewx(0, &param_f);
res_vf = vec_lvewx(0, param_f_ld);
// CHECK: @llvm.ppc.altivec.lvewx
// CHECK-LE: @llvm.ppc.altivec.lvewx
@@ -1515,7 +1522,7 @@ void test6() {
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK-LE: @llvm.ppc.altivec.lvxl
res_vsc = vec_ldl(0, &param_sc);
res_vsc = vec_ldl(0, param_sc_ld);
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK-LE: @llvm.ppc.altivec.lvxl
@@ -1523,7 +1530,7 @@ void test6() {
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK-LE: @llvm.ppc.altivec.lvxl
res_vuc = vec_ldl(0, &param_uc);
res_vuc = vec_ldl(0, param_uc_ld);
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK-LE: @llvm.ppc.altivec.lvxl
@@ -1535,7 +1542,7 @@ void test6() {
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK-LE: @llvm.ppc.altivec.lvxl
res_vs = vec_ldl(0, &param_s);
res_vs = vec_ldl(0, param_s_ld);
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK-LE: @llvm.ppc.altivec.lvxl
@@ -1543,7 +1550,7 @@ void test6() {
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK-LE: @llvm.ppc.altivec.lvxl
res_vus = vec_ldl(0, &param_us);
res_vus = vec_ldl(0, param_us_ld);
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK-LE: @llvm.ppc.altivec.lvxl
@@ -1559,7 +1566,7 @@ void test6() {
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK-LE: @llvm.ppc.altivec.lvxl
res_vi = vec_ldl(0, &param_i);
res_vi = vec_ldl(0, param_i_ld);
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK-LE: @llvm.ppc.altivec.lvxl
@@ -1567,7 +1574,7 @@ void test6() {
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK-LE: @llvm.ppc.altivec.lvxl
res_vui = vec_ldl(0, &param_ui);
res_vui = vec_ldl(0, param_ui_ld);
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK-LE: @llvm.ppc.altivec.lvxl
@@ -1579,7 +1586,7 @@ void test6() {
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK-LE: @llvm.ppc.altivec.lvxl
res_vf = vec_ldl(0, &param_f);
res_vf = vec_ldl(0, param_f_ld);
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK-LE: @llvm.ppc.altivec.lvxl
@@ -1587,7 +1594,7 @@ void test6() {
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK-LE: @llvm.ppc.altivec.lvxl
res_vsc = vec_lvxl(0, &param_sc);
res_vsc = vec_lvxl(0, param_sc_ld);
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK-LE: @llvm.ppc.altivec.lvxl
@@ -1599,7 +1606,7 @@ void test6() {
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK-LE: @llvm.ppc.altivec.lvxl
res_vuc = vec_lvxl(0, &param_uc);
res_vuc = vec_lvxl(0, param_uc_ld);
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK-LE: @llvm.ppc.altivec.lvxl
@@ -1607,7 +1614,7 @@ void test6() {
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK-LE: @llvm.ppc.altivec.lvxl
res_vs = vec_lvxl(0, &param_s);
res_vs = vec_lvxl(0, param_s_ld);
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK-LE: @llvm.ppc.altivec.lvxl
@@ -1615,7 +1622,7 @@ void test6() {
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK-LE: @llvm.ppc.altivec.lvxl
res_vus = vec_lvxl(0, &param_us);
res_vus = vec_lvxl(0, param_us_ld);
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK-LE: @llvm.ppc.altivec.lvxl
@@ -1631,7 +1638,7 @@ void test6() {
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK-LE: @llvm.ppc.altivec.lvxl
res_vi = vec_lvxl(0, &param_i);
res_vi = vec_lvxl(0, param_i_ld);
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK-LE: @llvm.ppc.altivec.lvxl
@@ -1639,7 +1646,7 @@ void test6() {
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK-LE: @llvm.ppc.altivec.lvxl
res_vui = vec_lvxl(0, &param_ui);
res_vui = vec_lvxl(0, param_ui_ld);
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK-LE: @llvm.ppc.altivec.lvxl
@@ -1651,7 +1658,7 @@ void test6() {
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK-LE: @llvm.ppc.altivec.lvxl
res_vf = vec_lvxl(0, &param_f);
res_vf = vec_lvxl(0, param_f_ld);
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK-LE: @llvm.ppc.altivec.lvxl
@@ -1665,12 +1672,12 @@ void test6() {
// CHECK-LE: @llvm.ppc.altivec.vlogefp
/* vec_lvsl */
res_vuc = vec_lvsl(0, &param_i);
res_vuc = vec_lvsl(0, param_i_ld);
// CHECK: @llvm.ppc.altivec.lvsl
// CHECK-LE: @llvm.ppc.altivec.lvsl
/* vec_lvsr */
res_vuc = vec_lvsr(0, &param_i);
res_vuc = vec_lvsr(0, param_i_ld);
// CHECK: @llvm.ppc.altivec.lvsr
// CHECK-LE: @llvm.ppc.altivec.lvsr
@@ -6029,7 +6036,7 @@ void test6() {
// CHECK-LE: insertelement <4 x float>
/* vec_lvlx */
res_vsc = vec_lvlx(0, &param_sc);
res_vsc = vec_lvlx(0, param_sc_ld);
// CHECK: @llvm.ppc.altivec.lvx
// CHECK: @llvm.ppc.altivec.lvsl
// CHECK: store <16 x i8> zeroinitializer
@@ -6049,7 +6056,7 @@ void test6() {
// CHECK-LE: store <16 x i8> zeroinitializer
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vuc = vec_lvlx(0, &param_uc);
res_vuc = vec_lvlx(0, param_uc_ld);
// CHECK: @llvm.ppc.altivec.lvx
// CHECK: @llvm.ppc.altivec.lvsl
// CHECK: store <16 x i8> zeroinitializer
@@ -6079,7 +6086,7 @@ void test6() {
// CHECK-LE: @llvm.ppc.altivec.lvsl
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vs = vec_lvlx(0, &param_s);
res_vs = vec_lvlx(0, param_s_ld);
// CHECK: @llvm.ppc.altivec.lvx
// CHECK: @llvm.ppc.altivec.lvsl
// CHECK: store <8 x i16> zeroinitializer
@@ -6099,7 +6106,7 @@ void test6() {
// CHECK-LE: store <8 x i16> zeroinitializer
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vus = vec_lvlx(0, &param_us);
res_vus = vec_lvlx(0, param_us_ld);
// CHECK: @llvm.ppc.altivec.lvx
// CHECK: @llvm.ppc.altivec.lvsl
// CHECK: store <8 x i16> zeroinitializer
@@ -6139,7 +6146,7 @@ void test6() {
// CHECK-LE: @llvm.ppc.altivec.lvsl
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vi = vec_lvlx(0, &param_i);
res_vi = vec_lvlx(0, param_i_ld);
// CHECK: @llvm.ppc.altivec.lvx
// CHECK: @llvm.ppc.altivec.lvsl
// CHECK: store <4 x i32> zeroinitializer
@@ -6159,7 +6166,7 @@ void test6() {
// CHECK-LE: store <4 x i32> zeroinitializer
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vui = vec_lvlx(0, &param_ui);
res_vui = vec_lvlx(0, param_ui_ld);
// CHECK: @llvm.ppc.altivec.lvx
// CHECK: @llvm.ppc.altivec.lvsl
// CHECK: store <4 x i32> zeroinitializer
@@ -6200,7 +6207,7 @@ void test6() {
// CHECK-LE: @llvm.ppc.altivec.vperm
/* vec_lvlxl */
res_vsc = vec_lvlxl(0, &param_sc);
res_vsc = vec_lvlxl(0, param_sc_ld);
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK: @llvm.ppc.altivec.lvsl
// CHECK: store <16 x i8> zeroinitializer
@@ -6220,7 +6227,7 @@ void test6() {
// CHECK-LE: store <16 x i8> zeroinitializer
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vuc = vec_lvlxl(0, &param_uc);
res_vuc = vec_lvlxl(0, param_uc_ld);
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK: @llvm.ppc.altivec.lvsl
// CHECK: store <16 x i8> zeroinitializer
@@ -6250,7 +6257,7 @@ void test6() {
// CHECK-LE: @llvm.ppc.altivec.lvsl
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vs = vec_lvlxl(0, &param_s);
res_vs = vec_lvlxl(0, param_s_ld);
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK: @llvm.ppc.altivec.lvsl
// CHECK: store <8 x i16> zeroinitializer
@@ -6270,7 +6277,7 @@ void test6() {
// CHECK-LE: store <8 x i16> zeroinitializer
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vus = vec_lvlxl(0, &param_us);
res_vus = vec_lvlxl(0, param_us_ld);
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK: @llvm.ppc.altivec.lvsl
// CHECK: store <8 x i16> zeroinitializer
@@ -6310,7 +6317,7 @@ void test6() {
// CHECK-LE: @llvm.ppc.altivec.lvsl
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vi = vec_lvlxl(0, &param_i);
res_vi = vec_lvlxl(0, param_i_ld);
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK: @llvm.ppc.altivec.lvsl
// CHECK: store <4 x i32> zeroinitializer
@@ -6330,7 +6337,7 @@ void test6() {
// CHECK-LE: store <4 x i32> zeroinitializer
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vui = vec_lvlxl(0, &param_ui);
res_vui = vec_lvlxl(0, param_ui_ld);
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK: @llvm.ppc.altivec.lvsl
// CHECK: store <4 x i32> zeroinitializer
@@ -6371,7 +6378,7 @@ void test6() {
// CHECK-LE: @llvm.ppc.altivec.vperm
/* vec_lvrx */
res_vsc = vec_lvrx(0, &param_sc);
res_vsc = vec_lvrx(0, param_sc_ld);
// CHECK: @llvm.ppc.altivec.lvx
// CHECK: @llvm.ppc.altivec.lvsl
// CHECK: store <16 x i8> zeroinitializer
@@ -6391,7 +6398,7 @@ void test6() {
// CHECK-LE: store <16 x i8> zeroinitializer
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vuc = vec_lvrx(0, &param_uc);
res_vuc = vec_lvrx(0, param_uc_ld);
// CHECK: @llvm.ppc.altivec.lvx
// CHECK: @llvm.ppc.altivec.lvsl
// CHECK: store <16 x i8> zeroinitializer
@@ -6421,7 +6428,7 @@ void test6() {
// CHECK-LE: @llvm.ppc.altivec.lvsl
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vs = vec_lvrx(0, &param_s);
res_vs = vec_lvrx(0, param_s_ld);
// CHECK: @llvm.ppc.altivec.lvx
// CHECK: @llvm.ppc.altivec.lvsl
// CHECK: store <8 x i16> zeroinitializer
@@ -6441,7 +6448,7 @@ void test6() {
// CHECK-LE: store <8 x i16> zeroinitializer
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vus = vec_lvrx(0, &param_us);
res_vus = vec_lvrx(0, param_us_ld);
// CHECK: @llvm.ppc.altivec.lvx
// CHECK: @llvm.ppc.altivec.lvsl
// CHECK: store <8 x i16> zeroinitializer
@@ -6481,7 +6488,7 @@ void test6() {
// CHECK-LE: @llvm.ppc.altivec.lvsl
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vi = vec_lvrx(0, &param_i);
res_vi = vec_lvrx(0, param_i_ld);
// CHECK: @llvm.ppc.altivec.lvx
// CHECK: @llvm.ppc.altivec.lvsl
// CHECK: store <4 x i32> zeroinitializer
@@ -6501,7 +6508,7 @@ void test6() {
// CHECK-LE: store <4 x i32> zeroinitializer
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vui = vec_lvrx(0, &param_ui);
res_vui = vec_lvrx(0, param_ui_ld);
// CHECK: @llvm.ppc.altivec.lvx
// CHECK: @llvm.ppc.altivec.lvsl
// CHECK: store <4 x i32> zeroinitializer
@@ -6542,7 +6549,7 @@ void test6() {
// CHECK-LE: @llvm.ppc.altivec.vperm
/* vec_lvrxl */
res_vsc = vec_lvrxl(0, &param_sc);
res_vsc = vec_lvrxl(0, param_sc_ld);
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK: @llvm.ppc.altivec.lvsl
// CHECK: store <16 x i8> zeroinitializer
@@ -6562,7 +6569,7 @@ void test6() {
// CHECK-LE: store <16 x i8> zeroinitializer
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vuc = vec_lvrxl(0, &param_uc);
res_vuc = vec_lvrxl(0, param_uc_ld);
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK: @llvm.ppc.altivec.lvsl
// CHECK: store <16 x i8> zeroinitializer
@@ -6592,7 +6599,7 @@ void test6() {
// CHECK-LE: @llvm.ppc.altivec.lvsl
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vs = vec_lvrxl(0, &param_s);
res_vs = vec_lvrxl(0, param_s_ld);
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK: @llvm.ppc.altivec.lvsl
// CHECK: store <8 x i16> zeroinitializer
@@ -6612,7 +6619,7 @@ void test6() {
// CHECK-LE: store <8 x i16> zeroinitializer
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vus = vec_lvrxl(0, &param_us);
res_vus = vec_lvrxl(0, param_us_ld);
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK: @llvm.ppc.altivec.lvsl
// CHECK: store <8 x i16> zeroinitializer
@@ -6652,7 +6659,7 @@ void test6() {
// CHECK-LE: @llvm.ppc.altivec.lvsl
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vi = vec_lvrxl(0, &param_i);
res_vi = vec_lvrxl(0, param_i_ld);
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK: @llvm.ppc.altivec.lvsl
// CHECK: store <4 x i32> zeroinitializer
@@ -6672,7 +6679,7 @@ void test6() {
// CHECK-LE: store <4 x i32> zeroinitializer
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vui = vec_lvrxl(0, &param_ui);
res_vui = vec_lvrxl(0, param_ui_ld);
// CHECK: @llvm.ppc.altivec.lvxl
// CHECK: @llvm.ppc.altivec.lvsl
// CHECK: store <4 x i32> zeroinitializer
@@ -9354,31 +9361,31 @@ void test8() {
void test9() {
// CHECK-LABEL: define void @test9
// CHECK-LE-LABEL: define void @test9
res_vsc = vec_xl(param_sll, &param_sc);
res_vsc = vec_xl(param_sll, param_sc_ld);
// CHECK: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 1
// CHECK-LE: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 1
res_vuc = vec_xl(param_sll, &param_uc);
res_vuc = vec_xl(param_sll, param_uc_ld);
// CHECK: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 1
// CHECK-LE: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 1
res_vs = vec_xl(param_sll, &param_s);
res_vs = vec_xl(param_sll, param_s_ld);
// CHECK: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 1
// CHECK-LE: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 1
res_vus = vec_xl(param_sll, &param_us);
res_vus = vec_xl(param_sll, param_us_ld);
// CHECK: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 1
// CHECK-LE: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 1
res_vi = vec_xl(param_sll, &param_i);
res_vi = vec_xl(param_sll, param_i_ld);
// CHECK: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 1
// CHECK-LE: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 1
res_vui = vec_xl(param_sll, &param_ui);
res_vui = vec_xl(param_sll, param_ui_ld);
// CHECK: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 1
// CHECK-LE: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 1
res_vf = vec_xl(param_sll, &param_f);
res_vf = vec_xl(param_sll, param_f_ld);
// CHECK: load <4 x float>, <4 x float>* %{{[0-9]+}}, align 1
// CHECK-LE: load <4 x float>, <4 x float>* %{{[0-9]+}}, align 1
}
@@ -9420,35 +9427,35 @@ void test10() {
void test11() {
// CHECK-LABEL: define void @test11
// CHECK-LE-LABEL: define void @test11
res_vsc = vec_xl_be(param_sll, &param_sc);
res_vsc = vec_xl_be(param_sll, param_sc_ld);
// CHECK: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 1
// CHECK-LE: call <2 x double> @llvm.ppc.vsx.lxvd2x.be(i8* %{{[0-9]+}})
// CHECK-LE: shufflevector <16 x i8> %{{[0-9]+}}, <16 x i8> %{{[0-9]+}}, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
res_vuc = vec_xl_be(param_sll, &param_uc);
res_vuc = vec_xl_be(param_sll, param_uc_ld);
// CHECK: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 1
// CHECK-LE: call <2 x double> @llvm.ppc.vsx.lxvd2x.be(i8* %{{[0-9]+}})
// CHECK-LE: shufflevector <16 x i8> %{{[0-9]+}}, <16 x i8> %{{[0-9]+}}, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
res_vs = vec_xl_be(param_sll, &param_s);
res_vs = vec_xl_be(param_sll, param_s_ld);
// CHECK: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 1
// CHECK-LE: call <2 x double> @llvm.ppc.vsx.lxvd2x.be(i8* %{{[0-9]+}})
// CHECK-LE: shufflevector <8 x i16> %{{[0-9]+}}, <8 x i16> %{{[0-9]+}}, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
res_vus = vec_xl_be(param_sll, &param_us);
res_vus = vec_xl_be(param_sll, param_us_ld);
// CHECK: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 1
// CHECK-LE: call <2 x double> @llvm.ppc.vsx.lxvd2x.be(i8* %{{[0-9]+}})
// CHECK-LE: shufflevector <8 x i16> %{{[0-9]+}}, <8 x i16> %{{[0-9]+}}, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
res_vi = vec_xl_be(param_sll, &param_i);
res_vi = vec_xl_be(param_sll, param_i_ld);
// CHECK: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 1
// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.lxvw4x.be(i8* %{{[0-9]+}})
res_vui = vec_xl_be(param_sll, &param_ui);
res_vui = vec_xl_be(param_sll, param_ui_ld);
// CHECK: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 1
// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.lxvw4x.be(i8* %{{[0-9]+}})
res_vf = vec_xl_be(param_sll, &param_f);
res_vf = vec_xl_be(param_sll, param_f_ld);
// CHECK: load <4 x float>, <4 x float>* %{{[0-9]+}}, align 1
// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.lxvw4x.be(i8* %{{[0-9]+}})
}
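
In miniature, the updated test works like this (a hypothetical reduction, not the actual test file): const-qualified pointer globals are fed to each load intrinsic, forcing overload resolution onto the new const signatures, while FileCheck keeps matching the same intrinsic calls as before.

// Hypothetical reduction of the test pattern above.
const int *param_i_ld;
vector int res_vi;

void check_const_loads(void) {
  res_vi = vec_ld(0, param_i_ld);  // CHECK: @llvm.ppc.altivec.lvx
  res_vi = vec_ldl(0, param_i_ld); // CHECK: @llvm.ppc.altivec.lvxl
}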


@@ -23,11 +23,15 @@ vector double vda, vdb;
signed int *iap;
unsigned int uia, uib, *uiap;
signed char *cap;
unsigned char uca, *ucap;
signed short *sap;
unsigned short usa, *usap;
signed long long *llap, llb;
unsigned long long ulla, *ullap;
unsigned char uca;
const unsigned char *ucap;
const signed short *sap;
unsigned short usa;
const unsigned short *usap;
const signed long long *llap;
signed long long llb;
unsigned long long ulla;
const unsigned long long *ullap;
vector signed long long test_vec_mul_sll(void) {
// CHECK: mul <2 x i64>


@@ -17,10 +17,12 @@
// CHECK-NEXT: [[__PTR_ADDR_I:%.*]] = alloca i16*, align 8
// CHECK-NEXT: [[__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK-NEXT: [[C_ADDR:%.*]] = alloca <8 x i16>*, align 8
// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca i16*, align 8
// CHECK-NEXT: [[ST_ADDR:%.*]] = alloca i16*, align 8
// CHECK-NEXT: [[LD_ADDR:%.*]] = alloca i16*, align 8
// CHECK-NEXT: store <8 x i16>* [[C:%.*]], <8 x i16>** [[C_ADDR]], align 8
// CHECK-NEXT: store i16* [[PTR:%.*]], i16** [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i16*, i16** [[PTR_ADDR]], align 8
// CHECK-NEXT: store i16* [[ST:%.*]], i16** [[ST_ADDR]], align 8
// CHECK-NEXT: store i16* [[LD:%.*]], i16** [[LD_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i16*, i16** [[LD_ADDR]], align 8
// CHECK-NEXT: store i64 3, i64* [[__OFFSET_ADDR_I]], align 8
// CHECK-NEXT: store i16* [[TMP0]], i16** [[__PTR_ADDR_I]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load i16*, i16** [[__PTR_ADDR_I]], align 8
@@ -35,7 +37,7 @@
// CHECK-NEXT: store <8 x i16> [[TMP6]], <8 x i16>* [[TMP7]], align 16
// CHECK-NEXT: [[TMP8:%.*]] = load <8 x i16>*, <8 x i16>** [[C_ADDR]], align 8
// CHECK-NEXT: [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[TMP8]], align 16
// CHECK-NEXT: [[TMP10:%.*]] = load i16*, i16** [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP10:%.*]] = load i16*, i16** [[ST_ADDR]], align 8
// CHECK-NEXT: store <8 x i16> [[TMP9]], <8 x i16>* [[__VEC_ADDR_I]], align 16
// CHECK-NEXT: store i64 7, i64* [[__OFFSET_ADDR_I1]], align 8
// CHECK-NEXT: store i16* [[TMP10]], i16** [[__PTR_ADDR_I2]], align 8
@@ -50,9 +52,9 @@
// CHECK-NEXT: store <8 x i16> [[TMP14]], <8 x i16>* [[TMP16]], align 1
// CHECK-NEXT: ret void
//
void test1(vector signed short *c, signed short *ptr) {
*c = vec_xl(3ll, ptr);
vec_xst(*c, 7ll, ptr);
void test1(vector signed short *c, signed short *st, const signed short *ld) {
*c = vec_xl(3ll, ld);
vec_xst(*c, 7ll, st);
}
// CHECK-LABEL: @test2(
@@ -65,10 +67,12 @@ void test1(vector signed short *c, signed short *ptr) {
// CHECK-NEXT: [[__PTR_ADDR_I:%.*]] = alloca i16*, align 8
// CHECK-NEXT: [[__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK-NEXT: [[C_ADDR:%.*]] = alloca <8 x i16>*, align 8
// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca i16*, align 8
// CHECK-NEXT: [[ST_ADDR:%.*]] = alloca i16*, align 8
// CHECK-NEXT: [[LD_ADDR:%.*]] = alloca i16*, align 8
// CHECK-NEXT: store <8 x i16>* [[C:%.*]], <8 x i16>** [[C_ADDR]], align 8
// CHECK-NEXT: store i16* [[PTR:%.*]], i16** [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i16*, i16** [[PTR_ADDR]], align 8
// CHECK-NEXT: store i16* [[ST:%.*]], i16** [[ST_ADDR]], align 8
// CHECK-NEXT: store i16* [[LD:%.*]], i16** [[LD_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i16*, i16** [[LD_ADDR]], align 8
// CHECK-NEXT: store i64 3, i64* [[__OFFSET_ADDR_I]], align 8
// CHECK-NEXT: store i16* [[TMP0]], i16** [[__PTR_ADDR_I]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load i16*, i16** [[__PTR_ADDR_I]], align 8
@@ -83,7 +87,7 @@ void test1(vector signed short *c, signed short *ptr) {
// CHECK-NEXT: store <8 x i16> [[TMP6]], <8 x i16>* [[TMP7]], align 16
// CHECK-NEXT: [[TMP8:%.*]] = load <8 x i16>*, <8 x i16>** [[C_ADDR]], align 8
// CHECK-NEXT: [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[TMP8]], align 16
// CHECK-NEXT: [[TMP10:%.*]] = load i16*, i16** [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP10:%.*]] = load i16*, i16** [[ST_ADDR]], align 8
// CHECK-NEXT: store <8 x i16> [[TMP9]], <8 x i16>* [[__VEC_ADDR_I]], align 16
// CHECK-NEXT: store i64 7, i64* [[__OFFSET_ADDR_I1]], align 8
// CHECK-NEXT: store i16* [[TMP10]], i16** [[__PTR_ADDR_I2]], align 8
@@ -98,9 +102,10 @@ void test1(vector signed short *c, signed short *ptr) {
// CHECK-NEXT: store <8 x i16> [[TMP14]], <8 x i16>* [[TMP16]], align 1
// CHECK-NEXT: ret void
//
void test2(vector unsigned short *c, unsigned short *ptr) {
*c = vec_xl(3ll, ptr);
vec_xst(*c, 7ll, ptr);
void test2(vector unsigned short *c, unsigned short *st,
const unsigned short *ld) {
*c = vec_xl(3ll, ld);
vec_xst(*c, 7ll, st);
}
// CHECK-LABEL: @test3(
@@ -113,10 +118,12 @@ void test2(vector unsigned short *c, unsigned short *ptr) {
// CHECK-NEXT: [[__PTR_ADDR_I:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK-NEXT: [[C_ADDR:%.*]] = alloca <4 x i32>*, align 8
// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[ST_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[LD_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: store <4 x i32>* [[C:%.*]], <4 x i32>** [[C_ADDR]], align 8
// CHECK-NEXT: store i32* [[PTR:%.*]], i32** [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[PTR_ADDR]], align 8
// CHECK-NEXT: store i32* [[ST:%.*]], i32** [[ST_ADDR]], align 8
// CHECK-NEXT: store i32* [[LD:%.*]], i32** [[LD_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[LD_ADDR]], align 8
// CHECK-NEXT: store i64 3, i64* [[__OFFSET_ADDR_I]], align 8
// CHECK-NEXT: store i32* [[TMP0]], i32** [[__PTR_ADDR_I]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load i32*, i32** [[__PTR_ADDR_I]], align 8
@@ -131,7 +138,7 @@ void test2(vector unsigned short *c, unsigned short *ptr) {
// CHECK-NEXT: store <4 x i32> [[TMP6]], <4 x i32>* [[TMP7]], align 16
// CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>*, <4 x i32>** [[C_ADDR]], align 8
// CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, <4 x i32>* [[TMP8]], align 16
// CHECK-NEXT: [[TMP10:%.*]] = load i32*, i32** [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP10:%.*]] = load i32*, i32** [[ST_ADDR]], align 8
// CHECK-NEXT: store <4 x i32> [[TMP9]], <4 x i32>* [[__VEC_ADDR_I]], align 16
// CHECK-NEXT: store i64 7, i64* [[__OFFSET_ADDR_I1]], align 8
// CHECK-NEXT: store i32* [[TMP10]], i32** [[__PTR_ADDR_I2]], align 8
@@ -146,9 +153,9 @@ void test2(vector unsigned short *c, unsigned short *ptr) {
// CHECK-NEXT: store <4 x i32> [[TMP14]], <4 x i32>* [[TMP16]], align 1
// CHECK-NEXT: ret void
//
void test3(vector signed int *c, signed int *ptr) {
*c = vec_xl(3ll, ptr);
vec_xst(*c, 7ll, ptr);
void test3(vector signed int *c, signed int *st, const signed int *ld) {
*c = vec_xl(3ll, ld);
vec_xst(*c, 7ll, st);
}
// CHECK-LABEL: @test4(
@@ -161,10 +168,12 @@ void test3(vector signed int *c, signed int *ptr) {
// CHECK-NEXT: [[__PTR_ADDR_I:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK-NEXT: [[C_ADDR:%.*]] = alloca <4 x i32>*, align 8
// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[ST_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[LD_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: store <4 x i32>* [[C:%.*]], <4 x i32>** [[C_ADDR]], align 8
// CHECK-NEXT: store i32* [[PTR:%.*]], i32** [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[PTR_ADDR]], align 8
// CHECK-NEXT: store i32* [[ST:%.*]], i32** [[ST_ADDR]], align 8
// CHECK-NEXT: store i32* [[LD:%.*]], i32** [[LD_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[LD_ADDR]], align 8
// CHECK-NEXT: store i64 3, i64* [[__OFFSET_ADDR_I]], align 8
// CHECK-NEXT: store i32* [[TMP0]], i32** [[__PTR_ADDR_I]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load i32*, i32** [[__PTR_ADDR_I]], align 8
@@ -179,7 +188,7 @@ void test3(vector signed int *c, signed int *ptr) {
// CHECK-NEXT: store <4 x i32> [[TMP6]], <4 x i32>* [[TMP7]], align 16
// CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>*, <4 x i32>** [[C_ADDR]], align 8
// CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, <4 x i32>* [[TMP8]], align 16
// CHECK-NEXT: [[TMP10:%.*]] = load i32*, i32** [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP10:%.*]] = load i32*, i32** [[ST_ADDR]], align 8
// CHECK-NEXT: store <4 x i32> [[TMP9]], <4 x i32>* [[__VEC_ADDR_I]], align 16
// CHECK-NEXT: store i64 7, i64* [[__OFFSET_ADDR_I1]], align 8
// CHECK-NEXT: store i32* [[TMP10]], i32** [[__PTR_ADDR_I2]], align 8
@@ -194,9 +203,9 @@ void test3(vector signed int *c, signed int *ptr) {
// CHECK-NEXT: store <4 x i32> [[TMP14]], <4 x i32>* [[TMP16]], align 1
// CHECK-NEXT: ret void
//
void test4(vector unsigned int *c, unsigned int *ptr) {
*c = vec_xl(3ll, ptr);
vec_xst(*c, 7ll, ptr);
void test4(vector unsigned int *c, unsigned int *st, const unsigned int *ld) {
*c = vec_xl(3ll, ld);
vec_xst(*c, 7ll, st);
}
// CHECK-LABEL: @test5(
@@ -209,10 +218,12 @@ void test4(vector unsigned int *c, unsigned int *ptr) {
// CHECK-NEXT: [[__PTR_ADDR_I:%.*]] = alloca i64*, align 8
// CHECK-NEXT: [[__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK-NEXT: [[C_ADDR:%.*]] = alloca <2 x i64>*, align 8
// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca i64*, align 8
// CHECK-NEXT: [[ST_ADDR:%.*]] = alloca i64*, align 8
// CHECK-NEXT: [[LD_ADDR:%.*]] = alloca i64*, align 8
// CHECK-NEXT: store <2 x i64>* [[C:%.*]], <2 x i64>** [[C_ADDR]], align 8
// CHECK-NEXT: store i64* [[PTR:%.*]], i64** [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i64*, i64** [[PTR_ADDR]], align 8
// CHECK-NEXT: store i64* [[ST:%.*]], i64** [[ST_ADDR]], align 8
// CHECK-NEXT: store i64* [[LD:%.*]], i64** [[LD_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i64*, i64** [[LD_ADDR]], align 8
// CHECK-NEXT: store i64 3, i64* [[__OFFSET_ADDR_I]], align 8
// CHECK-NEXT: store i64* [[TMP0]], i64** [[__PTR_ADDR_I]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load i64*, i64** [[__PTR_ADDR_I]], align 8
@@ -227,7 +238,7 @@ void test4(vector unsigned int *c, unsigned int *ptr) {
// CHECK-NEXT: store <2 x i64> [[TMP6]], <2 x i64>* [[TMP7]], align 16
// CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>*, <2 x i64>** [[C_ADDR]], align 8
// CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, <2 x i64>* [[TMP8]], align 16
// CHECK-NEXT: [[TMP10:%.*]] = load i64*, i64** [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP10:%.*]] = load i64*, i64** [[ST_ADDR]], align 8
// CHECK-NEXT: store <2 x i64> [[TMP9]], <2 x i64>* [[__VEC_ADDR_I]], align 16
// CHECK-NEXT: store i64 7, i64* [[__OFFSET_ADDR_I1]], align 8
// CHECK-NEXT: store i64* [[TMP10]], i64** [[__PTR_ADDR_I2]], align 8
@@ -242,9 +253,10 @@ void test4(vector unsigned int *c, unsigned int *ptr) {
// CHECK-NEXT: store <2 x i64> [[TMP14]], <2 x i64>* [[TMP16]], align 1
// CHECK-NEXT: ret void
//
void test5(vector signed long long *c, signed long long *ptr) {
*c = vec_xl(3ll, ptr);
vec_xst(*c, 7ll, ptr);
void test5(vector signed long long *c, signed long long *st,
const signed long long *ld) {
*c = vec_xl(3ll, ld);
vec_xst(*c, 7ll, st);
}
// CHECK-LABEL: @test6(
@@ -257,10 +269,12 @@ void test5(vector signed long long *c, signed long long *ptr) {
// CHECK-NEXT: [[__PTR_ADDR_I:%.*]] = alloca i64*, align 8
// CHECK-NEXT: [[__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK-NEXT: [[C_ADDR:%.*]] = alloca <2 x i64>*, align 8
// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca i64*, align 8
// CHECK-NEXT: [[ST_ADDR:%.*]] = alloca i64*, align 8
// CHECK-NEXT: [[LD_ADDR:%.*]] = alloca i64*, align 8
// CHECK-NEXT: store <2 x i64>* [[C:%.*]], <2 x i64>** [[C_ADDR]], align 8
// CHECK-NEXT: store i64* [[PTR:%.*]], i64** [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i64*, i64** [[PTR_ADDR]], align 8
// CHECK-NEXT: store i64* [[ST:%.*]], i64** [[ST_ADDR]], align 8
// CHECK-NEXT: store i64* [[LD:%.*]], i64** [[LD_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i64*, i64** [[LD_ADDR]], align 8
// CHECK-NEXT: store i64 3, i64* [[__OFFSET_ADDR_I]], align 8
// CHECK-NEXT: store i64* [[TMP0]], i64** [[__PTR_ADDR_I]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load i64*, i64** [[__PTR_ADDR_I]], align 8
@@ -275,7 +289,7 @@ void test5(vector signed long long *c, signed long long *ptr) {
// CHECK-NEXT: store <2 x i64> [[TMP6]], <2 x i64>* [[TMP7]], align 16
// CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>*, <2 x i64>** [[C_ADDR]], align 8
// CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, <2 x i64>* [[TMP8]], align 16
// CHECK-NEXT: [[TMP10:%.*]] = load i64*, i64** [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP10:%.*]] = load i64*, i64** [[ST_ADDR]], align 8
// CHECK-NEXT: store <2 x i64> [[TMP9]], <2 x i64>* [[__VEC_ADDR_I]], align 16
// CHECK-NEXT: store i64 7, i64* [[__OFFSET_ADDR_I1]], align 8
// CHECK-NEXT: store i64* [[TMP10]], i64** [[__PTR_ADDR_I2]], align 8
@@ -290,9 +304,10 @@ void test5(vector signed long long *c, signed long long *ptr) {
// CHECK-NEXT: store <2 x i64> [[TMP14]], <2 x i64>* [[TMP16]], align 1
// CHECK-NEXT: ret void
//
void test6(vector unsigned long long *c, unsigned long long *ptr) {
*c = vec_xl(3ll, ptr);
vec_xst(*c, 7ll, ptr);
void test6(vector unsigned long long *c, unsigned long long *st,
const unsigned long long *ld) {
*c = vec_xl(3ll, ld);
vec_xst(*c, 7ll, st);
}
// CHECK-LABEL: @test7(
@@ -305,10 +320,12 @@ void test6(vector unsigned long long *c, unsigned long long *ptr) {
// CHECK-NEXT: [[__PTR_ADDR_I:%.*]] = alloca float*, align 8
// CHECK-NEXT: [[__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK-NEXT: [[C_ADDR:%.*]] = alloca <4 x float>*, align 8
// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca float*, align 8
// CHECK-NEXT: [[ST_ADDR:%.*]] = alloca float*, align 8
// CHECK-NEXT: [[LD_ADDR:%.*]] = alloca float*, align 8
// CHECK-NEXT: store <4 x float>* [[C:%.*]], <4 x float>** [[C_ADDR]], align 8
// CHECK-NEXT: store float* [[PTR:%.*]], float** [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load float*, float** [[PTR_ADDR]], align 8
// CHECK-NEXT: store float* [[ST:%.*]], float** [[ST_ADDR]], align 8
// CHECK-NEXT: store float* [[LD:%.*]], float** [[LD_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load float*, float** [[LD_ADDR]], align 8
// CHECK-NEXT: store i64 3, i64* [[__OFFSET_ADDR_I]], align 8
// CHECK-NEXT: store float* [[TMP0]], float** [[__PTR_ADDR_I]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load float*, float** [[__PTR_ADDR_I]], align 8
@@ -323,7 +340,7 @@ void test6(vector unsigned long long *c, unsigned long long *ptr) {
// CHECK-NEXT: store <4 x float> [[TMP6]], <4 x float>* [[TMP7]], align 16
// CHECK-NEXT: [[TMP8:%.*]] = load <4 x float>*, <4 x float>** [[C_ADDR]], align 8
// CHECK-NEXT: [[TMP9:%.*]] = load <4 x float>, <4 x float>* [[TMP8]], align 16
// CHECK-NEXT: [[TMP10:%.*]] = load float*, float** [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP10:%.*]] = load float*, float** [[ST_ADDR]], align 8
// CHECK-NEXT: store <4 x float> [[TMP9]], <4 x float>* [[__VEC_ADDR_I]], align 16
// CHECK-NEXT: store i64 7, i64* [[__OFFSET_ADDR_I1]], align 8
// CHECK-NEXT: store float* [[TMP10]], float** [[__PTR_ADDR_I2]], align 8
@@ -338,9 +355,9 @@ void test6(vector unsigned long long *c, unsigned long long *ptr) {
// CHECK-NEXT: store <4 x float> [[TMP14]], <4 x float>* [[TMP16]], align 1
// CHECK-NEXT: ret void
//
void test7(vector float *c, float *ptr) {
*c = vec_xl(3ll, ptr);
vec_xst(*c, 7ll, ptr);
void test7(vector float *c, float *st, const float *ld) {
*c = vec_xl(3ll, ld);
vec_xst(*c, 7ll, st);
}
// CHECK-LABEL: @test8(
@@ -353,10 +370,12 @@ void test7(vector float *c, float *ptr) {
// CHECK-NEXT: [[__PTR_ADDR_I:%.*]] = alloca double*, align 8
// CHECK-NEXT: [[__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK-NEXT: [[C_ADDR:%.*]] = alloca <2 x double>*, align 8
// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca double*, align 8
// CHECK-NEXT: [[ST_ADDR:%.*]] = alloca double*, align 8
// CHECK-NEXT: [[LD_ADDR:%.*]] = alloca double*, align 8
// CHECK-NEXT: store <2 x double>* [[C:%.*]], <2 x double>** [[C_ADDR]], align 8
// CHECK-NEXT: store double* [[PTR:%.*]], double** [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load double*, double** [[PTR_ADDR]], align 8
// CHECK-NEXT: store double* [[ST:%.*]], double** [[ST_ADDR]], align 8
// CHECK-NEXT: store double* [[LD:%.*]], double** [[LD_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load double*, double** [[LD_ADDR]], align 8
// CHECK-NEXT: store i64 3, i64* [[__OFFSET_ADDR_I]], align 8
// CHECK-NEXT: store double* [[TMP0]], double** [[__PTR_ADDR_I]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load double*, double** [[__PTR_ADDR_I]], align 8
@@ -371,7 +390,7 @@ void test7(vector float *c, float *ptr) {
// CHECK-NEXT: store <2 x double> [[TMP6]], <2 x double>* [[TMP7]], align 16
// CHECK-NEXT: [[TMP8:%.*]] = load <2 x double>*, <2 x double>** [[C_ADDR]], align 8
// CHECK-NEXT: [[TMP9:%.*]] = load <2 x double>, <2 x double>* [[TMP8]], align 16
// CHECK-NEXT: [[TMP10:%.*]] = load double*, double** [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP10:%.*]] = load double*, double** [[ST_ADDR]], align 8
// CHECK-NEXT: store <2 x double> [[TMP9]], <2 x double>* [[__VEC_ADDR_I]], align 16
// CHECK-NEXT: store i64 7, i64* [[__OFFSET_ADDR_I1]], align 8
// CHECK-NEXT: store double* [[TMP10]], double** [[__PTR_ADDR_I2]], align 8
@@ -386,9 +405,9 @@ void test7(vector float *c, float *ptr) {
// CHECK-NEXT: store <2 x double> [[TMP14]], <2 x double>* [[TMP16]], align 1
// CHECK-NEXT: ret void
//
void test8(vector double *c, double *ptr) {
*c = vec_xl(3ll, ptr);
vec_xst(*c, 7ll, ptr);
void test8(vector double *c, double *st, const double *ld) {
*c = vec_xl(3ll, ld);
vec_xst(*c, 7ll, st);
}
#ifdef __POWER8_VECTOR__
@@ -402,10 +421,12 @@ void test8(vector double *c, double *ptr) {
// CHECK-P8-NEXT: [[__PTR_ADDR_I:%.*]] = alloca i128*, align 8
// CHECK-P8-NEXT: [[__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK-P8-NEXT: [[C_ADDR:%.*]] = alloca <1 x i128>*, align 8
// CHECK-P8-NEXT: [[PTR_ADDR:%.*]] = alloca i128*, align 8
// CHECK-P8-NEXT: [[ST_ADDR:%.*]] = alloca i128*, align 8
// CHECK-P8-NEXT: [[LD_ADDR:%.*]] = alloca i128*, align 8
// CHECK-P8-NEXT: store <1 x i128>* [[C:%.*]], <1 x i128>** [[C_ADDR]], align 8
// CHECK-P8-NEXT: store i128* [[PTR:%.*]], i128** [[PTR_ADDR]], align 8
// CHECK-P8-NEXT: [[TMP0:%.*]] = load i128*, i128** [[PTR_ADDR]], align 8
// CHECK-P8-NEXT: store i128* [[ST:%.*]], i128** [[ST_ADDR]], align 8
// CHECK-P8-NEXT: store i128* [[LD:%.*]], i128** [[LD_ADDR]], align 8
// CHECK-P8-NEXT: [[TMP0:%.*]] = load i128*, i128** [[LD_ADDR]], align 8
// CHECK-P8-NEXT: store i64 3, i64* [[__OFFSET_ADDR_I]], align 8
// CHECK-P8-NEXT: store i128* [[TMP0]], i128** [[__PTR_ADDR_I]], align 8
// CHECK-P8-NEXT: [[TMP1:%.*]] = load i128*, i128** [[__PTR_ADDR_I]], align 8
@@ -420,7 +441,7 @@ void test8(vector double *c, double *ptr) {
// CHECK-P8-NEXT: store <1 x i128> [[TMP6]], <1 x i128>* [[TMP7]], align 16
// CHECK-P8-NEXT: [[TMP8:%.*]] = load <1 x i128>*, <1 x i128>** [[C_ADDR]], align 8
// CHECK-P8-NEXT: [[TMP9:%.*]] = load <1 x i128>, <1 x i128>* [[TMP8]], align 16
// CHECK-P8-NEXT: [[TMP10:%.*]] = load i128*, i128** [[PTR_ADDR]], align 8
// CHECK-P8-NEXT: [[TMP10:%.*]] = load i128*, i128** [[ST_ADDR]], align 8
// CHECK-P8-NEXT: store <1 x i128> [[TMP9]], <1 x i128>* [[__VEC_ADDR_I]], align 16
// CHECK-P8-NEXT: store i64 7, i64* [[__OFFSET_ADDR_I1]], align 8
// CHECK-P8-NEXT: store i128* [[TMP10]], i128** [[__PTR_ADDR_I2]], align 8
@@ -435,9 +456,10 @@ void test8(vector double *c, double *ptr) {
// CHECK-P8-NEXT: store <1 x i128> [[TMP14]], <1 x i128>* [[TMP16]], align 1
// CHECK-P8-NEXT: ret void
//
void test9(vector signed __int128 *c, signed __int128 *ptr) {
*c = vec_xl(3ll, ptr);
vec_xst(*c, 7ll, ptr);
void test9(vector signed __int128 *c, signed __int128 *st,
const signed __int128 *ld) {
*c = vec_xl(3ll, ld);
vec_xst(*c, 7ll, st);
}
// CHECK-P8-LABEL: @test10(
@@ -450,10 +472,12 @@ void test9(vector signed __int128 *c, signed __int128 *ptr) {
// CHECK-P8-NEXT: [[__PTR_ADDR_I:%.*]] = alloca i128*, align 8
// CHECK-P8-NEXT: [[__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK-P8-NEXT: [[C_ADDR:%.*]] = alloca <1 x i128>*, align 8
// CHECK-P8-NEXT: [[PTR_ADDR:%.*]] = alloca i128*, align 8
// CHECK-P8-NEXT: [[ST_ADDR:%.*]] = alloca i128*, align 8
// CHECK-P8-NEXT: [[LD_ADDR:%.*]] = alloca i128*, align 8
// CHECK-P8-NEXT: store <1 x i128>* [[C:%.*]], <1 x i128>** [[C_ADDR]], align 8
// CHECK-P8-NEXT: store i128* [[PTR:%.*]], i128** [[PTR_ADDR]], align 8
// CHECK-P8-NEXT: [[TMP0:%.*]] = load i128*, i128** [[PTR_ADDR]], align 8
// CHECK-P8-NEXT: store i128* [[ST:%.*]], i128** [[ST_ADDR]], align 8
// CHECK-P8-NEXT: store i128* [[LD:%.*]], i128** [[LD_ADDR]], align 8
// CHECK-P8-NEXT: [[TMP0:%.*]] = load i128*, i128** [[LD_ADDR]], align 8
// CHECK-P8-NEXT: store i64 3, i64* [[__OFFSET_ADDR_I]], align 8
// CHECK-P8-NEXT: store i128* [[TMP0]], i128** [[__PTR_ADDR_I]], align 8
// CHECK-P8-NEXT: [[TMP1:%.*]] = load i128*, i128** [[__PTR_ADDR_I]], align 8
@@ -468,7 +492,7 @@ void test9(vector signed __int128 *c, signed __int128 *ptr) {
// CHECK-P8-NEXT: store <1 x i128> [[TMP6]], <1 x i128>* [[TMP7]], align 16
// CHECK-P8-NEXT: [[TMP8:%.*]] = load <1 x i128>*, <1 x i128>** [[C_ADDR]], align 8
// CHECK-P8-NEXT: [[TMP9:%.*]] = load <1 x i128>, <1 x i128>* [[TMP8]], align 16
// CHECK-P8-NEXT: [[TMP10:%.*]] = load i128*, i128** [[PTR_ADDR]], align 8
// CHECK-P8-NEXT: [[TMP10:%.*]] = load i128*, i128** [[ST_ADDR]], align 8
// CHECK-P8-NEXT: store <1 x i128> [[TMP9]], <1 x i128>* [[__VEC_ADDR_I]], align 16
// CHECK-P8-NEXT: store i64 7, i64* [[__OFFSET_ADDR_I1]], align 8
// CHECK-P8-NEXT: store i128* [[TMP10]], i128** [[__PTR_ADDR_I2]], align 8
@@ -483,8 +507,9 @@ void test9(vector signed __int128 *c, signed __int128 *ptr) {
// CHECK-P8-NEXT: store <1 x i128> [[TMP14]], <1 x i128>* [[TMP16]], align 1
// CHECK-P8-NEXT: ret void
//
void test10(vector unsigned __int128 *c, unsigned __int128 *ptr) {
*c = vec_xl(3ll, ptr);
vec_xst(*c, 7ll, ptr);
void test10(vector unsigned __int128 *c, unsigned __int128 *st,
const unsigned __int128 *ld) {
*c = vec_xl(3ll, ld);
vec_xst(*c, 7ll, st);
}
#endif
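
Splitting each test's single ptr parameter into st and ld reflects the asymmetry this patch introduces: the loads now accept pointers to const, while the stores still require mutable pointers. A sketch (my example, not from the commit) of code relying on both sides:

// Sketch: vec_xl accepts a const source after this patch; vec_xst
// still needs a writable destination, since it stores through it.
void copy_vec(const signed short *src, signed short *dst) {
  vector signed short v = vec_xl(0, src);
  vec_xst(v, 0, dst);
}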