[InstCombine][NFC] Add a test for the reduce+([sext/zext](<n x i1>)) case.
This commit is contained in:
parent
cc92833f8a
commit
9320d4b695
|
@ -0,0 +1,93 @@
|
|||
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s
|
||||
|
||||
; Add-reduction applied directly to an <8 x i1> vector, with no extension.
; NOTE(review): the call below spells the intrinsic with a stale mangling
; suffix (.v8i32 for an <8 x i1> operand); the IR parser auto-remangles it
; to .v8i1, which is what the autogenerated CHECK line matches.
define i1 @reduce_add_self(<8 x i1> %x) {
; CHECK-LABEL: @reduce_add_self(
; CHECK-NEXT:    [[RES:%.*]] = call i1 @llvm.vector.reduce.add.v8i1(<8 x i1> [[X:%.*]])
; CHECK-NEXT:    ret i1 [[RES]]
;
  %res = call i1 @llvm.vector.reduce.add.v8i32(<8 x i1> %x)
  ret i1 %res
}
|
||||
|
||||
; Add-reduction of a sign-extended <4 x i1>. The CHECK lines show the input
; IR unchanged, i.e. instcombine does not currently fold this pattern.
define i32 @reduce_add_sext(<4 x i1> %x) {
; CHECK-LABEL: @reduce_add_sext(
; CHECK-NEXT:    [[SEXT:%.*]] = sext <4 x i1> [[X:%.*]] to <4 x i32>
; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[SEXT]])
; CHECK-NEXT:    ret i32 [[RES]]
;
  %sext = sext <4 x i1> %x to <4 x i32>
  %res = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %sext)
  ret i32 %res
}
|
||||
|
||||
; Add-reduction of a zero-extended <8 x i1>. The CHECK lines show the input
; IR unchanged, i.e. instcombine does not currently fold this pattern.
define i64 @reduce_add_zext(<8 x i1> %x) {
; CHECK-LABEL: @reduce_add_zext(
; CHECK-NEXT:    [[ZEXT:%.*]] = zext <8 x i1> [[X:%.*]] to <8 x i64>
; CHECK-NEXT:    [[RES:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[ZEXT]])
; CHECK-NEXT:    ret i64 [[RES]]
;
  %zext = zext <8 x i1> %x to <8 x i64>
  %res = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %zext)
  ret i64 %res
}
|
||||
|
||||
; Sign-extended add-reduction where the element count (16) equals the result
; bit width (i16). CHECK lines show the IR unchanged (no fold today).
define i16 @reduce_add_sext_same(<16 x i1> %x) {
; CHECK-LABEL: @reduce_add_sext_same(
; CHECK-NEXT:    [[SEXT:%.*]] = sext <16 x i1> [[X:%.*]] to <16 x i16>
; CHECK-NEXT:    [[RES:%.*]] = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> [[SEXT]])
; CHECK-NEXT:    ret i16 [[RES]]
;
  %sext = sext <16 x i1> %x to <16 x i16>
  %res = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %sext)
  ret i16 %res
}
|
||||
|
||||
; 128 elements reduced into an i8 result, so the popcount can wrap the result
; width. NOTE(review): the function name says "zext" but the body (and the
; autogenerated CHECK lines) use sext — kept as-is to match the checks.
define i8 @reduce_add_zext_long(<128 x i1> %x) {
; CHECK-LABEL: @reduce_add_zext_long(
; CHECK-NEXT:    [[SEXT:%.*]] = sext <128 x i1> [[X:%.*]] to <128 x i8>
; CHECK-NEXT:    [[RES:%.*]] = call i8 @llvm.vector.reduce.add.v128i8(<128 x i8> [[SEXT]])
; CHECK-NEXT:    ret i8 [[RES]]
;
  %sext = sext <128 x i1> %x to <128 x i8>
  %res = call i8 @llvm.vector.reduce.add.v128i8(<128 x i8> %sext)
  ret i8 %res
}
|
||||
|
||||
@glob = external global i8, align 1

; Same as reduce_add_zext_long, but the extended vector has a second use
; (an extractelement stored to @glob), so the sext cannot simply be absorbed
; into the reduction. CHECK lines show the IR unchanged.
define i8 @reduce_add_zext_long_external_use(<128 x i1> %x) {
; CHECK-LABEL: @reduce_add_zext_long_external_use(
; CHECK-NEXT:    [[SEXT:%.*]] = sext <128 x i1> [[X:%.*]] to <128 x i8>
; CHECK-NEXT:    [[RES:%.*]] = call i8 @llvm.vector.reduce.add.v128i8(<128 x i8> [[SEXT]])
; CHECK-NEXT:    [[EXT:%.*]] = extractelement <128 x i8> [[SEXT]], i32 0
; CHECK-NEXT:    store i8 [[EXT]], i8* @glob, align 1
; CHECK-NEXT:    ret i8 [[RES]]
;
  %sext = sext <128 x i1> %x to <128 x i8>
  %res = call i8 @llvm.vector.reduce.add.v128i8(<128 x i8> %sext)
  %ext = extractelement <128 x i8> %sext, i32 0
  store i8 %ext, i8* @glob, align 1
  ret i8 %res
}
|
||||
|
||||
@glob1 = external global i64, align 8

; Zext variant with a second use of the extended vector (element 0 stored to
; @glob1), again blocking removal of the extension. CHECK lines show the IR
; unchanged.
define i64 @reduce_add_zext_external_use(<8 x i1> %x) {
; CHECK-LABEL: @reduce_add_zext_external_use(
; CHECK-NEXT:    [[ZEXT:%.*]] = zext <8 x i1> [[X:%.*]] to <8 x i64>
; CHECK-NEXT:    [[RES:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[ZEXT]])
; CHECK-NEXT:    [[EXT:%.*]] = extractelement <8 x i64> [[ZEXT]], i32 0
; CHECK-NEXT:    store i64 [[EXT]], i64* @glob1, align 8
; CHECK-NEXT:    ret i64 [[RES]]
;
  %zext = zext <8 x i1> %x to <8 x i64>
  %res = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %zext)
  %ext = extractelement <8 x i64> %zext, i32 0
  store i64 %ext, i64* @glob1, align 8
  ret i64 %res
}
|
||||
|
||||
; Declarations for the reduction intrinsics used above. The first one carries
; the stale .v8i32 mangling matching its call site in @reduce_add_self; the
; IR parser auto-remangles both to .v8i1.
declare i1 @llvm.vector.reduce.add.v8i32(<8 x i1> %a)
declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %a)
declare i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %a)
declare i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %a)
declare i8 @llvm.vector.reduce.add.v128i8(<128 x i8> %a)
|
Loading…
Reference in New Issue