[AVX-512] Make VEX encoded FMA instructions available when AVX512 is enabled regardless of whether +fma was added on the command line.

We weren't able to handle isel of the 128/256-bit FMA instructions when AVX512F was enabled but VLX and FMA weren't.

I didn't make FeatureAVX512 imply FeatureFMA because I wasn't sure I wanted disabling FMA to also disable AVX512. Instead, FMA instructions simply can't be disabled while AVX512 is enabled.
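
For illustration only, a minimal standalone sketch of how the predicates behave after this change. SubtargetModel is a made-up stand-in for the real X86Subtarget; the member names mirror the real ones, but this is not the actual class:

  #include <cassert>

  // Hypothetical stand-in for X86Subtarget; only the bits relevant to this
  // change are modeled.
  struct SubtargetModel {
    bool HasFMA = false;     // +fma
    bool HasFMA4 = false;    // +fma4
    bool HasAVX512F = false; // +avx512f

    bool hasAVX512() const { return HasAVX512F; }
    // After this patch: AVX512F alone is enough to report FMA support,
    // unless FMA4 is present (FMA4 is still preferred).
    bool hasFMA() const { return (HasFMA || hasAVX512()) && !HasFMA4; }
    bool hasFMA4() const { return HasFMA4; }
    bool hasAnyFMA() const { return hasFMA() || hasFMA4(); }
  };

  int main() {
    SubtargetModel S;
    S.HasAVX512F = true;                  // e.g. -mattr=+avx512f,-fma
    assert(S.hasFMA());                   // VEX FMA instructions are selectable
    S.HasFMA4 = true;
    assert(!S.hasFMA() && S.hasAnyFMA()); // FMA4 still takes precedence
    return 0;
  }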

Another option would be to promote the 128/256-bit operations to 512 bits, do the operation, and extract the result, but that requires a lot of extra isel patterns. Since no CPUs exist that support AVX512 but not FMA, just using the VEX instructions seems better.
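
For intuition, a hedged sketch of what the rejected widening approach would amount to, written with standard AVX-512F intrinsics rather than the isel patterns the commit actually refers to. The helper name fma128_via_zmm is made up for this example, and it assumes an AVX512F-capable target:

  #include <immintrin.h>

  // Hypothetical helper: 128-bit FMA done by widening to 512 bits, doing the
  // operation there, and extracting the low 128 bits of the result.
  static inline __m128 fma128_via_zmm(__m128 a, __m128 b, __m128 c) {
    __m512 az = _mm512_castps128_ps512(a);   // upper lanes are undefined, which
    __m512 bz = _mm512_castps128_ps512(b);   // is fine: only the low 128 bits
    __m512 cz = _mm512_castps128_ps512(c);   // of the result are kept below
    __m512 rz = _mm512_fmadd_ps(az, bz, cz); // EVEX-encoded 512-bit FMA
    return _mm512_castps512_ps128(rz);       // extract the low 128 bits
  }

  int main() {
    __m128 a = _mm_set1_ps(2.0f), b = _mm_set1_ps(3.0f), c = _mm_set1_ps(1.0f);
    __m128 r = fma128_via_zmm(a, b, c);      // each lane: 2*3 + 1 = 7
    return _mm_cvtss_f32(r) == 7.0f ? 0 : 1;
  }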

llvm-svn: 298051
Craig Topper 2017-03-17 07:37:31 +00:00
parent 02cd0bfa46
commit a8d4097445
2 changed files with 234 additions and 60 deletions


@@ -435,9 +435,9 @@ public:
  bool hasPCLMUL() const { return HasPCLMUL; }
  // Prefer FMA4 to FMA - its better for commutation/memory folding and
  // has equal or better performance on all supported targets.
- bool hasFMA() const { return HasFMA && !HasFMA4; }
+ bool hasFMA() const { return (HasFMA || hasAVX512()) && !HasFMA4; }
  bool hasFMA4() const { return HasFMA4; }
- bool hasAnyFMA() const { return hasFMA() || hasFMA4() || hasAVX512(); }
+ bool hasAnyFMA() const { return hasFMA() || hasFMA4(); }
  bool hasXOP() const { return HasXOP; }
  bool hasTBM() const { return HasTBM; }
  bool hasMOVBE() const { return HasMOVBE; }


@@ -1,10 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i386-apple-darwin10 -mattr=+sse2,+fma,-fma4 -show-mc-encoding | FileCheck %s --check-prefix=FMA32
; RUN: llc < %s -mtriple=i386-apple-darwin10 -mattr=+sse2,-fma,-fma4 -show-mc-encoding | FileCheck %s --check-prefix=FMACALL32
; RUN: llc < %s -mtriple=i386-apple-darwin10 -mattr=+avx,+fma,-fma4 -show-mc-encoding | FileCheck %s --check-prefix=FMA32
; RUN: llc < %s -mtriple=i386-apple-darwin10 -mattr=+avx,-fma,-fma4 -show-mc-encoding | FileCheck %s --check-prefix=FMACALL32
; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mattr=+fma,-fma4 -show-mc-encoding | FileCheck %s --check-prefix=FMA64
; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mattr=-fma,-fma4 -show-mc-encoding | FileCheck %s --check-prefix=FMACALL64
; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mattr=+avx512f,-fma,-fma4 -show-mc-encoding | FileCheck %s --check-prefix=AVX51264NOFMA
; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mattr=+avx512f,fma,-fma4 -show-mc-encoding | FileCheck %s --check-prefix=AVX51264
; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mattr=+avx512f,-fma4 -show-mc-encoding | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX51264
; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mattr=+avx512vl,-fma4 -show-mc-encoding | FileCheck %s --check-prefix=AVX512VL
; RUN: llc < %s -mtriple=i386-apple-darwin10 -mcpu=bdver2 -mattr=-fma4 -show-mc-encoding | FileCheck %s --check-prefix=FMA32
; RUN: llc < %s -mtriple=i386-apple-darwin10 -mcpu=bdver2 -mattr=-fma,-fma4 -show-mc-encoding | FileCheck %s --check-prefix=FMACALL32
@@ -39,15 +39,15 @@ define float @test_f32(float %a, float %b, float %c) #0 {
; FMACALL64-NEXT: ## encoding: [0xeb,A]
; FMACALL64-NEXT: ## fixup A - offset: 1, value: _fmaf-1, kind: FK_PCRel_1
;
; AVX51264NOFMA-LABEL: test_f32:
; AVX51264NOFMA: ## BB#0: ## %entry
; AVX51264NOFMA-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa9,0xc2]
; AVX51264NOFMA-NEXT: retq ## encoding: [0xc3]
; AVX512-LABEL: test_f32:
; AVX512: ## BB#0: ## %entry
; AVX512-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa9,0xc2]
; AVX512-NEXT: retq ## encoding: [0xc3]
;
; AVX51264-LABEL: test_f32:
; AVX51264: ## BB#0: ## %entry
; AVX51264-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa9,0xc2]
; AVX51264-NEXT: retq ## encoding: [0xc3]
; AVX512VL-LABEL: test_f32:
; AVX512VL: ## BB#0: ## %entry
; AVX512VL-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa9,0xc2]
; AVX512VL-NEXT: retq ## encoding: [0xc3]
entry:
%call = call float @llvm.fma.f32(float %a, float %b, float %c)
ret float %call
@@ -84,15 +84,15 @@ define double @test_f64(double %a, double %b, double %c) #0 {
; FMACALL64-NEXT: ## encoding: [0xeb,A]
; FMACALL64-NEXT: ## fixup A - offset: 1, value: _fma-1, kind: FK_PCRel_1
;
; AVX51264NOFMA-LABEL: test_f64:
; AVX51264NOFMA: ## BB#0: ## %entry
; AVX51264NOFMA-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa9,0xc2]
; AVX51264NOFMA-NEXT: retq ## encoding: [0xc3]
; AVX512-LABEL: test_f64:
; AVX512: ## BB#0: ## %entry
; AVX512-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa9,0xc2]
; AVX512-NEXT: retq ## encoding: [0xc3]
;
; AVX51264-LABEL: test_f64:
; AVX51264: ## BB#0: ## %entry
; AVX51264-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa9,0xc2]
; AVX51264-NEXT: retq ## encoding: [0xc3]
; AVX512VL-LABEL: test_f64:
; AVX512VL: ## BB#0: ## %entry
; AVX512VL-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa9,0xc2]
; AVX512VL-NEXT: retq ## encoding: [0xc3]
entry:
%call = call double @llvm.fma.f64(double %a, double %b, double %c)
ret double %call
@@ -155,33 +155,33 @@ define x86_fp80 @test_f80(x86_fp80 %a, x86_fp80 %b, x86_fp80 %c) #0 {
; FMACALL64-NEXT: addq $56, %rsp ## encoding: [0x48,0x83,0xc4,0x38]
; FMACALL64-NEXT: retq ## encoding: [0xc3]
;
; AVX51264NOFMA-LABEL: test_f80:
; AVX51264NOFMA: ## BB#0: ## %entry
; AVX51264NOFMA-NEXT: subq $56, %rsp ## encoding: [0x48,0x83,0xec,0x38]
; AVX51264NOFMA-NEXT: fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x40]
; AVX51264NOFMA-NEXT: fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x50]
; AVX51264NOFMA-NEXT: fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x60]
; AVX51264NOFMA-NEXT: fstpt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x7c,0x24,0x20]
; AVX51264NOFMA-NEXT: fstpt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x7c,0x24,0x10]
; AVX51264NOFMA-NEXT: fstpt (%rsp) ## encoding: [0xdb,0x3c,0x24]
; AVX51264NOFMA-NEXT: callq _fmal ## encoding: [0xe8,A,A,A,A]
; AVX51264NOFMA-NEXT: ## fixup A - offset: 1, value: _fmal-4, kind: FK_PCRel_4
; AVX51264NOFMA-NEXT: addq $56, %rsp ## encoding: [0x48,0x83,0xc4,0x38]
; AVX51264NOFMA-NEXT: retq ## encoding: [0xc3]
; AVX512-LABEL: test_f80:
; AVX512: ## BB#0: ## %entry
; AVX512-NEXT: subq $56, %rsp ## encoding: [0x48,0x83,0xec,0x38]
; AVX512-NEXT: fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x40]
; AVX512-NEXT: fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x50]
; AVX512-NEXT: fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x60]
; AVX512-NEXT: fstpt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x7c,0x24,0x20]
; AVX512-NEXT: fstpt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x7c,0x24,0x10]
; AVX512-NEXT: fstpt (%rsp) ## encoding: [0xdb,0x3c,0x24]
; AVX512-NEXT: callq _fmal ## encoding: [0xe8,A,A,A,A]
; AVX512-NEXT: ## fixup A - offset: 1, value: _fmal-4, kind: FK_PCRel_4
; AVX512-NEXT: addq $56, %rsp ## encoding: [0x48,0x83,0xc4,0x38]
; AVX512-NEXT: retq ## encoding: [0xc3]
;
; AVX51264-LABEL: test_f80:
; AVX51264: ## BB#0: ## %entry
; AVX51264-NEXT: subq $56, %rsp ## encoding: [0x48,0x83,0xec,0x38]
; AVX51264-NEXT: fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x40]
; AVX51264-NEXT: fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x50]
; AVX51264-NEXT: fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x60]
; AVX51264-NEXT: fstpt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x7c,0x24,0x20]
; AVX51264-NEXT: fstpt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x7c,0x24,0x10]
; AVX51264-NEXT: fstpt (%rsp) ## encoding: [0xdb,0x3c,0x24]
; AVX51264-NEXT: callq _fmal ## encoding: [0xe8,A,A,A,A]
; AVX51264-NEXT: ## fixup A - offset: 1, value: _fmal-4, kind: FK_PCRel_4
; AVX51264-NEXT: addq $56, %rsp ## encoding: [0x48,0x83,0xc4,0x38]
; AVX51264-NEXT: retq ## encoding: [0xc3]
; AVX512VL-LABEL: test_f80:
; AVX512VL: ## BB#0: ## %entry
; AVX512VL-NEXT: subq $56, %rsp ## encoding: [0x48,0x83,0xec,0x38]
; AVX512VL-NEXT: fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x40]
; AVX512VL-NEXT: fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x50]
; AVX512VL-NEXT: fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x60]
; AVX512VL-NEXT: fstpt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x7c,0x24,0x20]
; AVX512VL-NEXT: fstpt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x7c,0x24,0x10]
; AVX512VL-NEXT: fstpt (%rsp) ## encoding: [0xdb,0x3c,0x24]
; AVX512VL-NEXT: callq _fmal ## encoding: [0xe8,A,A,A,A]
; AVX512VL-NEXT: ## fixup A - offset: 1, value: _fmal-4, kind: FK_PCRel_4
; AVX512VL-NEXT: addq $56, %rsp ## encoding: [0x48,0x83,0xc4,0x38]
; AVX512VL-NEXT: retq ## encoding: [0xc3]
entry:
%call = call x86_fp80 @llvm.fma.f80(x86_fp80 %a, x86_fp80 %b, x86_fp80 %c)
ret x86_fp80 %call
@@ -214,26 +214,200 @@ define float @test_f32_cst() #0 {
; FMACALL64-NEXT: ## xmm0 = mem[0],zero,zero,zero
; FMACALL64-NEXT: retq ## encoding: [0xc3]
;
; AVX51264NOFMA-LABEL: test_f32_cst:
; AVX51264NOFMA: ## BB#0: ## %entry
; AVX51264NOFMA-NEXT: vmovss {{.*}}(%rip), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x05,A,A,A,A]
; AVX51264NOFMA-NEXT: ## fixup A - offset: 4, value: LCPI3_0-4, kind: reloc_riprel_4byte
; AVX51264NOFMA-NEXT: ## xmm0 = mem[0],zero,zero,zero
; AVX51264NOFMA-NEXT: retq ## encoding: [0xc3]
; AVX512-LABEL: test_f32_cst:
; AVX512: ## BB#0: ## %entry
; AVX512-NEXT: vmovss {{.*}}(%rip), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x05,A,A,A,A]
; AVX512-NEXT: ## fixup A - offset: 4, value: LCPI3_0-4, kind: reloc_riprel_4byte
; AVX512-NEXT: ## xmm0 = mem[0],zero,zero,zero
; AVX512-NEXT: retq ## encoding: [0xc3]
;
; AVX51264-LABEL: test_f32_cst:
; AVX51264: ## BB#0: ## %entry
; AVX51264-NEXT: vmovss {{.*}}(%rip), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x05,A,A,A,A]
; AVX51264-NEXT: ## fixup A - offset: 4, value: LCPI3_0-4, kind: reloc_riprel_4byte
; AVX51264-NEXT: ## xmm0 = mem[0],zero,zero,zero
; AVX51264-NEXT: retq ## encoding: [0xc3]
; AVX512VL-LABEL: test_f32_cst:
; AVX512VL: ## BB#0: ## %entry
; AVX512VL-NEXT: vmovss {{.*}}(%rip), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x05,A,A,A,A]
; AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI3_0-4, kind: reloc_riprel_4byte
; AVX512VL-NEXT: ## xmm0 = mem[0],zero,zero,zero
; AVX512VL-NEXT: retq ## encoding: [0xc3]
entry:
%call = call float @llvm.fma.f32(float 3.0, float 3.0, float 3.0)
ret float %call
}
define <4 x float> @test_v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) #0 {
; FMA32-LABEL: test_v4f32:
; FMA32: ## BB#0: ## %entry
; FMA32-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x71,0xa8,0xc2]
; FMA32-NEXT: retl ## encoding: [0xc3]
;
; FMA64-LABEL: test_v4f32:
; FMA64: ## BB#0: ## %entry
; FMA64-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x71,0xa8,0xc2]
; FMA64-NEXT: retq ## encoding: [0xc3]
;
; AVX512-LABEL: test_v4f32:
; AVX512: ## BB#0: ## %entry
; AVX512-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x71,0xa8,0xc2]
; AVX512-NEXT: retq ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_v4f32:
; AVX512VL: ## BB#0: ## %entry
; AVX512VL-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa8,0xc2]
; AVX512VL-NEXT: retq ## encoding: [0xc3]
entry:
%call = call <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c)
ret <4 x float> %call
}
define <8 x float> @test_v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c) #0 {
; FMA32-LABEL: test_v8f32:
; FMA32: ## BB#0: ## %entry
; FMA32-NEXT: vfmadd213ps %ymm2, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0xa8,0xc2]
; FMA32-NEXT: retl ## encoding: [0xc3]
;
; FMA64-LABEL: test_v8f32:
; FMA64: ## BB#0: ## %entry
; FMA64-NEXT: vfmadd213ps %ymm2, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0xa8,0xc2]
; FMA64-NEXT: retq ## encoding: [0xc3]
;
; AVX512-LABEL: test_v8f32:
; AVX512: ## BB#0: ## %entry
; AVX512-NEXT: vfmadd213ps %ymm2, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0xa8,0xc2]
; AVX512-NEXT: retq ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_v8f32:
; AVX512VL: ## BB#0: ## %entry
; AVX512VL-NEXT: vfmadd213ps %ymm2, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xa8,0xc2]
; AVX512VL-NEXT: retq ## encoding: [0xc3]
entry:
%call = call <8 x float> @llvm.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c)
ret <8 x float> %call
}
define <16 x float> @test_v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c) #0 {
; FMA32-LABEL: test_v16f32:
; FMA32: ## BB#0: ## %entry
; FMA32-NEXT: pushl %ebp ## encoding: [0x55]
; FMA32-NEXT: movl %esp, %ebp ## encoding: [0x89,0xe5]
; FMA32-NEXT: andl $-32, %esp ## encoding: [0x83,0xe4,0xe0]
; FMA32-NEXT: subl $32, %esp ## encoding: [0x83,0xec,0x20]
; FMA32-NEXT: vfmadd213ps 8(%ebp), %ymm2, %ymm0 ## encoding: [0xc4,0xe2,0x6d,0xa8,0x45,0x08]
; FMA32-NEXT: vfmadd213ps 40(%ebp), %ymm3, %ymm1 ## encoding: [0xc4,0xe2,0x65,0xa8,0x4d,0x28]
; FMA32-NEXT: movl %ebp, %esp ## encoding: [0x89,0xec]
; FMA32-NEXT: popl %ebp ## encoding: [0x5d]
; FMA32-NEXT: retl ## encoding: [0xc3]
;
; FMA64-LABEL: test_v16f32:
; FMA64: ## BB#0: ## %entry
; FMA64-NEXT: vfmadd213ps %ymm4, %ymm2, %ymm0 ## encoding: [0xc4,0xe2,0x6d,0xa8,0xc4]
; FMA64-NEXT: vfmadd213ps %ymm5, %ymm3, %ymm1 ## encoding: [0xc4,0xe2,0x65,0xa8,0xcd]
; FMA64-NEXT: retq ## encoding: [0xc3]
;
; AVX512-LABEL: test_v16f32:
; AVX512: ## BB#0: ## %entry
; AVX512-NEXT: vfmadd213ps %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x48,0xa8,0xc2]
; AVX512-NEXT: retq ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_v16f32:
; AVX512VL: ## BB#0: ## %entry
; AVX512VL-NEXT: vfmadd213ps %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x48,0xa8,0xc2]
; AVX512VL-NEXT: retq ## encoding: [0xc3]
entry:
%call = call <16 x float> @llvm.fma.v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c)
ret <16 x float> %call
}
define <2 x double> @test_v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c) #0 {
; FMA32-LABEL: test_v2f64:
; FMA32: ## BB#0: ## %entry
; FMA32-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0xf1,0xa8,0xc2]
; FMA32-NEXT: retl ## encoding: [0xc3]
;
; FMA64-LABEL: test_v2f64:
; FMA64: ## BB#0: ## %entry
; FMA64-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0xf1,0xa8,0xc2]
; FMA64-NEXT: retq ## encoding: [0xc3]
;
; AVX512-LABEL: test_v2f64:
; AVX512: ## BB#0: ## %entry
; AVX512-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0xf1,0xa8,0xc2]
; AVX512-NEXT: retq ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_v2f64:
; AVX512VL: ## BB#0: ## %entry
; AVX512VL-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa8,0xc2]
; AVX512VL-NEXT: retq ## encoding: [0xc3]
entry:
%call = call <2 x double> @llvm.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c)
ret <2 x double> %call
}
define <4 x double> @test_v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c) #0 {
; FMA32-LABEL: test_v4f64:
; FMA32: ## BB#0: ## %entry
; FMA32-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0xf5,0xa8,0xc2]
; FMA32-NEXT: retl ## encoding: [0xc3]
;
; FMA64-LABEL: test_v4f64:
; FMA64: ## BB#0: ## %entry
; FMA64-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0xf5,0xa8,0xc2]
; FMA64-NEXT: retq ## encoding: [0xc3]
;
; AVX512-LABEL: test_v4f64:
; AVX512: ## BB#0: ## %entry
; AVX512-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0xf5,0xa8,0xc2]
; AVX512-NEXT: retq ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_v4f64:
; AVX512VL: ## BB#0: ## %entry
; AVX512VL-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xa8,0xc2]
; AVX512VL-NEXT: retq ## encoding: [0xc3]
entry:
%call = call <4 x double> @llvm.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c)
ret <4 x double> %call
}
define <8 x double> @test_v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c) #0 {
; FMA32-LABEL: test_v8f64:
; FMA32: ## BB#0: ## %entry
; FMA32-NEXT: pushl %ebp ## encoding: [0x55]
; FMA32-NEXT: movl %esp, %ebp ## encoding: [0x89,0xe5]
; FMA32-NEXT: andl $-32, %esp ## encoding: [0x83,0xe4,0xe0]
; FMA32-NEXT: subl $32, %esp ## encoding: [0x83,0xec,0x20]
; FMA32-NEXT: vfmadd213pd 8(%ebp), %ymm2, %ymm0 ## encoding: [0xc4,0xe2,0xed,0xa8,0x45,0x08]
; FMA32-NEXT: vfmadd213pd 40(%ebp), %ymm3, %ymm1 ## encoding: [0xc4,0xe2,0xe5,0xa8,0x4d,0x28]
; FMA32-NEXT: movl %ebp, %esp ## encoding: [0x89,0xec]
; FMA32-NEXT: popl %ebp ## encoding: [0x5d]
; FMA32-NEXT: retl ## encoding: [0xc3]
;
; FMA64-LABEL: test_v8f64:
; FMA64: ## BB#0: ## %entry
; FMA64-NEXT: vfmadd213pd %ymm4, %ymm2, %ymm0 ## encoding: [0xc4,0xe2,0xed,0xa8,0xc4]
; FMA64-NEXT: vfmadd213pd %ymm5, %ymm3, %ymm1 ## encoding: [0xc4,0xe2,0xe5,0xa8,0xcd]
; FMA64-NEXT: retq ## encoding: [0xc3]
;
; AVX512-LABEL: test_v8f64:
; AVX512: ## BB#0: ## %entry
; AVX512-NEXT: vfmadd213pd %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x48,0xa8,0xc2]
; AVX512-NEXT: retq ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_v8f64:
; AVX512VL: ## BB#0: ## %entry
; AVX512VL-NEXT: vfmadd213pd %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x48,0xa8,0xc2]
; AVX512VL-NEXT: retq ## encoding: [0xc3]
entry:
%call = call <8 x double> @llvm.fma.v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c)
ret <8 x double> %call
}
declare float @llvm.fma.f32(float, float, float)
declare double @llvm.fma.f64(double, double, double)
declare x86_fp80 @llvm.fma.f80(x86_fp80, x86_fp80, x86_fp80)
declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)
declare <8 x float> @llvm.fma.v8f32(<8 x float>, <8 x float>, <8 x float>)
declare <16 x float> @llvm.fma.v16f32(<16 x float>, <16 x float>, <16 x float>)
declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>)
declare <4 x double> @llvm.fma.v4f64(<4 x double>, <4 x double>, <4 x double>)
declare <8 x double> @llvm.fma.v8f64(<8 x double>, <8 x double>, <8 x double>)
attributes #0 = { nounwind }