diff --git a/llvm/test/CodeGen/X86/fmaxnum.ll b/llvm/test/CodeGen/X86/fmaxnum.ll
index beeefac9a29e..c6c9fa73aac6 100644
--- a/llvm/test/CodeGen/X86/fmaxnum.ll
+++ b/llvm/test/CodeGen/X86/fmaxnum.ll
@@ -335,8 +335,8 @@ define <4 x float> @maxnum_intrinsic_nnan_fmf_v4f32(<4 x float> %a, <4 x float> %
 
 ; FIXME: Current (but legacy someday): a function-level attribute should also enable the fold.
 
-define float @maxnum_intrinsic_nnan_fmf_f32(float %a, float %b) #0 {
-; SSE-LABEL: maxnum_intrinsic_nnan_fmf_f32:
+define float @maxnum_intrinsic_nnan_attr_f32(float %a, float %b) #0 {
+; SSE-LABEL: maxnum_intrinsic_nnan_attr_f32:
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps %xmm0, %xmm2
 ; SSE-NEXT:    cmpunordss %xmm0, %xmm2
@@ -348,7 +348,7 @@ define float @maxnum_intrinsic_nnan_fmf_f32(float %a, float %b) #0 {
 ; SSE-NEXT:    movaps %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: maxnum_intrinsic_nnan_fmf_f32:
+; AVX-LABEL: maxnum_intrinsic_nnan_attr_f32:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vmaxss %xmm0, %xmm1, %xmm2
 ; AVX-NEXT:    vcmpunordss %xmm0, %xmm0, %xmm0
diff --git a/llvm/test/CodeGen/X86/fminnum.ll b/llvm/test/CodeGen/X86/fminnum.ll
index 7374f8e9132f..f5205035c1b4 100644
--- a/llvm/test/CodeGen/X86/fminnum.ll
+++ b/llvm/test/CodeGen/X86/fminnum.ll
@@ -327,8 +327,8 @@ define <2 x double> @minnum_intrinsic_nnan_fmf_v2f64(<2 x double> %a, <2 x doubl
 
 ; FIXME: Current (but legacy someday): a function-level attribute should also enable the fold.
 
-define double @minnum_intrinsic_nnan_fmf_f64(double %a, double %b) #0 {
-; SSE-LABEL: minnum_intrinsic_nnan_fmf_f64:
+define double @minnum_intrinsic_nnan_attr_f64(double %a, double %b) #0 {
+; SSE-LABEL: minnum_intrinsic_nnan_attr_f64:
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    movapd %xmm0, %xmm2
 ; SSE-NEXT:    cmpunordsd %xmm0, %xmm2
@@ -340,13 +340,13 @@ define double @minnum_intrinsic_nnan_fmf_f64(double %a, double %b) #0 {
 ; SSE-NEXT:    movapd %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: minnum_intrinsic_nnan_fmf_f64:
+; AVX-LABEL: minnum_intrinsic_nnan_attr_f64:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vminsd %xmm0, %xmm1, %xmm2
 ; AVX-NEXT:    vcmpunordsd %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
 ; AVX-NEXT:    retq
-  %r = tail call nnan double @llvm.minnum.f64(double %a, double %b)
+  %r = tail call double @llvm.minnum.f64(double %a, double %b)
   ret double %r
 }
 
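
Note on the #0 reference in these tests: it points at an attribute group defined elsewhere in fmaxnum.ll/fminnum.ll, which this patch does not touch. As a minimal sketch only (the exact attribute string is an assumption, not shown in the hunks above), the renamed f64 test plus a function-level no-NaNs attribute would look roughly like this, with the per-call nnan flag removed so any NaN-related fold would have to come from the attribute rather than from fast-math flags on the call:

; Standalone sketch for illustration; not part of the patch.
define double @minnum_intrinsic_nnan_attr_f64(double %a, double %b) #0 {
  ; No 'nnan' fast-math flag on the call: the no-NaNs hint comes only from #0.
  %r = tail call double @llvm.minnum.f64(double %a, double %b)
  ret double %r
}

declare double @llvm.minnum.f64(double, double)

; Assumed definition of the attribute group the tests reference.
attributes #0 = { "no-nans-fp-math"="true" }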