Swift: Only build vldm/vstm with Q-register-aligned register lists

Unaligned vldm/vstm need more uops and are therefore generally slower on Swift.

radar://14522102

llvm-svn: 189961
Arnold Schwaighofer 2013-09-04 17:41:16 +00:00
parent d9063c46f5
commit d7e8d92606
2 changed files with 32 additions and 1 deletion
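For context on what "Q register aligned" means here: each NEON Q register aliases an even/odd pair of D registers (q(n) = d(2n):d(2n+1)), so a vldm/vstm register list only lines up with whole Q registers when it starts at an even-numbered D register. A minimal standalone C++ sketch of that mapping (illustration only, not part of the patch):

#include <cstdio>

int main() {
  // Standard ARM NEON register aliasing: q(n) overlays d(2n) and d(2n+1).
  for (unsigned d = 16; d <= 20; ++d)
    std::printf("d%u is the %s half of q%u\n", d,
                (d & 1) ? "high" : "low", d / 2);
  // A list {d17,d18,d19,d20} starts in the middle of q8 and is unaligned;
  // {d18,d19,d20} starts cleanly at q9, which is what the test below expects.
  return 0;
}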


@@ -489,7 +489,10 @@ ARMLoadStoreOpt::MergeLDR_STR(MachineBasicBlock &MBB, unsigned SIndex,
   if (Reg != ARM::SP &&
       NewOffset == Offset + (int)Size &&
       ((isNotVFP && RegNum > PRegNum) ||
-       ((Count < Limit) && RegNum == PRegNum+1))) {
+       ((Count < Limit) && RegNum == PRegNum+1)) &&
+      // On Swift we don't want vldm/vstm to start with an odd register num
+      // because Q register unaligned vldm/vstm need more uops.
+      (!STI->isSwift() || isNotVFP || Count != 1 || !(PRegNum & 0x1))) {
     Offset += Size;
     PRegNum = RegNum;
     ++Count;
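To make the new condition easier to follow, here is a hedged, self-contained C++ sketch of just the Swift guard (the names isSwift, isNotVFP, Count, and PRegNum are taken from the hunk above; the function itself is illustrative, not LLVM code):

#include <cstdio>

// Returns true when the register list may keep growing. On Swift, a VFP/NEON
// run (isNotVFP == false) holding exactly one register so far (Count == 1) is
// rejected if that register's number PRegNum is odd, because the resulting
// vldm/vstm would start Q-register unaligned.
static bool mayGrowRegList(bool isSwift, bool isNotVFP, unsigned Count,
                           unsigned PRegNum) {
  return !isSwift || isNotVFP || Count != 1 || !(PRegNum & 0x1);
}

int main() {
  std::printf("%d\n", mayGrowRegList(true, false, 1, 17)); // 0: d17 start rejected
  std::printf("%d\n", mayGrowRegList(true, false, 1, 18)); // 1: d18 start is fine
  return 0;
}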


@@ -0,0 +1,28 @@
; RUN: llc < %s -mcpu=swift -mtriple=armv7s-apple-ios | FileCheck %s
; vldm with a register list that is not aligned to q registers needs more
; micro-ops, so its use is not beneficial on Swift.
; CHECK-LABEL: test_vldm
; CHECK: vldmia r1, {d18, d19, d20}
; CHECK-NOT: vldmia r1, {d17, d18, d19, d20}
define double @test_vldm(double %a, double %b, double* nocapture %x) {
entry:
%mul73 = fmul double %a, %b
%addr1 = getelementptr double* %x, i32 1
%addr2 = getelementptr double* %x, i32 2
%addr3 = getelementptr double* %x, i32 3
%load0 = load double* %x
%load1 = load double* %addr1
%load2 = load double* %addr2
%load3 = load double* %addr3
%sub = fsub double %mul73, %load1
%mul = fmul double %mul73, %load0
%add = fadd double %mul73, %load2
%div = fdiv double %mul73, %load3
%red = fadd double %sub, %mul
%red2 = fadd double %div, %add
%red3 = fsub double %red, %red2
ret double %red3
}