[SystemZ] Rework getInterleavedMemoryOpCost()

Model this function more closely after the BasicTTIImpl version, with
separate handling of loads and stores. For loads, the set of actually loaded
vectors is checked.

This makes it more readable and just slightly more accurate generally.

Review: Ulrich Weigand
https://reviews.llvm.org/D53071

llvm-svn: 345998
This commit is contained in:
Jonas Paulsson 2018-11-02 17:15:36 +00:00
parent 11793f429f
commit 79f2441eee
2 changed files with 194 additions and 13 deletions

View File

@ -979,6 +979,11 @@ int SystemZTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
return NumOps; return NumOps;
} }
// The generic implementation of getInterleavedMemoryOpCost() is based on
// adding costs of the memory operations plus all the extracts and inserts
// needed for using / defining the vector operands. The SystemZ version does
// roughly the same but bases the computations on vector permutations
// instead.
int SystemZTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, int SystemZTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
unsigned Factor, unsigned Factor,
ArrayRef<unsigned> Indices, ArrayRef<unsigned> Indices,
@ -993,22 +998,49 @@ int SystemZTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
assert(isa<VectorType>(VecTy) && assert(isa<VectorType>(VecTy) &&
"Expect a vector type for interleaved memory op"); "Expect a vector type for interleaved memory op");
int NumWideParts = getNumVectorRegs(VecTy); // Return the ceiling of dividing A by B.
auto ceil = [](unsigned A, unsigned B) { return (A + B - 1) / B; };
// How many source vectors are handled to produce a vectorized operand? unsigned NumElts = VecTy->getVectorNumElements();
int NumElsPerVector = (VecTy->getVectorNumElements() / NumWideParts); assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");
int NumSrcParts = unsigned VF = NumElts / Factor;
((NumWideParts > NumElsPerVector) ? NumElsPerVector : NumWideParts); unsigned NumEltsPerVecReg = (128U / getScalarSizeInBits(VecTy));
unsigned NumVectorMemOps = getNumVectorRegs(VecTy);
unsigned NumPermutes = 0;
// A Load group may have gaps. if (Opcode == Instruction::Load) {
unsigned NumOperands = // Loading interleave groups may have gaps, which may mean fewer
((Opcode == Instruction::Load) ? Indices.size() : Factor); // loads. Find out how many vectors will be loaded in total, and in how
// many of them each value will be in.
BitVector UsedInsts(NumVectorMemOps, false);
std::vector<BitVector> ValueVecs(Factor, BitVector(NumVectorMemOps, false));
for (unsigned Index : Indices)
for (unsigned Elt = 0; Elt < VF; ++Elt) {
unsigned Vec = (Index + Elt * Factor) / NumEltsPerVecReg;
UsedInsts.set(Vec);
ValueVecs[Index].set(Vec);
}
NumVectorMemOps = UsedInsts.count();
// Each needed permute takes two vectors as input. for (unsigned Index : Indices) {
if (NumSrcParts > 1) // Estimate that each loaded source vector containing this Index
NumSrcParts--; // requires one operation, except that vperm can handle two input
int NumPermutes = NumSrcParts * NumOperands; // registers first time for each dst vector.
unsigned NumSrcVecs = ValueVecs[Index].count();
unsigned NumDstVecs = ceil(VF * getScalarSizeInBits(VecTy), 128U);
assert (NumSrcVecs >= NumDstVecs && "Expected at least as many sources");
NumPermutes += std::max(1U, NumSrcVecs - NumDstVecs);
}
} else {
// Estimate the permutes for each stored vector as the smaller of the
// number of elements and the number of source vectors. Subtract one per
// dst vector for vperm (S.A.).
unsigned NumSrcVecs = std::min(NumEltsPerVecReg, Factor);
unsigned NumDstVecs = NumVectorMemOps;
assert (NumSrcVecs > 1 && "Expected at least two source vectors.");
NumPermutes += (NumDstVecs * NumSrcVecs) - NumDstVecs;
}
// Cost of load/store operations and the permutations needed. // Cost of load/store operations and the permutations needed.
return NumWideParts + NumPermutes; return NumVectorMemOps + NumPermutes;
} }

View File

@ -0,0 +1,149 @@
; REQUIRES: asserts
; RUN: opt -mtriple=s390x-unknown-linux -mcpu=z13 -loop-vectorize \
; RUN: -debug-only=loop-vectorize,vectorutils -max-interleave-group-factor=64\
; RUN: -disable-output < %s 2>&1 | FileCheck %s
;
; Check that some cost estimations for interleave groups make sense.
; This loop is loading four i16 values at indices [0, 1, 2, 3], with a stride
; of 4. At VF=4, memory interleaving means loading 4 * 4 * 16 bits = 2 vector
; registers. Each of the 4 vector values must then be constructed from the
; two vector registers using one vperm each, which gives a cost of 2 + 4 = 6.
;
; CHECK: LV: Checking a loop in "fun0"
; CHECK: LV: Found an estimated cost of 6 for VF 4 For instruction: %ld0 = load i16
; CHECK: LV: Found an estimated cost of 0 for VF 4 For instruction: %ld1 = load i16
; CHECK: LV: Found an estimated cost of 0 for VF 4 For instruction: %ld2 = load i16
; CHECK: LV: Found an estimated cost of 0 for VF 4 For instruction: %ld3 = load i16
; Interleave group of four i16 loads at indices [0,1,2,3] with stride 4;
; the four values are summed and the result stored to %dst.
; Value names (%ld0..%ld3) are referenced by the CHECK lines above — keep.
define void @fun0(i16 *%ptr, i16 *%dst) {
entry:
br label %for.body
for.body:
; %ivptr advances by 4 i16s per iteration; %iv counts up by 4 until 100.
%ivptr = phi i16* [ %ptr.next, %for.body ], [ %ptr, %entry ]
%iv = phi i64 [ %inc, %for.body ], [ 0, %entry ]
%inc = add i64 %iv, 4
; Four consecutive loads forming the interleave group (offsets 0..3).
%ptr0 = getelementptr inbounds i16, i16* %ivptr, i64 0
%ld0 = load i16, i16* %ptr0
%ptr1 = getelementptr inbounds i16, i16* %ivptr, i64 1
%ld1 = load i16, i16* %ptr1
%ptr2 = getelementptr inbounds i16, i16* %ivptr, i64 2
%ld2 = load i16, i16* %ptr2
%ptr3 = getelementptr inbounds i16, i16* %ivptr, i64 3
%ld3 = load i16, i16* %ptr3
; Sum the four group members and store at %dst[%iv].
%a1 = add i16 %ld0, %ld1
%a2 = add i16 %a1, %ld2
%a3 = add i16 %a2, %ld3
%dstptr = getelementptr inbounds i16, i16* %dst, i64 %iv
store i16 %a3, i16* %dstptr
%ptr.next = getelementptr inbounds i16, i16* %ivptr, i64 4
%cmp = icmp eq i64 %inc, 100
br i1 %cmp, label %for.end, label %for.body
for.end:
ret void
}
; This loop loads one i8 value in a stride of 3. At VF=16, this means loading
; 3 vector registers, and then constructing the vector value with two vperms,
; which gives a cost of 5.
;
; CHECK: LV: Checking a loop in "fun1"
; CHECK: LV: Found an estimated cost of 5 for VF 16 For instruction: %ld0 = load i8
; Single i8 load with pointer stride 3 (interleave factor 3, one index used).
; NOTE(review): %iv increments by 4 while the pointer advances by 3 — the IV
; only drives the store index and trip count here.
define void @fun1(i8 *%ptr, i8 *%dst) {
entry:
br label %for.body
for.body:
%ivptr = phi i8* [ %ptr.next, %for.body ], [ %ptr, %entry ]
%iv = phi i64 [ %inc, %for.body ], [ 0, %entry ]
%inc = add i64 %iv, 4
; Only index 0 of the stride-3 group is loaded (the group has gaps).
%ptr0 = getelementptr inbounds i8, i8* %ivptr, i64 0
%ld0 = load i8, i8* %ptr0
%dstptr = getelementptr inbounds i8, i8* %dst, i64 %iv
store i8 %ld0, i8* %dstptr
%ptr.next = getelementptr inbounds i8, i8* %ivptr, i64 3
%cmp = icmp eq i64 %inc, 100
br i1 %cmp, label %for.end, label %for.body
for.end:
ret void
}
; This loop is loading 4 i8 values at indexes [0, 1, 2, 3], with a stride of
; 32. At VF=2, this means loading 2 vector registers, and using 4 vperms to
; produce the vector values, which gives a cost of 6.
;
; CHECK: LV: Checking a loop in "fun2"
; CHECK: LV: Found an estimated cost of 6 for VF 2 For instruction: %ld0 = load i8
; CHECK: LV: Found an estimated cost of 0 for VF 2 For instruction: %ld1 = load i8
; CHECK: LV: Found an estimated cost of 0 for VF 2 For instruction: %ld2 = load i8
; CHECK: LV: Found an estimated cost of 0 for VF 2 For instruction: %ld3 = load i8
; Four i8 loads at indices [0,1,2,3] with pointer stride 32: a wide-stride
; interleave group whose used elements fit 2 vector registers at VF=2.
define void @fun2(i8 *%ptr, i8 *%dst) {
entry:
br label %for.body
for.body:
%ivptr = phi i8* [ %ptr.next, %for.body ], [ %ptr, %entry ]
%iv = phi i64 [ %inc, %for.body ], [ 0, %entry ]
%inc = add i64 %iv, 4
; Group members at byte offsets 0..3 of the current 32-byte-stride pointer.
%ptr0 = getelementptr inbounds i8, i8* %ivptr, i64 0
%ld0 = load i8, i8* %ptr0
%ptr1 = getelementptr inbounds i8, i8* %ivptr, i64 1
%ld1 = load i8, i8* %ptr1
%ptr2 = getelementptr inbounds i8, i8* %ivptr, i64 2
%ld2 = load i8, i8* %ptr2
%ptr3 = getelementptr inbounds i8, i8* %ivptr, i64 3
%ld3 = load i8, i8* %ptr3
; Fold the four loaded bytes into one value and store it.
%a1 = add i8 %ld0, %ld1
%a2 = add i8 %a1, %ld2
%a3 = add i8 %a2, %ld3
%dstptr = getelementptr inbounds i8, i8* %dst, i64 %iv
store i8 %a3, i8* %dstptr
%ptr.next = getelementptr inbounds i8, i8* %ivptr, i64 32
%cmp = icmp eq i64 %inc, 100
br i1 %cmp, label %for.end, label %for.body
for.end:
ret void
}
; This loop is loading 4 i8 values at indexes [0, 1, 2, 3], with a stride of
; 30. At VF=2, this means loading 3 vector registers, and using 4 vperms to
; produce the vector values, which gives a cost of 7. This is the same loop
; as in fun2, except the stride makes the second iterations values overlap a
; vector register boundary.
;
; CHECK: LV: Checking a loop in "fun3"
; CHECK: LV: Found an estimated cost of 7 for VF 2 For instruction: %ld0 = load i8
; CHECK: LV: Found an estimated cost of 0 for VF 2 For instruction: %ld1 = load i8
; CHECK: LV: Found an estimated cost of 0 for VF 2 For instruction: %ld2 = load i8
; CHECK: LV: Found an estimated cost of 0 for VF 2 For instruction: %ld3 = load i8
; Same shape as @fun2 but with pointer stride 30, so at VF=2 the second
; iteration's elements straddle a vector-register boundary (3 loads, not 2).
define void @fun3(i8 *%ptr, i8 *%dst) {
entry:
br label %for.body
for.body:
%ivptr = phi i8* [ %ptr.next, %for.body ], [ %ptr, %entry ]
%iv = phi i64 [ %inc, %for.body ], [ 0, %entry ]
%inc = add i64 %iv, 4
; Group members at byte offsets 0..3 of the current 30-byte-stride pointer.
%ptr0 = getelementptr inbounds i8, i8* %ivptr, i64 0
%ld0 = load i8, i8* %ptr0
%ptr1 = getelementptr inbounds i8, i8* %ivptr, i64 1
%ld1 = load i8, i8* %ptr1
%ptr2 = getelementptr inbounds i8, i8* %ivptr, i64 2
%ld2 = load i8, i8* %ptr2
%ptr3 = getelementptr inbounds i8, i8* %ivptr, i64 3
%ld3 = load i8, i8* %ptr3
; Fold the four loaded bytes into one value and store it.
%a1 = add i8 %ld0, %ld1
%a2 = add i8 %a1, %ld2
%a3 = add i8 %a2, %ld3
%dstptr = getelementptr inbounds i8, i8* %dst, i64 %iv
store i8 %a3, i8* %dstptr
%ptr.next = getelementptr inbounds i8, i8* %ivptr, i64 30
%cmp = icmp eq i64 %inc, 100
br i1 %cmp, label %for.end, label %for.body
for.end:
ret void
}