[DAGCombiner] Fix load-store forwarding of indexed loads.

Summary:
Handle the extra output from indexed loads in cases where we wish to
forward a load value directly from a preceding store.

Fixes PR39571.
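
For context, a minimal sketch of the replacement idea (not part of the patch itself):
it assumes the code sits inside DAGCombiner where DAG and CombineTo are in scope, and
the names StoredVal/Chain are illustrative. A pre-/post-indexed LoadSDNode defines three
results (loaded value, updated base pointer, chain), so all three need replacements; the
updated pointer can be rebuilt as an explicit ADD/SUB of base and offset.

  if (LD->isIndexed()) {
    // Decrementing modes subtract the offset; incrementing modes add it.
    bool IsSub = LD->getAddressingMode() == ISD::PRE_DEC ||
                 LD->getAddressingMode() == ISD::POST_DEC;
    // getBasePtr()/getOffset() are the same operands the patch reads as
    // getOperand(1)/getOperand(2).
    SDValue NewPtr = DAG.getNode(IsSub ? ISD::SUB : ISD::ADD, SDLoc(LD),
                                 LD->getBasePtr().getValueType(),
                                 LD->getBasePtr(), LD->getOffset());
    SDValue Ops[] = {StoredVal, NewPtr, Chain}; // value, pointer, chain
    return CombineTo(LD, Ops, 3);
  }
  return CombineTo(LD, StoredVal, Chain); // unindexed: only value and chain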

Reviewers: peter.smith, rengolin

Subscribers: javed.absar, hiraditya, arphaman, llvm-commits

Differential Revision: https://reviews.llvm.org/D54265

llvm-svn: 346654
Nirav Dave 2018-11-12 14:05:40 +00:00
parent ac7c4f1db3
commit a395e2df56
2 changed files with 50 additions and 3 deletions


@@ -12866,6 +12866,20 @@ SDValue DAGCombiner::ForwardStoreValueToDirectLoad(LoadSDNode *LD) {
   bool STCoversLD =
       (Offset >= 0) &&
       (Offset * 8 + LDMemType.getSizeInBits() <= STMemType.getSizeInBits());
+  auto ReplaceLd = [&](LoadSDNode *LD, SDValue Val, SDValue Chain) -> SDValue {
+    if (LD->isIndexed()) {
+      bool IsSub = (LD->getAddressingMode() == ISD::PRE_DEC ||
+                    LD->getAddressingMode() == ISD::POST_DEC);
+      unsigned Opc = IsSub ? ISD::SUB : ISD::ADD;
+      SDValue Idx = DAG.getNode(Opc, SDLoc(LD), LD->getOperand(1).getValueType(),
+                                LD->getOperand(1), LD->getOperand(2));
+      SDValue Ops[] = {Val, Idx, Chain};
+      return CombineTo(LD, Ops, 3);
+    }
+    return CombineTo(LD, Val, Chain);
+  };
   if (!STCoversLD)
     return SDValue();
@@ -12873,7 +12887,7 @@ SDValue DAGCombiner::ForwardStoreValueToDirectLoad(LoadSDNode *LD) {
   if (Offset == 0 && LDType == STType && STMemType == LDMemType) {
     // Simple case: Direct non-truncating forwarding
     if (LDType.getSizeInBits() == LDMemType.getSizeInBits())
-      return CombineTo(LD, ST->getValue(), Chain);
+      return ReplaceLd(LD, ST->getValue(), Chain);
     // Can we model the truncate and extension with an and mask?
     if (STType.isInteger() && LDMemType.isInteger() && !STType.isVector() &&
         !LDMemType.isVector() && LD->getExtensionType() != ISD::SEXTLOAD) {
@@ -12883,7 +12897,7 @@ SDValue DAGCombiner::ForwardStoreValueToDirectLoad(LoadSDNode *LD) {
                                            STMemType.getSizeInBits()),
                                       SDLoc(ST), STType);
       auto Val = DAG.getNode(ISD::AND, SDLoc(LD), LDType, ST->getValue(), Mask);
-      return CombineTo(LD, Val, Chain);
+      return ReplaceLd(LD, Val, Chain);
     }
   }
@@ -12908,7 +12922,7 @@ SDValue DAGCombiner::ForwardStoreValueToDirectLoad(LoadSDNode *LD) {
     }
     if (!extendLoadedValueToExtension(LD, Val))
       continue;
-    return CombineTo(LD, Val, Chain);
+    return ReplaceLd(LD, Val, Chain);
   } while (false);

   // On failure, cleanup dead nodes we may have created.


@@ -0,0 +1,33 @@
; RUN: llc < %s -mtriple armv4t-unknown-linux-gnueabi -mattr=+strict-align
; Avoid crash from forwarding indexed loads back to the store.
%struct.anon = type { %struct.ma*, %struct.mb }
%struct.ma = type { i8 }
%struct.mb = type { i8, i8 }
%struct.anon.0 = type { %struct.anon.1 }
%struct.anon.1 = type { %struct.ds }
%struct.ds = type <{ i8, %union.ie }>
%union.ie = type { %struct.ib }
%struct.ib = type { i8, i8, i16 }
@a = common dso_local local_unnamed_addr global %struct.anon* null, align 4
@b = common dso_local local_unnamed_addr global %struct.anon.0 zeroinitializer, align 1
; Function Attrs: norecurse nounwind
define dso_local void @func() local_unnamed_addr {
entry:
%0 = load %struct.anon*, %struct.anon** @a, align 4
%ad = getelementptr inbounds %struct.anon, %struct.anon* %0, i32 0, i32 0
%1 = load %struct.ma*, %struct.ma** %ad, align 4
%c.sroa.0.0..sroa_idx = getelementptr inbounds %struct.ma, %struct.ma* %1, i32 0, i32 0
%c.sroa.0.0.copyload = load i8, i8* %c.sroa.0.0..sroa_idx, align 1
%cb = getelementptr inbounds %struct.anon, %struct.anon* %0, i32 0, i32 1
%band = getelementptr inbounds %struct.anon, %struct.anon* %0, i32 0, i32 1, i32 1
store i8 %c.sroa.0.0.copyload, i8* %band, align 4
store i8 6, i8* getelementptr inbounds (%struct.anon.0, %struct.anon.0* @b, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0), align 1
store i8 2, i8* getelementptr inbounds (%struct.anon.0, %struct.anon.0* @b, i32 0, i32 0, i32 0, i32 1, i32 0, i32 1), align 1
%2 = bitcast %struct.mb* %cb to i32*
%3 = load i32, i32* bitcast (i8* getelementptr inbounds (%struct.anon.0, %struct.anon.0* @b, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0) to i32*), align 1
store i32 %3, i32* %2, align 1
ret void
}