NewGVN: Handle coercion of constant stores, loads, memory insts.

Summary:
Depends on D30928.

This adds support for coercing loads from stores, other loads, and memory intrinsics in the cases that do not require inserting new instructions.
Another few tests down.
I added the relevant tests from rle.ll.
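
As a hypothetical sketch of the store case (illustrative IR, not one of the committed tests; the function name is made up): when a narrow load reads part of a wider constant store through the same pointer, NewGVN can now evaluate the load to a constant, and no new instructions need to be inserted.

define i16 @forward_from_store(i32* %p) {
entry:
  store i32 258, i32* %p
  %q = bitcast i32* %p to i16*
  ; On a little-endian target the low two bytes of the i32 store are
  ; forwarded, so this load evaluates to the constant 258 (0x0102).
  %v = load i16, i16* %q
  ret i16 %v
}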

Reviewers: davide

Subscribers: llvm-commits, Prazek

Differential Revision: https://reviews.llvm.org/D30929

llvm-svn: 299330
Author: Daniel Berlin
Date: 2017-04-02 13:23:44 +0000
parent fca527af5c
commit 07daac8a36
6 changed files with 152 additions and 6 deletions


@@ -83,12 +83,14 @@
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/MemorySSA.h"
#include "llvm/Transforms/Utils/PredicateInfo.h"
#include "llvm/Transforms/Utils/VNCoercion.h"
#include <unordered_map>
#include <utility>
#include <vector>
using namespace llvm;
using namespace PatternMatch;
using namespace llvm::GVNExpression;
using namespace llvm::VNCoercion;
#define DEBUG_TYPE "newgvn"
STATISTIC(NumGVNInstrDeleted, "Number of instructions deleted");
@@ -359,6 +361,8 @@ private:
  const Expression *checkSimplificationResults(Expression *, Instruction *,
                                               Value *);
  const Expression *performSymbolicEvaluation(Value *);
  const Expression *performSymbolicLoadCoercion(Type *, Value *, LoadInst *,
                                                Instruction *, MemoryAccess *);
  const Expression *performSymbolicLoadEvaluation(Instruction *);
  const Expression *performSymbolicStoreEvaluation(Instruction *);
  const Expression *performSymbolicCallEvaluation(Instruction *);
@@ -867,6 +871,86 @@ const Expression *NewGVN::performSymbolicStoreEvaluation(Instruction *I) {
  return createStoreExpression(SI, StoreAccess);
}
// See if we can extract the value loaded through a pointer from an earlier
// load, store, or memory intrinsic.
const Expression *
NewGVN::performSymbolicLoadCoercion(Type *LoadType, Value *LoadPtr,
                                    LoadInst *LI, Instruction *DepInst,
                                    MemoryAccess *DefiningAccess) {
  assert((!LI || LI->isSimple()) && "Not a simple load");
  if (auto *DepSI = dyn_cast<StoreInst>(DepInst)) {
    // Can't forward from non-atomic to atomic without violating memory model.
    // Also, there is no need to coerce if the types match; the value will
    // just be propagated.
    if (LI->isAtomic() > DepSI->isAtomic() ||
        LoadType == DepSI->getValueOperand()->getType())
      return nullptr;
    int Offset = analyzeLoadFromClobberingStore(LoadType, LoadPtr, DepSI, DL);
    if (Offset >= 0) {
      if (auto *C = dyn_cast<Constant>(
              lookupOperandLeader(DepSI->getValueOperand()))) {
        DEBUG(dbgs() << "Coercing load from store " << *DepSI
                     << " to constant " << *C << "\n");
        return createConstantExpression(
            getConstantStoreValueForLoad(C, Offset, LoadType, DL));
      }
    }
  } else if (auto *DepLI = dyn_cast<LoadInst>(DepInst)) {
    // Can't forward from non-atomic to atomic without violating memory model.
    if (LI->isAtomic() > DepLI->isAtomic())
      return nullptr;
    int Offset = analyzeLoadFromClobberingLoad(LoadType, LoadPtr, DepLI, DL);
    if (Offset >= 0) {
      // If the earlier load produced a constant, we can reuse its bits here.
      if (auto *C = dyn_cast<Constant>(lookupOperandLeader(DepLI)))
        if (auto *PossibleConstant =
                getConstantLoadValueForLoad(C, Offset, LoadType, DL)) {
          DEBUG(dbgs() << "Coercing load from load " << *LI << " to constant "
                       << *PossibleConstant << "\n");
          return createConstantExpression(PossibleConstant);
        }
    }
  } else if (auto *DepMI = dyn_cast<MemIntrinsic>(DepInst)) {
    int Offset = analyzeLoadFromClobberingMemInst(LoadType, LoadPtr, DepMI, DL);
    if (Offset >= 0) {
      if (auto *PossibleConstant =
              getConstantMemInstValueForLoad(DepMI, Offset, LoadType, DL)) {
        DEBUG(dbgs() << "Coercing load from meminst " << *DepMI
                     << " to constant " << *PossibleConstant << "\n");
        return createConstantExpression(PossibleConstant);
      }
    }
  }

  // All of the below are only true if the loaded pointer is produced
  // by the dependent instruction.
  if (LoadPtr != lookupOperandLeader(DepInst) &&
      !AA->isMustAlias(LoadPtr, DepInst))
    return nullptr;
  // If this load really doesn't depend on anything, then we must be loading
  // an undef value. This can happen when loading from a fresh allocation
  // with no intervening stores, for example. Note that this is only true in
  // the case that the result of the allocation is pointer-equal to the load
  // pointer.
  if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI)) {
    return createConstantExpression(UndefValue::get(LoadType));
  }
  // If this load occurs right after a lifetime begin, the loaded value is
  // undefined.
  else if (auto *II = dyn_cast<IntrinsicInst>(DepInst)) {
    if (II->getIntrinsicID() == Intrinsic::lifetime_start)
      return createConstantExpression(UndefValue::get(LoadType));
  }
  // If this load follows a calloc (which zero-initializes memory), then the
  // loaded value is zero.
  else if (isCallocLikeFn(DepInst, TLI)) {
    return createConstantExpression(Constant::getNullValue(LoadType));
  }

  return nullptr;
}
const Expression *NewGVN::performSymbolicLoadEvaluation(Instruction *I) {
  auto *LI = cast<LoadInst>(I);

@@ -888,11 +972,19 @@ const Expression *NewGVN::performSymbolicLoadEvaluation(Instruction *I) {
      // If the defining instruction is not reachable, replace with undef.
      if (!ReachableBlocks.count(DefiningInst->getParent()))
        return createConstantExpression(UndefValue::get(LI->getType()));
      // This will handle stores and memory insts. We only do this if the
      // defining access has a different type, or it is a pointer produced by
      // certain memory operations that cause the memory to have a fixed value
      // (i.e., things like calloc).
      const Expression *CoercionResult = performSymbolicLoadCoercion(
          LI->getType(), LoadAddressLeader, LI, DefiningInst, DefiningAccess);
      if (CoercionResult)
        return CoercionResult;
    }
  }

  const Expression *E =
-      createLoadExpression(LI->getType(), LI->getPointerOperand(), LI,
+      createLoadExpression(LI->getType(), LoadAddressLeader, LI,
                           lookupMemoryAccessEquiv(DefiningAccess));
  return E;
}
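
The tail end of performSymbolicLoadCoercion also folds loads whose defining access fixes the memory contents without any coercion. A hypothetical sketch of the alloca and calloc cases (illustrative IR, not from this commit's tests; the function names are made up):

define i32 @load_fresh_alloca() {
entry:
  %a = alloca i32
  ; No intervening store, so the loaded value is undef.
  %v = load i32, i32* %a
  ret i32 %v
}

declare i8* @calloc(i64, i64)

define i8 @load_after_calloc() {
entry:
  ; calloc zero-initializes its memory, so the load folds to 0.
  %m = call i8* @calloc(i64 1, i64 8)
  %v = load i8, i8* %m
  ret i8 %v
}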


@@ -1,4 +1,3 @@
-; XFAIL: *
; RUN: opt -S -basicaa -newgvn < %s | FileCheck %s
; RUN: opt -S -basicaa -newgvn -disable-simplify-libcalls < %s | FileCheck %s -check-prefix=CHECK_NO_LIBCALLS
; Check that loads from calloc are recognized as being zero.


@@ -1,4 +1,3 @@
-; XFAIL: *
; GVN failed to do constant expression folding and expanded
; them unfolded in many places, producing exponentially large const
; expressions. As a result, the compilation never finished.


@@ -1,4 +1,3 @@
-; XFAIL: *
; RUN: opt -S -basicaa -newgvn < %s | FileCheck %s
; RUN: opt -S -basicaa -newgvn -disable-simplify-libcalls < %s | FileCheck %s -check-prefix=CHECK_NO_LIBCALLS
; PR13694


@@ -1,6 +1,4 @@
-; XFAIL: *
; RUN: opt -newgvn -S -o - < %s | FileCheck %s
-; NewGVN fails this due to missing load coercion
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"


@@ -0,0 +1,59 @@
; RUN: opt < %s -data-layout="e-p:32:32:32-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-n8:16:32" -basicaa -newgvn -S -die | FileCheck %s
; RUN: opt < %s -data-layout="E-p:32:32:32-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-n32" -basicaa -newgvn -S -die | FileCheck %s
; memset -> i16 forwarding.
define signext i16 @memset_to_i16_local(i16* %A) nounwind ssp {
entry:
  %conv = bitcast i16* %A to i8*
  tail call void @llvm.memset.p0i8.i64(i8* %conv, i8 1, i64 200, i32 1, i1 false)
  %arrayidx = getelementptr inbounds i16, i16* %A, i64 42
  %tmp2 = load i16, i16* %arrayidx
  ret i16 %tmp2
; CHECK-LABEL: @memset_to_i16_local(
; CHECK-NOT: load
; CHECK: ret i16 257
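; (Each memset byte is 0x01, so the two bytes forwarded to the i16 give 0x0101 = 257.)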
}
@GCst = constant {i32, float, i32 } { i32 42, float 14., i32 97 }
@GCst_as1 = addrspace(1) constant {i32, float, i32 } { i32 42, float 14., i32 97 }

; memcpy -> float forwarding.
define float @memcpy_to_float_local(float* %A) nounwind ssp {
entry:
  %conv = bitcast float* %A to i8* ; <i8*> [#uses=1]
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %conv, i8* bitcast ({i32, float, i32 }* @GCst to i8*), i64 12, i32 1, i1 false)
  %arrayidx = getelementptr inbounds float, float* %A, i64 1 ; <float*> [#uses=1]
  %tmp2 = load float, float* %arrayidx ; <float> [#uses=1]
  ret float %tmp2
; CHECK-LABEL: @memcpy_to_float_local(
; CHECK-NOT: load
; CHECK: ret float 1.400000e+01
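; (The memcpy copies @GCst over %A; the load at element 1 reads back the float field, 14.0, at byte offset 4.)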
}
; memcpy from address space 1
define float @memcpy_to_float_local_as1(float* %A) nounwind ssp {
entry:
  %conv = bitcast float* %A to i8* ; <i8*> [#uses=1]
  tail call void @llvm.memcpy.p0i8.p1i8.i64(i8* %conv, i8 addrspace(1)* bitcast ({i32, float, i32 } addrspace(1)* @GCst_as1 to i8 addrspace(1)*), i64 12, i32 1, i1 false)
  %arrayidx = getelementptr inbounds float, float* %A, i64 1 ; <float*> [#uses=1]
  %tmp2 = load float, float* %arrayidx ; <float> [#uses=1]
  ret float %tmp2
; CHECK-LABEL: @memcpy_to_float_local_as1(
; CHECK-NOT: load
; CHECK: ret float 1.400000e+01
}
; PR6642
define i32 @memset_to_load() nounwind readnone {
entry:
  %x = alloca [256 x i32], align 4 ; <[256 x i32]*> [#uses=2]
  %tmp = bitcast [256 x i32]* %x to i8* ; <i8*> [#uses=1]
  call void @llvm.memset.p0i8.i64(i8* %tmp, i8 0, i64 1024, i32 4, i1 false)
  %arraydecay = getelementptr inbounds [256 x i32], [256 x i32]* %x, i32 0, i32 0 ; <i32*>
  %tmp1 = load i32, i32* %arraydecay ; <i32> [#uses=1]
  ret i32 %tmp1
; CHECK-LABEL: @memset_to_load(
; CHECK: ret i32 0
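; (The memset stores zero bytes across the whole array, so the forwarded i32 is 0.)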
}
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
declare void @llvm.memcpy.p0i8.p1i8.i64(i8* nocapture, i8 addrspace(1)* nocapture, i64, i32, i1) nounwind