Revert r269081 and r269082 while I try to find the right incantation to fix MSVC build.

llvm-svn: 269091
Peter Collingbourne 2016-05-10 17:54:43 +00:00
parent b6211a0b4f
commit 4d41cb6cc6
4 changed files with 51 additions and 133 deletions

include/llvm/Analysis/BitSetUtils.h

@@ -1,38 +0,0 @@
-//===- BitSetUtils.h - Utilities related to pointer bitsets ------*- C++ -*-==//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains functions that make it easier to manipulate bitsets for
-// devirtualization.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_ANALYSIS_BITSETUTILS_H
-#define LLVM_ANALYSIS_BITSETUTILS_H
-
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/IR/CallSite.h"
-
-namespace llvm {
-
-/// A call site that could be devirtualized.
-struct DevirtCallSite {
-  /// The offset from the address point to the virtual function.
-  uint64_t Offset;
-  /// The call site itself.
-  CallSite CS;
-};
-
-/// Given a call to the intrinsic @llvm.bitset.test, find all devirtualizable
-/// call sites based on the call and return them in DevirtCalls.
-void findDevirtualizableCalls(SmallVectorImpl<DevirtCallSite> &DevirtCalls,
-                              SmallVectorImpl<CallInst *> &Assumes,
-                              CallInst *CI);
-}
-
-#endif
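
For orientation, here is how the reverted helper is driven: the pass walks the uses of the @llvm.bitset.test declaration and hands each call to findDevirtualizableCalls, exactly as the run() loop in the last file of this commit did. A minimal sketch under stated assumptions: BitSetTestFunc (the Function* for the intrinsic declaration) and handleCall (the pass-specific consumer) are hypothetical stand-ins, not names from this commit.

#include "llvm/Analysis/BitSetUtils.h"

// Hedged usage sketch of the reverted API, mirroring DevirtModule::run().
static void scanBitSetTests(llvm::Function *BitSetTestFunc) {
  using namespace llvm;
  for (const Use &U : BitSetTestFunc->uses()) {
    auto *CI = dyn_cast<CallInst>(U.getUser());
    if (!CI)
      continue;
    SmallVector<DevirtCallSite, 1> DevirtCalls;
    SmallVector<CallInst *, 1> Assumes; // guarding llvm.assume calls, if any
    findDevirtualizableCalls(DevirtCalls, Assumes, CI);
    for (const DevirtCallSite &Call : DevirtCalls)
      handleCall(Call.Offset, Call.CS); // Offset: byte offset of the vtable slot
  }
}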

lib/Analysis/BitSetUtils.cpp

@@ -1,82 +0,0 @@
-//===- BitSetUtils.cpp - Utilities related to pointer bitsets -------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains functions that make it easier to manipulate bitsets for
-// devirtualization.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Analysis/BitSetUtils.h"
-#include "llvm/IR/Intrinsics.h"
-#include "llvm/IR/Module.h"
-
-using namespace llvm;
-
-// Search for virtual calls that call FPtr and add them to DevirtCalls.
-static void
-findCallsAtConstantOffset(SmallVectorImpl<DevirtCallSite> &DevirtCalls,
-                          Value *FPtr, uint64_t Offset) {
-  for (const Use &U : FPtr->uses()) {
-    Value *User = U.getUser();
-    if (isa<BitCastInst>(User)) {
-      findCallsAtConstantOffset(DevirtCalls, User, Offset);
-    } else if (auto CI = dyn_cast<CallInst>(User)) {
-      DevirtCalls.push_back({Offset, CI});
-    } else if (auto II = dyn_cast<InvokeInst>(User)) {
-      DevirtCalls.push_back({Offset, II});
-    }
-  }
-}
-
-// Search for virtual calls that load from VPtr and add them to DevirtCalls.
-static void
-findLoadCallsAtConstantOffset(Module *M,
-                              SmallVectorImpl<DevirtCallSite> &DevirtCalls,
-                              Value *VPtr, uint64_t Offset) {
-  for (const Use &U : VPtr->uses()) {
-    Value *User = U.getUser();
-    if (isa<BitCastInst>(User)) {
-      findLoadCallsAtConstantOffset(M, DevirtCalls, User, Offset);
-    } else if (isa<LoadInst>(User)) {
-      findCallsAtConstantOffset(DevirtCalls, User, Offset);
-    } else if (auto GEP = dyn_cast<GetElementPtrInst>(User)) {
-      // Take into account the GEP offset.
-      if (VPtr == GEP->getPointerOperand() && GEP->hasAllConstantIndices()) {
-        SmallVector<Value *, 8> Indices(GEP->op_begin() + 1, GEP->op_end());
-        uint64_t GEPOffset = M->getDataLayout().getIndexedOffsetInType(
-            GEP->getSourceElementType(), Indices);
-        findLoadCallsAtConstantOffset(M, DevirtCalls, User, Offset + GEPOffset);
-      }
-    }
-  }
-}
-
-void llvm::findDevirtualizableCalls(
-    SmallVectorImpl<DevirtCallSite> &DevirtCalls,
-    SmallVectorImpl<CallInst *> &Assumes, CallInst *CI) {
-  assert(CI->getCalledFunction()->getIntrinsicID() == Intrinsic::bitset_test);
-
-  Module *M = CI->getParent()->getParent()->getParent();
-
-  // Find llvm.assume intrinsics for this llvm.bitset.test call.
-  for (const Use &CIU : CI->uses()) {
-    auto AssumeCI = dyn_cast<CallInst>(CIU.getUser());
-    if (AssumeCI) {
-      Function *F = AssumeCI->getCalledFunction();
-      if (F && F->getIntrinsicID() == Intrinsic::assume)
-        Assumes.push_back(AssumeCI);
-    }
-  }
-
-  // If we found any, search for virtual calls based on %p and add them to
-  // DevirtCalls.
-  if (!Assumes.empty())
-    findLoadCallsAtConstantOffset(M, DevirtCalls,
-                                  CI->getArgOperand(0)->stripPointerCasts(), 0);
-}
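
The only arithmetic in the walk above is the GEP case: all-constant indices are folded into a single byte offset through DataLayout. A self-contained sketch of that computation in isolation; the module, type, and index values here are illustrative assumptions, not taken from this commit.

#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"

// Hedged sketch: compute the byte offset a constant-index GEP denotes,
// the same way findLoadCallsAtConstantOffset handles GEPs off the vtable.
uint64_t exampleGEPOffset() {
  llvm::LLVMContext Ctx;
  llvm::Module M("example", Ctx);
  llvm::Type *Int32Ty = llvm::Type::getInt32Ty(Ctx);
  llvm::Type *Int64Ty = llvm::Type::getInt64Ty(Ctx);
  llvm::Type *ArrTy = llvm::ArrayType::get(Int32Ty, 4); // [4 x i32]
  llvm::Value *Indices[] = {llvm::ConstantInt::get(Int64Ty, 0),  // object 0
                            llvm::ConstantInt::get(Int64Ty, 2)}; // element 2
  // Two 4-byte i32 elements precede element 2, so this returns 8.
  return M.getDataLayout().getIndexedOffsetInType(ArrTy, Indices);
}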

lib/Analysis/CMakeLists.txt

@@ -5,7 +5,6 @@ add_llvm_library(LLVMAnalysis
   Analysis.cpp
   AssumptionCache.cpp
   BasicAliasAnalysis.cpp
-  BitSetUtils.cpp
   BlockFrequencyInfo.cpp
   BlockFrequencyInfoImpl.cpp
   BranchProbabilityInfo.cpp

lib/Transforms/IPO/WholeProgramDevirt.cpp

@@ -31,7 +31,6 @@
 #include "llvm/ADT/ArrayRef.h"
 #include "llvm/ADT/DenseSet.h"
 #include "llvm/ADT/MapVector.h"
-#include "llvm/Analysis/BitSetUtils.h"
 #include "llvm/IR/CallSite.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/DataLayout.h"
@@ -232,6 +231,10 @@ struct DevirtModule {
       : M(M), Int8Ty(Type::getInt8Ty(M.getContext())),
         Int8PtrTy(Type::getInt8PtrTy(M.getContext())),
         Int32Ty(Type::getInt32Ty(M.getContext())) {}
 
+  void findLoadCallsAtConstantOffset(Metadata *BitSet, Value *Ptr,
+                                     uint64_t Offset, Value *VTable);
+  void findCallsAtConstantOffset(Metadata *BitSet, Value *Ptr, uint64_t Offset,
+                                 Value *VTable);
   void buildBitSets(std::vector<VTableBits> &Bits,
                     DenseMap<Metadata *, std::set<BitSetInfo>> &BitSets);
@@ -280,6 +283,43 @@ ModulePass *llvm::createWholeProgramDevirtPass() {
   return new WholeProgramDevirt;
 }
 
+// Search for virtual calls that call FPtr and add them to CallSlots.
+void DevirtModule::findCallsAtConstantOffset(Metadata *BitSet, Value *FPtr,
+                                             uint64_t Offset, Value *VTable) {
+  for (const Use &U : FPtr->uses()) {
+    Value *User = U.getUser();
+    if (isa<BitCastInst>(User)) {
+      findCallsAtConstantOffset(BitSet, User, Offset, VTable);
+    } else if (auto CI = dyn_cast<CallInst>(User)) {
+      CallSlots[{BitSet, Offset}].push_back({VTable, CI});
+    } else if (auto II = dyn_cast<InvokeInst>(User)) {
+      CallSlots[{BitSet, Offset}].push_back({VTable, II});
+    }
+  }
+}
+
+// Search for virtual calls that load from VPtr and add them to CallSlots.
+void DevirtModule::findLoadCallsAtConstantOffset(Metadata *BitSet, Value *VPtr,
+                                                 uint64_t Offset,
+                                                 Value *VTable) {
+  for (const Use &U : VPtr->uses()) {
+    Value *User = U.getUser();
+    if (isa<BitCastInst>(User)) {
+      findLoadCallsAtConstantOffset(BitSet, User, Offset, VTable);
+    } else if (isa<LoadInst>(User)) {
+      findCallsAtConstantOffset(BitSet, User, Offset, VTable);
+    } else if (auto GEP = dyn_cast<GetElementPtrInst>(User)) {
+      // Take into account the GEP offset.
+      if (VPtr == GEP->getPointerOperand() && GEP->hasAllConstantIndices()) {
+        SmallVector<Value *, 8> Indices(GEP->op_begin() + 1, GEP->op_end());
+        uint64_t GEPOffset = M.getDataLayout().getIndexedOffsetInType(
+            GEP->getSourceElementType(), Indices);
+        findLoadCallsAtConstantOffset(BitSet, User, Offset + GEPOffset, VTable);
+      }
+    }
+  }
+}
+
 void DevirtModule::buildBitSets(
     std::vector<VTableBits> &Bits,
     DenseMap<Metadata *, std::set<BitSetInfo>> &BitSets) {
@@ -634,23 +674,22 @@ bool DevirtModule::run() {
     if (!CI)
       continue;
 
-    // Search for virtual calls based on %p and add them to DevirtCalls.
-    SmallVector<DevirtCallSite, 1> DevirtCalls;
+    // Find llvm.assume intrinsics for this llvm.bitset.test call.
     SmallVector<CallInst *, 1> Assumes;
-    findDevirtualizableCalls(DevirtCalls, Assumes, CI);
+    for (const Use &CIU : CI->uses()) {
+      auto AssumeCI = dyn_cast<CallInst>(CIU.getUser());
+      if (AssumeCI && AssumeCI->getCalledValue() == AssumeFunc)
+        Assumes.push_back(AssumeCI);
+    }
 
-    // If we found any, add them to CallSlots. Only do this if we haven't seen
-    // the vtable pointer before, as it may have been CSE'd with pointers from
-    // other call sites, and we don't want to process call sites multiple times.
+    // If we found any, search for virtual calls based on %p and add them to
+    // CallSlots.
     if (!Assumes.empty()) {
       Metadata *BitSet =
           cast<MetadataAsValue>(CI->getArgOperand(1))->getMetadata();
       Value *Ptr = CI->getArgOperand(0)->stripPointerCasts();
-      if (SeenPtrs.insert(Ptr).second) {
-        for (DevirtCallSite Call : DevirtCalls)
-          CallSlots[{BitSet, Call.Offset}].push_back(
-              {CI->getArgOperand(0), Call.CS});
-      }
+      if (SeenPtrs.insert(Ptr).second)
+        findLoadCallsAtConstantOffset(BitSet, Ptr, 0, CI->getArgOperand(0));
     }
 
     // We no longer need the assumes or the bitset test.
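
For readers new to this pass, the instruction chain both versions of the search recognize (vptr load, constant-offset GEP, function-pointer load, indirect call) is exactly what a C++ virtual call lowers to. A hypothetical source-level example, with slot offsets assuming 8-byte pointers and the usual Itanium-style vtable layout:

// Hypothetical example, not from this commit: a call that produces the
// pattern matched by findLoadCallsAtConstantOffset.
struct Animal {
  virtual void speak(); // vtable slot 0, byte offset 0
  virtual void eat();   // vtable slot 1, byte offset 8 with 8-byte pointers
};

void feed(Animal *A) {
  // Lowers to: load A's vptr; GEP vptr + 8; load the function pointer;
  // indirect call. The walk records this call site under offset 8 for
  // Animal's bitset.
  A->eat();
}

Whichever home the walk lives in, BitSetUtils or DevirtModule itself, it collects the same set of call sites; this revert only moves the code back until the MSVC build problem is sorted out.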