[LegalizeDataflow] update impl structure and fix bugs, support resnet18 legalization; [EmitHLSCpp] emit bool rather than ap_int<1>

This commit is contained in:
Hanchen Ye 2020-12-25 00:04:10 -06:00
parent fef0cdc3fe
commit 9652bee260
12 changed files with 87 additions and 3150 deletions

1
.gitignore vendored
View File

@ -2,6 +2,7 @@
.ipynb_checkpoints
build
tmp
samples/hls_proj
samples/cpp_src
samples/test_results

View File

@ -116,17 +116,12 @@ hlscpp::ArrayOp getArrayOp(Value memref);
hlscpp::ArrayOp getArrayOp(Operation *op);
// For storing all accessed memrefs indexed by an operation (e.g. AffineForOp).
using MemRefs = SmallVector<Value, 4>;
using MemRefsMap = DenseMap<Operation *, MemRefs>;
// For storing the intermediate memory and successor loops indexed by the
// predecessor loop.
using Successors = SmallVector<std::pair<Value, Operation *>, 2>;
using SuccessorsMap = DenseMap<Operation *, Successors>;
/// With the generated MemRefsMap, given a specific loop, we can easily find all
/// memories which are consumed by the loop.
void getLoopLoadMemsMap(Block &block, MemRefsMap &map);
/// With the generated MemAccessesMap, given a specific memory, we can easily
/// find the loops which produce data to the memory.
void getLoopMemStoresMap(Block &block, MemAccessesMap &map);
void getSuccessorsMap(Block &block, SuccessorsMap &map);
} // namespace scalehls
} // namespace mlir

View File

@ -4,6 +4,7 @@
#include "Analysis/Utils.h"
#include "mlir/Analysis/AffineAnalysis.h"
#include "llvm/ADT/SmallPtrSet.h"
using namespace mlir;
using namespace scalehls;
@ -140,41 +141,34 @@ hlscpp::ArrayOp scalehls::getArrayOp(Operation *op) {
return getArrayOp(MemRefAccess(op).memref);
}
/// With the generated MemRefsMap, given a specific loop, we can easily find all
/// memories which are consumed by the loop.
void scalehls::getLoopLoadMemsMap(Block &block, MemRefsMap &map) {
for (auto loop : block.getOps<AffineForOp>()) {
loop.walk([&](Operation *op) {
if (auto affineLoad = dyn_cast<AffineLoadOp>(op)) {
auto &mems = map[loop];
if (std::find(mems.begin(), mems.end(), affineLoad.getMemRef()) ==
mems.end())
mems.push_back(affineLoad.getMemRef());
void scalehls::getSuccessorsMap(Block &block, SuccessorsMap &map) {
DenseMap<Operation *, SmallPtrSet<Value, 2>> memsMap;
DenseMap<Value, SmallPtrSet<Operation *, 2>> loopsMap;
} else if (auto load = dyn_cast<LoadOp>(op)) {
auto &mems = map[loop];
if (std::find(mems.begin(), mems.end(), load.getMemRef()) == mems.end())
mems.push_back(load.getMemRef());
}
});
}
}
/// With the generated MemAccessesMap, given a specific memory, we can easily
/// find the loops which produce data to the memory.
void scalehls::getLoopMemStoresMap(Block &block, MemAccessesMap &map) {
for (auto loop : block.getOps<AffineForOp>()) {
for (auto loop : block.getOps<AffineForOp>())
loop.walk([&](Operation *op) {
if (auto affineStore = dyn_cast<AffineStoreOp>(op)) {
auto &loops = map[affineStore.getMemRef()];
if (std::find(loops.begin(), loops.end(), loop) == loops.end())
loops.push_back(loop);
memsMap[loop].insert(affineStore.getMemRef());
} else if (auto store = dyn_cast<StoreOp>(op)) {
auto &loops = map[store.getMemRef()];
if (std::find(loops.begin(), loops.end(), loop) == loops.end())
loops.push_back(loop);
memsMap[loop].insert(store.getMemRef());
} else if (auto affineLoad = dyn_cast<AffineLoadOp>(op)) {
loopsMap[affineLoad.getMemRef()].insert(loop);
} else if (auto load = dyn_cast<LoadOp>(op)) {
loopsMap[load.getMemRef()].insert(loop);
}
});
}
for (auto loop : block.getOps<AffineForOp>())
for (auto mem : memsMap[loop])
for (auto successor : loopsMap[mem]) {
// If the successor loop not only loads from the memory, but also stores
// to the memory, it will not be considered as a legal successor.
if (successor == loop || memsMap[successor].count(mem))
continue;
map[loop].push_back(std::pair<Value, Operation *>(mem, successor));
}
}

View File

@ -1347,10 +1347,14 @@ void ModuleEmitter::emitValue(Value val, unsigned rank, bool isPtr) {
else if (valType.isa<IndexType>())
os << "int ";
else if (auto intType = valType.dyn_cast<IntegerType>()) {
os << "ap_";
if (intType.getSignedness() == IntegerType::SignednessSemantics::Unsigned)
os << "u";
os << "int<" << intType.getWidth() << "> ";
if (intType.getWidth() == 1)
os << "bool ";
else {
os << "ap_";
if (intType.getSignedness() == IntegerType::SignednessSemantics::Unsigned)
os << "u";
os << "int<" << intType.getWidth() << "> ";
}
} else
emitError(val.getDefiningOp(), "has unsupported type.");

View File

@ -41,7 +41,7 @@ void LegalizeDataflow::runOnOperation() {
dataflowLevel = max(dataflowLevel, attr.getInt());
else
op->emitError(
"HLSKernelOp has unexpected predecessor, legalization failed");
"HLSKernelOp has unexpected successor, legalization failed");
}
}
@ -86,69 +86,59 @@ void LegalizeDataflow::runOnOperation() {
// this point. Therefore, HLSKernel ops and loops will never have dependencies
// on each other in this pass.
// TODO: analyze live ins.
MemRefsMap loadMemsMap;
MemAccessesMap memStoresMap;
getLoopLoadMemsMap(func.front(), loadMemsMap);
getLoopMemStoresMap(func.front(), memStoresMap);
SuccessorsMap successorsMap;
getSuccessorsMap(func.front(), successorsMap);
for (auto loop : func.front().getOps<mlir::AffineForOp>()) {
int64_t dataflowLevel = 0;
for (auto mem : loadMemsMap[loop]) {
for (auto predLoop : memStoresMap[mem]) {
if (predLoop == loop)
continue;
for (auto it = func.front().rbegin(); it != func.front().rend(); ++it) {
if (auto loop = dyn_cast<mlir::AffineForOp>(*it)) {
int64_t dataflowLevel = 0;
// Establish an ASAP dataflow schedule.
if (auto attr = predLoop->getAttrOfType<IntegerAttr>("dataflow_level"))
// Walk through all successor loops.
for (auto pair : successorsMap[loop]) {
auto successor = pair.second;
if (auto attr = successor->getAttrOfType<IntegerAttr>("dataflow_level"))
dataflowLevel = max(dataflowLevel, attr.getInt());
else
loop.emitError(
"loop has unexpected predecessor, legalization failed");
else {
loop.emitError("loop has unexpected successor, legalization failed");
return;
}
}
}
// Set an attribute for indicating the scheduled dataflow level.
loop.setAttr("dataflow_level", builder.getIntegerAttr(builder.getI64Type(),
dataflowLevel + 1));
// Set an attribute for indicating the scheduled dataflow level.
loop.setAttr(
"dataflow_level",
builder.getIntegerAttr(builder.getI64Type(), dataflowLevel + 1));
// Eliminate bypass paths.
for (auto mem : loadMemsMap[loop]) {
for (auto predLoop : memStoresMap[mem]) {
if (predLoop == loop)
continue;
// Eliminate bypass paths.
for (auto pair : successorsMap[loop]) {
auto mem = pair.first;
auto successor = pair.second;
auto successorDataflowLevel =
successor->getAttrOfType<IntegerAttr>("dataflow_level").getInt();
auto predDataflowLevel =
predLoop->getAttrOfType<IntegerAttr>("dataflow_level").getInt();
// Insert CopyOps if required.
SmallVector<Value, 4> mems;
mems.push_back(mem);
builder.setInsertionPoint(successor);
// Insert dummy CopyOps if required.
SmallVector<Operation *, 4> dummyOps;
dummyOps.push_back(loop);
for (auto i = dataflowLevel; i > predDataflowLevel; --i) {
for (auto i = dataflowLevel; i > successorDataflowLevel; --i) {
// Create CopyOp.
builder.setInsertionPoint(dummyOps.back());
auto interMem = builder.create<mlir::AllocOp>(
auto newMem = builder.create<mlir::AllocOp>(
loop.getLoc(), mem.getType().cast<MemRefType>());
auto dummyOp =
builder.create<linalg::CopyOp>(loop.getLoc(), mem, interMem);
dummyOp.setAttr("dataflow_level",
builder.getIntegerAttr(builder.getI64Type(), i));
auto copyOp = builder.create<linalg::CopyOp>(loop.getLoc(),
mems.back(), newMem);
// Set CopyOp dataflow level.
copyOp.setAttr("dataflow_level",
builder.getIntegerAttr(builder.getI64Type(), i));
// Chain created CopyOps.
if (i == dataflowLevel) {
loop.walk([&](Operation *op) {
if (auto affineLoad = dyn_cast<mlir::AffineLoadOp>(op)) {
if (affineLoad.getMemRef() == mem)
affineLoad.setMemRef(interMem);
} else if (auto load = dyn_cast<mlir::LoadOp>(op)) {
if (load.getMemRef() == mem)
load.setMemRef(interMem);
}
if (i == successorDataflowLevel + 1)
mem.replaceUsesWithIf(newMem, [&](mlir::OpOperand &use) {
return successor->isProperAncestor(use.getOwner());
});
} else
dummyOps.back()->setOperand(0, interMem);
dummyOps.push_back(dummyOp);
else
mems.push_back(newMem);
}
}
}

View File

@ -107,7 +107,7 @@ void SplitFunction::runOnOperation() {
for (unsigned i = 0, e = inputValues.size(); i < e; ++i)
inputValues[i].replaceUsesWithIf(
entry->getArgument(i), [&](mlir::OpOperand &use) {
return getSameLevelDstOp(returnOp, use.getOwner());
return func.getOperation()->isProperAncestor(use.getOwner());
});
opIndex += 1;
}

View File

@ -2,7 +2,7 @@
func @test_integer_compare(%arg0: i32, %arg1: i32) -> i1 {
// CHECK: ap_int<1> [[VAL_0:.*]] = [[ARG_0:.*]] == [[ARG_1:.*]];
// CHECK: bool [[VAL_0:.*]] = [[ARG_0:.*]] == [[ARG_1:.*]];
%0 = cmpi "eq", %arg0, %arg1 : i32
// CHECK: !=
%1 = cmpi "ne", %arg0, %arg1 : i32
@ -27,7 +27,7 @@ func @test_integer_compare(%arg0: i32, %arg1: i32) -> i1 {
func @test_float_compare(%arg0: f32, %arg1: f32) -> i1 {
// CHECK: ap_int<1> [[VAL_0:.*]] = [[ARG_0:.*]] == [[ARG_1:.*]];
// CHECK: bool [[VAL_0:.*]] = [[ARG_0:.*]] == [[ARG_1:.*]];
%0 = cmpf "oeq", %arg0, %arg1 : f32
// CHECK: ==
%1 = cmpf "ueq", %arg0, %arg1 : f32

View File

@ -8,7 +8,7 @@ func @test_constant(%arg0: i32) -> (i32, tensor<2x2xi32>, vector<2xi32>, i32) {
// CHECK: float [[VAL_1:.*]][2][2] = {1.100000e+01, 0.000000e+00, 0.000000e+00, -4.200000e+01};
%1 = constant dense<[[11.0, 0.0], [0.0, -42.0]]> : tensor<2x2xf32>
// CHECK: ap_int<1> [[VAL_2:.*]][2][2] = {1, 0, 0, 1};
// CHECK: bool [[VAL_2:.*]][2][2] = {1, 0, 0, 1};
%2 = constant dense<[[1, 0], [0, 1]]> : tensor<2x2xi1>
// CHECK: ap_int<32> [[VAL_3:.*]][2] = {0, -42};
@ -17,7 +17,7 @@ func @test_constant(%arg0: i32) -> (i32, tensor<2x2xi32>, vector<2xi32>, i32) {
// CHECK: float [[VAL_4:.*]][2] = {0.000000e+00, -4.200000e+01};
%4 = constant dense<[0.0, -42.0]> : vector<2xf32>
// CHECK: ap_int<1> [[VAL_5:.*]][2] = {0, 1};
// CHECK: bool [[VAL_5:.*]][2] = {0, 1};
%5 = constant dense<[0, 1]> : vector<2xi1>
// CHECK: *[[ARG_1:.*]] = 11 + [[ARG_0:.*]];

View File

@ -5,7 +5,7 @@ func @test_scf_if(%arg0: index, %arg1: memref<16xindex>) {
%c0 = constant 0 : index
// CHECK: int val2 = val0 + 11;
// CHECK: ap_int<1> val3 = val2 > 0;
// CHECK: bool val3 = val2 > 0;
// CHECK: int val4;
// CHECK: int val5[16];
// CHECK: if (val3) {

View File

@ -4,7 +4,7 @@
// CHECK: float [[VAL_0:.*]],
// CHECK: double [[VAL_1:.*]],
// CHECK: int [[VAL_2:.*]],
// CHECK: ap_int<1> [[VAL_3:.*]],
// CHECK: bool [[VAL_3:.*]],
// CHECK: ap_int<11> [[VAL_4:.*]],
// CHECK: ap_int<32> [[VAL_5:.*]],
// CHECK: ap_uint<32> [[VAL_6:.*]],
@ -17,7 +17,7 @@
// CHECK: float *[[VAL_13:.*]],
// CHECK: double *[[VAL_14:.*]],
// CHECK: int *[[VAL_15:.*]],
// CHECK: ap_int<1> *[[VAL_16:.*]],
// CHECK: bool *[[VAL_16:.*]],
// CHECK: ap_int<11> *[[VAL_17:.*]],
// CHECK: ap_int<32> *[[VAL_18:.*]],
// CHECK: ap_uint<32> *[[VAL_19:.*]],

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff