[Conversions] Remove this directory

This commit is contained in:
Hanchen Ye 2022-02-22 23:35:32 -06:00
parent 875f04a4c1
commit 0d8e73c09c
15 changed files with 18 additions and 345 deletions

View File

@ -1,3 +1,2 @@
add_subdirectory(Conversion)
add_subdirectory(Dialect)
add_subdirectory(Transforms)

View File

@ -1,3 +0,0 @@
set(LLVM_TARGET_DEFINITIONS Passes.td)
mlir_tablegen(Passes.h.inc -gen-pass-decls)
add_public_tablegen_target(MLIRScaleHLSConversionIncGen)

View File

@ -1,31 +0,0 @@
//===----------------------------------------------------------------------===//
//
// Copyright 2020-2021 The ScaleHLS Authors.
//
//===----------------------------------------------------------------------===//
#ifndef SCALEHLS_CONVERSION_PASSES_H
#define SCALEHLS_CONVERSION_PASSES_H
#include "mlir/Pass/Pass.h"
#include <memory>
namespace mlir {
class Pass;
} // namespace mlir
namespace mlir {
namespace scalehls {

/// Onnx kernel legalization pass.
/// Returns a pass that legalizes operations lowered from the onnx-mlir flow
/// (krnl.packed_const, krnl.global, krnl.memcpy — see Passes.td).
std::unique_ptr<Pass> createLegalizeOnnxPass();

/// Register all ScaleHLS conversion passes with the global pass registry.
void registerConversionPasses();

// Tablegen-generated base classes for the passes declared in Passes.td.
#define GEN_PASS_CLASSES
#include "scalehls/Conversion/Passes.h.inc"

} // namespace scalehls
} // namespace mlir
#endif // SCALEHLS_CONVERSION_PASSES_H

View File

@ -1,26 +0,0 @@
//===----------------------------------------------------------------------===//
//
// Copyright 2020-2021 The ScaleHLS Authors.
//
//===----------------------------------------------------------------------===//
#ifndef SCALEHLS_CONVERSION_PASSES_TD
#define SCALEHLS_CONVERSION_PASSES_TD
include "mlir/Pass/PassBase.td"
//===----------------------------------------------------------------------===//
// Onnx Kernel Legalization Pass
//===----------------------------------------------------------------------===//
// Module-level pass that rewrites krnl.* artifacts produced by the onnx-mlir
// flow into constructs the rest of ScaleHLS can process.
def LegalizeOnnx : Pass<"legalize-onnx", "ModuleOp"> {
  let summary = "Legalize model lowered from onnx-mlir flow";
  let description = [{
    This legalize-onnx pass will legalize all operations lowered from onnx-mlir
    flow, e.g. krnl.packed_const, krnl.global, and krnl.memcpy.
  }];

  let constructor = "mlir::scalehls::createLegalizeOnnxPass()";
}
#endif // SCALEHLS_CONVERSION_PASSES_TD

View File

@ -8,7 +8,6 @@
#define SCALEHLS_INITALLPASSES_H
#include "mlir/InitAllPasses.h"
#include "scalehls/Conversion/Passes.h"
#include "scalehls/Transforms/Passes.h"
namespace mlir {
@ -16,10 +15,7 @@ namespace scalehls {
// Add all the ScaleHLS passes.
inline void registerAllPasses() {
scalehls::registerConversionPasses();
scalehls::registerTransformsPasses();
// TODO: only register required passes.
mlir::registerAllPasses();
}

View File

@ -20,7 +20,6 @@ declare_mlir_python_extension(ScaleHLSBindingsPythonExtension.Core
MLIRScaleHLSCAPIEmitHLSCpp
PRIVATE_LINK_LIBS
LLVMSupport
MLIRScaleHLSConversion
MLIRScaleHLSTransforms
)

View File

@ -1,6 +1,5 @@
add_subdirectory(Bindings)
add_subdirectory(CAPI)
add_subdirectory(Conversion)
add_subdirectory(Dialect)
add_subdirectory(Support)
add_subdirectory(Transforms)

View File

@ -1,7 +0,0 @@
add_mlir_library(MLIRScaleHLSConversion
LegalizeOnnx.cpp
Passes.cpp
DEPENDS
MLIRScaleHLSConversionIncGen
)

View File

@ -1,234 +0,0 @@
//===----------------------------------------------------------------------===//
//
// Copyright 2020-2021 The ScaleHLS Authors.
//
//===----------------------------------------------------------------------===//
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "scalehls/Conversion/Passes.h"
using namespace mlir;
using namespace scalehls;
namespace {
// Pass implementation: legalizes krnl.* operations produced by the onnx-mlir
// flow. The base class is tablegen-generated (GEN_PASS_CLASSES in Passes.h).
struct LegalizeOnnx : public LegalizeOnnxBase<LegalizeOnnx> {
  void runOnOperation() override;
};
} // namespace
/// Build an affine map delinearizing a flat 1-D index (d0 in [0, memSize))
/// into the multi-dimensional indices of `memType`, assuming a row-major
/// layout: for each dimension the expression is
/// (d0 floordiv trailingStride) mod dimSize, simplified to a constant 0 for
/// unit dimensions, and with the mod dropped for the outermost non-unit
/// dimension (where it is a no-op).
///
/// \param memSize  Total element count of the memref (product of the shape).
/// \param memType  Statically-shaped memref type being indexed.
/// \param builder  Builder used to construct affine expressions.
static AffineMap getIndexMap(int64_t memSize, MemRefType memType,
                             OpBuilder &builder) {
  // Keep the running stride as int64_t: `memSize` is an int64_t element
  // count, and the previous `unsigned` local silently truncated it and made
  // the comparison below a signed/unsigned mix.
  int64_t accSize = memSize;
  SmallVector<AffineExpr, 4> exprs;
  for (int64_t i = 0, e = memType.getRank(); i < e; ++i) {
    auto dimSize = memType.getShape()[i];
    // After this division, `accSize` is the stride of dimension i (product
    // of all trailing dimension sizes).
    accSize /= dimSize;
    if (dimSize == 1)
      // Unit dimension: index is always 0.
      exprs.push_back(builder.getAffineConstantExpr(0));
    else if (accSize == memSize / dimSize)
      // All preceding dimensions are unit-sized, so the quotient is already
      // bounded by dimSize — no modulo required.
      exprs.push_back(builder.getAffineDimExpr(0).floorDiv(accSize));
    else
      exprs.push_back(builder.getAffineDimExpr(0).floorDiv(accSize) % dimSize);
  }
  return AffineMap::get(1, 0, exprs, builder.getContext());
}
/// Legalize a module produced by the onnx-mlir flow:
///  1. Harvest module-level krnl.packed_const / krnl.entry_point metadata.
///  2. Per function: turn affine-compatible memref.load into affine.load,
///     materialize valued krnl.global ops as constants, lower krnl.memcpy to
///     an affine copy loop, and promote offset-only weights to new function
///     arguments recorded in function attributes.
///  3. Erase every replaced krnl.* operation at the end (deferred so the
///     iteration above never invalidates itself).
void LegalizeOnnx::runOnOperation() {
  auto module = getOperation();
  auto builder = OpBuilder(module);

  // Metadata collected from module-level krnl ops; re-attached below as
  // function attributes on the top function.
  StringRef weightFileName = "";
  int64_t weightSizeInBytes = 0;
  int64_t numInputs = 0;
  int64_t numOutputs = 0;
  StringRef topFunction = "";

  SmallVector<FuncOp, 2> funcs;
  // Ops to remove; erased only after all rewriting is done.
  SmallVector<Operation *, 16> opsToErase;

  // First sweep: collect metadata ops and all functions in the module.
  for (auto &op : module) {
    if (op.getName().getStringRef() == "krnl.packed_const") {
      // Fetch weight information and erase packed_const operation.
      weightFileName = op.getAttrOfType<StringAttr>("file_name").getValue();
      weightSizeInBytes =
          op.getAttrOfType<IntegerAttr>("size_in_bytes").getInt();
      opsToErase.push_back(&op);
    } else if (op.getName().getStringRef() == "krnl.entry_point") {
      // Fetch top function information and erase entry_point operation.
      topFunction = op.getAttrOfType<FlatSymbolRefAttr>("func").getValue();
      numInputs = op.getAttrOfType<IntegerAttr>("numInputs").getInt();
      numOutputs = op.getAttrOfType<IntegerAttr>("numOutputs").getInt();
      opsToErase.push_back(&op);
    } else if (auto func = dyn_cast<FuncOp>(op))
      funcs.push_back(func);
  }

  for (auto func : funcs) {
    // // Convert add operations to AffineApply.
    // func.walk([&](arith::AddIOp addOp) {
    //   builder.setInsertionPoint(addOp);
    //   auto map = AffineMap::get(
    //       2, 0, builder.getAffineDimExpr(0) + builder.getAffineDimExpr(1),
    //       builder.getContext());
    //   auto newAdd = builder.create<AffineApplyOp>(
    //       addOp.getLoc(), addOp.getType(), map, addOp.getOperands());
    //   addOp.getResult().replaceAllUsesWith(newAdd);
    // });

    // Convert normal load operations to AffineLoad when every index is a
    // constant, a valid affine dim, or a valid affine symbol.
    func.walk([&](memref::LoadOp loadOp) {
      SmallVector<AffineExpr, 4> exprs;
      SmallVector<Value, 4> dims;
      SmallVector<Value, 4> symbols;
      bool isAffineFlag = true;

      for (auto index : loadOp.getIndices()) {
        // Handle constant defining operation (index-typed arith.constant).
        if (auto defOp = index.getDefiningOp())
          if (auto constOp = dyn_cast<arith::ConstantOp>(defOp))
            if (constOp.getType().isa<IndexType>())
              if (auto constAttr = constOp.getValue().dyn_cast<IntegerAttr>()) {
                // NOTE(review): getUInt() assumes a non-negative index —
                // holds for the memref indices seen here, but worth
                // confirming for any new producer.
                exprs.push_back(
                    builder.getAffineConstantExpr(constAttr.getUInt()));
                continue;
              }

        // Check whether a valid affine index.
        if (isValidDim(index)) {
          exprs.push_back(builder.getAffineDimExpr(dims.size()));
          dims.push_back(index);
          continue;
        }
        if (isValidSymbol(index)) {
          exprs.push_back(builder.getAffineSymbolExpr(symbols.size()));
          symbols.push_back(index);
          continue;
        }

        // If the index is not a constant or dim or symbol, break.
        isAffineFlag = false;
        break;
      }

      if (isAffineFlag) {
        builder.setInsertionPoint(loadOp);
        auto map = AffineMap::get(dims.size(), symbols.size(), exprs,
                                  builder.getContext());
        // AffineLoadOp operands are dims followed by symbols, matching the
        // map's operand ordering.
        dims.append(symbols.begin(), symbols.end());
        auto newLoad = builder.create<AffineLoadOp>(
            loadOp.getLoc(), loadOp.getMemRef(), map, dims);
        loadOp.getResult().replaceAllUsesWith(newLoad);
      }
    });

    // Weights without inline values: their types/offsets/result values, in
    // discovery order; each becomes a new function argument below.
    SmallVector<Type, 16> weightTypes;
    SmallVector<int64_t, 16> weightOffsets;
    SmallVector<Value, 16> weightValues;

    for (auto &op : func.front()) {
      if (op.getName().getStringRef() == "krnl.global") {
        if (auto value = op.getAttrOfType<DenseFPElementsAttr>("value")) {
          // If the kernel global operation gets a value, create a standard
          // constant operation to substitute it.
          builder.setInsertionPoint(&op);
          auto tensor = builder.create<arith::ConstantOp>(op.getLoc(), value);
          auto memref = builder.create<bufferization::ToMemrefOp>(
              op.getLoc(), op.getResult(0).getType(), tensor);
          op.getResult(0).replaceAllUsesWith(memref);
        } else {
          // If value attribute doesn't exist, record the type and offset.
          weightTypes.push_back(op.getResult(0).getType());
          weightOffsets.push_back(
              op.getAttrOfType<IntegerAttr>("offset").getInt());
          weightValues.push_back(op.getResult(0));
        }
        // Erase the kernel global operation.
        opsToErase.push_back(&op);
      } else if (op.getName().getStringRef() == "krnl.memcpy") {
        // Lower krnl.memcpy(dst, src) to a flat affine loop that copies
        // element-by-element, delinearizing the flat induction variable into
        // each memref's indices via getIndexMap.
        builder.setInsertionPoint(&op);
        auto src = op.getOperand(1);
        auto dst = op.getOperand(0);
        auto srcType = src.getType().cast<MemRefType>();
        auto dstType = dst.getType().cast<MemRefType>();

        // Calculate memory size (total element count of the destination).
        int64_t memSize = 1;
        for (int64_t i = 0, e = dstType.getRank(); i < e; ++i)
          memSize *= dstType.getShape()[i];

        // Create affine loops for memory copy.
        auto loop = builder.create<AffineForOp>(op.getLoc(), 0, memSize);
        auto loopIdv = loop.getInductionVar();
        builder.setInsertionPointToStart(loop.getBody());

        // Create load and store operations.
        auto val = builder.create<AffineLoadOp>(
            op.getLoc(), src, getIndexMap(memSize, srcType, builder), loopIdv);
        builder.create<AffineStoreOp>(op.getLoc(), val, dst,
                                      getIndexMap(memSize, dstType, builder),
                                      loopIdv);
        opsToErase.push_back(&op);
      } else if (isa<memref::DeallocOp>(op))
        // Deallocations become meaningless once weights are arguments.
        opsToErase.push_back(&op);
    }

    // Construct new function type: original arguments plus one trailing
    // argument per recorded weight.
    SmallVector<Type, 16> inputTypes(func.getArgumentTypes().begin(),
                                     func.getArgumentTypes().end());
    inputTypes.append(weightTypes.begin(), weightTypes.end());
    auto newType =
        builder.getFunctionType(inputTypes, func.getType().getResults());

    // Record the argument number of the old function.
    auto oldArgNum = func.getNumArguments();

    // Set function type to newType.
    func.setType(newType);

    // Add new arguments to the entry block (unknown locations — these
    // arguments have no source counterpart).
    auto locs =
        SmallVector<Location, 16>(weightTypes.size(), builder.getUnknownLoc());
    func.front().addArguments(weightTypes, locs);

    // Replace all uses of the kernel global operation with corresponding entry
    // block argument.
    SmallVector<int64_t, 16> weightIndex;
    for (unsigned i = 0, e = weightOffsets.size(); i < e; ++i) {
      weightValues[i].replaceAllUsesWith(
          func.front().getArgument(i + oldArgNum));
      weightIndex.push_back(i + oldArgNum);
    }

    // Set weight offset and index attribute.
    func->setAttr("weight_offsets", builder.getI64ArrayAttr(weightOffsets));
    func->setAttr("weight_index", builder.getI64ArrayAttr(weightIndex));

    // Set other function attributes if the current function is top function.
    if (func.getName() == topFunction) {
      func->setAttr("weight_file_name", builder.getStringAttr(weightFileName));
      func->setAttr("weight_size_in_bytes",
                    builder.getI64IntegerAttr(weightSizeInBytes));
      func->setAttr("inputs_num", builder.getI64IntegerAttr(numInputs));
      func->setAttr("outputs_num", builder.getI64IntegerAttr(numOutputs));
    }
  }

  // Erase all operations marked as erase.
  for (auto op : opsToErase)
    op->erase();
}
/// Factory for the legalize-onnx pass; referenced from the tablegen pass
/// definition's constructor field.
std::unique_ptr<Pass> scalehls::createLegalizeOnnxPass() {
  auto pass = std::make_unique<LegalizeOnnx>();
  return pass;
}

View File

@ -1,17 +0,0 @@
//===----------------------------------------------------------------------===//
//
// Copyright 2020-2021 The ScaleHLS Authors.
//
//===----------------------------------------------------------------------===//
#include "scalehls/Conversion/Passes.h"
using namespace mlir;
using namespace scalehls;
namespace {
// Pull in the tablegen-generated registration hooks; this defines the
// `registerPasses` helper used below for every pass declared in Passes.td.
#define GEN_PASS_REGISTRATION
#include "scalehls/Conversion/Passes.h.inc"
} // namespace

/// Register all ScaleHLS conversion passes with the global pass registry.
void scalehls::registerConversionPasses() { registerPasses(); }

View File

@ -11,7 +11,6 @@
#include "mlir/Dialect/StandardOps/Transforms/Passes.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/Passes.h"
#include "scalehls/Conversion/Passes.h"
using namespace mlir;
using namespace scalehls;
@ -45,7 +44,6 @@ void scalehls::registerScaleHLSPassPipeline() {
if (opts.frontend == "torch") {
pm.addPass(mlir::createCanonicalizerPass());
} else if (opts.frontend == "onnx") {
pm.addPass(scalehls::createLegalizeOnnxPass());
pm.addPass(mlir::createAffineLoopNormalizePass());
pm.addPass(mlir::createSimplifyAffineStructuresPass());
pm.addPass(mlir::createCanonicalizerPass());

View File

@ -8,7 +8,6 @@
#include "mlir/Dialect/Affine/LoopUtils.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/Passes.h"
#include "scalehls/Conversion/Passes.h"
#include "scalehls/Transforms/Passes.h"
using namespace mlir;

View File

@ -1,15 +0,0 @@
// RUN: scalehls-opt -legalize-to-hlscpp="top-func=test_assign" %s | FileCheck %s
// CHECK-LABEL: func @test_assign(
// CHECK-SAME: %arg0: f32, %arg1: memref<16xf32, 1>) -> (f32, memref<16xf32, 1>, i32, memref<2x2xi32, 1>)
func @test_assign(%arg0: f32, %arg1: memref<16xf32, 1>) -> (f32, memref<16xf32, 1>, i32, memref<2x2xi32, 1>) {
%c11_i32 = arith.constant 11 : i32
%cst = arith.constant dense<[[11, 0], [0, -42]]> : tensor<2x2xi32>
%cst_memref = bufferization.to_memref %cst : memref<2x2xi32, 1>
// CHECK: %1 = "hlscpp.assign"(%arg0) : (f32) -> f32
// CHECK: %2 = "hlscpp.assign"(%arg1) : (memref<16xf32, 1>) -> memref<16xf32, 1>
// CHECK: %3 = "hlscpp.assign"(%c11_i32) : (i32) -> i32
// CHECK: return %1, %2, %3, %0 : f32, memref<16xf32, 1>, i32, memref<2x2xi32, 1>
return %arg0, %arg1, %c11_i32, %cst_memref : f32, memref<16xf32, 1>, i32, memref<2x2xi32, 1>
}

View File

@ -1,4 +1,4 @@
// RUN: scalehls-opt -legalize-to-hlscpp="top-func=test_syrk" %s | FileCheck %s
// RUN: scalehls-opt -legalize-to-hlscpp="top-func=test_syrk" -split-input-file %s | FileCheck %s
#map0 = affine_map<(d0) -> (d0)>
#map1 = affine_map<(d0) -> (d0 + 2)>
@ -43,3 +43,20 @@ module {
return
}
}
// -----
// CHECK-LABEL: func @test_assign(
// CHECK-SAME: %arg0: f32, %arg1: memref<16xf32, 1>) -> (f32, memref<16xf32, 1>, i32, memref<2x2xi32, 1>)
func @test_assign(%arg0: f32, %arg1: memref<16xf32, 1>) -> (f32, memref<16xf32, 1>, i32, memref<2x2xi32, 1>) {
%c11_i32 = arith.constant 11 : i32
%cst = arith.constant dense<[[11, 0], [0, -42]]> : tensor<2x2xi32>
%cst_memref = bufferization.to_memref %cst : memref<2x2xi32, 1>
// CHECK: %1 = "hlscpp.assign"(%arg0) : (f32) -> f32
// CHECK: %2 = "hlscpp.assign"(%arg1) : (memref<16xf32, 1>) -> memref<16xf32, 1>
// CHECK: %3 = "hlscpp.assign"(%c11_i32) : (i32) -> i32
// CHECK: return %1, %2, %3, %0 : f32, memref<16xf32, 1>, i32, memref<2x2xi32, 1>
return %arg0, %arg1, %c11_i32, %cst_memref : f32, memref<16xf32, 1>, i32, memref<2x2xi32, 1>
}

View File

@ -15,7 +15,6 @@ target_link_libraries(scalehls-opt
MLIRHLSCpp
MLIRScaleHLSSupport
MLIRScaleHLSConversion
MLIRScaleHLSTransforms
# Threads::Threads