[HLSCpp] Remove all uses of ArrayOp; all array-related pragmas are now represented with MemRefType. Fix related bugs.

Hanchen Ye 2021-01-03 22:49:34 -06:00
parent f052331cbc
commit cf581f2694
12 changed files with 226 additions and 260 deletions
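
The gist of the change: array directives no longer live on a dedicated hlscpp.array operation but are carried by the MemRefType itself, with the partition scheme encoded in the layout (affine) map and the storage kind encoded in the memory space. A minimal sketch of building such a type with the MLIR C++ API, assuming an OpBuilder named builder is in scope; the layout-map convention shown is illustrative, not necessarily the exact one ScaleHLS produces:

  // A 16-element f32 array, partitioned cyclically by a factor of 2 and bound
  // to a simple dual-port BRAM. The emitter below treats non-floordiv layout
  // results as cyclic partitions and floordiv results as block partitions.
  auto d0 = builder.getAffineDimExpr(0);
  auto layout = AffineMap::get(/*dimCount=*/1, /*symbolCount=*/0,
                               {d0 % 2, d0.floorDiv(2)}, builder.getContext());
  auto type = MemRefType::get({16}, builder.getF32Type(), layout,
                              (unsigned)hlscpp::MemoryKind::BRAM_S2P);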

View File

@ -18,22 +18,6 @@ namespace scalehls {
class HLSCppAnalysisBase {
public:
explicit HLSCppAnalysisBase(OpBuilder builder) : builder(builder) {}
/// Get partition information methods.
StringRef getPartitionType(hlscpp::ArrayOp op, unsigned dim) {
if (auto attr = op.partition_type()[dim].cast<StringAttr>())
return attr.getValue();
else
return StringRef();
}
int64_t getPartitionFactor(hlscpp::ArrayOp op, unsigned dim) {
if (auto attr = op.partition_factor()[dim].cast<IntegerAttr>())
return attr.getInt();
else
return 0;
}
/// Get attribute value methods.
int64_t getIntAttrValue(Operation *op, StringRef name) {
if (auto attr = op->getAttrOfType<IntegerAttr>(name))
@ -101,6 +85,8 @@ Operation *getSameLevelDstOp(Operation *srcOp, Operation *dstOp);
AffineMap getLayoutMap(MemRefType memrefType);
// Collect the partition factors and the overall partition number by analyzing
// the layout map of a MemRefType.
int64_t getPartitionFactors(MemRefType memrefType,
SmallVector<int64_t, 4> *factors = nullptr);
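
An illustrative use of this helper (variable names hypothetical): for a 2-D memref partitioned cyclically by 2 along its first dimension and left unpartitioned along the second, factors is expected to hold {2, 1} and the return value to be 2, the overall number of partitions.

  SmallVector<int64_t, 4> factors;
  int64_t partitionNum = getPartitionFactors(memrefType, &factors);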

View File

@ -16,6 +16,18 @@ namespace hlscpp {
#include "Dialect/HLSCpp/HLSCppInterfaces.h.inc"
enum class MemoryKind {
BRAM_1P = 0,
BRAM_S2P = 1,
BRAM_T2P = 2,
// URAM_1P = 3,
// URAM_S2P = 4,
// URAM_T2P = 5,
DRAM = 3,
};
} // namespace hlscpp
} // namespace scalehls
} // namespace mlir
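
Because the memory kind is stored directly in a memref's memory space (as the legalization pass and emitter below do), decoding it back is a plain cast. A minimal round-trip sketch under that assumption, with builder assumed in scope:

  // Encode: bake the kind into the type's memory space.
  auto type = MemRefType::get({32, 32}, builder.getF32Type(), {},
                              (unsigned)MemoryKind::BRAM_T2P);
  // Decode: recover the kind from the memory space, as the emitter does.
  auto kind = MemoryKind(type.getMemorySpace()); // == MemoryKind::BRAM_T2P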

View File

@ -7,6 +7,7 @@
include "mlir/IR/OpBase.td"
// Deprecated. Will be removed in the future.
def PragmaOpInterface : OpInterface<"PragmaOpInterface"> {
let description = [{
This interface indicates the operation represents one pragma directive or a

View File

@ -18,8 +18,7 @@ def AssignOp : HLSCppOp<"assign", [SameOperandsAndResultType]> {
let results = (outs AnyType : $output);
}
// Outdated. This is a temporary approach, which will be substituted by a type
// or attribute based approach.
// Deprecated. Will be removed in the future.
def ArrayOp : HLSCppOp<"array", [SameOperandsAndResultType]> {
let summary = "A C++ array instance";
let description = [{

View File

@ -55,7 +55,7 @@ public:
SignExtendIOp, IndexCastOp, CallOp, ReturnOp, UIToFPOp, SIToFPOp,
FPToSIOp, FPToUIOp,
// Structure operations.
AssignOp, ArrayOp, EndOp>([&](auto opNode) -> ResultType {
AssignOp, EndOp>([&](auto opNode) -> ResultType {
return thisCast->visitOp(opNode, args...);
})
.Default([&](auto opNode) -> ResultType {
@ -190,7 +190,6 @@ public:
// Structure operations.
HANDLE(AssignOp);
HANDLE(ArrayOp);
HANDLE(EndOp);
#undef HANDLE
};

View File

@ -169,6 +169,8 @@ AffineMap scalehls::getLayoutMap(MemRefType memrefType) {
return memrefMaps.back();
}
// Collect the partition factors and the overall partition number by analyzing
// the layout map of a MemRefType.
int64_t scalehls::getPartitionFactors(MemRefType memrefType,
SmallVector<int64_t, 4> *factors) {
auto shape = memrefType.getShape();

View File

@ -5,6 +5,7 @@
#include "Conversion/Passes.h"
#include "Dialect/HLSCpp/HLSCpp.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "llvm/ADT/SmallPtrSet.h"
using namespace mlir;
using namespace scalehls;
@ -33,84 +34,16 @@ void LegalizeToHLSCpp::runOnOperation() {
else
func.setAttr("top_function", builder.getBoolAttr(false));
// Insert an AssignOp when a block argument or a ConstantOp result is directly
// connected to the ReturnOp.
if (auto returnOp = dyn_cast<ReturnOp>(func.front().getTerminator())) {
builder.setInsertionPoint(returnOp);
unsigned idx = 0;
for (auto operand : returnOp.getOperands()) {
if (operand.getKind() == Value::Kind::BlockArgument) {
auto value = builder.create<AssignOp>(returnOp.getLoc(),
operand.getType(), operand);
returnOp.setOperand(idx, value);
} else if (isa<ConstantOp>(operand.getDefiningOp())) {
auto value = builder.create<AssignOp>(returnOp.getLoc(),
operand.getType(), operand);
returnOp.setOperand(idx, value);
}
idx += 1;
}
} else
func.emitError("doesn't have a return as terminator.");
SmallPtrSet<Value, 16> memrefs;
// Recursively convert every for-loop body block.
// Walk through all operations in the function.
func.walk([&](Operation *op) {
// ArrayOp will be inserted after each MemRefType value from declaration
// or function signature.
for (auto operand : op->getOperands()) {
if (auto arrayType = operand.getType().dyn_cast<MemRefType>()) {
bool insertArrayOp = false;
if (operand.getKind() == Value::Kind::BlockArgument)
insertArrayOp = true;
else if (!isa<ArrayOp>(operand.getDefiningOp()) &&
!isa<AssignOp>(operand.getDefiningOp())) {
insertArrayOp = true;
if (!arrayType.hasStaticShape())
operand.getDefiningOp()->emitError(
"is unranked or has dynamic shape which is illegal.");
}
if (isa<ArrayOp>(op))
insertArrayOp = false;
if (insertArrayOp) {
// Insert array operation and set attributes.
builder.setInsertionPointAfterValue(operand);
auto arrayOp =
builder.create<ArrayOp>(op->getLoc(), operand.getType(), operand);
operand.replaceAllUsesExcept(arrayOp.getResult(),
SmallPtrSet<Operation *, 1>{arrayOp});
// Set array pragma attributes.
// TODO: A known bug: if an ArrayOp is connected to the ReturnOp through
// an AssignOp, it will never be annotated as an interface. This is
// acceptable because AssignOp is only used to handle some rare corner
// cases.
if (!arrayOp.interface() && func.getName() == topFunction) {
// Only when the array is a block argument or a returned value will it
// be annotated as an interface.
bool interfaceFlag =
operand.getKind() == Value::Kind::BlockArgument;
for (auto user : arrayOp.getResult().getUsers())
if (isa<mlir::ReturnOp>(user))
interfaceFlag = true;
arrayOp.setAttr("interface", builder.getBoolAttr(interfaceFlag));
} else
arrayOp.setAttr("interface", builder.getBoolAttr(false));
if (!arrayOp.storage()) {
arrayOp.setAttr("storage", builder.getBoolAttr(true));
arrayOp.setAttr("storage_type",
builder.getStringAttr("ram_1p_bram"));
}
if (!arrayOp.partition())
arrayOp.setAttr("partition", builder.getBoolAttr(false));
}
}
}
// Collect all memrefs.
for (auto operand : op->getOperands())
if (operand.getType().isa<MemRefType>())
memrefs.insert(operand);
// Set loop pragma attributes.
if (auto forOp = dyn_cast<AffineForOp>(op)) {
if (forOp.getLoopBody().getBlocks().size() != 1)
forOp.emitError("has zero or more than one basic blocks");
@ -119,13 +52,47 @@ void LegalizeToHLSCpp::runOnOperation() {
if (!forOp.getAttr("pipeline"))
forOp.setAttr("pipeline", builder.getBoolAttr(false));
// if (!forOp.getAttr("unroll"))
// forOp.setAttr("unroll", builder.getBoolAttr(false));
if (!forOp.getAttr("flatten"))
forOp.setAttr("flatten", builder.getBoolAttr(false));
}
});
// Set array pragmas by assigning each memref a memory kind via its memory space.
for (auto memref : memrefs) {
auto type = memref.getType().cast<MemRefType>();
if (type.getMemorySpace() == 0) {
// TODO: determine memory kind according to data type.
MemoryKind kind = MemoryKind::BRAM_S2P;
auto newType = MemRefType::get(type.getShape(), type.getElementType(),
type.getAffineMaps(), (unsigned)kind);
memref.setType(newType);
}
}
// Align function type with entry block argument types.
auto resultTypes = func.front().getTerminator()->getOperandTypes();
auto inputTypes = func.front().getArgumentTypes();
func.setType(builder.getFunctionType(inputTypes, resultTypes));
// Insert an AssignOp when a block argument or a ConstantOp result is directly
// connected to the ReturnOp.
auto returnOp = func.front().getTerminator();
builder.setInsertionPoint(returnOp);
unsigned idx = 0;
for (auto operand : returnOp->getOperands()) {
if (operand.getKind() == Value::Kind::BlockArgument) {
auto value = builder.create<AssignOp>(returnOp->getLoc(),
operand.getType(), operand);
returnOp->setOperand(idx, value);
} else if (isa<ConstantOp>(operand.getDefiningOp())) {
auto value = builder.create<AssignOp>(returnOp->getLoc(),
operand.getType(), operand);
returnOp->setOperand(idx, value);
}
idx++;
}
}
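
Note that MemoryKind::BRAM_S2P has the value 1, so after this pass a memref in the default memory space, e.g. memref<16xf32>, is rewritten to memref<16xf32, 1>; this is exactly what the updated test_assign test below checks. A minimal sketch of reading the assigned kind back (variable name hypothetical):

  // memref is a value the pass has just retyped.
  auto kind = MemoryKind(memref.getType().cast<MemRefType>().getMemorySpace());
  assert(kind == MemoryKind::BRAM_S2P && "default kind set by LegalizeToHLSCpp");
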
std::unique_ptr<mlir::Pass> scalehls::createLegalizeToHLSCppPass() {

View File

@ -3,6 +3,7 @@
//===----------------------------------------------------------------------===//
#include "EmitHLSCpp.h"
#include "Analysis/Utils.h"
#include "Dialect/HLSCpp/Visitor.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/IR/AffineValueMap.h"
@ -11,6 +12,7 @@
#include "mlir/IR/AffineExprVisitor.h"
#include "mlir/IR/IntegerSet.h"
#include "mlir/Translation.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/raw_ostream.h"
using namespace std;
@ -98,18 +100,6 @@ SmallString<8> HLSCppEmitterBase::addName(Value val, bool isPtr) {
valName += StringRef("val" + to_string(state.nameTable.size()));
state.nameTable[val] = valName;
// If the definition operation is ArrayOp, then the ArrayOp operand should be
// an alias of the current value.
if (auto defOp = val.getDefiningOp())
if (auto arrayOp = dyn_cast<ArrayOp>(defOp))
state.nameTable[arrayOp.getOperand()] = valName;
// If the first user is ArrayOp, then the ArrayOp result should be an alias of
// the current value.
if (!val.getUsers().empty())
if (auto arrayOp = dyn_cast<ArrayOp>(*val.getUsers().begin()))
return state.nameTable[arrayOp.getResult()] = valName;
return valName;
}
@ -120,18 +110,6 @@ SmallString<8> HLSCppEmitterBase::addAlias(Value val, Value alias) {
auto valName = getName(val);
state.nameTable[alias] = valName;
// If the definition operation is ArrayOp, then the ArrayOp operand should be
// an alias of the current value.
if (auto defOp = alias.getDefiningOp())
if (auto arrayOp = dyn_cast<ArrayOp>(defOp))
state.nameTable[arrayOp.getOperand()] = valName;
// If the first user is ArrayOp, then the ArrayOp result should be an alias of
// the current value.
if (!alias.getUsers().empty())
if (auto arrayOp = dyn_cast<ArrayOp>(*alias.getUsers().begin()))
return state.nameTable[arrayOp.getResult()] = valName;
return valName;
}
@ -212,7 +190,6 @@ public:
/// Structure operations emitters.
void emitAssign(AssignOp *op);
void emitArray(ArrayOp *op);
/// Top-level MLIR module emitter.
void emitModule(ModuleOp module);
@ -225,9 +202,11 @@ private:
void emitNestedLoopTail(unsigned rank);
void emitInfoAndNewLine(Operation *op);
/// MLIR component emitters.
/// MLIR component and HLS C++ pragma emitters.
void emitOperation(Operation *op);
void emitBlock(Block &block);
void emitArrayPragmas(Value memref);
void emitFunctionPragmas(FuncOp func, ArrayRef<Value> portList);
void emitFunction(FuncOp func);
};
} // namespace
@ -497,7 +476,6 @@ public:
/// Structure operations.
bool visitOp(AssignOp op) { return emitter.emitAssign(&op), true; }
bool visitOp(ArrayOp op) { return emitter.emitArray(&op), true; }
bool visitOp(EndOp op) { return true; }
private:
@ -551,13 +529,6 @@ void ModuleEmitter::emitScfFor(scf::ForOp *op) {
// os << "#pragma HLS loop_flatten off\n";
//}
if (auto unroll = op->getAttrOfType<BoolAttr>("unroll")) {
if (unroll.getValue()) {
indent();
os << "#pragma HLS unroll\n";
}
}
emitBlock(op->getLoopBody().front());
reduceIndent();
@ -690,13 +661,6 @@ void ModuleEmitter::emitAffineFor(AffineForOp *op) {
// os << "#pragma HLS loop_flatten off\n";
//}
if (auto unroll = op->getAttrOfType<BoolAttr>("unroll")) {
if (unroll.getValue()) {
indent();
os << "#pragma HLS unroll\n";
}
}
emitBlock(op->getLoopBody().front());
reduceIndent();
@ -1007,6 +971,7 @@ template <typename OpType> void ModuleEmitter::emitAlloc(OpType *op) {
emitArrayDecl(op->getResult());
os << ";";
emitInfoAndNewLine(*op);
emitArrayPragmas(op->getResult());
}
void ModuleEmitter::emitLoad(LoadOp *op) {
@ -1072,8 +1037,10 @@ void ModuleEmitter::emitTensorToMemref(TensorToMemrefOp *op) {
os << ";";
emitInfoAndNewLine(*op);
emitNestedLoopTail(rank);
} else
} else {
addAlias(op->getOperand(), op->getResult());
emitArrayPragmas(op->getResult());
}
}
void ModuleEmitter::emitDim(DimOp *op) {
@ -1262,62 +1229,6 @@ void ModuleEmitter::emitAssign(AssignOp *op) {
emitNestedLoopTail(rank);
}
void ModuleEmitter::emitArray(ArrayOp *op) {
bool emitPragmaFlag = false;
// Emit interface pragma.
if (op->interface()) {
emitPragmaFlag = true;
indent();
os << "#pragma HLS interface";
os << " " << op->interface_mode();
os << " port=";
emitValue(op->getOperand());
if (op->interface_mode() == "m_axi")
os << " offset=slave";
os << "\n";
}
// Emit resource pragma.
if (op->storage()) {
emitPragmaFlag = true;
indent();
os << "#pragma HLS resource";
os << " variable=";
emitValue(op->getOperand());
os << " core=";
os << op->storage_type();
os << "\n";
}
auto type = op->getOperand().getType().cast<ShapedType>();
if (op->partition() && type.hasStaticShape()) {
// Emit array_partition pragma(s).
for (unsigned dim = 0; dim < type.getRank(); ++dim) {
auto partitionType =
op->partition_type()[dim].cast<StringAttr>().getValue();
if (partitionType != "none") {
emitPragmaFlag = true;
indent();
os << "#pragma HLS array_partition";
os << " variable=";
emitValue(op->getOperand());
os << " " << partitionType;
if (partitionType != "complete")
os << " factor="
<< op->partition_factor()[dim].cast<IntegerAttr>().getInt();
os << " dim=" << dim + 1 << "\n";
}
}
}
// Emit an empty line.
if (emitPragmaFlag)
os << "\n";
}
/// C++ component emitters.
void ModuleEmitter::emitValue(Value val, unsigned rank, bool isPtr) {
assert(!(rank && isPtr) && "should be either an array or a pointer.");
@ -1436,7 +1347,7 @@ void ModuleEmitter::emitInfoAndNewLine(Operation *op) {
os << "\n";
}
/// MLIR component emitters.
/// MLIR component and HLS C++ pragma emitters.
void ModuleEmitter::emitOperation(Operation *op) {
if (ExprVisitor(*this).dispatchVisitor(op))
return;
@ -1455,6 +1366,127 @@ void ModuleEmitter::emitBlock(Block &block) {
emitOperation(&op);
}
void ModuleEmitter::emitArrayPragmas(Value memref) {
bool emitPragmaFlag = false;
auto type = memref.getType().cast<MemRefType>();
// Emit resource pragma.
auto kind = MemoryKind(type.getMemorySpace());
if (kind != MemoryKind::DRAM) {
emitPragmaFlag = true;
indent();
os << "#pragma HLS resource";
os << " variable=";
emitValue(memref);
os << " core=";
switch (kind) {
case MemoryKind::BRAM_1P:
os << "ram_1p_bram";
break;
case MemoryKind::BRAM_S2P:
os << "ram_s2p_bram";
break;
case MemoryKind::BRAM_T2P:
os << "ram_t2p_bram";
break;
default:
os << "ram_1p_bram";
break;
}
os << "\n";
}
if (auto layoutMap = getLayoutMap(type)) {
// Emit array_partition pragma(s).
SmallVector<int64_t, 4> factors;
getPartitionFactors(type, &factors);
for (unsigned dim = 0; dim < type.getRank(); ++dim) {
if (factors[dim] != 1) {
emitPragmaFlag = true;
indent();
os << "#pragma HLS array_partition";
os << " variable=";
emitValue(memref);
// Emit partition type.
if (layoutMap.getResult(dim).getKind() == AffineExprKind::FloorDiv)
os << " block";
else
os << " cyclic";
os << " factor=" << factors[dim];
os << " dim=" << dim + 1 << "\n";
}
}
}
// Emit an empty line.
if (emitPragmaFlag)
os << "\n";
}
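
For reference, given a value named v0 (name hypothetical) of type memref<16x16xf32, #map, 1>, i.e. a BRAM_S2P array whose layout map #map encodes cyclic partitioning by a factor of 4 along the first dimension, this function emits:

  #pragma HLS resource variable=v0 core=ram_s2p_bram
  #pragma HLS array_partition variable=v0 cyclic factor=4 dim=1
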
void ModuleEmitter::emitFunctionPragmas(FuncOp func, ArrayRef<Value> portList) {
if (auto dataflow = func.getAttrOfType<BoolAttr>("dataflow")) {
if (dataflow.getValue()) {
indent();
os << "#pragma HLS dataflow\n";
// An empty line.
os << "\n";
}
}
// Only the top function should emit interface pragmas.
if (auto topFunction = func.getAttrOfType<BoolAttr>("top_function")) {
if (topFunction.getValue()) {
indent();
os << "#pragma HLS interface s_axilite port=return bundle=ctrl\n";
for (auto &port : portList) {
// Array ports and scalar ports are handled separately. Here, we only
// handle MemRef types since we assume the IR has been fully bufferized.
if (auto memrefType = port.getType().dyn_cast<MemRefType>()) {
indent();
os << "#pragma HLS interface";
// For now, we set the offset of all m_axi interfaces as slave.
if (MemoryKind(memrefType.getMemorySpace()) == MemoryKind::DRAM)
os << " m_axi offset=slave";
else
os << " bram";
os << " port=";
emitValue(port);
os << "\n";
} else {
indent();
os << "#pragma HLS interface s_axilite";
os << " port=";
// TODO: This is a temporary solution.
auto name = getName(port);
if (name.front() == "*"[0])
name.erase(name.begin());
os << name;
os << " bundle=ctrl\n";
}
}
// An empty line.
os << "\n";
}
// Emit other pragmas for function ports.
for (auto &port : portList)
if (port.getType().isa<MemRefType>())
emitArrayPragmas(port);
}
}
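
For reference, a hypothetical top function with a BRAM-kind memref port v0, a DRAM-kind memref port v1, and a scalar port v2 (names illustrative) would receive the following interface pragmas, followed by the resource and array_partition pragmas that emitArrayPragmas produces for the memref ports:

  #pragma HLS interface s_axilite port=return bundle=ctrl
  #pragma HLS interface bram port=v0
  #pragma HLS interface m_axi offset=slave port=v1
  #pragma HLS interface s_axilite port=v2 bundle=ctrl
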
void ModuleEmitter::emitFunction(FuncOp func) {
if (func.getBlocks().size() != 1)
emitError(func, "has zero or more than one basic blocks.");
@ -1473,25 +1505,24 @@ void ModuleEmitter::emitFunction(FuncOp func) {
os << "void " << func.getName() << "(\n";
addIndent();
// This vector is to record all scalar ports.
// This vector records all ports of the function.
SmallVector<Value, 8> portList;
// Handle input arguments.
// Emit input arguments.
unsigned argIdx = 0;
for (auto &arg : func.getArguments()) {
indent();
if (arg.getType().isa<ShapedType>())
emitArrayDecl(arg);
else {
else
emitValue(arg);
portList.push_back(arg);
}
portList.push_back(arg);
if (argIdx++ != func.getNumArguments() - 1)
os << ",\n";
}
// Handle results.
// Emit results.
if (auto funcReturn = dyn_cast<ReturnOp>(func.front().getTerminator())) {
for (auto result : funcReturn.getOperands()) {
os << ",\n";
@ -1500,12 +1531,12 @@ void ModuleEmitter::emitFunction(FuncOp func) {
// index, index. However, typically this should not happen.
if (result.getType().isa<ShapedType>())
emitArrayDecl(result);
else {
else
// In Vivado HLS, pointer indicates the value is an output.
emitValue(result, /*rank=*/0, /*isPtr=*/true);
portList.push_back(result);
}
}
} else
emitError(func, "doesn't have a return operation as terminator.");
@ -1516,40 +1547,7 @@ void ModuleEmitter::emitFunction(FuncOp func) {
// Emit function body.
addIndent();
if (auto dataflow = func.getAttrOfType<BoolAttr>("dataflow")) {
if (dataflow.getValue()) {
indent();
os << "#pragma HLS dataflow\n";
// An empty line.
os << "\n";
}
}
// Only top function should emit these pragmas.
if (auto topFlag = func.getAttrOfType<BoolAttr>("top_function")) {
if (topFlag.getValue()) {
indent();
os << "#pragma HLS interface s_axilite port=return bundle=ctrl\n";
// Interface pragmas for array ports will be handled separately since
// they are much more complicated.
for (auto &port : portList) {
indent();
os << "#pragma HLS interface s_axilite";
os << " port=";
// TODO: This is a temporary solution.
auto name = getName(port);
if (name.front() == "*"[0])
name.erase(name.begin());
os << name;
os << " bundle=ctrl\n";
}
os << "\n";
}
}
emitFunctionPragmas(func, portList);
emitBlock(func.front());
reduceIndent();
os << "}\n";

View File

@ -111,7 +111,6 @@ static void applyArrayPartition(MemAccessesMap &map, OpBuilder &builder) {
// Set new type.
memref.setType(newType);
// TODO: set function type.
}
}
@ -135,10 +134,16 @@ void ArrayPartition::runOnOperation() {
});
// Apply array partition pragma.
// TODO: how to decide which to pick?
applyArrayPartition<mlir::AffineLoadOp>(loadMap, builder);
applyArrayPartition<mlir::AffineStoreOp>(storeMap, builder);
}
}
// Align function type with entry block argument types.
auto resultTypes = func.front().getTerminator()->getOperandTypes();
auto inputTypes = func.front().getArgumentTypes();
func.setType(builder.getFunctionType(inputTypes, resultTypes));
}
std::unique_ptr<mlir::Pass> scalehls::createArrayPartitionPass() {

View File

@ -1,19 +0,0 @@
// RUN: scalehls-opt -legalize-to-hlscpp="top-function=test_array_assign" %s | FileCheck %s
// CHECK-LABEL: func @test_array_assign(
// CHECK-SAME: %arg0: f32, %arg1: memref<16xf32>) -> (f32, memref<16xf32>, i32, memref<2x2xi32>) attributes {dataflow = false, top_function = true} {
func @test_array_assign(%arg0: f32, %arg1: memref<16xf32>) -> (f32, memref<16xf32>, i32, memref<2x2xi32>) {
// CHECK: %0 = "hlscpp.array"(%arg1) {interface = true, partition = false, storage = true, storage_type = "ram_1p_bram"} : (memref<16xf32>) -> memref<16xf32>
%c11_i32 = constant 11 : i32
%cst = constant dense<[[11, 0], [0, -42]]> : tensor<2x2xi32>
// CHECK: %2 = "hlscpp.array"(%1) {interface = true, partition = false, storage = true, storage_type = "ram_1p_bram"} : (memref<2x2xi32>) -> memref<2x2xi32>
%cst_memref = tensor_to_memref %cst : memref<2x2xi32>
// CHECK: %3 = "hlscpp.assign"(%arg0) : (f32) -> f32
// CHECK: %4 = "hlscpp.assign"(%0) : (memref<16xf32>) -> memref<16xf32>
// CHECK: %5 = "hlscpp.assign"(%c11_i32) : (i32) -> i32
// CHECK: return %3, %4, %5, %2 : f32, memref<16xf32>, i32, memref<2x2xi32>
return %arg0, %arg1, %c11_i32, %cst_memref : f32, memref<16xf32>, i32, memref<2x2xi32>
}

View File

@ -0,0 +1,15 @@
// RUN: scalehls-opt -legalize-to-hlscpp="top-function=test_assign" %s | FileCheck %s
// CHECK-LABEL: func @test_assign(
// CHECK-SAME: %arg0: f32, %arg1: memref<16xf32, 1>) -> (f32, memref<16xf32, 1>, i32, memref<2x2xi32, 1>) attributes {dataflow = false, top_function = true} {
func @test_assign(%arg0: f32, %arg1: memref<16xf32, 1>) -> (f32, memref<16xf32, 1>, i32, memref<2x2xi32, 1>) {
%c11_i32 = constant 11 : i32
%cst = constant dense<[[11, 0], [0, -42]]> : tensor<2x2xi32>
%cst_memref = tensor_to_memref %cst : memref<2x2xi32, 1>
// CHECK: %1 = "hlscpp.assign"(%arg0) : (f32) -> f32
// CHECK: %2 = "hlscpp.assign"(%arg1) : (memref<16xf32, 1>) -> memref<16xf32, 1>
// CHECK: %3 = "hlscpp.assign"(%c11_i32) : (i32) -> i32
// CHECK: return %1, %2, %3, %0 : f32, memref<16xf32, 1>, i32, memref<2x2xi32, 1>
return %arg0, %arg1, %c11_i32, %cst_memref : f32, memref<16xf32, 1>, i32, memref<2x2xi32, 1>
}

View File

@ -15,6 +15,7 @@ set(LIBS
MLIRSupport
MLIRScaleHLSEmitHLSCpp
MLIRScaleHLSAnalysis
)
add_llvm_executable(scalehls-translate scalehls-translate.cpp)