[Readme] add onnx-mlir integration test instructions; add mnist.onnx and resnet18 test cases; [EmitHLSCpp] support cast op emission

Hanchen Ye 2020-12-24 00:15:47 -06:00
parent a3b4ed185b
commit f02955c284
6 changed files with 3087 additions and 8 deletions

View File

@@ -4,7 +4,7 @@ This project aims to create a framework that ultimately converts an algorithm wr
## Quick Start
### 1. Install LLVM and MLIR
**IMPORTANT** This step assumes that you have cloned LLVM from https://github.com/circt/llvm to `$LLVM_DIR`. To build LLVM and MLIR, run
**IMPORTANT** This step assumes that you have cloned LLVM from https://github.com/circt/llvm to `$LLVM_DIR`. To build LLVM and MLIR, run:
```sh
$ mkdir $LLVM_DIR/build
$ cd $LLVM_DIR/build
@@ -18,7 +18,7 @@ $ ninja check-mlir
```
### 2. Install ScaleHLS
This step assumes this repository is cloned to `$SCALEHLS_DIR`. To build and launch the tests, run
This step assumes this repository is cloned to `$SCALEHLS_DIR`. To build and launch the tests, run:
```sh
$ mkdir $SCALEHLS_DIR/build
$ cd $SCALEHLS_DIR/build
@@ -31,7 +31,7 @@ $ ninja check-scalehls
```
### 3. Try ScaleHLS
After the installation and tests have successfully completed, you should be able to play with
After the installation and tests have successfully completed, you should be able to play with:
```sh
$ export PATH=$SCALEHLS_DIR/build/bin:$PATH
$ cd $SCALEHLS_DIR
@@ -61,7 +61,28 @@ $ benchmark-gen -type "cnn" -config "config/cnn-config.ini" -number 1 \
| scalehls-translate -emit-hlscpp
```
## Ablation study
## Integration with ONNX-MLIR
If you have installed ONNX-MLIR to `$ONNXMLIR_DIR` following the instructions at https://github.com/onnx/onnx-mlir, you should be able to run the following integration test:
```sh
$ cd $SCALEHLS_DIR/test/onnx-mlir
$ # Parse ONNX model to MLIR.
$ $ONNXMLIR_DIR/build/bin/onnx-mlir -EmitONNXIR mnist.onnx
$ # Lower from ONNX dialect to Affine dialect.
$ $ONNXMLIR_DIR/build/bin/onnx-mlir-opt mnist.onnx.mlir -shape-inference \
-convert-onnx-to-krnl -pack-krnl-constants \
-convert-krnl-to-affine > mnist.mlir
$ # Legalize the output of ONNX-MLIR, optimize and emit C++ code.
$ scalehls-opt mnist.mlir -legalize-onnx \
-affine-loop-perfection -affine-loop-normalize \
-convert-to-hlscpp="top-function=main_graph" \
-store-op-forward -simplify-memref-access -cse -canonicalize \
| scalehls-translate -emit-hlscpp
```
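The same flow should extend to the bundled ResNet-18 test case: `resnet18.mlir` is the pre-lowered model and `resnet18.cpp` is the C++ it is expected to produce. A minimal sketch, assuming `resnet18.mlir` was lowered with the ONNX-MLIR pipeline above and that its top function is also named `main_graph`:
```sh
$ cd $SCALEHLS_DIR/test/onnx-mlir
$ # Sketch: reuse the pipeline above; the top function name is assumed
$ # to be main_graph, as in the mnist example.
$ scalehls-opt resnet18.mlir -legalize-onnx \
  -affine-loop-perfection -affine-loop-normalize \
  -convert-to-hlscpp="top-function=main_graph" \
  -store-op-forward -simplify-memref-access -cse -canonicalize \
  | scalehls-translate -emit-hlscpp
```
The emitted code can then be compared against the checked-in `resnet18.cpp`.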
## Ablation Study (Deprecated)
If Vivado HLS (tested with 2019.1) is installed on your machine, running the `ablation_test_run.sh` script will report the HLS results for some benchmarks (around 8 hours on an AMD Ryzen 7 3800X for all 33 tests).
For this script, `-n` sets the number of tests to process (33 at most) and `-c` sets the test from which the C++ synthesis is rerun. The generated C++ source code is written to `sample/cpp_src`, the Vivado HLS project is created in `sample/hls_proj`, the collected reports are written to `sample/test_results`, and the test summary is generated in `sample`.
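A hypothetical invocation, assuming the script is run from the directory that contains it (the flag values are illustrative):
```sh
$ # Process all 33 tests and rerun C++ synthesis starting from the first one.
$ ./ablation_test_run.sh -n 33 -c 1
```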

View File

@@ -52,7 +52,8 @@ public:
AddCFOp, SubCFOp, ImOp, ReOp, CreateComplexOp,
// Special operations.
SelectOp, ConstantOp, CopySignOp, TruncateIOp, ZeroExtendIOp,
SignExtendIOp, IndexCastOp, CallOp, ReturnOp,
SignExtendIOp, IndexCastOp, CallOp, ReturnOp, UIToFPOp, SIToFPOp,
FPToSIOp, FPToUIOp,
// Structure operations.
AssignOp, ArrayOp, EndOp>([&](auto opNode) -> ResultType {
return thisCast->visitOp(opNode, args...);
@@ -184,6 +185,10 @@ public:
HANDLE(IndexCastOp);
HANDLE(CallOp);
HANDLE(ReturnOp);
HANDLE(UIToFPOp);
HANDLE(SIToFPOp);
HANDLE(FPToUIOp);
HANDLE(FPToSIOp);
// Structure operations.
HANDLE(AssignOp);

View File

@@ -207,7 +207,7 @@ public:
/// Special operation emitters.
void emitSelect(SelectOp *op);
void emitConstant(ConstantOp *op);
void emitIndexCast(IndexCastOp *op);
template <typename CastOpType> void emitCast(CastOpType *op);
void emitCall(CallOp *op);
/// Structure operations emitters.
@@ -423,7 +423,13 @@ public:
/// Special operations.
bool visitOp(SelectOp op) { return emitter.emitSelect(&op), true; }
bool visitOp(ConstantOp op) { return emitter.emitConstant(&op), true; }
bool visitOp(IndexCastOp op) { return emitter.emitIndexCast(&op), true; }
bool visitOp(IndexCastOp op) {
return emitter.emitCast<IndexCastOp>(&op), true;
}
bool visitOp(UIToFPOp op) { return emitter.emitCast<UIToFPOp>(&op), true; }
bool visitOp(SIToFPOp op) { return emitter.emitCast<SIToFPOp>(&op), true; }
bool visitOp(FPToUIOp op) { return emitter.emitCast<FPToUIOp>(&op), true; }
bool visitOp(FPToSIOp op) { return emitter.emitCast<FPToSIOp>(&op), true; }
bool visitOp(CallOp op) { return emitter.emitCall(&op), true; }
bool visitOp(ReturnOp op) { return true; }
@@ -1194,7 +1200,7 @@ void ModuleEmitter::emitConstant(ConstantOp *op) {
emitError(*op, "has unsupported constant type.");
}
void ModuleEmitter::emitIndexCast(IndexCastOp *op) {
template <typename CastOpType> void ModuleEmitter::emitCast(CastOpType *op) {
indent();
emitValue(op->getResult());
os << " = ";

test/onnx-mlir/mnist.onnx (new binary file; contents not shown)

test/onnx-mlir/resnet18.cpp (new file, 1542 lines; diff suppressed because it is too large)

test/onnx-mlir/resnet18.mlir (new file, 1505 lines; diff suppressed because it is too large)