From 3bea951beb2cbc244bbbee9b1fb82eb069c3dd61 Mon Sep 17 00:00:00 2001 From: Hanchen Ye Date: Tue, 22 Feb 2022 00:46:36 -0600 Subject: [PATCH] [ArrayPartition][LegalizeHLSCpp] Handle the memref type of global op --- lib/Transforms/Directive/ArrayPartition.cpp | 9 +++++++++ lib/Transforms/LegalizeToHLSCpp.cpp | 9 +++++++++ lib/Transforms/Passes.cpp | 2 +- samples/pytorch/torch-mlir/README.md | 2 +- 4 files changed, 20 insertions(+), 2 deletions(-) diff --git a/lib/Transforms/Directive/ArrayPartition.cpp b/lib/Transforms/Directive/ArrayPartition.cpp index da34841..23d43c8 100644 --- a/lib/Transforms/Directive/ArrayPartition.cpp +++ b/lib/Transforms/Directive/ArrayPartition.cpp @@ -6,6 +6,7 @@ #include "mlir/Analysis/AffineAnalysis.h" #include "mlir/Dialect/Affine/IR/AffineValueMap.h" +#include "mlir/Dialect/MemRef/IR/MemRef.h" #include "mlir/Dialect/Vector/VectorOps.h" #include "scalehls/Transforms/Passes.h" #include "scalehls/Transforms/Utils.h" @@ -91,6 +92,14 @@ bool scalehls::applyArrayPartition(Value array, ArrayRef<unsigned> factors, // Set new type. array.setType(newType); + // FIXME: This is a very very bad practice... + // TODO: How to represent different memory resource? 
+ if (auto getGlobal = array.getDefiningOp<memref::GetGlobalOp>()) { + auto module = getGlobal->getParentOfType<ModuleOp>(); + auto global = module.lookupSymbol<memref::GlobalOp>(getGlobal.nameAttr()); + global->setAttr(global.typeAttrName(), TypeAttr::get(newType)); + } + if (updateFuncSignature) if (auto func = dyn_cast<FuncOp>(array.getParentBlock()->getParentOp())) { // Align function type with entry block argument types only if the array diff --git a/lib/Transforms/LegalizeToHLSCpp.cpp b/lib/Transforms/LegalizeToHLSCpp.cpp index c89f62a..abbaa2f 100644 --- a/lib/Transforms/LegalizeToHLSCpp.cpp +++ b/lib/Transforms/LegalizeToHLSCpp.cpp @@ -97,6 +97,15 @@ bool scalehls::applyLegalizeToHLSCpp(FuncOp func, bool isTopFunc) { MemRefType::get(type.getShape(), type.getElementType(), type.getLayout().getAffineMap(), (unsigned)kind); memref.setType(newType); + + // FIXME: This is a very very bad practice... + // TODO: How to represent different memory resource? + if (auto getGlobal = memref.getDefiningOp<memref::GetGlobalOp>()) { + auto module = getGlobal->getParentOfType<ModuleOp>(); + auto global = + module.lookupSymbol<memref::GlobalOp>(getGlobal.nameAttr()); + global->setAttr(global.typeAttrName(), TypeAttr::get(newType)); + } } } diff --git a/lib/Transforms/Passes.cpp b/lib/Transforms/Passes.cpp index d0a637e..fd95184 100644 --- a/lib/Transforms/Passes.cpp +++ b/lib/Transforms/Passes.cpp @@ -52,7 +52,7 @@ void scalehls::registerScaleHLSPassPipeline() { pm.addPass(mlir::createSimplifyAffineStructuresPass()); pm.addPass(mlir::createCanonicalizerPass()); } else - llvm_unreachable("please use support front-end: torch or onnx."); + llvm_unreachable("please use supported front-end: torch or onnx."); // Graph-level optimizations. if (dataflowGran) { diff --git a/samples/pytorch/torch-mlir/README.md b/samples/pytorch/torch-mlir/README.md index 8a2de4a..2563b65 100644 --- a/samples/pytorch/torch-mlir/README.md +++ b/samples/pytorch/torch-mlir/README.md @@ -7,11 +7,11 @@ $ # Parse PyTorch model to Linalg dialect (with mlir_venv activated). 
$ python3 export_resnet18_mlir.py | torch-mlir-opt \ -torchscript-module-to-torch-backend-pipeline="optimize=true" \ -torch-backend-to-linalg-on-tensors-backend-pipeline="optimize=true" \ - -linalg-comprehensive-module-bufferize="allow-return-memref allow-unknown-ops create-deallocs=false" \ -canonicalize > resnet18.mlir $ # Optimize the model and emit C++ code (not working, will be fixed soon). $ scalehls-opt resnet18.mlir \ + -linalg-comprehensive-module-bufferize="allow-return-memref allow-unknown-ops create-deallocs=false" \ -scalehls-pipeline="top-func=main_graph opt-level=2 frontend=torch" \ | scalehls-translate -emit-hlscpp > resnet18.cpp ```