[mlir][Standard] NFC - Drop remaining EDSC usage

Drop the remaining EDSC subdirectories and update all uses.

Differential Revision: https://reviews.llvm.org/D102911
This commit is contained in:
Nicolas Vasilache 2021-05-21 10:27:56 +00:00
parent eb6429d0fb
commit 8eb18a0f3e
38 changed files with 20 additions and 760 deletions
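The overall migration pattern in this change replaces EDSC `ScopedContext` plus intrinsics with explicit builder calls, typically through `ImplicitLocOpBuilder` as in the Linalg-to-LLVM conversion below. A minimal before/after sketch (the `op`, `lhs` and `rhs` names are illustrative, not taken from a specific file in this diff):
```c++
// Before: an EDSC ScopedContext captures builder and location implicitly,
// and intrinsics such as std_addf pick them up from thread-local state.
//   edsc::ScopedContext context(rewriter, op->getLoc());
//   Value sum = std_addf(lhs, rhs);

// After: the location travels with an ImplicitLocOpBuilder and ops are
// created explicitly.
ImplicitLocOpBuilder b(op->getLoc(), rewriter);
Value sum = b.create<AddFOp>(lhs, rhs);
```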

View File

@ -11,7 +11,6 @@ set(LIBS
MLIRAffineToStandard
MLIRAnalysis
MLIRSCFToStandard
MLIREDSC
MLIRParser
MLIRStandardToLLVM
MLIRSupport

View File

@ -1,110 +0,0 @@
# Background: declarative builders API
The main purpose of the declarative builders API is to provide an intuitive way
of constructing MLIR programmatically. In the majority of cases, the IR we wish
to construct exhibits structured control flow. The declarative builders in the
`EDSC` library (Embedded Domain Specific Constructs) provide a C++ API that
makes MLIR construction and manipulation idiomatic for this structured
control-flow case.
## ScopedContext
`mlir::edsc::ScopedContext` provides an implicit thread-local context,
supporting a simple declarative API with globally accessible builders. These
declarative builders are available within the lifetime of a `ScopedContext`.
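For illustration, a minimal usage sketch (assuming a `FuncOp f` whose body we want to populate):
```c++
OpBuilder builder(f.getBody());
ScopedContext scope(builder, f.getLoc());
// While `scope` is alive, intrinsics such as std_constant_index implicitly
// use the builder and location captured above.
Value zero = std_constant_index(0);
```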
## Intrinsics
`mlir::edsc::ValueBuilder` is a generic wrapper for the `mlir::OpBuilder::create`
method that operates on `Value` objects and returns a single `Value`. For
operations that return no values or that return multiple values,
`mlir::edsc::OperationBuilder` can be used instead. Named intrinsics are
provided as syntactic sugar to further reduce boilerplate.
```c++
using load = ValueBuilder<LoadOp>;
using store = OperationBuilder<StoreOp>;
```
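With these aliases in scope, a load/store pair can then be emitted as follows (a sketch; `in`, `out` and `ivs` are assumed to be previously defined memref values and index values):
```c++
Value v = load(in, ivs);
store(v, out, ivs);
```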
## LoopBuilder and AffineLoopNestBuilder
`mlir::edsc::AffineLoopNestBuilder` provides an interface to allow writing
concise and structured loop nests.
```c++
ScopedContext scope(f.get());
Value i, j, lb(f->getArgument(0)), ub(f->getArgument(1));
Value f7(std_constant_float(llvm::APFloat(7.0f), f32Type)),
    f13(std_constant_float(llvm::APFloat(13.0f), f32Type)),
    i7(std_constant_int(7, 32)),
    i13(std_constant_int(13, 32));
AffineLoopNestBuilder(&i, lb, ub, 3)([&]{
  lb * index_type(3) + ub;
  lb + index_type(3);
  AffineLoopNestBuilder(&j, lb, ub, 2)([&]{
    ceilDiv(index_type(31) * floorDiv(i + j * index_type(3), index_type(32)),
            index_type(32));
    ((f7 + f13) / f7) % f13 - f7 * f13;
    ((i7 + i13) / i7) % i13 - i7 * i13;
  });
});
```
## IndexedValue
`mlir::edsc::IndexedValue` provides an index notation around load and store
operations on abstract data types by overloading the C++ assignment and
parenthesis operators. The relevant loads and stores are emitted as appropriate.
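For example, an element-wise copy then reads as a plain assignment (a sketch; `in`, `out` and `ivs` are assumed to be defined in the surrounding loop nest):
```c++
IndexedValue A(in), B(out);
// operator() captures the indices; operator= emits the load from A
// followed by a store into B.
B(ivs) = A(ivs);
```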
## Putting it all together
With declarative builders, it becomes fairly concise to build rank- and
type-agnostic custom operations, even though MLIR does not yet have generic
types. Here is what the definition of a general pointwise add looks like in
TableGen with declarative builders.
```c++
def AddOp : Op<"x.add">,
    Arguments<(ins Tensor:$A, Tensor:$B)>,
    Results<(outs Tensor: $C)> {
  code referenceImplementation = [{
    SmallVector<Value, 4> ivs(view_A.rank());
    IndexedValue A(arg_A), B(arg_B), C(arg_C);
    AffineLoopNestBuilder(
        ivs, view_A.getLbs(), view_A.getUbs(), view_A.getSteps())([&]{
      C(ivs) = A(ivs) + B(ivs);
    });
  }];
}
```
Depending on the function signature on which this emitter is called, the
generated IR resembles the following, for a 4-D memref of `vector<4xi8>`:
```
// CHECK-LABEL: func @t1(%lhs: memref<3x4x5x6xvector<4xi8>>, %rhs: memref<3x4x5x6xvector<4xi8>>, %result: memref<3x4x5x6xvector<4xi8>>) -> () {
// CHECK: affine.for {{.*}} = 0 to 3 {
// CHECK: affine.for {{.*}} = 0 to 4 {
// CHECK: affine.for {{.*}} = 0 to 5 {
// CHECK: affine.for {{.*}} = 0 to 6 {
// CHECK: {{.*}} = load %arg1[{{.*}}] : memref<3x4x5x6xvector<4xi8>>
// CHECK: {{.*}} = load %arg0[{{.*}}] : memref<3x4x5x6xvector<4xi8>>
// CHECK: {{.*}} = addi {{.*}} : vector<4xi8>
// CHECK: store {{.*}}, %arg2[{{.*}}] : memref<3x4x5x6xvector<4xi8>>
```
or the following, for a 0-D `memref<f32>`:
```
// CHECK-LABEL: func @t3(%lhs: memref<f32>, %rhs: memref<f32>, %result: memref<f32>) -> () {
// CHECK: {{.*}} = load %arg1[] : memref<f32>
// CHECK: {{.*}} = load %arg0[] : memref<f32>
// CHECK: {{.*}} = addf {{.*}}, {{.*}} : f32
// CHECK: store {{.*}}, %arg2[] : memref<f32>
```
Similar APIs are provided to emit the lower-level `scf.for` op with
`LoopNestBuilder`. See the `builder-api-test.cpp` test for more usage examples.
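As a hedged sketch, assuming `LoopNestBuilder` mirrors the `AffineLoopNestBuilder` form shown above and `lb`, `ub` and `step` are index values already in scope:
```c++
Value iv;
LoopNestBuilder(&iv, lb, ub, step)([&] {
  // The body is emitted here with the same intrinsics, producing scf.for
  // instead of affine.for.
});
```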
Since declarative builders are implemented in C++, they can also be used to
program the IR directly, giving an embedded-DSL flavor that is integrated into
MLIR.

View File

@ -1809,7 +1809,7 @@ requirements that were desirable:
will consider it.
* MLIR allows both defined and undefined ops.
* Defined ops should have fixed semantics and could have a corresponding
reference implementation defined using, for example, EDSC.
reference implementation defined.
* Dialects are under full control of the dialect owner and normally live
with the framework of the dialect.
* The op's traits (e.g., commutative) are modelled along with the op in the

View File

@ -12,7 +12,6 @@
#include "mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h"
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"

View File

@ -1,25 +0,0 @@
//===- Intrinsics.h - MLIR EDSC Intrinsics for Math ops ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef MLIR_DIALECT_MATH_EDSC_INTRINSICS_H_
#define MLIR_DIALECT_MATH_EDSC_INTRINSICS_H_
#include "mlir/Dialect/Math/IR/Math.h"
#include "mlir/EDSC/Builders.h"
namespace mlir {
namespace edsc {
namespace intrinsics {
using math_rsqrt = ValueBuilder<math::RsqrtOp>;
using math_tanh = ValueBuilder<math::TanhOp>;
} // namespace intrinsics
} // namespace edsc
} // namespace mlir
#endif // MLIR_DIALECT_MATH_EDSC_INTRINSICS_H_

View File

@ -1,63 +0,0 @@
//===- Builders.h - MLIR EDSC Builders for StandardOps ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef MLIR_DIALECT_STANDARDOPS_EDSC_BUILDERS_H_
#define MLIR_DIALECT_STANDARDOPS_EDSC_BUILDERS_H_
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/EDSC/Builders.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/Types.h"
namespace mlir {
namespace edsc {
/// Base class for MemRefBoundsCapture and VectorBoundsCapture.
class BoundsCapture {
public:
unsigned rank() const { return lbs.size(); }
Value lb(unsigned idx) const { return lbs[idx]; }
Value ub(unsigned idx) const { return ubs[idx]; }
int64_t step(unsigned idx) const { return steps[idx]; }
std::tuple<Value, Value, int64_t> range(unsigned idx) const {
return std::make_tuple(lbs[idx], ubs[idx], steps[idx]);
}
void swapRanges(unsigned i, unsigned j) {
if (i == j)
return;
std::swap(lbs[i], lbs[j]);
std::swap(ubs[i], ubs[j]);
std::swap(steps[i], steps[j]);
}
ArrayRef<Value> getLbs() const { return lbs; }
ArrayRef<Value> getUbs() const { return ubs; }
ArrayRef<int64_t> getSteps() const { return steps; }
protected:
SmallVector<Value, 8> lbs;
SmallVector<Value, 8> ubs;
SmallVector<int64_t, 8> steps;
};
/// A VectorBoundsCapture represents the information required to step through a
/// Vector accessing each scalar element at a time. It is the counterpart of
/// a MemRefBoundsCapture but for vectors. This exists purely for boilerplate
/// avoidance.
class VectorBoundsCapture : public BoundsCapture {
public:
explicit VectorBoundsCapture(Value v);
explicit VectorBoundsCapture(VectorType t);
private:
Value base;
};
} // namespace edsc
} // namespace mlir
#endif // MLIR_DIALECT_STANDARDOPS_EDSC_BUILDERS_H_

View File

@ -1,82 +0,0 @@
//===- Intrinsics.h - MLIR EDSC Intrinsics for StandardOps ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef MLIR_DIALECT_STANDARDOPS_EDSC_INTRINSICS_H_
#define MLIR_DIALECT_STANDARDOPS_EDSC_INTRINSICS_H_
#include "mlir/Dialect/StandardOps/EDSC/Builders.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
namespace mlir {
namespace edsc {
namespace intrinsics {
using std_addi = ValueBuilder<AddIOp>;
using std_addf = ValueBuilder<AddFOp>;
using std_call = OperationBuilder<CallOp>;
using std_constant = ValueBuilder<ConstantOp>;
using std_constant_float = ValueBuilder<ConstantFloatOp>;
using std_constant_index = ValueBuilder<ConstantIndexOp>;
using std_constant_int = ValueBuilder<ConstantIntOp>;
using std_divis = ValueBuilder<SignedDivIOp>;
using std_diviu = ValueBuilder<UnsignedDivIOp>;
using std_fpext = ValueBuilder<FPExtOp>;
using std_fptrunc = ValueBuilder<FPTruncOp>;
using std_index_cast = ValueBuilder<IndexCastOp>;
using std_muli = ValueBuilder<MulIOp>;
using std_mulf = ValueBuilder<MulFOp>;
using std_ret = OperationBuilder<ReturnOp>;
using std_select = ValueBuilder<SelectOp>;
using std_sign_extendi = ValueBuilder<SignExtendIOp>;
using std_splat = ValueBuilder<SplatOp>;
using std_subf = ValueBuilder<SubFOp>;
using std_subi = ValueBuilder<SubIOp>;
using std_zero_extendi = ValueBuilder<ZeroExtendIOp>;
using tensor_extract = ValueBuilder<tensor::ExtractOp>;
template <int N>
struct SExtiValueBuilder : public ValueBuilder<SignExtendIOp> {
using ValueBuilder<SignExtendIOp>::ValueBuilder;
template <typename... Args>
SExtiValueBuilder(Args... args)
: ValueBuilder<SignExtendIOp>(ScopedContext::getBuilderRef().getI32Type(),
args...) {}
};
using std_sexti32 = SExtiValueBuilder<32>;
template <CmpFPredicate Predicate>
struct CmpFValueBuilder : public ValueBuilder<CmpFOp> {
using ValueBuilder<CmpFOp>::ValueBuilder;
template <typename... Args>
CmpFValueBuilder(Args... args) : ValueBuilder<CmpFOp>(Predicate, args...) {}
};
using std_cmpf_ogt = CmpFValueBuilder<CmpFPredicate::OGT>;
using std_cmpf_olt = CmpFValueBuilder<CmpFPredicate::OLT>;
template <CmpIPredicate Predicate>
struct CmpIValueBuilder : public ValueBuilder<CmpIOp> {
using ValueBuilder<CmpIOp>::ValueBuilder;
template <typename... Args>
CmpIValueBuilder(Args... args) : ValueBuilder<CmpIOp>(Predicate, args...) {}
};
using std_cmpi_sgt = CmpIValueBuilder<CmpIPredicate::sgt>;
/// Branches into `block` with `operands`.
BranchOp std_br(Block *block, ValueRange operands);
/// Branches into `trueBranch` with `trueOperands` if `cond` evaluates to `true`
/// or into `falseBranch` with `falseOperands` if `cond` evaluates to `false`.
CondBranchOp std_cond_br(Value cond, Block *trueBranch, ValueRange trueOperands,
Block *falseBranch, ValueRange falseOperands);
} // namespace intrinsics
} // namespace edsc
} // namespace mlir
#endif // MLIR_DIALECT_STANDARDOPS_EDSC_INTRINSICS_H_

View File

@ -1,175 +0,0 @@
//===- Builders.h - MLIR Declarative Builder Classes ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Provides intuitive composable interfaces for building structured MLIR
// snippets in a declarative fashion.
//
//===----------------------------------------------------------------------===//
#ifndef MLIR_EDSC_BUILDERS_H_
#define MLIR_EDSC_BUILDERS_H_
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Types.h"
namespace mlir {
class OperationFolder;
namespace edsc {
/// Helper class to transparently handle builder insertion points via RAII.
/// As its name indicates, a ScopedContext is meant to be used locally in a
/// scoped fashion. It abstracts away all the boilerplate related to checking
/// proper usage of captures and NestedBuilders, as well as handling the
/// setting and restoring of insertion points.
class ScopedContext {
public:
ScopedContext(OpBuilder &b);
ScopedContext(OpBuilder &b, Location location);
/// Sets the insertion point of the builder to 'newInsertPt' for the duration
/// of the scope. The existing insertion point of the builder is restored on
/// destruction.
ScopedContext(OpBuilder &b, OpBuilder::InsertPoint newInsertPt,
Location location);
~ScopedContext();
static MLIRContext *getContext();
static OpBuilder &getBuilderRef();
static Location getLocation();
private:
/// Only NestedBuilder (which is used to create an operation with a body)
/// may access private members in order to implement scoping.
friend class NestedBuilder;
ScopedContext() = delete;
ScopedContext(const ScopedContext &) = delete;
ScopedContext &operator=(const ScopedContext &) = delete;
static ScopedContext *&getCurrentScopedContext();
/// Top level OpBuilder.
OpBuilder &builder;
/// Guard to the previous insertion point.
OpBuilder::InsertionGuard guard;
/// Current location.
Location location;
/// Parent context we return into.
ScopedContext *enclosingScopedContext;
};
template <typename Op>
struct ValueBuilder {
template <typename... Args>
ValueBuilder(Args... args) {
value = ScopedContext::getBuilderRef()
.create<Op>(ScopedContext::getLocation(), args...)
.getResult();
}
operator Value() { return value; }
Value value;
};
template <typename Op>
struct OperationBuilder {
template <typename... Args>
OperationBuilder(Args... args) {
op = ScopedContext::getBuilderRef().create<Op>(ScopedContext::getLocation(),
args...);
}
operator Op() { return op; }
operator Operation *() { return op.getOperation(); }
Op op;
};
/// Creates a block in the region that contains the insertion block of the
/// OpBuilder currently at the top of ScopedContext stack (appends the block to
/// the region). Be aware that this will NOT update the insertion point of the
/// builder to insert into the newly constructed block.
Block *createBlock(TypeRange argTypes = llvm::None);
/// Creates a block in the specified region using OpBuilder at the top of
/// ScopedContext stack (appends the block to the region). Be aware that this
/// will NOT update the insertion point of the builder to insert into the newly
/// constructed block.
Block *createBlockInRegion(Region &region, TypeRange argTypes = llvm::None);
/// Calls "builderFn" with ScopedContext reconfigured to insert into "block" and
/// passes in the block arguments. If the block has a terminator, the operations
/// are inserted before the terminator, otherwise appended to the block.
void appendToBlock(Block *block, function_ref<void(ValueRange)> builderFn);
/// Creates a block in the region that contains the insertion block of the
/// OpBuilder currently at the top of ScopedContext stack, and calls "builderFn"
/// to populate the body of the block while passing it the block arguments.
Block *buildInNewBlock(TypeRange argTypes,
function_ref<void(ValueRange)> builderFn);
/// Creates a block in the specified region using OpBuilder at the top of
/// ScopedContext stack, and calls "builderFn" to populate the body of the block
/// while passing it the block arguments.
Block *buildInNewBlock(Region &region, TypeRange argTypes,
function_ref<void(ValueRange)> builderFn);
/// A StructuredIndexed represents an indexable quantity that is either:
/// 1. a captured value, which is suitable for buffer and tensor operands, or
/// 2. a captured type, which is suitable for tensor return values.
///
/// A StructuredIndexed itself is indexed and passed to `makeGenericLinalgOp`.
/// It enables an idiomatic syntax for index expressions such as:
///
/// ```
/// StructuredIndexed A(buffer_or_tensor_value), B(buffer_or_tensor_value),
/// C(buffer_value_or_tensor_type);
/// makeGenericLinalgOp({A({m, n}), B({k, n})}, {C({m, n})}, ... );
/// ```
struct StructuredIndexed {
StructuredIndexed(Value v) : value(v) {}
StructuredIndexed(Type t) : type(t) {}
StructuredIndexed operator()(ArrayRef<AffineExpr> indexings) {
return value ? StructuredIndexed(value, indexings)
: StructuredIndexed(type, indexings);
}
StructuredIndexed(Value v, ArrayRef<AffineExpr> indexings)
: value(v), exprs(indexings.begin(), indexings.end()) {
assert((v.getType().isa<MemRefType, RankedTensorType, VectorType>()) &&
"MemRef, RankedTensor or Vector expected");
}
StructuredIndexed(Type t, ArrayRef<AffineExpr> indexings)
: type(t), exprs(indexings.begin(), indexings.end()) {
assert((t.isa<MemRefType, RankedTensorType, VectorType>()) &&
"MemRef, RankedTensor or Vector expected");
}
bool hasValue() const { return (bool)value; }
Value getValue() const {
assert(value && "StructuredIndexed Value not set.");
return value;
}
Type getType() const {
assert((value || type) && "StructuredIndexed Value and Type not set.");
return value ? value.getType() : type;
}
ArrayRef<AffineExpr> getExprs() const { return exprs; }
operator Value() const { return getValue(); }
operator Type() const { return getType(); }
private:
// Only one of Value or type may be set.
Type type;
Value value;
SmallVector<AffineExpr, 4> exprs;
};
} // namespace edsc
} // namespace mlir
#endif // MLIR_EDSC_BUILDERS_H_

View File

@ -5,7 +5,6 @@ add_subdirectory(Analysis)
add_subdirectory(Bindings)
add_subdirectory(Conversion)
add_subdirectory(Dialect)
add_subdirectory(EDSC)
add_subdirectory(ExecutionEngine)
add_subdirectory(IR)
add_subdirectory(Interfaces)

View File

@ -14,7 +14,6 @@ add_mlir_conversion_library(MLIRLinalgToLLVM
LINK_LIBS PUBLIC
MLIRAffineToStandard
MLIREDSC
MLIRIR
MLIRLinalg
MLIRLLVMIR

View File

@ -20,7 +20,6 @@
#include "mlir/Dialect/Linalg/IR/LinalgTypes.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Attributes.h"
@ -42,30 +41,9 @@
#include "llvm/Support/ErrorHandling.h"
using namespace mlir;
using namespace mlir::edsc;
using namespace mlir::edsc::intrinsics;
using namespace mlir::LLVM;
using namespace mlir::linalg;
using llvm_add = ValueBuilder<LLVM::AddOp>;
using llvm_bitcast = ValueBuilder<LLVM::BitcastOp>;
using llvm_constant = ValueBuilder<LLVM::ConstantOp>;
using llvm_extractvalue = ValueBuilder<LLVM::ExtractValueOp>;
using llvm_gep = ValueBuilder<LLVM::GEPOp>;
using llvm_insertvalue = ValueBuilder<LLVM::InsertValueOp>;
using llvm_call = OperationBuilder<LLVM::CallOp>;
using llvm_icmp = ValueBuilder<LLVM::ICmpOp>;
using llvm_load = ValueBuilder<LLVM::LoadOp>;
using llvm_store = OperationBuilder<LLVM::StoreOp>;
using llvm_select = ValueBuilder<LLVM::SelectOp>;
using llvm_mul = ValueBuilder<LLVM::MulOp>;
using llvm_ptrtoint = ValueBuilder<LLVM::PtrToIntOp>;
using llvm_sub = ValueBuilder<LLVM::SubOp>;
using llvm_undef = ValueBuilder<LLVM::UndefOp>;
using llvm_urem = ValueBuilder<LLVM::URemOp>;
using llvm_alloca = ValueBuilder<LLVM::AllocaOp>;
using llvm_return = OperationBuilder<LLVM::ReturnOp>;
template <typename T>
static Type getPtrToElementType(T containerType, LLVMTypeConverter &lowering) {
return LLVMPointerType::get(
@ -87,41 +65,6 @@ static Type convertRangeType(RangeType t, LLVMTypeConverter &converter) {
}
namespace {
/// EDSC-compatible wrapper for MemRefDescriptor.
class BaseViewConversionHelper {
public:
BaseViewConversionHelper(Type type)
: d(MemRefDescriptor::undef(rewriter(), loc(), type)) {}
BaseViewConversionHelper(Value v) : d(v) {}
/// Wrappers around MemRefDescriptor that use EDSC builder and location.
Value allocatedPtr() { return d.allocatedPtr(rewriter(), loc()); }
void setAllocatedPtr(Value v) { d.setAllocatedPtr(rewriter(), loc(), v); }
Value alignedPtr() { return d.alignedPtr(rewriter(), loc()); }
void setAlignedPtr(Value v) { d.setAlignedPtr(rewriter(), loc(), v); }
Value offset() { return d.offset(rewriter(), loc()); }
void setOffset(Value v) { d.setOffset(rewriter(), loc(), v); }
Value size(unsigned i) { return d.size(rewriter(), loc(), i); }
void setSize(unsigned i, Value v) { d.setSize(rewriter(), loc(), i, v); }
void setConstantSize(unsigned i, int64_t v) {
d.setConstantSize(rewriter(), loc(), i, v);
}
Value stride(unsigned i) { return d.stride(rewriter(), loc(), i); }
void setStride(unsigned i, Value v) { d.setStride(rewriter(), loc(), i, v); }
void setConstantStride(unsigned i, int64_t v) {
d.setConstantStride(rewriter(), loc(), i, v);
}
operator Value() { return d; }
private:
OpBuilder &rewriter() { return ScopedContext::getBuilderRef(); }
Location loc() { return ScopedContext::getLocation(); }
MemRefDescriptor d;
};
// RangeOp creates a new range descriptor.
class RangeOpConversion : public ConvertOpToLLVMPattern<RangeOp> {
public:
@ -133,14 +76,17 @@ public:
auto rangeDescriptorTy = convertRangeType(
rangeOp.getType().cast<RangeType>(), *getTypeConverter());
edsc::ScopedContext context(rewriter, rangeOp->getLoc());
ImplicitLocOpBuilder b(rangeOp->getLoc(), rewriter);
// Fill in an aggregate value of the descriptor.
RangeOpAdaptor adaptor(operands);
Value desc = llvm_undef(rangeDescriptorTy);
desc = llvm_insertvalue(desc, adaptor.min(), rewriter.getI64ArrayAttr(0));
desc = llvm_insertvalue(desc, adaptor.max(), rewriter.getI64ArrayAttr(1));
desc = llvm_insertvalue(desc, adaptor.step(), rewriter.getI64ArrayAttr(2));
Value desc = b.create<LLVM::UndefOp>(rangeDescriptorTy);
desc = b.create<LLVM::InsertValueOp>(desc, adaptor.min(),
rewriter.getI64ArrayAttr(0));
desc = b.create<LLVM::InsertValueOp>(desc, adaptor.max(),
rewriter.getI64ArrayAttr(1));
desc = b.create<LLVM::InsertValueOp>(desc, adaptor.step(),
rewriter.getI64ArrayAttr(2));
rewriter.replaceOp(rangeOp, desc);
return success();
}
@ -169,17 +115,18 @@ public:
}))
return failure();
edsc::ScopedContext context(rewriter, reshapeOp->getLoc());
ReshapeOpAdaptor adaptor(operands);
BaseViewConversionHelper baseDesc(adaptor.src());
BaseViewConversionHelper desc(typeConverter->convertType(dstType));
desc.setAllocatedPtr(baseDesc.allocatedPtr());
desc.setAlignedPtr(baseDesc.alignedPtr());
desc.setOffset(baseDesc.offset());
MemRefDescriptor baseDesc(adaptor.src());
Location loc = reshapeOp->getLoc();
auto desc = MemRefDescriptor::undef(rewriter, reshapeOp->getLoc(),
typeConverter->convertType(dstType));
desc.setAllocatedPtr(rewriter, loc, baseDesc.allocatedPtr(rewriter, loc));
desc.setAlignedPtr(rewriter, loc, baseDesc.alignedPtr(rewriter, loc));
desc.setOffset(rewriter, loc, baseDesc.offset(rewriter, loc));
for (auto en : llvm::enumerate(dstType.getShape()))
desc.setConstantSize(en.index(), en.value());
desc.setConstantSize(rewriter, loc, en.index(), en.value());
for (auto en : llvm::enumerate(strides))
desc.setConstantStride(en.index(), en.value());
desc.setConstantStride(rewriter, loc, en.index(), en.value());
rewriter.replaceOp(reshapeOp, {desc});
return success();
}

View File

@ -11,7 +11,6 @@ add_mlir_conversion_library(MLIRLinalgToStandard
Core
LINK_LIBS PUBLIC
MLIREDSC
MLIRIR
MLIRLinalg
MLIRMemRef

View File

@ -17,7 +17,6 @@ add_mlir_conversion_library(MLIRShapeToStandard
Core
LINK_LIBS PUBLIC
MLIREDSC
MLIRIR
MLIRMemRef
MLIRShape

View File

@ -8,7 +8,6 @@ add_mlir_conversion_library(MLIRVectorToSCF
Core
LINK_LIBS PUBLIC
MLIREDSC
MLIRLLVMIR
MLIRMemRef
MLIRTransforms

View File

@ -11,7 +11,6 @@ add_mlir_dialect_library(MLIRAffine
MLIRAffineOpsIncGen
LINK_LIBS PUBLIC
MLIREDSC
MLIRIR
MLIRLoopLikeInterface
MLIRMemRef

View File

@ -20,7 +20,6 @@ add_mlir_dialect_library(MLIRAffineTransforms
LINK_LIBS PUBLIC
MLIRAffine
MLIRAffineUtils
MLIREDSC
MLIRIR
MLIRMemRef
MLIRPass

View File

@ -47,7 +47,6 @@ add_mlir_dialect_library(MLIRGPU
MLIRAsync
MLIRDataLayoutInterfaces
MLIRDLTI
MLIREDSC
MLIRIR
MLIRMemRef
MLIRLLVMIR

View File

@ -13,15 +13,14 @@
#include "mlir/Dialect/GPU/MemoryPromotion.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/LoopUtils.h"
using namespace mlir;
using namespace mlir::edsc;
using namespace mlir::edsc::intrinsics;
using namespace mlir::gpu;
/// Returns the textual name of a GPU dimension.

View File

@ -18,7 +18,6 @@
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/Transforms/FoldUtils.h"
@ -29,8 +28,6 @@
#define DEBUG_TYPE "linalg-drop-unit-dims"
using namespace mlir;
using namespace mlir::edsc;
using namespace mlir::edsc::intrinsics;
using namespace mlir::linalg;
/// Implements a pass that canonicalizes the uses of unit-extent dimensions for

View File

@ -19,7 +19,6 @@
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
@ -37,8 +36,6 @@
#define DEBUG_TYPE "linalg-fusion"
using namespace mlir;
using namespace mlir::edsc;
using namespace mlir::edsc::intrinsics;
using namespace mlir::linalg;
using llvm::dbgs;
@ -408,7 +405,6 @@ mlir::linalg::fuseProducerOfBuffer(OpBuilder &b, OpOperand &consumerOpOperand,
// Fuse `producer` just before `consumer`.
OpBuilder::InsertionGuard g(b);
b.setInsertionPoint(consumerOpOperand.getOwner());
ScopedContext scope(b, consumerOpOperand.getOwner()->getLoc());
LLVM_DEBUG(llvm::dbgs() << "Fuse into consumer: "
<< *consumerOpOperand.getOwner() << "\n");
@ -491,7 +487,6 @@ mlir::linalg::fuseProducerOfTensor(OpBuilder &b, OpResult producerOpResult,
// Insert fused `producer` just before `consumer`.
OpBuilder::InsertionGuard g(b);
b.setInsertionPoint(consumerOp);
ScopedContext scope(b, consumerOp->getLoc());
LLVM_DEBUG(llvm::dbgs() << "Fuse into consumer: " << *consumerOp << "\n");
LinalgOp fusedProducer =
fuse(b, producerOp,
@ -886,7 +881,6 @@ tileAndFuseLinalgOpsImpl(OpBuilder &b, ArrayRef<LinalgOp> ops,
OpBuilder::InsertionGuard guard(b);
b.setInsertionPoint(rootOp);
ScopedContext scope(b, rootOp.getLoc());
// Find all the producers.
LLVM_DEBUG(llvm::dbgs() << "findAllFusableDependences\n");

View File

@ -15,7 +15,6 @@
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/EDSC/Builders.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"

View File

@ -14,7 +14,6 @@
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/Dialect/Utils/StructuredOpsUtils.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/IR/AffineExpr.h"

View File

@ -16,7 +16,6 @@
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
@ -26,8 +25,6 @@
#include "llvm/Support/CommandLine.h"
using namespace mlir;
using namespace mlir::edsc;
using namespace mlir::edsc::intrinsics;
using namespace mlir::linalg;
using namespace mlir::scf;
@ -316,7 +313,6 @@ Optional<TiledLinalgOp> static tileLinalgOpImpl(
OpBuilder &b, LinalgOp op, const LinalgTilingOptions &options) {
OpBuilder::InsertionGuard g(b);
b.setInsertionPoint(op);
ScopedContext scope(b, op.getLoc());
if (!options.tileSizeComputationFunction)
return llvm::None;

View File

@ -16,7 +16,6 @@
#include "mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h"
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/Dialect/Utils/StructuredOpsUtils.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/IR/AffineExpr.h"
@ -32,8 +31,6 @@
#define DEBUG_TYPE "linalg-transforms"
using namespace mlir;
using namespace mlir::edsc;
using namespace mlir::edsc::intrinsics;
using namespace mlir::linalg;
#define DBGS() (llvm::dbgs() << "[" DEBUG_TYPE << "]: ")

View File

@ -15,7 +15,6 @@
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/Dialect/Utils/StructuredOpsUtils.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/IR/AffineExpr.h"
@ -31,8 +30,6 @@
#include <type_traits>
using namespace mlir;
using namespace mlir::edsc;
using namespace mlir::edsc::intrinsics;
using namespace mlir::linalg;
using llvm::dbgs;
@ -640,9 +637,7 @@ mlir::linalg::vectorizeLinalgOp(OpBuilder &b, Operation *op,
if (failed(vectorizeLinalgOpPrecondition(op)))
return failure();
edsc::ScopedContext scope(b, op->getLoc());
auto linalgOp = cast<LinalgOp>(op);
if (isaContractionOpInterface(linalgOp))
return vectorizeContraction(b, linalgOp, newResults);
@ -726,7 +721,6 @@ LogicalResult ConvOpVectorization<ConvOp, N>::matchAndRewrite(
ConvOp op, PatternRewriter &rewriter) const {
Location loc = op.getLoc();
MLIRContext *context = op.getContext();
edsc::ScopedContext scope(rewriter, loc);
ShapedType inShapeType = op.getInputShapedType(0);
ShapedType kShapeType = op.getInputShapedType(1);

View File

@ -16,7 +16,6 @@
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/Dialect/Linalg/IR/LinalgTypes.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Dialect/StandardOps/Utils/Utils.h"
#include "mlir/IR/AffineExpr.h"
@ -31,8 +30,6 @@
#define DEBUG_TYPE "linalg-utils"
using namespace mlir;
using namespace mlir::edsc;
using namespace mlir::edsc::intrinsics;
using namespace mlir::linalg;
using namespace mlir::scf;
@ -470,8 +467,6 @@ void GenerateLoopNest<scf::ParallelOp>::doit(
SmallVector<DistributionMethod, 0> distributionMethod;
if (distributionOptions) {
auto &options = distributionOptions.getValue();
OpBuilder &b = edsc::ScopedContext::getBuilderRef();
Location loc = edsc::ScopedContext::getLocation();
distributionMethod.assign(distributionOptions->distributionMethod.begin(),
distributionOptions->distributionMethod.end());
SmallVector<Range, 2> parallelLoopRanges;

View File

@ -1,7 +1,5 @@
add_mlir_dialect_library(MLIRStandard
IR/Ops.cpp
EDSC/Builders.cpp
EDSC/Intrinsics.cpp
Utils/Utils.cpp
ADDITIONAL_HEADER_DIRS
@ -14,7 +12,6 @@ add_mlir_dialect_library(MLIRStandard
MLIRCallInterfaces
MLIRCastInterfaces
MLIRControlFlowInterfaces
MLIREDSC
MLIRIR
MLIRSideEffectInterfaces
MLIRTensor

View File

@ -1,26 +0,0 @@
//===- Builders.cpp - MLIR Declarative Builder Classes --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
using namespace mlir;
using namespace mlir::edsc;
using namespace mlir::edsc::intrinsics;
mlir::edsc::VectorBoundsCapture::VectorBoundsCapture(VectorType t) {
for (auto s : t.getShape()) {
lbs.push_back(std_constant_index(0));
ubs.push_back(std_constant_index(s));
steps.push_back(1);
}
}
mlir::edsc::VectorBoundsCapture::VectorBoundsCapture(Value v)
: VectorBoundsCapture(v.getType().cast<VectorType>()) {}

View File

@ -1,25 +0,0 @@
//===- Intrinsics.cpp - MLIR Operations for Declarative Builders ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/IR/AffineExpr.h"
using namespace mlir;
using namespace mlir::edsc;
BranchOp mlir::edsc::intrinsics::std_br(Block *block, ValueRange operands) {
return OperationBuilder<BranchOp>(block, operands);
}
CondBranchOp mlir::edsc::intrinsics::std_cond_br(Value cond, Block *trueBranch,
ValueRange trueOperands,
Block *falseBranch,
ValueRange falseOperands) {
return OperationBuilder<CondBranchOp>(cond, trueBranch, trueOperands,
falseBranch, falseOperands);
}

View File

@ -1,116 +0,0 @@
//===- Builders.cpp - MLIR Declarative Builder Classes --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "mlir/EDSC/Builders.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "llvm/ADT/Optional.h"
using namespace mlir;
using namespace mlir::edsc;
mlir::edsc::ScopedContext::ScopedContext(OpBuilder &b)
: ScopedContext(b, b.getInsertionPoint()->getLoc()) {}
mlir::edsc::ScopedContext::ScopedContext(OpBuilder &b, Location location)
: builder(b), guard(builder), location(location),
enclosingScopedContext(ScopedContext::getCurrentScopedContext()) {
getCurrentScopedContext() = this;
}
/// Sets the insertion point of the builder to 'newInsertPt' for the duration
/// of the scope. The existing insertion point of the builder is restored on
/// destruction.
mlir::edsc::ScopedContext::ScopedContext(OpBuilder &b,
OpBuilder::InsertPoint newInsertPt,
Location location)
: builder(b), guard(builder), location(location),
enclosingScopedContext(ScopedContext::getCurrentScopedContext()) {
getCurrentScopedContext() = this;
builder.restoreInsertionPoint(newInsertPt);
}
mlir::edsc::ScopedContext::~ScopedContext() {
getCurrentScopedContext() = enclosingScopedContext;
}
ScopedContext *&mlir::edsc::ScopedContext::getCurrentScopedContext() {
thread_local ScopedContext *context = nullptr;
return context;
}
OpBuilder &mlir::edsc::ScopedContext::getBuilderRef() {
assert(ScopedContext::getCurrentScopedContext() &&
"Unexpected Null ScopedContext");
return ScopedContext::getCurrentScopedContext()->builder;
}
Location mlir::edsc::ScopedContext::getLocation() {
assert(ScopedContext::getCurrentScopedContext() &&
"Unexpected Null ScopedContext");
return ScopedContext::getCurrentScopedContext()->location;
}
MLIRContext *mlir::edsc::ScopedContext::getContext() {
return getBuilderRef().getContext();
}
Block *mlir::edsc::createBlock(TypeRange argTypes) {
assert(ScopedContext::getContext() != nullptr && "ScopedContext not set up");
OpBuilder &builder = ScopedContext::getBuilderRef();
Block *block = builder.getInsertionBlock();
assert(block != nullptr &&
"insertion point not set up in the builder within ScopedContext");
return createBlockInRegion(*block->getParent(), argTypes);
}
Block *mlir::edsc::createBlockInRegion(Region &region, TypeRange argTypes) {
assert(ScopedContext::getContext() != nullptr && "ScopedContext not set up");
OpBuilder &builder = ScopedContext::getBuilderRef();
OpBuilder::InsertionGuard guard(builder);
return builder.createBlock(&region, {}, argTypes);
}
void mlir::edsc::appendToBlock(Block *block,
function_ref<void(ValueRange)> builderFn) {
assert(ScopedContext::getContext() != nullptr && "ScopedContext not set up");
OpBuilder &builder = ScopedContext::getBuilderRef();
OpBuilder::InsertionGuard guard(builder);
if (block->empty() || !block->back().mightHaveTrait<OpTrait::IsTerminator>())
builder.setInsertionPointToEnd(block);
else
builder.setInsertionPoint(&block->back());
builderFn(block->getArguments());
}
Block *mlir::edsc::buildInNewBlock(TypeRange argTypes,
function_ref<void(ValueRange)> builderFn) {
assert(ScopedContext::getContext() != nullptr && "ScopedContext not set up");
OpBuilder &builder = ScopedContext::getBuilderRef();
Block *block = builder.getInsertionBlock();
assert(block != nullptr &&
"insertion point not set up in the builder within ScopedContext");
return buildInNewBlock(*block->getParent(), argTypes, builderFn);
}
Block *mlir::edsc::buildInNewBlock(Region &region, TypeRange argTypes,
function_ref<void(ValueRange)> builderFn) {
assert(ScopedContext::getContext() != nullptr && "ScopedContext not set up");
OpBuilder &builder = ScopedContext::getBuilderRef();
Block *block = createBlockInRegion(region, argTypes);
OpBuilder::InsertionGuard guard(builder);
builder.setInsertionPointToStart(block);
builderFn(block->getArguments());
return block;
}

View File

@ -1,16 +0,0 @@
set(LLVM_OPTIONAL_SOURCES
Builders.cpp
CoreAPIs.cpp
)
add_mlir_library(MLIREDSC
Builders.cpp
ADDITIONAL_HEADER_DIRS
${MLIR_MAIN_INCLUDE_DIR}/mlir/EDSC
LINK_LIBS PUBLIC
MLIRIR
MLIRSupport
)

View File

@ -11,7 +11,6 @@ add_llvm_tool(mlir-cpu-runner
llvm_update_compile_flags(mlir-cpu-runner)
target_link_libraries(mlir-cpu-runner PRIVATE
MLIRAnalysis
MLIREDSC
MLIRExecutionEngine
MLIRIR
MLIRJitRunner

View File

@ -32,7 +32,6 @@ set(LIBS
MLIRLoopAnalysis
MLIRAnalysis
MLIRDialect
MLIREDSC
MLIRLspServerLib
MLIRParser
MLIRPass

View File

@ -40,7 +40,6 @@ set(LIBS
MLIRLoopAnalysis
MLIRAnalysis
MLIRDialect
MLIREDSC
MLIROptLib
MLIRParser
MLIRPass

View File

@ -31,7 +31,6 @@ set(LIBS
${test_libs}
MLIRAnalysis
MLIRDialect
MLIREDSC
MLIRIR
MLIRLoopAnalysis
MLIROptLib

View File

@ -16,7 +16,6 @@ if (MLIR_SPIRV_CPU_RUNNER_ENABLED)
target_link_libraries(mlir-spirv-cpu-runner PRIVATE
${conversion_libs}
MLIRAnalysis
MLIREDSC
MLIRExecutionEngine
MLIRGPU
MLIRIR

View File

@ -56,7 +56,6 @@ if (MLIR_VULKAN_RUNNER_ENABLED)
set(LIBS
${conversion_libs}
MLIRAnalysis
MLIREDSC
MLIRExecutionEngine
MLIRGPU
MLIRIR