Revert "[mlir][MemRef] Fix DimOp folding of OffsetSizeAndStrideInterface."
This reverts commit 6c0fd4db79.
This simple implementation is unfortunately not extensible and needs to be reverted.
The extensible path is to build on https://reviews.llvm.org/D104321 instead.
parent d58c7a9238
commit 31f80393bc
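For context on what is being undone: as the hunks below show, the reverted change made DimOp folding on an OffsetSizeAndStrideOpInterface result first map the queried result dimension to its position among the type's dynamic dimensions (via the ShapedType::getRelativeIndexOfDynamicDim helper removed below) and then index the op's sizes() operands with that position; the revert restores the earlier getDynamicSize(unsignedIndex) lookup. A minimal standalone sketch of that index mapping, in plain C++ with illustrative names rather than the MLIR API:

// Illustrative sketch (plain C++, not the MLIR API): how the reverted fold
// mapped a result dimension to the matching entry of the op's dynamic sizes.
#include <cassert>
#include <cstdio>
#include <vector>

constexpr long kDynamic = -1; // stand-in for ShapedType's dynamic-size marker

// Same counting logic as the removed ShapedType::getRelativeIndexOfDynamicDim.
static long relativeIndexOfDynamicDim(const std::vector<long> &shape,
                                      unsigned dim) {
  assert(shape[dim] == kDynamic && "expected a dynamic dim");
  long nthDynamicIndex = -1;
  for (unsigned idx = 0; idx <= dim; ++idx)
    if (shape[idx] == kDynamic)
      ++nthDynamicIndex;
  return nthDynamicIndex;
}

int main() {
  // A result type like memref<?x4x?xf32>: dims 0 and 2 are dynamic.
  std::vector<long> shape = {kDynamic, 4, kDynamic};
  // dim #2 is the second dynamic dim, so the reverted code returned
  // sizes()[1], the second dynamic size operand.
  std::printf("%ld\n", relativeIndexOfDynamicDim(shape, 2)); // prints 1
}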
@@ -110,10 +110,6 @@ public:
   /// size. Otherwise, abort.
   int64_t getNumDynamicDims() const;
 
-  /// If `dim` is a dynamic dim, return its relative index among the dynamic
-  /// dims. Otherwise, abort. The result is guaranteed to be nonnegative.
-  int64_t getRelativeIndexOfDynamicDim(unsigned dim) const;
-
   /// If this is ranked type, return the size of the specified dimension.
   /// Otherwise, abort.
   int64_t getDimSize(unsigned idx) const;
@@ -175,9 +175,9 @@ struct SimplifyDeadAlloc : public OpRewritePattern<T> {
   LogicalResult matchAndRewrite(T alloc,
                                 PatternRewriter &rewriter) const override {
     if (llvm::any_of(alloc->getUsers(), [&](Operation *op) {
-          if (auto storeOp = dyn_cast<StoreOp>(op))
-            return storeOp.value() == alloc;
-          return !isa<DeallocOp>(op);
+          if (auto storeOp = dyn_cast<StoreOp>(op))
+            return storeOp.value() == alloc;
+          return !isa<DeallocOp>(op);
         }))
       return failure();
 
@@ -677,9 +677,9 @@ OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
 
   if (auto sizeInterface =
           dyn_cast_or_null<OffsetSizeAndStrideOpInterface>(definingOp)) {
-    int64_t nthDynamicIndex =
-        memrefType.getRelativeIndexOfDynamicDim(unsignedIndex);
-    return sizeInterface.sizes()[nthDynamicIndex];
+    assert(sizeInterface.isDynamicSize(unsignedIndex) &&
+           "Expected dynamic subview size");
+    return sizeInterface.getDynamicSize(unsignedIndex);
   }
 
   // dim(memrefcast) -> dim
@@ -271,21 +271,13 @@ OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
     return Value{*dynExtents};
   }
 
-  // dim(insert_slice.result()) -> dim(insert_slice.dest())
-  if (auto insertSliceOp =
-          dyn_cast_or_null<tensor::InsertSliceOp>(definingOp)) {
-    this->sourceMutable().assign(insertSliceOp.dest());
-    return getResult();
-  }
-
   // The size at the given index is now known to be a dynamic size.
   unsigned unsignedIndex = index.getValue().getZExtValue();
 
-  if (auto sizeInterface =
-          dyn_cast_or_null<OffsetSizeAndStrideOpInterface>(definingOp)) {
-    int64_t nthDynamicIndex =
-        tensorType.getRelativeIndexOfDynamicDim(unsignedIndex);
-    return sizeInterface.sizes()[nthDynamicIndex];
+  if (auto sliceOp = dyn_cast_or_null<tensor::ExtractSliceOp>(definingOp)) {
+    assert(sliceOp.isDynamicSize(unsignedIndex) &&
+           "Expected dynamic slice size");
+    return sliceOp.getDynamicSize(unsignedIndex);
   }
 
   // dim(cast) -> dim
@@ -427,15 +427,6 @@ int64_t ShapedType::getNumDynamicDims() const {
   return llvm::count_if(getShape(), isDynamic);
 }
 
-int64_t ShapedType::getRelativeIndexOfDynamicDim(unsigned dim) const {
-  assert(isDynamicDim(dim) && "expected a dynamic dim");
-  int nthDynamicIndex = -1;
-  for (unsigned idx = 0; idx <= dim; ++idx)
-    if (isDynamicDim(idx))
-      ++nthDynamicIndex;
-  return nthDynamicIndex;
-}
-
 bool ShapedType::hasStaticShape() const {
   return hasRank() && llvm::none_of(getShape(), isDynamic);
 }
@@ -387,32 +387,11 @@ func @alloc_const_fold_with_symbols2() -> memref<?xi32, #map0> {
 }
 
 // -----
 
 // CHECK-LABEL: func @allocator
 // CHECK: %[[alloc:.+]] = memref.alloc
 // CHECK: memref.store %[[alloc:.+]], %arg0
 func @allocator(%arg0 : memref<memref<?xi32>>, %arg1 : index) {
   %0 = memref.alloc(%arg1) : memref<?xi32>
   memref.store %0, %arg0[] : memref<memref<?xi32>>
-  return
-}
-
-// -----
-
-#map0 = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
-
-// CHECK-LABEL: func @rank_reducing_subview_dim
-// CHECK-SAME: %[[IDX_0:[0-9a-zA-Z]*]]: index
-// CHECK-SAME: %[[IDX_1:[0-9a-zA-Z]*]]: index
-func @rank_reducing_subview_dim(%arg0 : memref<?x?x?xf32>, %arg1 : index,
-                                %arg2 : index) -> index
-{
-  %c0 = constant 0 : index
-  %c1 = constant 1 : index
-  %c4 = constant 4 : index
-  %0 = memref.subview %arg0[%c0, %arg1, %c1] [%c4, 1, %arg2] [%c1, %c1, %c1] : memref<?x?x?xf32> to memref<?x?xf32, #map0>
-  %1 = memref.dim %0, %c1 : memref<?x?xf32, #map0>
-
-  // CHECK-NEXT: return %[[IDX_1]] : index
-  return %1 : index
+  return
 }
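The removed memref test above exercises the rank-reducing case: the subview's static unit size is dropped from the result type, so result dimension 1 of the memref<?x?xf32> result comes from the third size in the list, %arg2, which is the second dynamic size operand; that is what the CHECK-NEXT: return %[[IDX_1]] line checks. A rough standalone illustration of that correspondence, again in plain C++ with illustrative names rather than the MLIR API:

#include <cstdio>
#include <string>
#include <vector>

int main() {
  // Sizes as written on the rank-reducing subview in the removed test:
  //   [%c4, 1, %arg2]  -- the SSA values are dynamic sizes, "1" is static.
  struct Size {
    bool isSsaValue;
    std::string text;
  };
  std::vector<Size> sizes = {{true, "%c4"}, {false, "1"}, {true, "%arg2"}};

  // The static unit size is dropped from the rank-reduced result type
  // memref<?x?xf32> in this example, so each surviving result dimension
  // folds to the next SSA size value in order.
  int resultDim = 0;
  for (const Size &size : sizes) {
    if (!size.isSsaValue)
      continue; // dropped by rank reduction in this example
    std::printf("memref.dim(result, %d) folds to %s\n", resultDim++,
                size.text.c_str());
  }
  // Prints:
  //   memref.dim(result, 0) folds to %c4
  //   memref.dim(result, 1) folds to %arg2
}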
@@ -517,42 +517,3 @@ func @fold_dim_of_tensor.cast(%arg0 : tensor<4x?xf32>) -> (index, index) {
   %2 = tensor.dim %0, %c1 : tensor<?x?xf32>
   return %1, %2: index, index
 }
-
-// -----
-
-// CHECK-LABEL: func @rank_reducing_extract_slice_dim
-// CHECK-SAME: %[[IDX_0:[0-9a-zA-Z]*]]: index
-// CHECK-SAME: %[[IDX_1:[0-9a-zA-Z]*]]: index
-func @rank_reducing_extract_slice_dim(%arg0 : tensor<?x?x?xf32>, %arg1 : index,
-                                      %arg2 : index) -> index
-{
-  %c0 = constant 0 : index
-  %c1 = constant 1 : index
-  %c4 = constant 4 : index
-  %0 = tensor.extract_slice %arg0[%c0, %arg1, %c1] [%c4, 1, %arg2] [%c1, %c1, %c1] : tensor<?x?x?xf32> to tensor<?x?xf32>
-  %1 = tensor.dim %0, %c1 : tensor<?x?xf32>
-
-  // CHECK-NEXT: return %[[IDX_1]] : index
-  return %1 : index
-}
-
-// -----
-
-// CHECK-LABEL: func @rank_reducing_insert_slice_dim
-// CHECK-SAME: %[[OUT:[0-9a-zA-Z]*]]: tensor<?x?x?xf32>
-func @rank_reducing_insert_slice_dim(%out : tensor<?x?x?xf32>, %in : tensor<?x?xf32>, %arg1 : index,
-                                     %arg2 : index) -> index
-{
-  // CHECK-NEXT: %[[C1:.*]] = constant 1 : index
-
-  %c0 = constant 0 : index
-  %c1 = constant 1 : index
-  %c4 = constant 4 : index
-  %0 = tensor.insert_slice %in into %out[%c0, %arg1, %c1] [1, 1, 1] [%c1, %c1, %c1] : tensor<?x?xf32> into tensor<?x?x?xf32>
-
-  // CHECK-NEXT: %[[D1:.*]] = tensor.dim %[[OUT]], %[[C1]] : tensor<?x?x?xf32>
-  %1 = tensor.dim %0, %c1 : tensor<?x?x?xf32>
-
-  // CHECK-NEXT: return %[[D1]] : index
-  return %1 : index
-}