about summary refs log tree commit diff
diff options
context:
space:
mode:
author    Aart Bik <ajcbik@google.com>  2022-08-04 11:49:00 -0700
committer Aart Bik <ajcbik@google.com>  2022-08-04 13:35:13 -0700
commit7f5b16733651c658b8cb4b05f2a3075ce84c3057 (patch)
treeee002eaf97a263f381fcb91b9df19ac7f6413048
parent6e45162adfec14ef4f53ec99312cb26d23f929bc (diff)
[mlir][sparse] fix bug in complex zero detection
We were checking the real part twice instead of checking both the real and imaginary parts. The new test only passes after the bug fix.

Reviewed By: Peiming

Differential Revision: https://reviews.llvm.org/D131190
-rw-r--r--  mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp       2
-rw-r--r--  mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir   38
2 files changed, 38 insertions, 2 deletions
diff --git a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
index f844987e5a70..6ef79f868f8a 100644
--- a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
@@ -807,7 +807,7 @@ bool Merger::maybeZero(unsigned e) const {
if (auto c = tensorExps[e].val.getDefiningOp<complex::ConstantOp>()) {
ArrayAttr arrayAttr = c.getValue();
return arrayAttr[0].cast<FloatAttr>().getValue().isZero() &&
- arrayAttr[0].cast<FloatAttr>().getValue().isZero();
+ arrayAttr[1].cast<FloatAttr>().getValue().isZero();
}
if (auto c = tensorExps[e].val.getDefiningOp<arith::ConstantIntOp>())
return c.value() == 0;
diff --git a/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir b/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir
index edcaeb509310..8e78f5dafea4 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir
@@ -1,4 +1,3 @@
-// NOTE: Assertions have been autogenerated by utils/generate-test-checks.py
// RUN: mlir-opt %s -sparsification | FileCheck %s
#SV = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>
@@ -400,3 +399,40 @@ func.func @zero_preserving_math(%arga: tensor<32xf64, #SV>) -> tensor<32xf64, #S
} -> tensor<32xf64, #SV>
return %0 : tensor<32xf64, #SV>
}
+
+// CHECK-LABEL: func.func @complex_divbyc(
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xcomplex<f64>, #sparse_tensor.encoding<{{.*}}>> {
+// CHECK-DAG: %[[VAL_1:.*]] = arith.constant 0 : index
+// CHECK-DAG: %[[VAL_2:.*]] = arith.constant 1 : index
+// CHECK-DAG: %[[VAL_3:.*]] = complex.constant [0.000000e+00, 1.000000e+00] : complex<f64>
+// CHECK: %[[VAL_4:.*]] = bufferization.alloc_tensor() : tensor<32xcomplex<f64>, #sparse_tensor.encoding<{{.*}}>>
+// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_1]] : tensor<32xcomplex<f64>, #sparse_tensor.encoding<{{.*}}>> to memref<?xindex>
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_1]] : tensor<32xcomplex<f64>, #sparse_tensor.encoding<{{.*}}>> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xcomplex<f64>, #sparse_tensor.encoding<{{.*}}>> to memref<?xcomplex<f64>>
+// CHECK: %[[VAL_8:.*]] = memref.alloca(%[[VAL_2]]) : memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = memref.alloca() : memref<complex<f64>>
+// CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_1]]] : memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_2]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_10]] to %[[VAL_11]] step %[[VAL_2]] {
+// CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_12]]] : memref<?xindex>
+// CHECK: memref.store %[[VAL_13]], %[[VAL_8]]{{\[}}%[[VAL_1]]] : memref<?xindex>
+// CHECK: %[[VAL_14:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_12]]] : memref<?xcomplex<f64>>
+// CHECK: %[[VAL_15:.*]] = complex.div %[[VAL_14]], %[[VAL_3]] : complex<f64>
+// CHECK: memref.store %[[VAL_15]], %[[VAL_9]][] : memref<complex<f64>>
+// CHECK: sparse_tensor.lex_insert %[[VAL_4]], %[[VAL_8]], %[[VAL_9]] : tensor<32xcomplex<f64>, #sparse_tensor.encoding<{{.*}}>>, memref<?xindex>, memref<complex<f64>>
+// CHECK: }
+// CHECK: %[[VAL_16:.*]] = sparse_tensor.load %[[VAL_4]] hasInserts : tensor<32xcomplex<f64>, #sparse_tensor.encoding<{{.*}}>>
+// CHECK: return %[[VAL_16]] : tensor<32xcomplex<f64>, #sparse_tensor.encoding<{{.*}}>>
+// CHECK: }
+func.func @complex_divbyc(%arg0: tensor<32xcomplex<f64>, #SV>) -> tensor<32xcomplex<f64>, #SV> {
+ %c = complex.constant [0.0, 1.0] : complex<f64>
+ %init = bufferization.alloc_tensor() : tensor<32xcomplex<f64>, #SV>
+ %0 = linalg.generic #traitc
+ ins(%arg0: tensor<32xcomplex<f64>, #SV>)
+ outs(%init: tensor<32xcomplex<f64>, #SV>) {
+ ^bb(%a: complex<f64>, %x: complex<f64>):
+ %0 = complex.div %a, %c : complex<f64>
+ linalg.yield %0 : complex<f64>
+ } -> tensor<32xcomplex<f64>, #SV>
+ return %0 : tensor<32xcomplex<f64>, #SV>
+}