Example #1
import numpy as np
import os
import sys

_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(_SCRIPT_PATH)
from tools import mlir_pytaco_api as pt

compressed = pt.compressed
dense = pt.dense

# Ensure that we can run an unmodified PyTACO program with a simple tensor
# algebra expression using tensor index notation, and produce the expected
# result.
i, j = pt.get_index_vars(2)
A = pt.tensor([2, 3])
B = pt.tensor([2, 3])
C = pt.tensor([2, 3])
D = pt.tensor([2, 3], compressed)
A.insert([0, 1], 10)
A.insert([1, 2], 40)
B.insert([0, 0], 20)
B.insert([1, 2], 30)
C.insert([0, 1], 5)
C.insert([1, 2], 7)
D[i, j] = A[i, j] + B[i, j] - C[i, j]

indices, values = D.get_coordinates_and_values()
passed = np.array_equal(indices, [[0, 0], [0, 1], [1, 2]])
passed += np.allclose(values, [20.0, 5.0, 63.0])
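# For reference, a dense NumPy sketch of the same expression (illustrative
# only, not part of the original PyTACO program); the underscore names are
# hypothetical. It reproduces the expected nonzeros 20, 5, and 63.
_a = np.zeros((2, 3)); _a[0, 1], _a[1, 2] = 10, 40
_b = np.zeros((2, 3)); _b[0, 0], _b[1, 2] = 20, 30
_c = np.zeros((2, 3)); _c[0, 1], _c[1, 2] = 5, 7
assert np.allclose(_a + _b - _c, [[20, 5, 0], [0, 0, 63]])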
Example #2
# RUN: SUPPORTLIB=%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext %PYTHON %s | FileCheck %s
import numpy as np
import os
import sys

_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(_SCRIPT_PATH)
from tools import mlir_pytaco_api as pt

compressed = pt.compressed

passed = 0
all_types = [pt.complex64, pt.complex128]
for t in all_types:
    i, j = pt.get_index_vars(2)
    A = pt.tensor([2, 3], dtype=t)
    B = pt.tensor([2, 3], dtype=t)
    C = pt.tensor([2, 3], compressed, dtype=t)
    A.insert([0, 1], 10 + 20j)
    A.insert([1, 2], 40 + 0.5j)
    B.insert([0, 0], 20)
    B.insert([1, 2], 30 + 15j)
    C[i, j] = A[i, j] + B[i, j]

    indices, values = C.get_coordinates_and_values()
    passed += isinstance(values[0], t.value)
    passed += np.array_equal(indices, [[0, 0], [0, 1], [1, 2]])
    passed += np.allclose(values, [20, 10 + 20j, 70 + 15.5j])

# CHECK: Number of passed: 6
print("Number of passed:", passed)
Example #3
# Load a sparse matrix stored in the Matrix Market format and store it
# as a CSR matrix.  The matrix in this test is a reduced version of the data
# downloaded from here:
# https://www.cise.ufl.edu/research/sparse/MM/Boeing/pwtk.tar.gz
# To run the program with the full matrix, download it and replace this path
# with the actual path to the downloaded file.
A = pt.read(os.path.join(_SCRIPT_PATH, "data/pwtk.mtx"), csr)

# These two lines have been modified from the original program to use static
# data to support result comparison.
x = pt.from_array(np.full((A.shape[1], ), 1, dtype=np.float64))
z = pt.from_array(np.full((A.shape[0], ), 2, dtype=np.float64))

# Declare the result to be a dense vector
y = pt.tensor([A.shape[0]], dv)

# Declare index vars
i, j = pt.get_index_vars(2)

# Define the SpMV computation
y[i] = A[i, j] * x[j] + z[i]
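# Illustrative sketch (not part of the original program): the kernel above is
# the classic SpMV with a vector add, y = A @ x + z. A tiny, self-contained
# NumPy analogue with hypothetical data shows the same arithmetic:
_a_np = np.array([[1.0, 0.0, 2.0], [0.0, 3.0, 0.0]])
_y_np = _a_np @ np.full((3,), 1.0) + np.full((2,), 2.0)
assert np.allclose(_y_np, [5.0, 5.0])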

##########################################################################

# Perform the SpMV computation and write the result to file
with tempfile.TemporaryDirectory() as test_dir:
    golden_file = os.path.join(_SCRIPT_PATH, "data/gold_y.tns")
    out_file = os.path.join(test_dir, "y.tns")
    pt.write(out_file, y)
    #
Example #4
# RUN: SUPPORTLIB=%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext %PYTHON %s | FileCheck %s

import os
import sys

_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(_SCRIPT_PATH)
from tools import mlir_pytaco_api as pt

compressed = pt.compressed
dense = pt.dense

# Ensure that we can run an unmodified PyTACO program with a simple tensor
# algebra expression using tensor index notation, and produce the expected
# result.
i, j = pt.get_index_vars(2)
A = pt.tensor([2, 3])
B = pt.tensor([2, 3])
C = pt.tensor([2, 3])
D = pt.tensor([2, 3], dense)
A.insert([0, 1], 10)
A.insert([1, 2], 40)
B.insert([0, 0], 20)
B.insert([1, 2], 30)
C.insert([0, 1], 5)
C.insert([1, 2], 7)
D[i, j] = A[i, j] + B[i, j] - C[i, j]

# CHECK: [20. 5. 0. 0. 0. 63.]
print(D.to_array().reshape(6))
Example #5
import os
import sys
import tempfile

_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(_SCRIPT_PATH)

from tools import mlir_pytaco_api as pt
from tools import testing_utils as utils

# Define the CSR format.
csr = pt.format([pt.dense, pt.compressed], [0, 1])
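# Other common formats follow the same pattern: the first argument gives the
# per-dimension storage (dense or compressed) and the second the dimension
# ordering. The definitions below are illustrative only and are not used in
# this test.
csc = pt.format([pt.dense, pt.compressed], [1, 0])  # compressed sparse column
dcsr = pt.format([pt.compressed, pt.compressed])    # doubly compressed rows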

# Read matrices A and B from file, infer size of output matrix C.
A = pt.read(os.path.join(_SCRIPT_PATH, "data/A.mtx"), csr)
B = pt.read(os.path.join(_SCRIPT_PATH, "data/B.mtx"), csr)
C = pt.tensor([A.shape[0], B.shape[1]], csr)

# Define the kernel.
i, j, k = pt.get_index_vars(3)
C[i, j] = A[i, k] * B[k, j]
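# Illustrative note (not part of the original kernel): the index k appears only
# on the right-hand side, so it is implicitly summed, i.e.
#   C[i, j] = sum over k of A[i, k] * B[k, j],
# which is ordinary matrix multiplication. A tiny plain-Python check:
_a_row, _b_col = [1.0, 2.0], [3.0, 4.0]
assert sum(_a_row[k] * _b_col[k] for k in range(2)) == 11.0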

# Force evaluation of the kernel by writing out C.
with tempfile.TemporaryDirectory() as test_dir:
    golden_file = os.path.join(_SCRIPT_PATH, "data/gold_C.tns")
    out_file = os.path.join(test_dir, "C.tns")
    pt.write(out_file, C)
    #
    # CHECK: Compare result True
    #
    print(f"Compare result {utils.compare_sparse_tns(golden_file, out_file)}")
Example #6
# RUN: SUPPORTLIB=%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext %PYTHON %s | FileCheck %s

import numpy as np
import os
import sys

_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(_SCRIPT_PATH)
from tools import mlir_pytaco_api as pt

i, j = pt.get_index_vars(2)
A = pt.tensor([2, 3])
B = pt.tensor([2, 3])
A.insert([0, 1], 10.3)
A.insert([1, 1], 40.7)
A.insert([0, 2], -11.3)
A.insert([1, 2], -41.7)

B[i, j] = abs(A[i, j])
indices, values = B.get_coordinates_and_values()
passed = np.array_equal(indices, [[0, 1], [0, 2], [1, 1], [1, 2]])
passed += np.allclose(values, [10.3, 11.3, 40.7, 41.7])

B[i, j] = pt.ceil(A[i, j])
indices, values = B.get_coordinates_and_values()
passed += np.array_equal(indices, [[0, 1], [0, 2], [1, 1], [1, 2]])
passed += np.allclose(values, [11, -11, 41, -41])

B[i, j] = pt.floor(A[i, j])
indices, values = B.get_coordinates_and_values()
passed += np.array_equal(indices, [[0, 1], [0, 2], [1, 1], [1, 2]])
Example #7
import numpy as np
import os
import sys

_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(_SCRIPT_PATH)
from tools import mlir_pytaco_api as pt

compressed = pt.compressed
dense = pt.dense

# Ensure that we can run an unmodified PyTACO program with a simple tensor
# algebra expression using tensor index notation, and produce the expected
# result.
i, j = pt.get_index_vars(2)
A = pt.tensor([2, 3])
B = pt.tensor([2, 3])
C = pt.tensor([2, 3])
D = pt.tensor([2, 3], compressed)
A.insert([0, 1], 10)
A.insert([1, 2], 40)
B.insert([0, 0], 20)
B.insert([1, 2], 30)
C.insert([0, 1], 5)
C.insert([1, 2], 7)
D[i, j] = A[i, j] + B[i, j] - C[i, j]

indices, values = D.get_coordinates_and_values()
passed = np.array_equal(indices, [[0, 0], [0, 1], [1, 2]])
passed += np.allclose(values, [20.0, 5.0, 63.0])
Example #8
import numpy as np
import os
import sys
import tempfile

_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(_SCRIPT_PATH)

from tools import mlir_pytaco_api as pt
from tools import testing_utils as utils

i, j, k = pt.get_index_vars(3)

# Set up dense matrices.
A = pt.from_array(np.full((8, 8), 2.0, dtype=np.float32))
B = pt.from_array(np.full((8, 8), 3.0, dtype=np.float32))

# Set up sparse matrices.
S = pt.tensor([8, 8], pt.format([pt.compressed, pt.compressed]))
X = pt.tensor([8, 8], pt.format([pt.compressed, pt.compressed]))
Y = pt.tensor([8, 8], pt.compressed)  # alternative syntax works too

S.insert([0, 7], 42.0)

# Define the SDDMM kernel. Since this performs the reduction as
#   sum(k, S[i, j] * A[i, k] * B[k, j])
# we only compute the entries of the intermediate dense matrix product that
# are actually needed for the result, with proper asymptotic complexity.
X[i, j] = S[i, j] * A[i, k] * B[k, j]
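# Illustrative only (not part of the original test): in dense NumPy terms this
# kernel is an elementwise mask of a matrix product, X = S * (A @ B); the
# sparse version only computes the (A @ B) entries where S is nonzero.
_s_np = np.zeros((8, 8), dtype=np.float32)
_s_np[0, 7] = 42.0
_x_ref = _s_np * (np.full((8, 8), 2.0, np.float32) @ np.full((8, 8), 3.0, np.float32))
assert _x_ref[0, 7] == 42.0 * 8 * 2.0 * 3.0  # only the masked entry is nonzero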

# Alternative way to define SDDMM kernel. Since this performs the reduction as
#   sum(k, A[i, k] * B[k, j]) * S[i, j]
# the MLIR lowering results in two separate tensor index expressions that are
# fused prior to running the sparse compiler in order to guarantee proper
# asymptotic complexity.
Example #9
import filecmp
import numpy as np
import os
import sys
import tempfile

_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(_SCRIPT_PATH)

from tools import mlir_pytaco_api as pt
from tools import testing_utils as utils

i, j, k = pt.get_index_vars(3)

# Set up scalar and sparse tensors.
alpha = pt.tensor(42.0)
S = pt.tensor([8, 8, 8],
              pt.format([pt.compressed, pt.compressed, pt.compressed]))
X = pt.tensor([8, 8, 8],
              pt.format([pt.compressed, pt.compressed, pt.compressed]))
S.insert([0, 0, 0], 2.0)
S.insert([1, 1, 1], 3.0)
S.insert([4, 4, 4], 4.0)
S.insert([7, 7, 7], 5.0)

# TODO: make this work:
# X[i, j, k] = alpha[0] * S[i, j, k]
X[i, j, k] = S[i, j, k]

expected = """; extended FROSTT format
3 4
Example #10
rm = pt.format([dense, dense])

# Load a sparse three-dimensional tensor from file (stored in the FROSTT
# format) and store it as a compressed sparse fiber tensor. We use a small
# tensor for the purpose of testing. To run the program using the data from
# the real application, please download the data from:
# http://frostt.io/tensors/nell-2/
B = pt.read(os.path.join(_SCRIPT_PATH, "data/nell-2.tns"), csf)

# These two lines have been modified from the original program to use static
# data to support result comparison.
C = pt.from_array(np.full((B.shape[1], 25), 1, dtype=np.float64))
D = pt.from_array(np.full((B.shape[2], 25), 2, dtype=np.float64))

# Declare the result to be a dense matrix.
A = pt.tensor([B.shape[0], 25], rm)

# Declare index vars.
i, j, k, l = pt.get_index_vars(4)

# Define the MTTKRP computation.
A[i, j] = B[i, k, l] * D[l, j] * C[k, j]
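# Illustrative only (not part of the original program): the indices k and l are
# reduced, so this is the MTTKRP
#   A[i, j] = sum over k, l of B[i, k, l] * D[l, j] * C[k, j],
# or, in dense NumPy terms, np.einsum("ikl,lj,kj->ij", B, D, C). A tiny check
# with hypothetical dense data:
_b_np, _c_np, _d_np = np.ones((2, 3, 4)), np.ones((3, 5)), np.full((4, 5), 2.0)
assert np.allclose(np.einsum("ikl,lj,kj->ij", _b_np, _d_np, _c_np), 24.0)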

##########################################################################

# Perform the MTTKRP computation and write the result to file.
with tempfile.TemporaryDirectory() as test_dir:
    golden_file = os.path.join(_SCRIPT_PATH, "data/gold_A.tns")
    out_file = os.path.join(test_dir, "A.tns")
    pt.write(out_file, A)
    #
Example #11
import filecmp
import numpy as np
import os
import sys
import tempfile

_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(_SCRIPT_PATH)

from tools import mlir_pytaco_api as pt
from tools import testing_utils as utils

i, j, k, l, m = pt.get_index_vars(5)

# Set up scalar.
alpha = pt.tensor(42.0)

# Set up some sparse tensors with different dim annotations and ordering.
S = pt.tensor([8, 8, 8],
              pt.format([pt.compressed, pt.dense, pt.compressed], [1, 0, 2]))
X = pt.tensor([8, 8, 8],
              pt.format([pt.compressed, pt.compressed, pt.compressed],
                        [1, 0, 2]))
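# Note (illustrative, not from the original test): the second argument to
# pt.format gives the dimension ordering used for storage, e.g. [1, 0, 2]
# makes dimension 1 the outermost stored dimension.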
S.insert([0, 0, 0], 2.0)
S.insert([1, 1, 1], 3.0)
S.insert([4, 4, 4], 4.0)
S.insert([7, 7, 7], 5.0)

X[i, j, k] = alpha[0] * S[i, j, k]

# Set up tensors with a dense last dimension. This results in a full
Example #12
# RUN: SUPPORTLIB=%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext %PYTHON %s | FileCheck %s

import numpy as np
import os
import sys

_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(_SCRIPT_PATH)
from tools import mlir_pytaco_api as pt

compressed = pt.compressed

i, j = pt.get_index_vars(2)
A = pt.tensor([2, 3])
S = pt.tensor(3)  # S is a scalar tensor.
B = pt.tensor([2, 3], compressed)
A.insert([0, 1], 10)
A.insert([1, 2], 40)

# Use [0] to index the scalar tensor.
B[i, j] = A[i, j] * S[0]

indices, values = B.get_coordinates_and_values()
passed = np.array_equal(indices, [[0, 1], [1, 2]])
passed += np.array_equal(values, [30.0, 120.0])

# Sum all the values in A.
S[0] = A[i, j]
passed += (S.get_scalar_value() == 50.0)
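# Illustrative only (not part of the original test): indices that appear only
# on the right-hand side are reduced, so S[0] = A[i, j] sums every stored entry
# of A. A dense NumPy analogue with hypothetical data:
_a_np = np.zeros((2, 3))
_a_np[0, 1], _a_np[1, 2] = 10, 40
assert _a_np.sum() == 50.0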

indices, values = S.get_coordinates_and_values()
Example #13
import os
import sys
import tempfile

_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(_SCRIPT_PATH)

from tools import mlir_pytaco_api as pt

# Define the CSR format.
csr = pt.format([pt.dense, pt.compressed], [0, 1])

# Read matrices A and B from file, infer size of output matrix C.
A = pt.read(os.path.join(_SCRIPT_PATH, "data/A.mtx"), csr)
B = pt.read(os.path.join(_SCRIPT_PATH, "data/B.mtx"), csr)
C = pt.tensor((A.shape[0], B.shape[1]), csr)

# Define the kernel.
i, j, k = pt.get_index_vars(3)
C[i, j] = A[i, k] * B[k, j]

# Force evaluation of the kernel by writing out C.
#
# TODO: use sparse_tensor.out for output, so that C.tns becomes
#       a file in extended FROSTT format
#
with tempfile.TemporaryDirectory() as test_dir:
  golden_file = os.path.join(_SCRIPT_PATH, "data/gold_C.tns")
  out_file = os.path.join(test_dir, "C.tns")
  pt.write(out_file, C)
  #
Example #14
# RUN: SUPPORTLIB=%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext %PYTHON %s | FileCheck %s

import numpy as np
import os
import sys

_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(_SCRIPT_PATH)
from tools import mlir_pytaco_api as pt

i, j = pt.get_index_vars(2)
# Both tensors are true dense tensors.
A = pt.from_array(np.full([2, 3], 1, dtype=np.float64))
B = pt.from_array(np.full([2, 3], 2, dtype=np.float64))
# Define the result tensor as a true dense tensor. The parameter is_dense=True
# is an MLIR-PyTACO extension.
C = pt.tensor([2, 3], dtype=pt.float64, is_dense=True)

C[i, j] = A[i, j] + B[i, j]

# CHECK: [3. 3. 3. 3. 3. 3.]
print(C.to_array().reshape(6))
Example #15
# RUN: SUPPORTLIB=%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext %PYTHON %s | FileCheck %s

import numpy as np
import os
import sys

_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(_SCRIPT_PATH)
from tools import mlir_pytaco_api as pt

i, j = pt.get_index_vars(2)
# Both tensors are true dense tensors.
A = pt.from_array(np.full([2, 3], 1, dtype=np.float64))
B = pt.from_array(np.full([2, 3], 2, dtype=np.float64))
# Define the result tensor as a true dense tensor. The parameter is_dense=True
# is an MLIR-PyTACO extension.
C = pt.tensor([2, 3], is_dense=True)

C[i, j] = A[i, j] + B[i, j]

# CHECK: [3. 3. 3. 3. 3. 3.]
print(C.to_array().reshape(6))