Example #1
def allequal(tensor1, tensor2, **kwargs):
    """Returns True if two tensors are element-wise equal along a given axis.

    This function is equivalent to calling ``np.all(tensor1 == tensor2, **kwargs)``,
    but allows for ``tensor1`` and ``tensor2`` to differ in type.

    Args:
        tensor1 (tensor_like): tensor to compare
        tensor2 (tensor_like): tensor to compare
        **kwargs: Accepts any keyword argument that is accepted by ``np.all``,
            such as ``axis``, ``out``, and ``keepdims``. See the `NumPy documentation
            <https://numpy.org/doc/stable/reference/generated/numpy.all.html>`__ for
            more details.

    Returns:
        ndarray, bool: If ``axis=None``, a logical AND reduction is applied to all elements
        and a boolean will be returned, indicating if all elements evaluate to ``True``. Otherwise,
        a boolean NumPy array will be returned.

    **Example**

    >>> a = torch.tensor([1, 2])
    >>> b = np.array([1, 2])
    >>> allequal(a, b)
    True
    """
    t1 = ar.to_numpy(tensor1)
    t2 = ar.to_numpy(tensor2)
    return np.all(t1 == t2, **kwargs)
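
A usage sketch with the ``axis`` keyword, assuming PyTorch and NumPy are both installed (the arrays below are illustrative):

>>> import numpy as np
>>> import torch
>>> a = torch.tensor([[1, 2], [3, 4]])
>>> b = np.array([[1, 2], [3, 0]])
>>> allequal(a, b)
False
>>> allequal(a, b, axis=0)
array([ True, False])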
Example #2
def test_translator_random_normal(backend):
    from autoray import numpy as anp
    x = anp.random.normal(100.0, 0.1, size=(4, 5), like=backend)

    if backend == 'sparse':
        assert (x.data > 90.0).all()
        assert (x.data < 110.0).all()
        return

    assert (ar.to_numpy(x) > 90.0).all()
    assert (ar.to_numpy(x) < 110.0).all()

    if backend == 'tensorflow':
        x32 = ar.do('random.normal',
                    100.0,
                    0.1,
                    dtype='float32',
                    size=(4, 5),
                    like=backend)
        assert x32.dtype == 'float32'
        assert (ar.to_numpy(x32) > 90.0).all()
        assert (ar.to_numpy(x32) < 110.0).all()

    # test default single scalar
    x = anp.random.normal(loc=1500, scale=10, like=backend)
    assert 1000 <= ar.to_numpy(x) < 2000
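
The ``anp.random.normal`` translator dispatches to whichever framework is named by ``like``; a minimal sketch using the plain NumPy backend (other backend names follow the same pattern):

>>> import autoray as ar
>>> from autoray import numpy as anp
>>> x = anp.random.normal(100.0, 0.1, size=(4, 5), like='numpy')
>>> ar.infer_backend(x)
'numpy'
>>> 90.0 < float(ar.to_numpy(x).mean()) < 110.0
True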
Example #3
def test_translator_random_uniform(backend):
    from autoray import numpy as anp

    if backend == 'sparse':
        pytest.xfail("Sparse will have zeros")

    x = anp.random.uniform(low=-10, size=(4, 5), like=backend)
    assert (ar.to_numpy(x) > -10).all()
    assert (ar.to_numpy(x) < 1.0).all()

    # test default single scalar
    x = anp.random.uniform(low=1000, high=2000, like=backend)
    assert 1000 <= ar.to_numpy(x) < 2000
Example #4
def test_mgs(backend):
    if backend == 'sparse':
        pytest.xfail("Sparse doesn't support linear algebra yet...")
    x = gen_rand((3, 5), backend)
    Ux = modified_gram_schmidt(x)
    y = ar.do('sum', Ux @ ar.dag(Ux))
    assert ar.to_numpy(y) == pytest.approx(3)
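
``gen_rand`` and ``modified_gram_schmidt`` are helpers from the surrounding test suite and are not shown in this listing. A backend-agnostic Gram-Schmidt routine along the following lines would satisfy the check above (a sketch built only on ``ar.do`` dispatch, not the exact helper used by the test):

import autoray as ar

def modified_gram_schmidt(X):
    # Orthonormalize the rows of X, dispatching every array operation
    # to X's own backend through autoray.
    Q = []
    for j in range(X.shape[0]):
        q = X[j, :]
        for i in range(j):
            rij = ar.do('tensordot', ar.do('conj', Q[i]), q, 1)
            q = q - rij * Q[i]
        rjj = ar.do('linalg.norm', q, 2)
        Q.append(q / rjj)
    return ar.do('stack', tuple(Q), axis=0, like=X)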
Example #5
def test_linalg_svd_square(backend):
    if backend == 'sparse':
        pytest.xfail("Sparse doesn't support linear algebra yet...")
    x = gen_rand((5, 4), backend)
    U, s, V = ar.do('linalg.svd', x)
    assert (ar.infer_backend(x) == ar.infer_backend(U) == ar.infer_backend(s)
            == ar.infer_backend(V) == backend)
    y = U @ ar.do('diag', s, like=x) @ V
    diff = ar.do('sum', abs(y - x))
    assert ar.to_numpy(diff) < 1e-8
Example #6
def allclose(a, b, rtol=1e-05, atol=1e-08, **kwargs):
    """Wrapper around np.allclose, allowing tensors ``a`` and ``b``
    to differ in type"""
    try:
        # Some frameworks may provide their own allclose implementation.
        # Try and use it if available.
        res = np.allclose(a, b, rtol=rtol, atol=atol, **kwargs)
    except (TypeError, AttributeError):
        # Otherwise, convert the input to NumPy arrays.
        #
        # TODO: replace this with a bespoke, framework agnostic
        # low-level implementation to avoid the NumPy conversion:
        #
        #    np.abs(a - b) <= atol + rtol * np.abs(b)
        #
        t1 = ar.to_numpy(a)
        t2 = ar.to_numpy(b)
        res = np.allclose(t1, t2, rtol=rtol, atol=atol, **kwargs)

    return res
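
A usage sketch mixing frameworks (assuming PyTorch and NumPy; the values are illustrative):

>>> import numpy as np
>>> import torch
>>> a = torch.tensor([1.0, 2.0])
>>> b = np.array([1.0, 2.0 + 1e-6])
>>> allclose(a, b)
True
>>> allclose(a, b, rtol=0, atol=1e-9)
False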
Example #7
def test_dtype_specials(backend, creation, dtype):
    import numpy as np
    x = ar.do(creation, shape=(2, 3), like=backend)

    if backend == 'torch' and 'complex' in dtype:
        pytest.xfail("Pytorch doesn't support complex numbers yet...")

    x = ar.astype(x, dtype)
    assert ar.get_dtype_name(x) == dtype
    x = ar.to_numpy(x)
    assert isinstance(x, np.ndarray)
    assert ar.get_dtype_name(x) == dtype
Example #8
def test_triu(backend):
    x = gen_rand((4, 4), backend)
    xl = ar.do('triu', x)
    xln = ar.to_numpy(xl)
    assert xln[1, 0] == 0.0
    if backend != 'sparse':
        # this won't work for sparse because density < 1
        assert (xln > 0.0).sum() == 10
    xl = ar.do('triu', x, k=-1)
    xln = ar.to_numpy(xl)
    if backend != 'sparse':
        # this won't work for sparse because density < 1
        assert xln[1, 0] != 0.0
    assert xln[2, 0] == 0.0
    if backend != 'sparse':
        # this won't work for sparse because density < 1
        assert (xln > 0.0).sum() == 13

    if backend == 'tensorflow':
        with pytest.raises(ValueError):
            ar.do('triu', x, 1)
Example #9
def test_count_nonzero(backend, array_dtype):

    if backend == 'mars':
        import mars
        if mars._version.version_info < (0, 4, 0, ''):
            pytest.xfail('mars count_nonzero bug fixed in version 0.4.')

    if array_dtype == 'int':
        x = ar.do('array', [0, 1, 2, 0, 3], like=backend)
    elif array_dtype == 'float':
        x = ar.do('array', [0., 1., 2., 0., 3.], like=backend)
    elif array_dtype == 'bool':
        x = ar.do('array', [False, True, True, False, True], like=backend)
    nz = ar.do('count_nonzero', x)
    assert ar.to_numpy(nz) == 3
Example #10
def cast_like(tensor1, tensor2):
    """Casts a tensor to the same dtype as another.

    Args:
        tensor1 (tensor_like): tensor to cast
        tensor2 (tensor_like): tensor with corresponding dtype to cast to

    Returns:
        tensor_like: a tensor with the same shape and values as ``tensor1`` and the
        same dtype as ``tensor2``

    **Example**

    >>> x = torch.tensor([1, 2])
    >>> y = torch.tensor([3., 4.])
    >>> cast_like(x, y)
    tensor([1., 2.])
    """
    dtype = ar.to_numpy(tensor2).dtype.type
    return cast(tensor1, dtype)
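
The ``cast`` helper this relies on is not part of this listing; a minimal backend-agnostic stand-in could be built on ``ar.astype`` (a hypothetical sketch, not the actual helper):

import numpy as np
import autoray as ar

def cast(tensor, dtype):
    # Hypothetical stand-in: normalize ``dtype`` to its NumPy name and let
    # autoray dispatch the actual cast to the tensor's own backend.
    return ar.astype(tensor, np.dtype(dtype).name)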
Example #11
def allclose(a, b, rtol=1e-05, atol=1e-08, **kwargs):
    """Wrapper around np.allclose, allowing tensors ``a`` and ``b``
    to differ in type"""
    t1 = ar.to_numpy(a)
    t2 = ar.to_numpy(b)
    return np.allclose(t1, t2, rtol=rtol, atol=atol, **kwargs)