# Example #1
def test_einsum_static_fwd(optimize):
    """Exercise every einsum example from the numpy docs.

    Each case is run twice: once with the string-signature form and once
    with the interleaved (operand, subscript-list) form.
    """
    square = mg.arange(25).reshape(5, 5)
    vec = mg.arange(5)
    rect = mg.arange(6).reshape(2, 3)

    # trace
    compare_einsum("ii", square, optimize=optimize)
    compare_einsum(square, [0, 0], optimize=optimize)

    # extract the diagonal
    compare_einsum("ii->i", square, optimize=optimize)
    compare_einsum(square, [0, 0], [0], optimize=optimize)

    # matrix-vector product
    compare_einsum("ij,j", square, vec, optimize=optimize)
    compare_einsum(square, [0, 1], vec, [1], optimize=optimize)

    # ellipsis broadcasting against a vector
    compare_einsum("...j,j", square, vec, optimize=optimize)
    compare_einsum(square, [Ellipsis, 0], vec, [Ellipsis, 0], optimize=optimize)

    # transpose
    compare_einsum("ji", rect, optimize=optimize)
    compare_einsum(rect, [1, 0], optimize=optimize)

    # scalar broadcast against an array
    compare_einsum("..., ...", 3, rect, optimize=optimize)
    compare_einsum(3, [Ellipsis], rect, [Ellipsis], optimize=optimize)

    # inner product
    compare_einsum("i,i", vec, vec, optimize=optimize)
    compare_einsum(vec, [0], vec, [0], optimize=optimize)

    # outer product; sum over the leading axis
    compare_einsum("i,j", np.arange(2) + 1, vec, optimize=optimize)
    compare_einsum("i...->...", square, optimize=optimize)

    # tensor contraction over two axes
    lhs = np.arange(60.0).reshape(3, 4, 5)
    rhs = np.arange(24.0).reshape(4, 3, 2)
    compare_einsum("ijk,jil->kl", lhs, rhs, optimize=optimize)
    compare_einsum(lhs, [0, 1, 2], rhs, [1, 0, 3], [2, 3], optimize=optimize)

    # contraction with transposed output
    lhs = np.arange(6).reshape((3, 2))
    rhs = np.arange(12).reshape((4, 3))
    compare_einsum("ki,jk->ij", lhs, rhs, optimize=optimize)
    compare_einsum(lhs, [0, 1], rhs, [2, 0], [1, 2], optimize=optimize)

    # explicit labels mixed with ellipses
    compare_einsum("ki,...k->i...", lhs, rhs, optimize=optimize)
    compare_einsum(
        lhs, [0, 1], rhs, [Ellipsis, 0], [1, Ellipsis], optimize=optimize
    )

    compare_einsum("k...,jk", lhs, rhs, optimize=optimize)
    compare_einsum(lhs, [0, Ellipsis], rhs, [2, 0], optimize=optimize)
def test_bad_label_type(type):
    """softmax_crossentropy must reject a non-integer dtype for `y_true`."""
    scores = mg.arange(12).reshape(3, 4)
    bad_labels = np.zeros((3,), dtype=type)
    with raises(TypeError):
        softmax_crossentropy(scores, bad_labels)
def test_bad_label_shape(shape):
    """softmax_crossentropy must reject a `y_true` whose shape is not (N,)."""
    scores = mg.arange(12).reshape(3, 4)
    bad_labels = mg.zeros(shape, dtype=int)
    with raises(ValueError):
        softmax_crossentropy(scores, bad_labels)
# Example #4
def test_bad_label_type(type):
    """`multiclass_hinge` must reject a non-integer dtype for `y_true`."""
    scores = mg.arange(12).reshape(3, 4)
    bad_labels = np.zeros((3,), dtype=type)
    with raises(TypeError):
        multiclass_hinge(scores, bad_labels)
# Example #5
def test_bad_label_shape(shape):
    """`multiclass_hinge` must reject a `y_true` whose shape is not (N,)."""
    scores = mg.arange(12).reshape(3, 4)
    bad_labels = mg.zeros(shape, dtype=int)
    with raises(ValueError):
        multiclass_hinge(scores, bad_labels)
# Example #6
        nd_tensor.item()

    for size1_tensor in (Tensor(1), Tensor([[1]])):
        assert float(size1_tensor) == 1.0
        assert int(size1_tensor) == 1
        assert size1_tensor.item() == 1.0


@pytest.mark.parametrize(
    "tensor, repr_",
    [
        (Tensor(1), "Tensor(1)"),
        (Tensor([1]), "Tensor([1])"),
        (Tensor([1, 2]), "Tensor([1, 2])"),
        (
            mg.arange(9).reshape((3, 3)),
            "Tensor([[0, 1, 2],\n        [3, 4, 5],\n        [6, 7, 8]])",
        ),
    ],
)
def test_repr(tensor, repr_):
    """A Tensor's repr should mirror numpy's array formatting."""
    assert repr_ == repr(tensor)


@given(constant=st.booleans())
def test_invalid_gradient_raises(constant: bool):
    """Backprop with a non-numeric gradient raises InvalidGradient for a
    non-constant tensor; a constant tensor raises nothing."""
    doubled = Tensor(3, constant=constant) * 2
    expectation = (
        does_not_raise() if constant else pytest.raises(InvalidGradient)
    )
    with expectation:
        doubled.backward("bad")
# Example #7
def test_redundant_args():
    """
    Test behavior for when einsum receives redundant inputs. An optimization
    was added such that einsum will only compute the gradient for such an entry
    once and scale it accordingly.

    NOTE(review): the assertions suggest `o.creator.cache` holds one entry per
    *distinct* input tensor (len 1 when the identical tensor is passed twice,
    len 2 when two equal-but-distinct tensors are passed) — confirm against
    the einsum Op implementation.
    """
    a = mg.arange(4).reshape(2, 2)
    a_copy = copy(a)

    # check standard summation
    o = einsum("ij,ij", a, a)
    assert len(o.creator.cache) == 1
    o.sum().backward()

    # `a_copy * 1` is a distinct tensor with the same values -> no dedup
    o = einsum("ij,ij", a_copy, a_copy * 1)
    assert len(o.creator.cache) == 2
    o.sum().backward()
    # the deduplicated path must yield the same gradient as the naive one
    assert_allclose(a.grad, a_copy.grad)

    a = Tensor(np.arange(4).reshape(2, 2))
    a_copy = copy(a)

    # check standard summation using alt signature
    o = einsum(a, [0, 1], a, [0, 1])
    assert len(o.creator.cache) == 1
    o.sum().backward()

    o = einsum(a_copy, [0, 1], a_copy * 1, [0, 1])
    assert len(o.creator.cache) == 2
    o.sum().backward()
    assert_allclose(a.grad, a_copy.grad)

    a = Tensor(np.arange(4).reshape(2, 2))
    a_copy = copy(a)

    # check matmul (no redundant indices)
    o = einsum("ij,jk", a, a)
    assert len(o.creator.cache) == 2
    o.sum().backward()

    # reference gradient computed via the matmul operator
    o = a_copy @ a_copy
    o.sum().backward()
    assert_allclose(a.grad, a_copy.grad)

    a = Tensor(np.arange(4).reshape(2, 2))
    a_copy = copy(a)

    # check traces
    o = einsum("ii,ii", a, a)
    assert len(o.creator.cache) == 1
    o.sum().backward()

    o = einsum("ii,ii", a_copy, a_copy * 1)
    assert len(o.creator.cache) == 2
    o.sum().backward()
    assert_allclose(a.grad, a_copy.grad)

    a = Tensor(np.arange(4).reshape(2, 2))
    a_copy = copy(a)

    b = Tensor(-1 * np.arange(2).reshape(2, 1))
    b_copy = copy(b)

    # check broadcasting and multiply-redundant input tensors
    # with distinct einsum labels
    # 6 operands but only 2 distinct tensors (a, b) under 3 label groups
    o = einsum("ii,ii,i...,i...,...i,...i", a, a, b, b, a, a)
    assert len(o.creator.cache) == 3
    o.sum().backward()

    # same computation with every operand a distinct tensor -> full cache
    o = einsum(
        "ii,ii,i...,i...,...i,...i",
        a_copy,
        a_copy * 1,
        b_copy,
        b_copy * 1,
        a_copy,
        1 * a_copy,
    )
    assert len(o.creator.cache) == 6
    o.sum().backward()
    assert_allclose(a.grad, a_copy.grad)
    assert_allclose(b.grad, b_copy.grad)
# Example #8
 def wrapped_func(exp):
     """Check that raising ``arange(2)`` to ``exp`` is recorded by the
     expected Op.

     NOTE(review): ``op`` comes from the enclosing scope, which is not
     visible in this chunk — presumably a parametrized Op class.
     """
     out = mg.arange(2)**exp
     assert isinstance(out.creator, op)
# Example #9
def test_repr():
    """A Tensor's repr should mirror numpy's array formatting."""
    matrix_repr = 'Tensor([[0, 1, 2],\n        [3, 4, 5],\n        [6, 7, 8]])'
    cases = [
        (Tensor(1), 'Tensor(1)'),
        (Tensor([1]), 'Tensor([1])'),
        (Tensor([1, 2]), 'Tensor([1, 2])'),
        (mg.arange(9).reshape((3, 3)), matrix_repr),
    ]
    for tensor, expected in cases:
        assert repr(tensor) == expected