Example #1
def make_model(dtype="float64"):
    return Model(
        layers=(
            TanhLayer(MA(6, 9, dtype=dtype), zeros(1, 9, dtype=dtype)),
            TanhLayer(MB(9, 10, dtype=dtype), zeros(1, 10, dtype=dtype)),
            TanhLayer(MC(10, 8, dtype=dtype), zeros(1, 8, dtype=dtype)),
        )
    )
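
Model, TanhLayer, MA, MB, MC and zeros come from the surrounding Myia test module and are not shown in this snippet. Assuming each TanhLayer applies tanh(x @ W + b), a minimal NumPy sketch of the same three-layer stack (hypothetical helper names, illustration only):

import numpy as np

def tanh_layer(x, W, b):
    # Hypothetical stand-in for TanhLayer.apply: affine transform followed by tanh.
    return np.tanh(x @ W + b)

def apply_model(x, layers):
    # layers: [(W1, b1), (W2, b2), (W3, b3)] with shapes (6, 9), (9, 10), (10, 8),
    # matching the MA/MB/MC weight matrices and zero biases above.
    for W, b in layers:
        x = tanh_layer(x, W, b)
    return x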
Example #2
from myia.operations import einsum
from myia.testing.common import MA, MB
from myia.testing.multitest import infer, mt, run_debug


@run_debug(MA(1, 4)[0])
def test_einsum_view1d(a):
    return einsum("i", a)


@run_debug(MA(1, 4)[0])
def test_einsum_sum1d(a):
    return einsum("i->", a)


@run_debug(MA(4, 1)[0], MB(4, 1)[0])
def test_einsum_elemwise1d(a, b):
    return einsum("i,i->i", a, b)


@run_debug(MA(3, 3)[0], MA(3, 3)[1])
def test_einsum_inner(a, b):
    return einsum("j,j", a, b)


@run_debug(MA(3, 3)[0], MA(3, 3)[1])
def test_einsum_outer(a, b):
    return einsum("i,k->ik", a, b)


@run_debug(MA(2, 4))
Example #3
@mark.xfail(reason="A DummyInferrer ends up being called")
@gradient(4.5, 6.7, backend=backend_all)
def test_closures_in_tuples(x, y):
    def f():
        return x * y

    def g():
        return x + y

    tup = f, g
    ff, gg = tup
    return ff() + gg()


@gradient(MA(2, 3), MB(2, 3), backend=backend_all)
def test_array_operations(xs, ys):
    div = array_map(scalar_div, xs, ys)
    sm = array_reduce(scalar_add, div, ())
    return array_to_scalar(sm)


@gradient(3.1, 7.6, backend=backend_all)
def test_array_operations_distribute(x, y):
    xs = distribute(scalar_to_array(x, AA), (4, 3))
    ys = distribute(scalar_to_array(y, AA), (4, 3))
    div = array_map(scalar_div, xs, ys)
    sm = array_reduce(scalar_add, div, ())
    return array_to_scalar(sm)
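
In NumPy terms, test_array_operations reduces to a full sum of an elementwise division, and test_array_operations_distribute first broadcasts both scalars to shape (4, 3). A rough equivalent, assuming array_reduce(scalar_add, ..., ()) is a full sum and distribute broadcasts a scalar to the requested shape:

import numpy as np

def array_operations_equiv(xs, ys):
    # array_map(scalar_div, ...) -> xs / ys; array_reduce over () -> full sum.
    return np.sum(xs / ys)

def array_operations_distribute_equiv(x, y):
    # distribute(scalar_to_array(x, AA), (4, 3)) -> broadcast x to a (4, 3) array.
    xs = np.full((4, 3), x)
    ys = np.full((4, 3), y)
    return np.sum(xs / ys)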

Example #4
    },
                 result=InferenceError),
    # Generic broadcasting tests
    infer_scalar([f64], f64, result=[f64]),
    infer_scalar([[f64]], [[f64]], result=[[f64]]),
    infer_scalar((i64, i64), i64, result=(i64, i64)),
    infer_scalar(i64, (i64, i64), result=(i64, i64)),
    infer_scalar(Point(i64, i64), i64, result=Point(i64, i64)),
    # Various errors
    infer_scalar((i64, i64), (i64, i64, i64), result=InferenceError),
    infer_scalar(Point(i64, i64),
                 Point3D(i64, i64, i64),
                 result=InferenceError),
    infer_scalar((i64, i64), [i64], result=InferenceError),
    # Full tests
    run(MA(2, 3), MB(2, 3)),
    run(Point(1, 2), Point(3, 4)),
    run((MA(2, 3), 7.5, MB(1, 3)), 3.5),
)
def test_hyper_map(x, y):
    return hyper_map(scalar_add, x, y)


@mt(
    infer_scalar((i64, f64), (i64, f64), result=InferenceError),
    infer_scalar([f64], f64, result=[f64]),
)
def test_hyper_map_notuple(x, y):
    return hyper_map_notuple(scalar_add, x, y)
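
The infer_scalar cases above encode hyper_map's broadcasting rule: the scalar operation is applied across matching nested structures, a bare scalar is broadcast against a list, tuple, Point or array, and mismatched structures are an InferenceError. A simplified pure-Python sketch of that rule, covering tuples and lists only (arrays and dataclasses omitted):

def hyper_map_sketch(op, x, y):
    both_seq = isinstance(x, (tuple, list)) and isinstance(y, (tuple, list))
    if both_seq:
        if len(x) != len(y):
            raise ValueError("mismatched structure")  # cf. the InferenceError cases
        return type(x)(hyper_map_sketch(op, a, b) for a, b in zip(x, y))
    if isinstance(x, (tuple, list)):
        return type(x)(hyper_map_sketch(op, a, y) for a in x)
    if isinstance(y, (tuple, list)):
        return type(y)(hyper_map_sketch(op, x, b) for b in y)
    return op(x, y)

hyper_map_sketch(lambda a, b: a + b, (1, 2), 10)  # -> (11, 12)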

Example #5
        return tagged(z)


@mt(run("hey", 2), run("idk", 5))
def test_string_eq(s, x):
    if s == "idk":
        x = x + 1
    return x


@mt(run("hey", 2), run("idk", 5))
def test_string_ne(s, x):
    if s != "idk":
        x = x + 1
    return x


@run("hey")
def test_string_return(s):
    return s


@run(MA(4, 5))
def test_array_getitem(x):
    return array_getitem(x, (0, 1), (3, 5), (2, 3))


@run(MA(4, 5), MB(2, 2))
def test_array_setitem(x, v):
    return array_setitem(x, (0, 1), (3, 5), (2, 3), v)
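
The three tuples passed to array_getitem and array_setitem read as per-dimension begin indices, end indices and strides, so the calls above correspond to an ordinary strided slice of the (4, 5) input, with a (2, 2) value written back in the setitem case. A NumPy sketch, assuming that begin/end/strides interpretation:

import numpy as np

def array_getitem_equiv(x):
    # begin=(0, 1), end=(3, 5), strides=(2, 3) -> rows 0 and 2, columns 1 and 4.
    return x[0:3:2, 1:5:3]

def array_setitem_equiv(x, v):
    # Same slice, written functionally: return a copy with the slice replaced by v.
    out = x.copy()
    out[0:3:2, 1:5:3] = v
    return out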
Example #6
            ]]])),
        False,
    ),
    broad_specs=(True, False, False, False, False, False, True),
)
def test_torch_max_pool2d(x, ri):
    return torch.nn.functional.max_pool2d(x, (2, 2), (1, 1), 0, 1, False, ri)


@fwd_and_bwd(nn.Parameter(torch.Tensor(MA(2, 3))))
def test_torch_mean(x):
    return torch.mean(x)


@fwd_and_bwd(nn.Parameter(torch.Tensor(MA(2, 3))),
             nn.Parameter(torch.Tensor(MB(2, 3))))
def test_torch_mse_loss(x, y):
    return torch.nn.functional.mse_loss(x, y)


@fwd_and_bwd(nn.Parameter(torch.Tensor(MA(2, 3))), torch.tensor([1, 2]))
def test_torch_nll_loss(x, y):
    return torch.nn.functional.nll_loss(x, y)
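
fwd_and_bwd presumably compares both the forward value and the gradients against PyTorch itself; the plain-PyTorch pattern these cases exercise looks roughly like this (illustrative, not the Myia harness):

import torch
import torch.nn as nn

x = nn.Parameter(torch.randn(2, 3))
y = nn.Parameter(torch.randn(2, 3))
loss = torch.nn.functional.mse_loss(x, y)
loss.backward()
print(x.grad, y.grad)  # gradients of the loss w.r.t. both parameters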


@mt(
    fwd_and_bwd(nn.Parameter(torch.Tensor(MA(2, 3))), torch.tensor([1, 2]),
                "none"),
    fwd_and_bwd(nn.Parameter(torch.Tensor(MA(2, 3))), torch.tensor([1, 2]),
                "sum"),
    fwd_and_bwd(nn.Parameter(torch.Tensor(MA(2, 3))), torch.tensor([1, 2]),