Example No. 1
def make_model(dtype="float64"):
    return Model(
        layers=(
            TanhLayer(MA(6, 9, dtype=dtype), zeros(1, 9, dtype=dtype)),
            TanhLayer(MB(9, 10, dtype=dtype), zeros(1, 10, dtype=dtype)),
            TanhLayer(MC(10, 8, dtype=dtype), zeros(1, 8, dtype=dtype)),
        )
    )
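
For orientation, here is a minimal NumPy sketch of the forward pass such a stack computes, using hypothetical stand-ins for Model and TanhLayer (their definitions are not part of this example): each layer is an affine transform followed by tanh, chained 6 → 9 → 10 → 8.

import numpy as np

def tanh_layer(x, W, b):
    # one TanhLayer: affine transform followed by tanh
    return np.tanh(x @ W + b)

def forward(x, layers):
    # chain the layers exactly as Model(layers=...) would
    for W, b in layers:
        x = tanh_layer(x, W, b)
    return x

rng = np.random.default_rng(0)
layers = [
    (rng.standard_normal((6, 9)), np.zeros((1, 9))),
    (rng.standard_normal((9, 10)), np.zeros((1, 10))),
    (rng.standard_normal((10, 8)), np.zeros((1, 8))),
]
out = forward(rng.standard_normal((2, 6)), layers)  # shape (2, 8)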
Example No. 2
def test_module_linear_seq_bwd(_backend_fixture):
    backend = _backend_fixture
    backend_options = get_backend_options(args, backend)

    torch.manual_seed(123)

    inp = torch.Tensor(MA(2, 4, dtype=args.dtype))
    model = Linear_Seq(4, 2, 3)
    target = torch.Tensor([2.5])

    # Leave this here; it will be needed again when we investigate how to
    # automatically remove duplicates from grads of sequential models.
    """
    from myia.abstract.aliasing import find_aliases
    al = find_aliases(model, aliasable=tensor_pytorch_aliasable)
    print("alias", al)
    # """
    def mse(value, target):
        diff = value - target
        return (diff * diff).sum()

    def cost(model, inp, target):
        value = model(inp)
        loss = mse(value, target)
        return loss

    pt_cost = cost(model, inp, target)

    @myia(
        backend=backend,
        backend_options=backend_options,
        alias_tracker=tensor_pytorch_aliasable,
    )
    def step(model, inp, target):
        _cost, dmodel = value_and_grad(cost, "model")(model, inp, target)
        return _cost, dmodel

    loss, grad = step(model, inp, target)

    pt_cost = cost(model, inp, target)
    if model.f1.weight.grad is not None:
        model.f1.weight.grad.data.zero_()
    if model.f1.bias.grad is not None:
        model.f1.bias.grad.data.zero_()
    pt_cost.backward()

    for n, p in model.named_parameters():
        m_p = grad
        for a in tuple(n.split(".")):
            m_p = getattr(m_p, a)
        assert torch.allclose(p.grad.data, m_p)
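
For comparison, here is a rough pure-PyTorch sketch of what value_and_grad(cost, "model") yields in this test: the loss together with d(loss)/d(parameter) for each model parameter (illustrative only, not Myia's API).

import torch

def value_and_grad_sketch(cost, model, inp, target):
    # returns the loss plus the gradient of the loss
    # w.r.t. every parameter of the model
    loss = cost(model, inp, target)
    grads = torch.autograd.grad(loss, list(model.parameters()))
    return loss, grads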
Example No. 3
def test_module_2_layer_mlp_update__to_device(_backend_fixture):
    backend = _backend_fixture
    backend_options = get_backend_options(args, backend)

    torch.manual_seed(123)

    inp = torch.Tensor(MA(2, 4, dtype=args.dtype))
    model = MLP_2_Layers(4, 2, 3)
    target = torch.Tensor([2.5])

    inp = to_device(inp, backend, backend_options)
    model = to_device(model, backend, backend_options)
    target = to_device(target, backend, backend_options)

    def mse(value, target):
        diff = value - target
        return sum(diff * diff)

    def cost(model, inp, target):
        value = model(inp)
        loss = mse(value, target)
        return loss

    @myia(backend=backend, backend_options=backend_options)
    def step(model, inp, target):
        _cost, dmodel = value_and_grad(cost, "model")(model, inp, target)
        return _cost, model - dmodel

    loss, model = step(model, inp, target)

    assert loss == 42.759910583496094

    expected_model = [
        torch.Tensor([
            [1.31208074, 7.52942896, -3.48841572, -2.12911177],
            [4.61794090, -5.96872425, 3.26280975, -16.41462517],
        ]),
        torch.Tensor([-2.16651487, -1.72582722]),
        torch.Tensor([
            [0.39250314, -4.12741709],
            [3.85490060, -1.67493737],
            [1.51745880, -5.04526806],
        ]),
        torch.Tensor([7.15553093, 6.48739338, 9.37104797]),
    ]

    for p, ep in zip(model.parameters(), expected_model):
        assert torch.allclose(p, ep)
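
The return value model - dmodel above is a full gradient-descent step with learning rate 1; Myia applies the subtraction leaf-by-leaf over the module's parameters. A self-contained PyTorch sketch of the same effect (hypothetical, for illustration only):

import torch
import torch.nn as nn

net = nn.Linear(4, 2)
grads = [torch.ones_like(p) for p in net.parameters()]
with torch.no_grad():
    for p, g in zip(net.parameters(), grads):
        p -= g  # leaf-by-leaf update, equivalent to "model - dmodel"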
Example No. 4
def test_module_2_layer_mlp_bwd(_backend_fixture):
    backend = _backend_fixture
    backend_options = get_backend_options(args, backend)

    torch.manual_seed(123)

    inp = torch.Tensor(MA(2, 4, dtype=args.dtype))
    model = MLP_2_Layers(4, 2, 3)
    target = torch.Tensor([2.5])

    def mse(value, target):
        diff = value - target
        return sum(diff * diff)

    def cost(model, inp, target):
        value = model(inp)
        loss = mse(value, target)
        return loss

    @myia(backend=backend, backend_options=backend_options)
    def step(model, inp, target):
        _cost, dmodel = value_and_grad(cost, "model")(model, inp, target)
        return _cost, dmodel

    loss, grad = step(model, inp, target)

    assert loss == 42.759910583496094

    expected_grads = [
        torch.Tensor([
            [-1.51596880, -7.51286650, 3.24008656, 2.31766868],
            [-5.04396868, 6.33524609, -3.62623000, 16.01710510],
        ]),
        torch.Tensor([1.85057139, 1.95227396]),
        torch.Tensor([
            [-0.65377355, 4.39202595],
            [-4.45504284, 1.24591899],
            [-1.77709150, 4.90630770],
        ]),
        torch.Tensor([-7.69495678, -6.02438641, -9.53780556]),
    ]

    for g, eg in zip(grad.parameters(), expected_grads):
        assert torch.allclose(g, eg)
Example No. 5
def test_module_2_layer_mlp_seq_fwd(_backend_fixture):
    backend = _backend_fixture
    backend_options = get_backend_options(args, backend)

    torch.manual_seed(123)

    inp = torch.Tensor(MA(2, 4, dtype=args.dtype))
    model = MLP_2_Layers_Seq(4, 2, 3)

    @myia(backend=backend, backend_options=backend_options)
    def step(model, inp):
        return model(inp)

    output = step(model, inp)

    output_expected = torch.Tensor([
        [-0.55702960, 0.85518718, 0.13796528],
        [-0.67215765, -0.09247651, -0.38900381],
    ])

    assert torch.allclose(output, output_expected)
Example No. 6
def test_module_matmul_bwd(_backend_fixture):
    backend = _backend_fixture
    backend_options = get_backend_options(args, backend)

    torch.manual_seed(123)

    inp = torch.Tensor(MA(2, 4, dtype=args.dtype))
    model = Tiny(4, 3)
    target = torch.Tensor([2.5])

    def mse(value, target):
        diff = value - target
        return sum(diff * diff)

    def cost(model, inp, target):
        value = model(inp)
        loss = mse(value, target)
        return loss

    @myia(backend=backend, backend_options=backend_options)
    def step(model, inp, target):
        _cost, dmodel = value_and_grad(cost, "model")(model, inp, target)
        return _cost, dmodel

    loss, grad = step(model, inp, target)

    loss_expected = 161.0585479736328

    assert np.isclose(loss, loss_expected)

    grad_expected = torch.Tensor([
        [-15.79940414, 34.22111893, -16.79670525],
        [82.41101074, -39.50494003, 100.31848145],
        [-40.12714767, 23.00367165, -48.50212097],
        [66.75647736, -107.74736023, 74.33836365],
    ])

    assert torch.allclose(grad.W, grad_expected)
Example No. 7
def test_module_matmul_update(_backend_fixture):
    backend = _backend_fixture
    backend_options = get_backend_options(args, backend)

    torch.manual_seed(123)

    inp = torch.Tensor(MA(2, 4, dtype=args.dtype))
    model = Tiny(4, 3)
    target = torch.Tensor([2.5])

    def mse(value, target):
        diff = value - target
        return sum(diff * diff)

    def cost(model, inp, target):
        value = model(inp)
        loss = mse(value, target)
        return loss

    @myia(backend=backend, backend_options=backend_options)
    def step(model, inp, target):
        _cost, dmodel = value_and_grad(cost, "model")(model, inp, target)
        return _cost, model - dmodel

    loss, model = step(model, inp, target)

    loss_expected = 161.0585479736328

    assert np.isclose(loss, loss_expected)

    model_expected = torch.Tensor([
        [15.42187691, -34.19045258, 16.33688736],
        [-82.06187439, 38.71609116, -99.63981628],
        [39.45422363, -23.73973656, 47.91710663],
        [-66.33718109, 107.40527344, -73.99190521],
    ])

    assert torch.allclose(model.W, model_expected)
Example No. 8
    def format():
        return {}

    def f():
        raise ValueError("test")

    register_backend(name, f, format)

    with pytest.raises(LoadingError):
        load_backend(name)

    del _backends[name]


@run(MA(2, 3))
def test_reshape2(x):
    return reshape(x, (6, ))


@mt(run(MA(2, 3)), run(MA(1, 3)))
def test_array_reduce(x):
    return array_reduce(scalar_add, x, (1, 3))


@run(MA(2, 3))
def test_array_reduce2(x):
    return array_reduce(scalar_add, x, (3, ))


@run_gpu(MA(1, 1))
Example No. 9
    },
                 result=InferenceError),
    # Generic broadcasting tests
    infer_scalar([f64], f64, result=[f64]),
    infer_scalar([[f64]], [[f64]], result=[[f64]]),
    infer_scalar((i64, i64), i64, result=(i64, i64)),
    infer_scalar(i64, (i64, i64), result=(i64, i64)),
    infer_scalar(Point(i64, i64), i64, result=Point(i64, i64)),
    # Various errors
    infer_scalar((i64, i64), (i64, i64, i64), result=InferenceError),
    infer_scalar(Point(i64, i64),
                 Point3D(i64, i64, i64),
                 result=InferenceError),
    infer_scalar((i64, i64), [i64], result=InferenceError),
    # Full tests
    run(MA(2, 3), MB(2, 3)),
    run(Point(1, 2), Point(3, 4)),
    run((MA(2, 3), 7.5, MB(1, 3)), 3.5),
)
def test_hyper_map(x, y):
    return hyper_map(scalar_add, x, y)


@mt(
    infer_scalar((i64, f64), (i64, f64), result=InferenceError),
    infer_scalar([f64], f64, result=[f64]),
)
def test_hyper_map_notuple(x, y):
    return hyper_map_notuple(scalar_add, x, y)
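
As the broadcasting cases above suggest, hyper_map recursively maps a scalar operation over matching structures and broadcasts scalars against containers. A much-simplified plain-Python sketch of that behaviour, for tuples only (the real implementation also covers arrays, lists, and records):

def hyper_map_sketch(fn, x, y):
    # recurse into matching tuples, broadcast scalars against tuples
    if isinstance(x, tuple) and isinstance(y, tuple):
        if len(x) != len(y):
            raise TypeError("tuple lengths must match")
        return tuple(hyper_map_sketch(fn, a, b) for a, b in zip(x, y))
    if isinstance(x, tuple):  # broadcast scalar y over x
        return tuple(hyper_map_sketch(fn, a, y) for a in x)
    if isinstance(y, tuple):  # broadcast scalar x over y
        return tuple(hyper_map_sketch(fn, x, b) for b in y)
    return fn(x, y)

assert hyper_map_sketch(lambda a, b: a + b, (1, 2), 10) == (11, 12)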

Example No. 10
def test_optim_setitem(_backend_fixture):
    from myia.abstract import macro
    from myia.operations import primitives as P
    from myia.ir import sexp_to_node
    from myia.lib import setter_from_getter

    def update_sgd(p, g):
        return p - 0.01 * g

    @macro
    async def update(info, model_ref, dmodel_ref, update_rule_ref):
        new_model = model_ref.node
        dmodel = dmodel_ref.node
        update_rule = update_rule_ref.node

        p = new_model
        g = dmodel

        p = (P.record_getitem, p, "W")
        g = (P.record_getitem, g, "W")

        p_node = sexp_to_node(p, info.graph)
        g_node = sexp_to_node(g, info.graph)

        pn = info.graph.apply(update_rule, p_node, g_node)

        new_model = sexp_to_node(setter_from_getter(p, pn), info.graph)

        return new_model

    backend = _backend_fixture
    backend_options = get_backend_options(args, backend)

    torch.manual_seed(123)

    inp = torch.Tensor(MA(2, 4, dtype=args.dtype))
    model = Tiny(4, 3)
    target = torch.Tensor([2.5])

    def mse(value, target):
        diff = value - target
        return sum(diff * diff)

    def cost(model, inp, target):
        value = model(inp)
        loss = mse(value, target)
        return loss

    @myia(backend=backend, backend_options=backend_options)
    def step(model, inp, target):
        _cost, dmodel = value_and_grad(cost, "model")(model, inp, target)
        return _cost, update(model, dmodel, update_sgd)

    loss, model_new = step(model, inp, target)

    expected_loss = torch.Tensor([161.05856323242188])
    assert torch.allclose(loss, expected_loss)

    expected_param = torch.Tensor([
        [-0.21953332, -0.31154382, -0.29184943],
        [-0.47497076, -0.39380032, -0.32451797],
        [-0.27165186, -0.96610248, -0.09999254],
        [-0.24826682, 0.73539025, -0.39692938],
    ])

    assert torch.allclose(model_new.W, expected_param)
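
The update macro rewrites the getter chain (P.record_getitem, model, "W") into the corresponding setter via setter_from_getter, so the compiled step replaces W with update_sgd(W, dW). Stripped of the graph machinery, the arithmetic is just the following (NumPy sketch with illustrative values):

import numpy as np

def update_sgd(p, g):
    return p - 0.01 * g  # same rule as in the test

W = np.ones((4, 3))
dW = np.full((4, 3), 2.0)
W_new = update_sgd(W, dW)  # every entry becomes 1 - 0.02 = 0.98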
Example No. 11
    def __init__(self, in_f, out_f):
        super(Tiny, self).__init__()
        self.W = nn.Parameter(torch.Tensor(MA(in_f, out_f, dtype=args.dtype)))
        self.W = nn.init.xavier_uniform_(self.W)
Example No. 12

@mt(
    run(-2.7, result=-2),
    run(7.8, result=7),
    run(np.float16(7.8), result=7),
    run(np.float32(7.8), result=7),
    run(np.float64(7.8), result=7),
)
def test_trunc(a):
    return math.trunc(a)


@mt(
    # sin seems not supported for pytorch/CPU/float16
    run(MA(3, 3, dtype="float32"), result=np.sin(MA(3, 3, dtype="float32"))),
    run(MA(3, 3, dtype="float64"), result=np.sin(MA(3, 3, dtype="float64"))),
)
def test_elemwise_sin(a):
    return np.sin(a)


@mt(
    # cos seems not supported for pytorch/CPU/float16
    run(MA(3, 3, dtype="float32"), result=np.cos(MA(3, 3, dtype="float32"))),
    run(MA(3, 3, dtype="float64"), result=np.cos(MA(3, 3, dtype="float64"))),
)
def test_elemwise_cos(a):
    return np.cos(a)

Example No. 13
@mark.xfail(reason="A DummyInferrer ends up being called")
@gradient(4.5, 6.7, backend=backend_all)
def test_closures_in_tuples(x, y):
    def f():
        return x * y

    def g():
        return x + y

    tup = f, g
    ff, gg = tup
    return ff() + gg()


@gradient(MA(2, 3), MB(2, 3), backend=backend_all)
def test_array_operations(xs, ys):
    div = array_map(scalar_div, xs, ys)
    sm = array_reduce(scalar_add, div, ())
    return array_to_scalar(sm)


@gradient(3.1, 7.6, backend=backend_all)
def test_array_operations_distribute(x, y):
    xs = distribute(scalar_to_array(x, AA), (4, 3))
    ys = distribute(scalar_to_array(y, AA), (4, 3))
    div = array_map(scalar_div, xs, ys)
    sm = array_reduce(scalar_add, div, ())
    return array_to_scalar(sm)

Example No. 14

args = Args()


class Tiny(nn.Module):
    def __init__(self, in_f, out_f):
        super(Tiny, self).__init__()
        self.W = nn.Parameter(torch.Tensor(MA(in_f, out_f, dtype=args.dtype)))
        self.W = nn.init.xavier_uniform_(self.W)

    def forward(self, input):
        return input @ self.W


@run(Tiny(4, 3), torch.tensor(MA(2, 4, dtype="float32")))
def test_module_matmul_fwd(model, inp):
    return model(inp)


def test_module_matmul_bwd(_backend_fixture):
    backend = _backend_fixture
    backend_options = get_backend_options(args, backend)

    torch.manual_seed(123)

    inp = torch.Tensor(MA(2, 4, dtype=args.dtype))
    model = Tiny(4, 3)
    target = torch.Tensor([2.5])

    def mse(value, target):
Example No. 15
        return tagged(z)


@mt(run("hey", 2), run("idk", 5))
def test_string_eq(s, x):
    if s == "idk":
        x = x + 1
    return x


@mt(run("hey", 2), run("idk", 5))
def test_string_ne(s, x):
    if s != "idk":
        x = x + 1
    return x


@run("hey")
def test_string_return(s):
    return s


@run(MA(4, 5))
def test_array_getitem(x):
    return array_getitem(x, (0, 1), (3, 5), (2, 3))


@run(MA(4, 5), MB(2, 2))
def test_array_setitem(x, v):
    return array_setitem(x, (0, 1), (3, 5), (2, 3), v)
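
Assuming array_getitem and array_setitem take per-dimension (begin, end, strides) triples, which matches the shapes used here (the (2, 3) strides over a (4, 5) array select a (2, 2) region, the shape of v), the calls correspond to ordinary strided slicing. A NumPy sketch of that assumption:

import numpy as np

x = np.arange(20.0).reshape(4, 5)
view = x[0:3:2, 1:5:3]  # shape (2, 2), like array_getitem(x, (0, 1), (3, 5), (2, 3))

v = np.zeros((2, 2))
y = x.copy()
y[0:3:2, 1:5:3] = v     # like array_setitem, which returns a new array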
Example No. 16
from myia.lib import InferenceError
from myia.operations import dtype, scalar_cast
from myia.testing.common import MA, Ty, af32_of, f32, i64, to_abstract_test
from myia.testing.multitest import infer, mt, run


@mt(
    infer(i64, result=InferenceError),
    infer(af32_of(4, 5), result=Ty(to_abstract_test(f32))),
)
def test_dtype(arr):
    return dtype(arr)


@mt(infer(af32_of(4, 5), i64, result=f32), run(MA(2, 3), 7, result=7.0))
def test_cast_to_dtype(arr, x):
    return scalar_cast(x, dtype(arr))
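
The run case run(MA(2, 3), 7, result=7.0) shows the intent: the integer scalar is cast to the array's element type. A NumPy analogue (illustrative, not Myia's API):

import numpy as np

arr = np.zeros((2, 3))      # float64 elements
x = 7                       # integer scalar
x_cast = arr.dtype.type(x)  # -> np.float64(7.0)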
Example No. 17
import pytest

from myia.lib import InferenceError
from myia.operations import einsum
from myia.testing.common import MA, MB
from myia.testing.multitest import infer, mt, run_debug


@run_debug(MA(1, 4)[0])
def test_einsum_view1d(a):
    return einsum("i", a)


@run_debug(MA(1, 4)[0])
def test_einsum_sum1d(a):
    return einsum("i->", a)


@run_debug(MA(4, 1)[0], MB(4, 1)[0])
def test_einsum_elemwise1d(a, b):
    return einsum("i,i->i", a, b)


@run_debug(MA(3, 3)[0], MA(3, 3)[1])
def test_einsum_inner(a, b):
    return einsum("j,j", a, b)


@run_debug(MA(3, 3)[0], MA(3, 3)[1])
def test_einsum_outer(a, b):
    return einsum("i,k->ik", a, b)
Example No. 18
fwd_and_bwd_no_numpy_compat = _fwd_and_bwd.configure(backend=backend_all,
                                                     numpy_compat=False)
run = basic_run.configure(numpy_compat=True)
run_no_numpy_compat = basic_run.configure(numpy_compat=False)

# These tests cover ops that appear in dir() of "torch" or of a
# "torch.Tensor" instance:
# all_torch_ops = dir(torch)
# all_torch_tensor_ops = dir(torch.Tensor([5.49670]))

torch.manual_seed(123)

single_tensor_arg_tests = (
    fwd_and_bwd(nn.Parameter(torch.Tensor([2.1]).reshape(()))),
    fwd_and_bwd(nn.Parameter(torch.Tensor([2.1]))),
    fwd_and_bwd(nn.Parameter(torch.Tensor([-2.2]))),
    fwd_and_bwd(nn.Parameter(torch.Tensor(MA(2, 3)))),
)


@mt(*single_tensor_arg_tests)
def test_torch_abs(x):
    return torch.abs(x)


@mt(*single_tensor_arg_tests)
def test_torch_exp(x):
    return torch.exp(x)


@mt(*single_tensor_arg_tests)
def test_torch_log(x):