Example #1
def test__convert_arg_init_AbstractScalar(backend_opt):
    b = backend_opt

    model1 = 3.0
    m1 = to_device(model1, b)
    assert isinstance(m1, float)
    assert m1 == 3.0

    model2 = 14
    m2 = to_device(model2, b)
    assert isinstance(m2, int)
    assert m2 == 14
Example #2
def test__convert_arg_init_AbstractList(backend_opt):
    b = backend_opt

    model = [91.0, 51.0]
    m = to_device(model, b)

    assert isinstance(m, list)
    assert isinstance(m[0], float)
    assert isinstance(m[1], float)
    assert m[0] == 91.0
    assert m[1] == 51.0

    model2 = []
    m2 = to_device(model2, b)
    assert m2 == []
Example #3
def test_module_2_layer_mlp_update__to_device(_backend_fixture):
    backend = _backend_fixture
    backend_options = get_backend_options(args, backend)

    torch.manual_seed(123)

    inp = torch.Tensor(MA(2, 4, dtype=args.dtype))
    model = MLP_2_Layers(4, 2, 3)
    target = torch.Tensor([2.5])

    inp = to_device(inp, backend, backend_options)
    model = to_device(model, backend, backend_options)
    target = to_device(target, backend, backend_options)

    def mse(value, target):
        diff = value - target
        return sum(diff * diff)

    def cost(model, inp, target):
        value = model(inp)
        loss = mse(value, target)
        return loss

    @myia(backend=backend, backend_options=backend_options)
    def step(model, inp, target):
        _cost, dmodel = value_and_grad(cost, "model")(model, inp, target)
        return _cost, model - dmodel

    loss, model = step(model, inp, target)

    assert loss == 42.759910583496094

    expected_model = [
        torch.Tensor([
            [1.31208074, 7.52942896, -3.48841572, -2.12911177],
            [4.61794090, -5.96872425, 3.26280975, -16.41462517],
        ]),
        torch.Tensor([-2.16651487, -1.72582722]),
        torch.Tensor([
            [0.39250314, -4.12741709],
            [3.85490060, -1.67493737],
            [1.51745880, -5.04526806],
        ]),
        torch.Tensor([7.15553093, 6.48739338, 9.37104797]),
    ]

    for p, ep in zip(model.parameters(), expected_model):
        assert torch.allclose(p, ep)
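The update model - dmodel above is plain gradient descent with a unit step size. A variant with an explicit learning rate, mirroring the step(model, inp, target, lr) calls in the later examples, might read as follows (a hypothetical sketch reusing the cost function and backend settings defined above; it assumes scalar multiplication distributes over the model structure the way subtraction evidently does):

@myia(backend=backend, backend_options=backend_options)
def step_lr(model, inp, target, lr):
    # Same as step above, but scale the gradient by an explicit learning rate.
    _cost, dmodel = value_and_grad(cost, "model")(model, inp, target)
    return _cost, model - lr * dmodel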
Example #4
def run_helper(epochs, n, batch_size, layer_sizes):
    """Run a model with the specified layer sizes on n random batches.

    Arguments:
        epochs: How many epochs to run.
        n: Number of training batches to generate.
        batch_size: Number of samples per batch.
        layer_sizes: Sizes of the model's layers.
    """
    layers = []
    for W, b in mlp_parameters(*layer_sizes):
        layers.append(Linear(W, b))
        layers.append(Tanh())

    model = Sequential(tuple(layers))
    model = to_device(model, backend, backend_options)

    data = generate_data(n, batch_size, layer_sizes[0], layer_sizes[-1])

    for _ in range(epochs):
        costs = []
        t0 = time.time()
        for inp, target in data:
            cost, model = step(model, inp, target, lr)
            costs.append(cost)
        costs = [float(c.from_device()) for c in costs]
        c = sum(costs) / n
        t = time.time() - t0
        print(f'Cost: {c:15.10f}\tTime: {t:15.10f}')
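A minimal invocation of this helper might look like the sketch below; the numeric values are illustrative, and backend, backend_options, step, and lr are assumed to exist at module scope, as the body above expects.

# Hypothetical call; all values are illustrative, not from the source.
run_helper(epochs=10, n=100, batch_size=32, layer_sizes=(64, 128, 10))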
Example #5
def test__convert_arg_init_AbstractTuple(backend_opt):
    b = backend_opt

    model = (9, 5.0)
    m = to_device(model, b)

    assert isinstance(m, tuple)
    assert isinstance(m[0], int)
    assert isinstance(m[1], float)
    assert m[0] == 9
    assert m[1] == 5.0
Example #6
def test__convert_arg_init_AbstractClass(backend_opt):
    b = backend_opt

    @dataclass(frozen=True)
    class A():

        s: 'scalar number'

        def apply(self, input):
            """Apply the layer."""
            return (input, self.s)

    model = A(2.0)
    m = to_device(model, b)

    assert isinstance(m, A)
    assert isinstance(m.s, float)
    assert m.s == 2.0
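Given that the earlier examples recurse into lists and tuples, to_device presumably handles these containers nested with dataclasses as well; hypothetical extra assertions one might append to the test above (the nested structure is illustrative, not from the source):

nested = (A(2.0), [1.0, 2.0])
n = to_device(nested, b)
# The dataclass instance and its scalar field should survive the round trip.
assert isinstance(n[0], A) and n[0].s == 2.0
assert n[1] == [1.0, 2.0]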
Example #7
def generate_data(n,
                  batch_size,
                  input_size,
                  target_size,
                  sequence_size,
                  backend,
                  backend_options,
                  *,
                  seed=91):
    """Generate inputs and targets.

    Generates a sequence of sequence_size input batches, each of shape
    (batch_size, input_size), matched with a single target batch.
    """
    R = RandomState(seed=seed)
    """
    return ([to_device(param(R, batch_size, input_size), backend, backend_options) for i in range(sequence_size)],
             to_device(param(R, batch_size, target_size), backend, backend_options))
             #"""

    return to_device(([param(R, batch_size, input_size) for i in range(sequence_size)], param(R, batch_size, target_size)), \
                        backend, backend_options)
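Example #9 below unpacks the result as inp, target; a call might look like the following sketch (all sizes are illustrative, not from the source):

# Hypothetical call: 10 batches of 32 samples, input size 64, target size 8,
# sequence length 5.
inp, target = generate_data(10, 32, 64, 8, 5, backend, backend_options)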
Example #8
def test_to_canonical():
    def _convert(data, typ):
        return to_canonical(data, to_abstract_test(typ))

    # Leaves

    assert _convert(True, Bool) is True
    assert _convert(False, Bool) is False
    assert _convert(10, i64) == 10
    assert _convert(1.5, f64) == 1.5
    assert _convert([], []) == ()
    with pytest.raises(TypeError):
        _convert([], [f64])
    with pytest.raises(TypeError):
        _convert([1, 2], [])

    # Class -> Tuple conversion

    pt = Point(1, 2)
    pt3 = Point3D(1, 2, 3)
    assert list(_convert(pt, Point(i64, i64))) == [1, 2]
    with pytest.raises(TypeError):
        _convert((1, 2), Point(i64, i64))

    assert list(_convert(
        (pt, pt), (Point(i64, i64), Point(i64, i64)))) == [(1, 2), (1, 2)]

    li = _convert([1], [i64])
    assert (isinstance(li, tuple) and li[0] == 1
            and isinstance(li[1], TaggedValue) and li[1].value == ())

    # Arrays

    fmat = np.ones((5, 8))
    imat = np.ones((5, 8), dtype='int32')

    assert _convert(fmat, af64_of(5, 8)) is fmat
    assert _convert(imat, ai32_of(5, 8)) is imat
    with pytest.raises(TypeError):
        _convert(imat, ai64_of(5, 8))
    with pytest.raises(TypeError):
        _convert(imat, ai32_of(4, 8))

    # Misc errors

    with pytest.raises(TypeError):
        _convert(10, f64)
    with pytest.raises(TypeError):
        _convert(1.5, i64)
    with pytest.raises(TypeError):
        _convert(10, (i64, i64))
    with pytest.raises(TypeError):
        _convert((1, ), (i64, i64))
    with pytest.raises(TypeError):
        _convert((1, 2, 3), (i64, i64))
    with pytest.raises(TypeError):
        _convert((1, 2, 3), [i64])
    with pytest.raises(TypeError):
        _convert(pt3, Point(i64, i64))
    with pytest.raises(TypeError):
        _convert(10, ai64_of())
    with pytest.raises(TypeError):
        _convert(1, Bool)
    with pytest.raises(TypeError):
        _convert(1, D(x=i64))
    with pytest.raises(TypeError):
        _convert({'x': 2.0}, D(x=i64))
    with pytest.raises(TypeError):
        _convert({'x': 2.0, 'y': 1}, D(x=i64))
    with pytest.raises(TypeError):
        _convert({'y': 2.0}, D(x=i64))
    with pytest.raises(TypeError):
        _convert('x', 1.0)
    with pytest.raises(TypeError):
        _convert(1.0, to_abstract_test('x'))
    with pytest.raises(TypeError):
        _convert(1.0, to_abstract_test(HandleInstance(1.0)))

    v = to_device(22, None)
    with pytest.raises(TypeError):
        _convert(v, f64)
Example #9
def run_helper(args, n, batch_size, layer_sizes):
    """Run a model with the specified layer sizes on n random batches.

    The first layer is an LSTM layer, the rest are linear+tanh.

    Arguments:
        args: Parsed command-line arguments; args.iters gives the number of iterations to run.
        n: Number of training batches to generate.
        batch_size: Number of samples per batch.
        layer_sizes: Sizes of the model's layers.
    """
    i = 0
    layers = []
    lstmp, *linp = lstm_parameters(*layer_sizes, batch_size=batch_size)
    layers.append(LSTMLayer(*lstmp))
    for W, b in linp:
        layers.append(Linear(W, b))
        layers.append(Tanh())
    model = Sequential(tuple(layers))

    if args.break_bm:
        print("break_bm")
        return

    model = to_device(model, backend, backend_options)

    if args.break_mod_on_d:
        print("break_mod_on_d")
        return

    lr = getattr(numpy, dtype)(args.lr)
    lr = to_device(lr, backend, backend_options)

    inp, target = generate_data(n, batch_size, layer_sizes[0], layer_sizes[-1],
                                args.timesteps, backend, backend_options)

    if args.break_mod_on_d_and_gen_data:
        print("break_mod_on_d_and_gen_data")
        return

    times = []
    if args.cuda_sync:
        import torch
    for _ in range(args.iters):
        costs = []
        if args.cuda_sync:
            torch.cuda.synchronize()
        t0 = time.time()
        cost, model = step(model, inp, target)
        if args.break_cr1:
            print("break_cr1", time.time() - t0)
            break

        if args.break_cr2 and i == 1:
            print("break_cr2", time.time() - t0)
            break

        if args.print_all_iters:
            print(i, cost.array)

        if args.cuda_sync:
            torch.cuda.synchronize()
        t_diff = time.time() - t0

        if args.break_cr1 or args.break_cr2:
            break

        i += 1
        if args.print_all_iters:
            print(i, t_diff, cost)

        if i > args.warmup:  # skip the first args.warmup iterations as warmup
            times.append(t_diff)

    if not args.break_cr1 and not args.break_cr2:
        print("times", times)
        print("\nMyia LSTM Stats")
        print("Avg Time: ", sum(times) / len(times))
        print("Min Time: ", min(times))
        print("Max Time: ", max(times))

    if args.save_txt:
        filename = ""
        for arg in vars(args):
            filename += str(arg)
            filename += "="
            filename += str(getattr(args, arg))
            filename += "."

        str_out = "Myia LSTM Stats\n" + \
                  "Avg Time: " + str(sum(times)/len(times)) + \
                  "Min Time: " + str(min(times)) + \
                  "Max Time: " + str(max(times)) + \
                  "Times" + str(times)

        f = open(filename + ".txt", "w+")
        f.write(str_out)
        f.close()
Example #10
def convert_args(self, args):
    return tuple(to_device(arg, self.backend) for arg in args)
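This method presumably belongs to a runtime wrapper that carries a backend; a minimal sketch of such a host class (the class name and constructor are assumptions, not from the source):

class BackendWrapper:
    def __init__(self, backend):
        self.backend = backend

    def convert_args(self, args):
        # Move each positional argument onto this wrapper's backend.
        return tuple(to_device(arg, self.backend) for arg in args)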
Example #11
def test__convert_arg_init_AbstractArray(backend_opt):
    b = backend_opt
    m = to_device(MA(2, 3), b)

    assert isinstance(m, ArrayWrapper)
    np.testing.assert_allclose(b.to_numpy(m.array), MA(2, 3))
Example #12
def run_model(args):
    i = 0
    layers = []
    for W, b in mlp_parameters():
        layers.append(Linear(W, b))
        layers.append(Tanh())
    model = Sequential(tuple(layers))
    model = to_device(model, backend, backend_options)

    inp, target = mlp_data()
    inp = to_device(inp, backend, backend_options)
    target = to_device(target, backend, backend_options)

    lr = getattr(numpy, dtype)(args.lr)
    lr = to_device(lr, backend, backend_options)

    if args.break_mod_on_d_and_gen_data:
        print("break_mod_on_d_and_gen_data")
        return

    times = []
    if args.cuda_sync:
        import torch
    for _ in range(args.iters):
        if args.cuda_sync:
            torch.cuda.synchronize()
        t0 = time.time()
        cost, model = step(model, inp, target)

        if args.cuda_sync:
            torch.cuda.synchronize()
        t_diff = time.time() - t0

        if args.break_cr1:
            print("break_cr1", time.time() - t0)
            break

        i += 1
        if args.break_cr2 and i == 2:
            print("break_cr2", time.time() - t0)
            break

        #"""
        if args.print_all_iters:
            print(i, t_diff, cost.array)
            #"""

        if i > args.warmup:  # skip the first args.warmup iterations as warmup
            times.append(t_diff)

    print("times", times)
    print("\nMyia MLP Stats")
    print("Avg Time: ", sum(times) / len(times))
    print("Min Time: ", min(times))
    print("Max Time: ", max(times))

    if args.save_txt:
        filename = ""
        for arg in vars(args):
            filename += str(arg)
            filename += "="
            filename += str(getattr(args, arg))
            filename += "."

        str_out = "Myia MLP Stats\n" + \
                  "Avg Time: " + str(sum(times)/len(times)) + \
                  "Min Time: " + str(min(times)) + \
                  "Max Time: " + str(max(times)) + \
                  "Times" + str(times)

        f = open(filename + ".txt", "w+")
        f.write(str_out)
        f.close()
Example #13

layer_sizes = (4, 24, 2)

mlp = mlp_parameters(*layer_sizes)
model = Sequential(
    (
        Linear(mlp[0][0], mlp[0][1]),
        Tanh(),
        Linear(mlp[1][0], mlp[1][1]),
        Softmax(1),
    )
)

model = to_device(model, backend, backend_options, broaden=False)


use_cuda = torch.cuda.is_available()
FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor


@myia(
    backend=backend,
    backend_options=backend_options,
    specialize_values=["model"],
)
def step_eval(model, data):
    output = model.apply(data)
    return output
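A hypothetical evaluation call, following the to_device pattern of the earlier examples: the input width of 4 matches layer_sizes[0] above, and the batch size of 8 is illustrative.

# Hypothetical usage; FloatTensor resolves to the CUDA or CPU variant above.
data = to_device(FloatTensor(8, 4).uniform_(), backend, backend_options)
output = step_eval(model, data)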