Example No. 1
def test_matmul():
    """Tests that mytorch's  matmul multiplication matches torch's"""

    # shape of tensor to test
    shape1 = (3, 4)
    shape2 = (4, 5)
    shape = (3, 5)

    # get mytorch and torch tensor: 'a'
    a = Tensor.randn(*shape1)
    a.requires_grad = True
    a_torch = get_same_torch_tensor(a)

    # get mytorch and torch tensor: 'b'
    b = Tensor.randn(*shape2)
    b.requires_grad = True
    b_torch = get_same_torch_tensor(b)

    # run mytorch and torch forward: 'c = a @ b' (matrix multiplication)
    ctx = ContextManager()
    c = matmul.forward(ctx, a, b)
    c_torch = torch.matmul(a_torch, b_torch)
    # run mytorch and torch matmul backward
    back = matmul.backward(ctx, Tensor.ones(*shape))
    c_torch.sum().backward()

    # check that c matches
    assert check_val_and_grad(c, c_torch)
    # check that dc/da and dc/db respectively match
    assert check_val(back[0], a_torch.grad)
    assert check_val(back[1], b_torch.grad)

    return True
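
These examples rely on a few helpers that are not shown. A minimal sketch of what get_same_torch_tensor is assumed to do (copy the mytorch Tensor's NumPy data into a double-precision torch tensor and mirror its requires_grad flag); the repository's real helper may differ:

import torch

def get_same_torch_tensor(mytorch_tensor):
    # Assumed behavior: wrap the same NumPy data in a float64 torch tensor
    # and mirror requires_grad so both graphs track gradients identically.
    t = torch.tensor(mytorch_tensor.data, dtype=torch.float64)
    t.requires_grad = mytorch_tensor.requires_grad
    return t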
Example No. 2
def test6():
    a = Tensor.randn(2, 3)
    a.requires_grad = True
    a_torch = get_same_torch_tensor(a)

    b = Tensor.randn(2, 3)
    b.requires_grad = True
    b_torch = get_same_torch_tensor(b)

    c = a / b
    c_torch = a_torch / b_torch

    d = a - b
    d_torch = a_torch - b_torch

    e = c + d
    e_torch = c_torch + d_torch

    e.backward()
    e_torch.sum().backward()

    assert check_val_and_grad(a, a_torch)
    assert check_val_and_grad(b, b_torch)
    assert check_val_and_grad(c, c_torch)
    assert check_val_and_grad(d, d_torch)
    assert check_val_and_grad(e, e_torch)
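
check_val, check_grad, and check_val_and_grad are also assumed helpers. A plausible sketch that compares a mytorch Tensor's NumPy data against a torch tensor with np.allclose; the repository's tolerances and error reporting are likely richer:

import numpy as np

def check_val(mytorch_tensor, torch_tensor, eps=1e-10):
    # Compare forward values elementwise within a tolerance (assumed behavior).
    return np.allclose(mytorch_tensor.data, torch_tensor.detach().numpy(), atol=eps)

def check_grad(mytorch_tensor, torch_tensor, eps=1e-10):
    # Compare the gradients accumulated by both frameworks after backward().
    return np.allclose(mytorch_tensor.grad.data, torch_tensor.grad.numpy(), atol=eps)

def check_val_and_grad(mytorch_tensor, torch_tensor, eps=1e-10):
    return (check_val(mytorch_tensor, torch_tensor, eps)
            and check_grad(mytorch_tensor, torch_tensor, eps))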
Example No. 3
def test_mul():
    """Tests that mytorch's elementwise multiplication matches torch's"""

    # shape of tensor to test
    shape = (1, 2, 3)

    # get mytorch and torch tensor: 'a'
    a = Tensor.randn(*shape)
    a.requires_grad = True
    a_torch = get_same_torch_tensor(a)

    # get mytorch and torch tensor: 'b'
    b = Tensor.randn(*shape)
    b.requires_grad = True
    b_torch = get_same_torch_tensor(b)

    # run mytorch and torch forward: 'c = a * b'
    ctx = ContextManager()
    c = Mul.forward(ctx, a, b)
    c_torch = a_torch * b_torch
    # run mytorch and torch multiplication backward
    back = Mul.backward(ctx, Tensor.ones(*shape))
    c_torch.sum().backward()

    # check that c matches
    assert check_val_and_grad(c, c_torch)
    # check that dc/da and dc/db respectively match
    assert check_val(back[0], a_torch.grad)
    assert check_val(back[1], b_torch.grad)

    # ensure * is overridden
    c_using_override = a * b
    assert check_val(c_using_override, c_torch)

    return True
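
Mul.forward/Mul.backward follow the usual Function-with-context autograd pattern: forward saves the inputs, backward turns the incoming gradient into one gradient per input. A self-contained NumPy sketch of the same math (not the repository's actual class, which works on mytorch Tensors and a ContextManager):

import numpy as np

def mul_forward(saved, a, b):
    saved['a'], saved['b'] = a, b        # stand-in for ctx.save_for_backward
    return a * b

def mul_backward(saved, grad_output):
    # d(a*b)/da = b and d(a*b)/db = a, each scaled by the incoming gradient.
    return grad_output * saved['b'], grad_output * saved['a']

saved = {}
a, b = np.random.randn(1, 2, 3), np.random.randn(1, 2, 3)
c = mul_forward(saved, a, b)
# Gradients of c.sum() w.r.t. a and b are simply b and a.
grad_a, grad_b = mul_backward(saved, np.ones_like(c))
assert np.allclose(grad_a, b) and np.allclose(grad_b, a)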
Example No. 4
def testbroadcast():
    """Tests addition WITH broadcasting matches torch's"""

    # shape of tensor to test

    # get mytorch and torch tensor: 'a'
    a = Tensor.randn(3, 4)
    a.requires_grad = True
    a_torch = get_same_torch_tensor(a)

    # get mytorch and torch tensor: 'b'
    b = Tensor.randn(4)
    b.requires_grad = True
    b_torch = get_same_torch_tensor(b)

    # run mytorch and torch forward: 'c = a + b'
    c = a + b
    c_torch = a_torch + b_torch

    # run mytorch and torch addition backward
    c.backward()
    c_torch.sum().backward()

    # check that c matches
    assert check_val_and_grad(c, c_torch)
    # check that dc/da and dc/db respectively match
    assert check_val_and_grad(a, a_torch)
    assert check_val_and_grad(b, b_torch)
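
Here b has shape (4,) while a has shape (3, 4), so the gradient flowing back to b must be reduced from the broadcast shape. A common way to do this (a sketch of the general technique, not necessarily this repository's implementation) is to sum over the broadcast dimensions:

import numpy as np

def unbroadcast(grad, shape):
    # Sum out the leading axes that broadcasting prepended...
    while grad.ndim > len(shape):
        grad = grad.sum(axis=0)
    # ...then sum over any axis that was stretched from size 1.
    for axis, size in enumerate(shape):
        if size == 1:
            grad = grad.sum(axis=axis, keepdims=True)
    return grad

# For c = a + b with a.shape == (3, 4) and b.shape == (4,),
# d(c.sum())/db is a (3, 4) array of ones reduced to shape (4,).
grad_b = unbroadcast(np.ones((3, 4)), (4,))
assert grad_b.shape == (4,) and np.allclose(grad_b, 3.0)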
Example No. 5
def test_slice_backward():
    # shape of tensor to test
    shape = (2, 4, 3)

    # Test 1
    a = Tensor.randn(*shape)  #mytorch tensor
    a.requires_grad = True
    a_torch = get_same_torch_tensor(a)  #pytorch tensor
    b = a[1, 2, 0]
    b_torch = a_torch[1, 2, 0]
    (b**2).sum().backward()
    (b_torch**2).sum().backward()
    assert check_grad(a, a_torch, eps=eps)

    # Test 2
    a = Tensor.randn(*shape)  #mytorch tensor
    a.requires_grad = True
    a_torch = get_same_torch_tensor(a)  #pytorch tensor
    b = a[0, 2, :]
    b_torch = a_torch[0, 2, :]
    (b**2).sum().backward()
    (b_torch**2).sum().backward()
    assert check_grad(a, a_torch, eps=eps)

    # Test 3
    a = Tensor.randn(*shape)  #mytorch tensor
    a.requires_grad = True
    a_torch = get_same_torch_tensor(a)  #pytorch tensor
    b = a[:, 3, :]
    b_torch = a_torch[:, 3, :]
    (b**2).sum().backward()
    (b_torch**2).sum().backward()
    assert check_grad(a, a_torch, eps=eps)

    # Test 4
    a = Tensor.randn(*shape)  #mytorch tensor
    a.requires_grad = True
    a_torch = get_same_torch_tensor(a)  #pytorch tensor
    b = a[:, :, 1]
    b_torch = a_torch[:, :, 1]
    (b**2).sum().backward()
    (b_torch**2).sum().backward()
    assert check_grad(a, a_torch, eps=eps)

    # Test 5
    a = Tensor.randn(*shape)  #mytorch tensor
    a.requires_grad = True
    a_torch = get_same_torch_tensor(a)  #pytorch tensor
    b = a[:, 1:3, :]
    b_torch = a_torch[:, 1:3, :]
    (b**2).sum().backward()
    (b_torch**2).sum().backward()
    assert check_grad(a, a_torch, eps=eps)

    return True
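
The expected slicing backward behavior is that the gradient of the slice is scattered back into a zero tensor of the input's shape, with unselected positions receiving zero gradient. A NumPy sketch of that idea (the helper name is illustrative):

import numpy as np

def slice_backward(grad_output, input_shape, key):
    # Route the gradient back to the positions that were sliced out;
    # everything that was not selected keeps a zero gradient.
    grad_input = np.zeros(input_shape)
    grad_input[key] = grad_output
    return grad_input

# b = a[:, 1:3, :]; d((b**2).sum())/da is 2*a on the selected block, 0 elsewhere.
a = np.random.randn(2, 4, 3)
key = (slice(None), slice(1, 3), slice(None))
grad_a = slice_backward(2 * a[key], a.shape, key)
assert np.allclose(grad_a[key], 2 * a[key]) and np.allclose(grad_a[:, 0, :], 0.0)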
Example No. 6
def test_concat_backward():
    # shape of tensor to test
    tensor_shapes = [[(1, 4, 3), (1, 8, 3), (1, 5, 3)], [(2, 3, 4), (1, 3, 4)],
                     [(6, 7, 8, 9), (6, 7, 8, 1), (6, 7, 8, 2)],
                     [(1, 2, 3), (1, 2, 4), (1, 2, 3), (1, 2, 4)]]

    cat_dims = [1, 0, 3, 2]

    for tensor_shapes_cur, d_cur in zip(tensor_shapes, cat_dims):
        # get mytorch and torch tensor: 'a'
        a = [Tensor.randn(*shape_i) for shape_i in tensor_shapes_cur]
        for i in range(len(a)):
            a[i].requires_grad = True

        a_torch = [get_same_torch_tensor(a_i) for a_i in a]

        c = cat(a, d_cur)
        c_torch = torch.cat(a_torch, dim=d_cur)

        l = (c**2).sum()
        l_torch = (c_torch**2).sum()

        l.backward()
        l_torch.backward()

        for a_i, a_torch_i in zip(a, a_torch):
            assert check_grad(a_i, a_torch_i, eps=eps)

    return True
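
The backward pass of concatenation just splits the incoming gradient back along the concatenation axis, one slice per input. A NumPy sketch of that splitting:

import numpy as np

def cat_backward(grad_output, input_shapes, dim):
    # Each input receives the chunk of the gradient that corresponds to its
    # extent along the concatenation axis.
    sizes = [shape[dim] for shape in input_shapes]
    split_points = np.cumsum(sizes)[:-1]
    return np.split(grad_output, split_points, axis=dim)

grads = cat_backward(np.ones((3, 3, 4)), [(2, 3, 4), (1, 3, 4)], dim=0)
assert grads[0].shape == (2, 3, 4) and grads[1].shape == (1, 3, 4)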
Example No. 7
def test_slice_forward():
    # shape of tensor to test
    shape = (2, 4, 3)

    # get mytorch and torch tensor: 'a'
    a = Tensor.randn(*shape)  #mytorch tensor
    a.requires_grad = True
    a_torch = get_same_torch_tensor(a)  #pytorch tensor

    # Test 1
    b = a[1, 2, 0]
    b_torch = a_torch[1, 2, 0]
    assert check_val(b, b_torch, eps=eps)

    # Test 2
    b = a[0, 2, :]
    b_torch = a_torch[0, 2, :]
    assert check_val(b, b_torch, eps=eps)

    # Test 3
    b = a[:, 3, :]
    b_torch = a_torch[:, 3, :]
    assert check_val(b, b_torch, eps=eps)

    # Test 4
    b = a[:, :, 1]
    b_torch = a_torch[:, :, 1]
    assert check_val(b, b_torch, eps=eps)

    # Test 5
    b = a[:, 1:3, :]
    b_torch = a_torch[:, 1:3, :]
    assert check_val(b, b_torch, eps=eps)

    return True
Example No. 8
def test_time_iterator_forward():

    input_sizes, hidden_sizes, data_lens = get_params()

    for input_size, hidden_size, data_len in zip(input_sizes, hidden_sizes,
                                                 data_lens):

        seq_mytorch = [
            Tensor.randn(data_len[i], input_size) for i in range(len(data_len))
        ]

        seq_torch = [get_same_torch_tensor(i) for i in seq_mytorch]

        mpack = mpack_sequence(seq_mytorch)
        tpack = nn.utils.rnn.pack_sequence(seq_torch, enforce_sorted=False)

        model_mytorch = RNN(input_size, hidden_size)
        model_torch = nn.RNN(input_size,
                             hidden_size,
                             num_layers=1,
                             batch_first=False).double()

        transfer_weights(model_torch, model_mytorch)

        resm, hm = model_mytorch(mpack)
        rest, ht = model_torch(tpack)

        assert check_val(resm.data, rest.data, eps=eps)

        t_idx = list(np.argsort(data_len)[::-1])
        # torch's hidden state has a leading (num_layers * num_directions) dim;
        # index it out and reorder the batch by descending sequence length
        assert check_val(hm, ht[0][t_idx], eps=eps)

    return True
Example No. 9
def test_rnn_unit_backward():

    input_sizes, hidden_sizes, data_lens = get_params()

    for input_size, hidden_size, data_len in zip(input_sizes, hidden_sizes,
                                                 data_lens):

        in_mytorch = Tensor.randn(data_len[0], input_size)
        in_torch = get_same_torch_tensor(in_mytorch)

        in_mytorch.requires_grad = True
        in_torch.requires_grad = True

        model_mytorch = RNNUnit(input_size, hidden_size)
        model_torch = nn.RNNCell(input_size, hidden_size).double()
        transfer_weights_rnn_unit(model_torch, model_mytorch)

        resm = model_mytorch(in_mytorch)
        rest = model_torch(in_torch)

        lm = (resm**2).sum()
        lt = (rest**2).sum()

        lm.backward()
        lt.backward()

        assert compare_rnn_unit_param_grad(model_torch, model_mytorch)
        assert check_grad(resm, rest, eps=eps)

    return True
Example No. 10
def test_unpack_sequence_backward():

    test_shapes = [[(4, 1), (5, 1)], [(4, 3), (10, 3), (2, 3)]]
    for shapes in test_shapes:
        # get mytorch and torch tensor sequences
        seq1 = [Tensor.randn(*shape) for shape in shapes]
        for t in seq1:
            t.requires_grad = True

        seq2 = [get_same_torch_tensor(t) for t in seq1]

        # run mytorch forward: pack, unpack, then re-pack; torch packs directly
        c_temp = pack_sequence(seq1)
        c_temp2 = unpack_sequence(c_temp)
        c = pack_sequence(c_temp2)

        c_torch = torch.nn.utils.rnn.pack_sequence(seq2, enforce_sorted=False)

        l = (c.data**2).sum()
        l_torch = (c_torch.data**2).sum()

        l.backward()
        l_torch.backward()

        for a1, a2 in zip(seq1, seq2):
            assert check_grad(a1, a2, eps=eps)
        #compare_ps(c_torch, c.data, "test_pack_sequence_backward")

    return True
Example No. 11
def test1():
    a = Tensor.randn(1, 2, 3)
    a.requires_grad = True
    a_torch = get_same_torch_tensor(a)

    b = Tensor.randn(1, 2, 3)
    b.requires_grad = True
    b_torch = get_same_torch_tensor(b)

    c = a + b
    c_torch = a_torch + b_torch

    c_torch.sum().backward()
    c.backward()
    assert check_val_and_grad(a, a_torch)
    assert check_val_and_grad(b, b_torch)
    assert check_val_and_grad(c, c_torch)
Example No. 12
def test_dropout_forward():
    np.random.seed(11785)
    
    # run on small model forward only
    x = Tensor.randn(5, 10)
    model = Sequential(Linear(10, 5), ReLU(), Dropout(p=0.6))
    my_output = model(x)

    test_output = load_numpy_array('autograder/hw1_bonus_autograder/outputs/dropout_forward.npy')
    return assertions_all(my_output.data, test_output, "test_dropout_forward", 1e-5, 1e-6)
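
The output here is compared against a stored .npy file, so the exact mask depends on the seeded RNG. The usual inverted-dropout behavior during training, which this test presumably exercises (a sketch only; the repository's masking order and RNG calls may differ):

import numpy as np

def dropout_forward(x, p=0.6, train=True):
    # Inverted dropout: zero each activation with probability p and scale
    # the survivors by 1/(1-p) so the expected activation is unchanged.
    if not train:
        return x
    mask = np.random.binomial(1, 1 - p, size=x.shape).astype(x.dtype)
    return x * mask / (1 - p)

np.random.seed(11785)
out = dropout_forward(np.random.randn(5, 10), p=0.6)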
Example No. 13
def test_sub():
    """Tests that mytorch subtraction matches torch's subtraction"""

    # shape of tensor to test
    shape = (1, 2, 3)

    # get mytorch and torch tensor: 'a'
    a = Tensor.randn(*shape)
    a.requires_grad = True
    a_torch = get_same_torch_tensor(a)

    # get mytorch and torch tensor: 'b'
    b = Tensor.randn(*shape)
    b.requires_grad = True
    b_torch = get_same_torch_tensor(b)

    # run mytorch and torch forward: 'c = a - b'
    ctx = ContextManager()
    c = Sub.forward(ctx, a, b)
    c_torch = a_torch - b_torch

    # run mytorch and torch subtraction backward
    back = Sub.backward(ctx, Tensor.ones(*shape))
    c_torch.sum().backward()

    # check that c matches
    assert check_val_and_grad(c, c_torch)
    # check that dc/da and dc/db respectively match
    assert check_val(back[0], a_torch.grad)
    # print(f'Sub back[1] == {back[1]} and back[0] == {back[0]} and b_torch.grad == {b_torch.grad}')
    assert check_val(back[1], b_torch.grad)

    # ensure - is overridden
    c_using_override = a - b
    assert check_val(c_using_override, c_torch)

    return True
Example No. 14
def test_unpack_sequence_forward():

    test_shapes = [[(4, 1), (5, 1), (2, 1), (2, 1), (3, 1)],
                   [(4, 3), (10, 3), (2, 3)]]

    for shapes in test_shapes:
        # get a list of mytorch tensors
        seq1 = [Tensor.randn(*shape) for shape in shapes]
        # pack then unpack; the round trip should reproduce the original tensors
        c = pack_sequence(seq1)
        seq2 = unpack_sequence(c)

        for s1, s2 in zip(seq1, seq2):
            assert assertions_all(s1.data, s2.data, 'Unpack Forward')

    return True
Example No. 15
def test_unsqueeze():
    # shape of tensor to test
    shape = (2, 2, 3)

    # get mytorch and torch tensor: 'a'
    a = Tensor.randn(*shape)  #mytorch tensor
    a.requires_grad = True
    a_torch = get_same_torch_tensor(a)  #pytorch tensor

    b = Tensor.unsqueeze(a)
    b_torch = torch.Tensor.unsqueeze(a_torch, 0)
    assert check_val(b, b_torch, eps=eps)

    b = Tensor.unsqueeze(a, 2)
    b_torch = torch.Tensor.unsqueeze(a_torch, 2)
    assert check_val(b, b_torch, eps=eps)

    return True
Example No. 16
def test_debdas():
    predicted = Tensor.randn(4, 20)
    predicted.requires_grad = True
    predicted_torch = get_same_torch_tensor(predicted)

    target = Tensor(np.random.randint(20, size=(4, )))
    target.requires_grad = True
    targets = to_one_hot(target, 20)
    targets_torch = get_same_torch_tensor(targets)

    p_std = predicted - Tensor(np.max(predicted.data))
    p_std_torch = predicted_torch - torch.max(predicted_torch)

    p_exp = p_std.exp()
    p_exp_torch = torch.exp(p_std_torch)

    p_softmax = p_exp / p_exp.sumAxis(1)
    p_softmax_torch = p_exp_torch / torch.sum(p_exp_torch, 1, keepdim=True)

    p_log_softmax = p_softmax.log()
    p_log_softmax_torch = torch.log(p_softmax_torch)

    log_lik = targets * p_log_softmax
    log_lik_torch = targets_torch * p_log_softmax_torch

    log_lik_sum = log_lik.sumAxis(None)
    log_lik_sum_torch = torch.sum(log_lik_torch)

    ce = tensor.Tensor(-1) * log_lik_sum / tensor.Tensor(4)
    ce_torch = -1 * log_lik_sum_torch / 4

    ce_torch.sum().backward()
    ce.backward()

    #assert check_val_and_grad(predicted, predicted_torch)
    assert check_val_and_grad(targets, targets_torch)
    assert check_val_and_grad(p_std, p_std_torch)
    assert check_val_and_grad(p_exp, p_exp_torch)
    assert check_val_and_grad(p_softmax, p_softmax_torch)
    assert check_val_and_grad(p_log_softmax, p_log_softmax_torch)
    assert check_val_and_grad(log_lik, log_lik_torch)
    assert check_val_and_grad(log_lik_sum, log_lik_sum_torch)
    assert check_val_and_grad(ce, ce_torch)
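
The test above assembles cross-entropy out of primitives: shift the logits by their max, exponentiate, normalize to a softmax, take the log, multiply by the one-hot targets, and average the negative sum over the batch. The same quantity in plain NumPy, using a numerically stable log-softmax:

import numpy as np

def cross_entropy(logits, labels):
    # log-softmax computed stably: x - max - log(sum(exp(x - max)))
    shifted = logits - logits.max(axis=1, keepdims=True)
    log_softmax = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
    one_hot = np.eye(logits.shape[1])[labels]
    # mean negative log-likelihood over the batch
    return -(one_hot * log_softmax).sum() / logits.shape[0]

loss = cross_entropy(np.random.randn(4, 20), np.random.randint(20, size=4))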
Example No. 17
def test_rnn_unit_forward():

    input_sizes, hidden_sizes, data_lens = get_params()

    for input_size, hidden_size, data_len in zip(input_sizes, hidden_sizes,
                                                 data_lens):

        in_mytorch = Tensor.randn(data_len[0], input_size)
        in_torch = get_same_torch_tensor(in_mytorch)

        model_mytorch = RNNUnit(input_size, hidden_size)
        model_torch = nn.RNNCell(input_size, hidden_size).double()
        transfer_weights_rnn_unit(model_torch, model_mytorch)

        resm = model_mytorch(in_mytorch)
        rest = model_torch(in_torch)

        assert check_val(resm, rest, eps=eps)

    return True
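
nn.RNNCell (and presumably the RNNUnit it is checked against) computes h' = tanh(x W_ih^T + b_ih + h W_hh^T + b_hh); since no hidden state is passed in, the test implies a zero initial state. A NumPy sketch of that update:

import numpy as np

def rnn_unit_forward(x, h, W_ih, b_ih, W_hh, b_hh):
    # Standard tanh RNN cell: h' = tanh(x @ W_ih.T + b_ih + h @ W_hh.T + b_hh)
    return np.tanh(x @ W_ih.T + b_ih + h @ W_hh.T + b_hh)

batch, input_size, hidden_size = 3, 5, 4
x = np.random.randn(batch, input_size)
h0 = np.zeros((batch, hidden_size))                 # assumed zero initial state
W_ih = np.random.randn(hidden_size, input_size)
W_hh = np.random.randn(hidden_size, hidden_size)
h1 = rnn_unit_forward(x, h0, W_ih, np.zeros(hidden_size), W_hh, np.zeros(hidden_size))
assert h1.shape == (batch, hidden_size)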
Example No. 18
def test_time_iterator_backward():

    input_sizes, hidden_sizes, data_lens = get_params()

    for input_size, hidden_size, data_len in zip(input_sizes, hidden_sizes,
                                                 data_lens):

        seq_mytorch = [
            Tensor.randn(data_len[i], input_size) for i in range(len(data_len))
        ]

        seq_torch = [get_same_torch_tensor(i) for i in seq_mytorch]

        mpack = mpack_sequence(seq_mytorch)
        tpack = nn.utils.rnn.pack_sequence(seq_torch, enforce_sorted=False)

        model_mytorch = RNN(input_size, hidden_size)
        model_torch = nn.RNN(input_size,
                             hidden_size,
                             num_layers=1,
                             batch_first=False).double()

        transfer_weights(model_torch, model_mytorch)

        resm, hm = model_mytorch(mpack)
        rest, ht = model_torch(tpack)

        lm = (resm.data**2).sum()
        lt = (rest.data**2).sum()

        lm.backward()
        lt.backward()

        assert compare_rnn_param_grad(model_torch, model_mytorch)

        for ma, pa in zip(seq_mytorch, seq_torch):
            assert check_grad(ma, pa)

    return True
Example No. 19
def test_pack_sequence_forward():

    test_shapes = [[(4, 1), (5, 1)], [(4, 3), (10, 3), (2, 3)]]

    for shapes in test_shapes:
        # get mytorch and torch tensor sequences
        seq1 = [Tensor.randn(*shape) for shape in shapes]
        seq2 = [get_same_torch_tensor(t) for t in seq1]

        # run mytorch and torch forward: pack_sequence
        c = pack_sequence(seq1)
        c_torch = torch.nn.utils.rnn.pack_sequence(seq2, enforce_sorted=False)
        assert check_val(c.data, c_torch.data)
        #compare_ps(c_torch, c.data, "test_pack_sequence_forward")
        assert compare_ndarrays(c.batch_sizes,
                                c_torch.batch_sizes,
                                test_name='Testing batch_sizes')
        assert compare_ndarrays(c.sorted_indices,
                                c_torch.sorted_indices,
                                test_name='Testing sorted_indices')

    return True
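
For reference, torch's pack_sequence with enforce_sorted=False sorts the sequences by descending length, interleaves them time step by time step, and records how many sequences are still active at each step (batch_sizes) along with the sort order (sorted_indices):

import torch
import torch.nn as nn

seqs = [torch.randn(4, 3), torch.randn(10, 3), torch.randn(2, 3)]
packed = nn.utils.rnn.pack_sequence(seqs, enforce_sorted=False)
# Sorted lengths are 10, 4, 2: all 3 sequences are active for 2 steps,
# 2 for the next 2 steps, then only the longest for the remaining 6 steps.
print(packed.batch_sizes)     # tensor([3, 3, 2, 2, 1, 1, 1, 1, 1, 1])
print(packed.sorted_indices)  # tensor([1, 0, 2])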