Example #1
import numpy as np
import torch

import autograd_engine
import nn
# compare_np_torch and add_backward are project test helpers used throughout
# these examples; import them from wherever the project defines them.


def test_sigmoid_backward():
    # Test input
    np.random.seed(0)
    autograd = autograd_engine.Autograd()

    l1 = nn.Linear(5, 5, autograd)
    x = np.random.random((1, 5))
    l1_out = l1(x)
    test_act = nn.Sigmoid(autograd)
    a1_out = test_act(l1_out)
    autograd.backward(1)  # seed the backward pass with gradient 1, mirroring .sum().backward() below

    # Torch input
    torch_l1 = torch.nn.Linear(5, 5)
    torch_l1.weight = torch.nn.Parameter(torch.DoubleTensor(l1.W))
    torch_l1.bias = torch.nn.Parameter(torch.DoubleTensor(l1.b.squeeze()))
    torch_x = torch.DoubleTensor(x)
    torch_l1_out = torch_l1(torch_x)
    torch_act = torch.nn.Sigmoid()
    torch_a1_out = torch_act(torch_l1_out)
    torch_a1_out.sum().backward()

    compare_np_torch(l1.dW, torch_l1.weight.grad)
    compare_np_torch(l1.db.squeeze(), torch_l1.bias.grad)

    return True
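Every example on this page ends in a call to compare_np_torch. The helper itself is not shown here; a minimal sketch of what it might look like, assuming it only checks the NumPy result against the detached torch tensor within a floating-point tolerance (the name and call signature come from the tests, the body is an assumption):

def compare_np_torch(np_value, torch_value, rtol=1e-10, atol=1e-10):
    # Detach in case the torch tensor still tracks gradients, then compare.
    torch_as_np = torch_value.detach().numpy()
    assert np_value.shape == torch_as_np.shape, (
        f"shape mismatch: {np_value.shape} vs {torch_as_np.shape}")
    assert np.allclose(np_value, torch_as_np, rtol=rtol, atol=atol), (
        "values differ beyond tolerance")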
Example #2
def test_softmaxXentropy_forward():
    # Test input
    np.random.seed(0)
    autograd = autograd_engine.Autograd()

    l1 = nn.Linear(5, 5, autograd)
    x = np.random.random((1, 5))
    y = np.array([[0., 0., 1., 0., 0.]])
    l1_out = l1(x)

    test_loss = nn.SoftmaxCrossEntropy(autograd)
    a1_out = test_loss(y, l1_out)

    # Torch input
    torch_l1 = torch.nn.Linear(5, 5)
    torch_l1.weight = torch.nn.Parameter(torch.DoubleTensor(l1.W))
    torch_l1.bias = torch.nn.Parameter(torch.DoubleTensor(l1.b.squeeze()))
    torch_x = torch.DoubleTensor(x)
    torch_y = torch.LongTensor(np.array([2]))
    torch_l1_out = torch_l1(torch_x)

    torch_loss = torch.nn.CrossEntropyLoss()
    torch_a1_out = torch_loss(torch_l1_out, torch_y).reshape(1, )
    torch_a1_out.backward()

    compare_np_torch(a1_out, torch_a1_out)

    return True
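The loss value this test compares can be reproduced directly in NumPy. A short worked sketch of softmax cross-entropy for a one-hot label, in the numerically stable log-sum-exp form, for illustration only (not the project's SoftmaxCrossEntropy implementation):

def softmax_xent_forward(y_onehot, logits):
    # Stable log-softmax: shift each row by its max before exponentiating.
    shifted = logits - logits.max(axis=1, keepdims=True)
    log_softmax = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
    # Cross-entropy keeps only the log-probability of the true class.
    return -(y_onehot * log_softmax).sum(axis=1)  # shape (batch,)

With y = [[0, 0, 1, 0, 0]] this reduces to -log_softmax(logits)[0, 2], which is exactly what torch.nn.CrossEntropyLoss computes for the label tensor [2] at batch size 1.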
Example #3
def test_identity_forward():
    # Test input
    np.random.seed(0)
    autograd = autograd_engine.Autograd()

    l1 = nn.Linear(5, 5, autograd)
    x = np.random.random((1, 5))  # batch size 1 only, since broadcasting is not implemented yet
    l1_out = l1(x)
    test_act = nn.Identity(autograd)
    a1_out = test_act(l1_out)

    # Torch input
    torch_l1 = torch.nn.Linear(5, 5)
    torch_l1.weight = torch.nn.Parameter(torch.DoubleTensor(l1.W))  # l1.W already uses torch's (out, in) layout, so no transpose is needed
    torch_l1.bias = torch.nn.Parameter(torch.DoubleTensor(l1.b.squeeze()))
    torch_x = torch.DoubleTensor(x)
    torch_l1_out = torch_l1(torch_x)
    torch_act = torch.nn.Identity()
    torch_a1_out = torch_act(torch_l1_out)

    compare_np_torch(a1_out, torch_a1_out)

    return True
Example #4
def test_linear_skip_backward():
    np.random.seed(0)
    autograd = autograd_engine.Autograd()

    autograd.zero_grad()
    l1 = nn.Linear(5, 5, autograd)
    x = np.random.random((1, 5))
    l1_out = l1(x)
    output = l1_out + x
    autograd.add_operation(inputs=[l1_out, x], output=output,
                           gradients_to_update=[None, None],
                           backward_operation=add_backward)
    autograd.backward(1)

    torch_l1 = torch.nn.Linear(5, 5)
    torch_l1.weight = torch.nn.Parameter(torch.DoubleTensor(l1.W))
    torch_l1.bias = torch.nn.Parameter(torch.DoubleTensor(l1.b.squeeze()))

    torch_x = torch.DoubleTensor(x)
    torch_x.requires_grad = True

    torch_l1_out = torch_l1(torch_x)
    torch_output = torch_l1_out + torch_x
    torch_output.sum().backward()

    compare_np_torch(l1_out, torch_l1_out)
    compare_np_torch(l1.dW, torch_l1.weight.grad)
    compare_np_torch(l1.db.squeeze(), torch_l1.bias.grad)
    compare_np_torch(autograd.memory_buffer.get_param(x), torch_x.grad)  # skip connections work
    return True
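add_backward is referenced above but not defined on this page. Assuming the engine invokes each backward_operation with the upstream gradient followed by the forward inputs, and expects one gradient per input, a plausible sketch is:

def add_backward(grad_output, a, b):
    # For output = a + b, d(output)/da = d(output)/db = 1, so the upstream
    # gradient flows through to both inputs unchanged.
    return grad_output, grad_output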
Example #5
def test_linear_skip_forward():
    np.random.seed(0)
    autograd = autograd_engine.Autograd()

    autograd.zero_grad()
    l1 = nn.Linear(5, 5, autograd)
    x = np.random.random((1, 5))
    l1_out = l1(x)
    output = l1_out + x
    autograd.add_operation(inputs=[l1_out, x], output=output,
                           gradients_to_update=[None, None],
                           backward_operation=add_backward)

    torch_l1 = torch.nn.Linear(5, 5)
    torch_l1.weight = torch.nn.Parameter(torch.DoubleTensor(l1.W))
    torch_l1.bias = torch.nn.Parameter(torch.DoubleTensor(l1.b.squeeze()))

    torch_x = torch.DoubleTensor(x)
    torch_x.requires_grad = True

    torch_l1_out = torch_l1(torch_x)
    torch_output = torch_l1_out + torch_x

    compare_np_torch(output, torch_output)

    return True
Example #6
def test_linear_layer_forward():
    np.random.seed(0)
    x = np.random.random((1, 5))
    
    autograd = autograd_engine.Autograd()
    l1 = nn.Linear(5, 5, autograd)
    l1_out = l1(x)
    
    torch_l1 = torch.nn.Linear(5, 5)
    torch_l1.weight = torch.nn.Parameter(torch.DoubleTensor(l1.W))
    torch_l1.bias = torch.nn.Parameter(torch.DoubleTensor(l1.b.squeeze()))
    torch_x = torch.DoubleTensor(x)
    torch_l1_out = torch_l1(torch_x)

    compare_np_torch(l1_out, torch_l1_out)
    return True
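For context on what these tests exercise: a minimal sketch of a Linear layer that stores its weight in the same (out_features, in_features) layout as torch.nn.Linear and registers itself with the engine. The attribute names (W, b, dW, db) and the add_operation call mirror the tests and Example #4; linear_backward and the exact engine conventions are assumptions:

class Linear:
    def __init__(self, in_features, out_features, autograd):
        self.autograd = autograd
        self.W = np.random.random((out_features, in_features))
        self.b = np.random.random((out_features, 1))
        self.dW = np.zeros_like(self.W)
        self.db = np.zeros_like(self.b)

    def __call__(self, x):
        y = x @ self.W.T + self.b.T
        # Register the op so backward() can fill dW/db and reach x.
        self.autograd.add_operation(inputs=[x, self.W, self.b], output=y,
                                    gradients_to_update=[None, self.dW, self.db],
                                    backward_operation=linear_backward)
        return y


def linear_backward(grad_output, x, W, b):
    # For y = x W^T + b^T with batch size 1:
    #   dL/dx = grad_output @ W, dL/dW = grad_output^T @ x, dL/db = grad_output^T
    return grad_output @ W, grad_output.T @ x, grad_output.T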
Example #7
def test_tanh_forward():
    # Test input
    np.random.seed(0)
    autograd = autograd_engine.Autograd()

    l1 = nn.Linear(5, 5, autograd)
    x = np.random.random((1, 5))
    l1_out = l1(x)
    test_act = nn.Tanh(autograd)
    a1_out = test_act(l1_out)

    # Torch input
    torch_l1 = torch.nn.Linear(5, 5)
    torch_l1.weight = torch.nn.Parameter(torch.DoubleTensor(l1.W))
    torch_l1.bias = torch.nn.Parameter(torch.DoubleTensor(l1.b.squeeze()))
    torch_x = torch.DoubleTensor(x)
    torch_l1_out = torch_l1(torch_x)
    torch_act = torch.nn.Tanh()
    torch_a1_out = torch_act(torch_l1_out)

    compare_np_torch(a1_out, torch_a1_out)
    return True
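Activations follow the same registration pattern as the Linear sketch above. A minimal Tanh module under the same assumed engine conventions, illustrative only:

class Tanh:
    def __init__(self, autograd):
        self.autograd = autograd

    def __call__(self, x):
        y = np.tanh(x)
        self.autograd.add_operation(inputs=[x], output=y,
                                    gradients_to_update=[None],
                                    backward_operation=tanh_backward)
        return y


def tanh_backward(grad_output, x):
    # d tanh(x)/dx = 1 - tanh(x)^2, applied elementwise.
    return grad_output * (1.0 - np.tanh(x) ** 2)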