# Example 1
0
def test_mse_tensor():
    """MSE on raw tensors: targets one unit away give loss 1; identical give 0."""
    cases = (
        ([11, 11], [10, 10], 1),  # ((11-10)^2 + (11-10)^2) / 2 = 1
        ([10, 10], [10, 10], 0),  # perfect prediction -> zero loss
    )
    for target_vals, pred_vals, expected in cases:
        target = torch.Tensor(target_vals)
        prediction = torch.Tensor(pred_vals)
        assert mean_squared_error(target, prediction) == expected
# Example 2
0
def test_adam():
    """One Adam step on a random least-squares problem must reduce the loss.

    Fits o = X @ w against y; after a single `opt.step()` the recomputed
    loss must be strictly smaller than the pre-step loss.
    """
    # Pin the RNG: without a seed this test is nondeterministic and can
    # flake on an unlucky draw where one step does not decrease the loss.
    torch.manual_seed(0)
    X = Variable(torch.rand(5, 3))
    y = Variable(torch.rand(5))
    w = Parameter(torch.rand(3))
    opt = Adam(params=[w])

    o = X @ w
    loss = mean_squared_error(y, o)
    loss.backward()
    opt.step()

    # Loss recomputed with the updated weights must have dropped.
    assert loss.data[0] > mean_squared_error(y, X @ w).data[0]
# Example 3
0
def test_sgd_momentum():
    """One SGD-with-momentum step on random least squares must reduce the loss.

    Fits o = X @ w against y; after a single `opt.step()` with lr=0.1 and
    momentum=0.99 the recomputed loss must be strictly smaller.
    """
    # Pin the RNG: with momentum=0.99 and lr=0.1 an unlucky random draw can
    # overshoot on the first step, making an unseeded test flaky.
    torch.manual_seed(0)
    X = Variable(torch.rand(5, 3))
    y = Variable(torch.rand(5))
    w = Parameter(torch.rand(3))
    opt = SGD(lr=0.1, momentum=0.99, params=[w])

    o = X @ w
    loss = mean_squared_error(y, o)
    loss.backward()
    opt.step()

    # Loss recomputed with the updated weights must have dropped.
    assert loss.data[0] > mean_squared_error(y, X @ w).data[0]
# Example 4
0
def test_sgd_learning_rate():
    """One plain SGD step (lr=0.1, no momentum) must reduce the loss.

    Uses a plain `Variable(..., requires_grad=True)` rather than `Parameter`
    to check the optimizer accepts either.
    """
    # Pin the RNG: without a seed this test is nondeterministic and can
    # flake on an unlucky draw where one step does not decrease the loss.
    torch.manual_seed(0)
    X = Variable(torch.rand(5, 3))
    y = Variable(torch.rand(5))
    w = Variable(torch.rand(3), requires_grad=True)
    opt = SGD(lr=0.1, params=[w])

    o = X @ w
    loss = mean_squared_error(y, o)
    loss.backward()
    opt.step()

    # NOTE(review): sibling tests compare `.data[0]`; this one compares the
    # wrapper objects directly — kept as-is to preserve behavior.
    assert loss > mean_squared_error(y, X @ w)
# Example 5
0
def test_mse_variable():
    """mean_squared_error on Variables returns a Variable holding the loss."""
    target = Variable(torch.Tensor([11, 11]))
    prediction = Variable(torch.Tensor([10, 10]))

    result = mean_squared_error(target, prediction)

    # Exact-type check on purpose: the loss must stay wrapped as a Variable.
    assert type(result) is Variable
    # ((11-10)^2 + (11-10)^2) / 2 = 1
    assert result == 1