Example #1
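These tests appear to be lifted from a single test module, so the shared imports are missing from each snippet. A plausible preamble, inferred from the names used in the examples (the exact package layout is an assumption):

import numpy as np
import pytest

import autodiff as ad            # assumed: exposes autodiff() and gradient_descent()
from autodiff import admath      # assumed: exposes elementwise math such as exp()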
def test_gradient_descent_MSE2():
    X_data = np.array([1, 2, 3, 4, 5])  # Input x-data
    Y_true = 3 * X_data  # Actual y-values

    # Create initial weights for the data
    w = ad.autodiff('w', [1] * len(Y_true))
    f1 = w * X_data  # Functional form

    # Run MSE-loss gradient descent
    g = ad.gradient_descent(f1,
                            Y_true,
                            loss='MSE',
                            beta=0.001,
                            max_iter=5000,
                            tol=0.05)

    assert g['loss_array'][-1] <= 0.05
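For intuition, here is a minimal plain-NumPy sketch of the MSE-loss gradient-descent loop this test exercises, under the elementwise model f = w * X used above. It illustrates the technique only; it is not the library's actual implementation:

import numpy as np

def mse_gradient_descent(X, y_true, w0, beta=0.001, max_iter=5000, tol=0.05):
    """Plain-NumPy illustration: fit elementwise weights w so that
    w * X approximates y_true under MSE loss."""
    w = np.asarray(w0, dtype=float)
    loss_array = []
    for _ in range(max_iter):
        err = w * X - y_true                        # residuals of the model f = w * X
        loss = np.mean(err ** 2)                    # MSE loss
        loss_array.append(loss)
        if loss <= tol:
            break
        w = w - beta * 2.0 * err * X / len(y_true)  # step along -d(MSE)/dw
    return {'w': w, 'loss_array': loss_array}

X_data = np.array([1, 2, 3, 4, 5])
g = mse_gradient_descent(X_data, 3 * X_data, [1.0] * 5)
assert g['loss_array'][-1] <= 0.05

With the data above (X = [1, ..., 5], y = 3 * X, all-ones initial weights), this loop should drive the final MSE below the 0.05 tolerance well within the 5000-iteration budget.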
Example #2
def test_gradient_descent_weightname():
    x = np.array([[2, 0], [5, 1]])  # Data
    w = ad.autodiff('t', [0.6, 0.4])  # Weights registered under 't' rather than 'w'

    # Set up parameters for gradient descent
    max_iter = 40000
    beta = 0.00001
    f = 3 + w * x / 2.0
    y_act = [3, 4]
    tol = 0.2
    loss = "RMSE"
    with pytest.raises(ValueError):
        # Run gradient descent
        ad.gradient_descent(f,
                            y_act,
                            beta=beta,
                            loss=loss,
                            max_iter=max_iter,
                            tol=tol)
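Compare with Example #4 below: the setup is identical except that the weights here are registered under the name 't' instead of 'w', which, judging by the test name, is what makes gradient_descent raise the expected ValueError.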
Example #3
def test_gradient_descent_MAE():
    x = np.array([[5, -2], [3, -4]])  # Data
    w = ad.autodiff('w', [3, 0.5])  # Weights

    # Set up parameters for gradient descent
    max_iter = 10000
    beta = 0.1
    f = 1 + admath.exp(-1 * w * x)
    y_act = [1.0, 1.05]
    tol = 1E-4
    loss = "MAE"

    # Run gradient descent
    g = ad.gradient_descent(f,
                            y_act,
                            beta=beta,
                            loss=loss,
                            max_iter=max_iter,
                            tol=tol)

    # Assert correct values within tolerance
    assert (np.absolute(g['f'].val[0] - y_act[0]) +
            np.absolute(g['f'].val[1] - y_act[1])) / len(y_act) <= tol
Example #4
def test_gradient_descent_RMSE():
    x = np.array([[2, 0], [5, 1]])  # Data
    w = ad.autodiff('w', [0.6, 0.4])  # Weights

    # Set up parameters for gradient descent
    max_iter = 40000
    beta = 0.00001
    f = 3 + w * x / 2.0
    y_act = [3, 4]
    tol = 0.2
    loss = "RMSE"

    # Run gradient descent
    g = ad.gradient_descent(f,
                            y_act,
                            beta=beta,
                            loss=loss,
                            max_iter=max_iter,
                            tol=tol)

    # Assert correct values within tolerance
    assert np.sqrt(((g['f'].val[0] - y_act[0])**2 +
                    (g['f'].val[1] - y_act[1])**2) / len(y_act)) <= tol
Example #5
def test_gradient_descent_MSE():
    x = np.array([[1, -2, 1], [3, 0, 4]])  # Data
    w = ad.autodiff('w', [3, -1, 0])  # Weights

    # Set up parameters for gradient descent
    max_iter = 5000
    beta = 0.005
    f = w * x
    y_act = [5.5, 9.5]
    tol = 1E-8
    loss = "MSE"

    # Run gradient descent
    g = ad.gradient_descent(f,
                            y_act,
                            beta=beta,
                            loss=loss,
                            max_iter=max_iter,
                            tol=tol)

    # Assert correct values within tolerance
    assert ((g['f'].val[0] - y_act[0])**2 +
            (g['f'].val[1] - y_act[1])**2) / len(y_act) <= tol
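The assertions in these tests spell the losses out by hand. As NumPy one-liners, the three loss functions the examples exercise are (a sketch of the definitions, not the library's code):

import numpy as np

def mse(pred, y):
    return np.mean((np.asarray(pred) - np.asarray(y)) ** 2)       # Examples #1, #5

def rmse(pred, y):
    return np.sqrt(mse(pred, y))                                  # Examples #2, #4

def mae(pred, y):
    return np.mean(np.abs(np.asarray(pred) - np.asarray(y)))      # Example #3

Each final assertion above is just the corresponding expression evaluated at the fitted values g['f'].val against y_act, compared to the test's tolerance.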