Example #1
def test_eq_varnames():
    x = ad.autodiff('x', [10, 20])
    y = ad.autodiff('y', [10, 20])
    z = ad.autodiff('z', [10, 20])
    f1 = x * y
    f2 = x * z
    assert f1 != f2
Example #2
def test_forwardprop2():
    x = ad.autodiff('x', 3)
    y = ad.autodiff('y', 1)
    z = ad.autodiff('z', 2)
    f1 = 4 / x
    f2 = f1.forwardprop()

    f3 = 2**y
    f4 = f3.forwardprop()

    f5 = y - z
    f6 = f5.forwardprop()

    f7 = z**x
    f8 = f7.forwardprop()

    f9 = -x
    f10 = f9.forwardprop()

    assert f1 == f2
    assert f3 == f4
    assert f5 == f6
    assert f7 == f8
    assert f9 == f10
Example #3
def test_log_result_base():
    x = ad.autodiff('x', 16)
    y = ad.autodiff('y', 2)
    f1 = admath.log(x * y, 2)
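    # log2(16 * 2) = log2(32) = 5; d/dx log2(x*y) = 1/(x ln 2), d/dy = 1/(y ln 2)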
    assert f1.val == 5.0 and f1.der == {
        'x': 1 / (16 * np.log(2)),
        'y': 1 / (2 * np.log(2))
    }
Example #4
def test_forwardprop():
    x = ad.autodiff('x', 3)
    y = ad.autodiff('y', 1)
    f1 = 4 * x * y
    f2 = f1.forwardprop()
    assert f2 == f1
Example #5
def test_ad_der_vec():
    t = ad.autodiff('t', [1, 2, 3])
    assert all(t.der['t'] == [1, 1, 1])

    z = ad.autodiff('z', [1, 2, 3], [1, 1, 1])
    assert all(z.der['z'] == [1, 1, 1])

    e = ad.autodiff('e', [1, 2, 3], np.array([1, 1, 1]))
    assert all(e.der['e'] == [1, 1, 1])
Example #6
def test_truediv_result_ad():
    x = ad.autodiff('x', 10)
    y = ad.autodiff('y', 2)
    f1 = x / y
    f2 = 2 * x / x
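    # quotient rule: d/dx (x/y) = 1/y, d/dy (x/y) = -x/y**2; f2 reduces to the constant 2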
    assert f1.val == 5
    assert f1.der['x'] == 1 / 2
    assert f1.der['y'] == -10 / 4
    assert f2.val == 2
    assert f2.der['x'] == 0
Example #7
def test_mul_result_ad():
    x = ad.autodiff('x', 10)
    y = ad.autodiff('y', 2)
    f1 = x * y
    f2 = x * x * x
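    # product rule: d/dx (x*y) = y, d/dy (x*y) = x; power rule: d/dx x**3 = 3*x**2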
    assert f1.val == 20
    assert f1.der['x'] == 2
    assert f1.der['y'] == 10
    assert f2.val == 1000
    assert f2.der['x'] == 300
Example #8
def test_backprop_hyperbolic():
    x = ad.autodiff('x', 1)
    y = ad.autodiff('y', 2)
    z = ad.autodiff('z', 3)
    f1 = admath.sinh(x) * admath.cosh(y) * admath.tanh(z)
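    # partials: d/dx = cosh(x)*cosh(y)*tanh(z); d/dy = sinh(x)*sinh(y)*tanh(z)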
    assert pytest.approx(
        f1.backprop(y_true=2)[0]['x'][0]) == f1.back_der * np.tanh(
            z.val) * np.cosh(y.val) * np.cosh(x.val)
    assert pytest.approx(
        f1.backprop(y_true=2)[0]['y'][0]) == f1.back_der * np.tanh(
            z.val) * np.sinh(x.val) * np.sinh(y.val)
Example #9
def test_mul_result_vec():
    x = ad.autodiff('x', [1, 2, 3])
    y = ad.autodiff('y', [3, 4, 5])
    f1 = x * y
    f2 = [1, 2, 3] * y
    dt = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
    z = ad.autodiff('z', [1, 2, 3])
    f3 = z * dt
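    # multiplying by the DataFrame acts as a matrix-vector product:
    # dt @ [1, 2, 3] = [1+4+9, 4+10+18] = [14, 32]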

    assert all(f1.val == [3, 8, 15])
    assert all(f1.der['x'] == [3, 4, 5])
    assert all(f2.val == [3, 8, 15])
    assert all(f2.der['y'] == [1, 2, 3])
    assert all(f3.val == [14, 32])
    assert all(f3.der['z'][0] == [1, 2, 3])
    assert all(f3.der['z'][1] == [4, 5, 6])
Example #10
def test_sin_dotproduct():
    x = np.array([[1, -2, 1], [3, 0, 4]])  #Data
    w = ad.autodiff('w', [3, -1, 0])  #Weights

    # Apply sin elementwise to the dot products of w with the rows of x
    f1 = admath.sin(w * x)
    assert f1 == f1.forwardprop()
Example #11
def test_truediv_result_const():
    x = ad.autodiff('x', 10)
    y = ad.autodiff('y', 2)
    f1 = x / y
    f2 = x / 3
    assert f1.val == 5
    assert f1.der['x'] == 1 / 2
    assert f2.val == 10 / 3
    assert f2.der['x'] == 1 / 3

    t = ad.autodiff('t', [1, 2, 3])
    arr2 = [1, 2, 3]
    f1 = t / arr2
    f2 = arr2 / x
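    # elementwise: d/dt (t / arr2) = 1 / arr2; d/dx (arr2 / x) = -arr2 / x**2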
    assert all(f1.val == [1, 1, 1])
    assert all(f1.der['t'] == [1, 0.5, 1 / 3])
    assert all(f2.val == [1 / 10, 2 / 10, 3 / 10])
    assert all(f2.der['x'] == [-1 / 100, -2 / 100, -3 / 100])
Example #12
def test_pow_result_adandconst():
    ad1 = ad.autodiff(name="x", val=2, der=1)
    ad2 = ad.autodiff(name="y", val=3, der=1)
    ad3 = ad1**(ad2**ad1)
    ad4 = (ad1**1.5)**ad2
    ad5 = (1.5**ad1)**ad2
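    # hand-checked: ad3 = x**(y**x) -> 2**9 = 512;
    # d/dx = ad3 * y**x * (ln(y)*ln(x) + 1/x), d/dy = ad3 * x * y**(x-1) * ln(x)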

    assert ad3.val == 512
    assert abs(ad3.der["x"] - 5812.9920480098718094) < 1E-10
    assert abs(ad3.der["y"] - 2129.3481386801519905) < 1E-10

    assert abs(ad4.val - 22.627416997969520780) < 1E-10
    assert abs(ad4.der["x"] - 50.911688245431421756) < 1E-10
    assert abs(ad4.der["y"] - 23.526195443245132601) < 1E-10

    assert abs(ad5.val - 11.390625) < 1E-10
    assert abs(ad5.der["x"] - 13.855502991133679740) < 1E-10
    assert abs(ad5.der["y"] - 9.2370019940891198269) < 1E-10
Example #13
def test_logistic_types():
    x = ad.autodiff('x', 10)
    with pytest.raises(AttributeError):
        admath.logistic(1, k="w")
    with pytest.raises(TypeError):
        admath.logistic(x, A="wow", k=0, x0=1)
    with pytest.raises(TypeError):
        admath.logistic(x, A=3.0, k=None, x0=-1.0)
    with pytest.raises(TypeError):
        admath.logistic(x, A=0.0, k=0.0, x0="3")
Example #14
def test_add_result_adandconst():
    ad1 = ad.autodiff(name="x", val=20.5, der=1)
    ad2 = ad.autodiff(name="y", val=3, der=1)

    ad3 = ad1 + ad2 + ad1
    ad4 = ad1 + 5.5 + ad2
    ad5 = 5.5 + ad1 + ad2

    assert ad3.val == 44
    assert ad3.der["x"] == 2
    assert ad3.der["y"] == 1

    assert ad4.val == 29
    assert ad4.der["x"] == 1
    assert ad4.der["y"] == 1

    assert ad5.val == 29
    assert ad5.der["x"] == 1
    assert ad5.der["y"] == 1
Example #15
def test_sub_result_adandconst():
    ad1 = ad.autodiff(name="x", val=2.5, der=1)
    ad2 = ad.autodiff(name="y", val=3, der=1)

    ad3 = ad1 - ad2 - ad1
    ad4 = ad1 - 5.5 - ad2
    ad5 = 5.5 - ad1 - ad2

    assert ad3.val == -3
    assert ad3.der["x"] == 0
    assert ad3.der["y"] == -1

    assert ad4.val == -6
    assert ad4.der["x"] == 1
    assert ad4.der["y"] == -1

    assert ad5.val == 0
    assert ad5.der["x"] == -1
    assert ad5.der["y"] == -1
Example #16
def test_jacobian():
    x = ad.autodiff('x', 10)
    y = ad.autodiff('y', 2)
    f1 = x * y
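    # Jacobian of f1 = x*y at (x, y) = (10, 2) is [y, x] = [2, 10]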
    assert f1.jacobian()["order"] == ['x', 'y']
    assert np.sum(f1.jacobian()["jacobian"] == np.array([2, 10])) == 2
    assert f1.jacobian(order=['y', 'x'])["order"] == ['y', 'x']
    assert np.sum(
        f1.jacobian(order=['y', 'x'])["jacobian"] == np.array([10, 2])) == 2

    a = ad.autodiff('a', [1, 2, 3])
    b = ad.autodiff('b', [-1, -2, -3])
    c = ad.autodiff('c', [-2, 5, 10])
    f2 = a * b - c
    assert f2.jacobian(order=['a', 'b'])["order"] == ['a', 'b']
    assert np.sum(
        f2.jacobian(order=['a', 'b'])["jacobian"] == np.array(
            [[-1, -2, -3], [1, 2, 3]])) == 6

    with pytest.raises(KeyError):
        f2.jacobian(order=['a', 'd'])
Example #17
def test_logistic_result_dot():
    x = ad.autodiff('x', 10)

    # the expected values follow logistic(v, A, k, x0) = A / (1.0 + np.exp(-k * (v - x0))),
    # with defaults A=1, k=1, x0=0
    inner = 1.0 / (1.0 + np.exp(-10))
    assert admath.logistic(admath.logistic(x)).val == 1.0 / (1.0 + np.exp(-inner))

    inner2 = -1.0 / (1.0 + np.exp(-3.5 * (10 - -2)))
    expected = 2.0 / (1.0 + np.exp(-1 * -1 * (inner2 - 5)))
    assert admath.logistic(
        admath.logistic(x, A=-1, k=3.5, x0=-2), A=2, k=-1, x0=5).val == expected
Example #18
def test_gradient_descent_MSE2():
    X_data = np.array([1, 2, 3, 4, 5])  # Input x-data
    Y_true = 3 * X_data  # Actual y-values

    # Create initial weights for the data
    w = ad.autodiff('w', [1] * len(Y_true))
    f1 = w * X_data  # Functional form

    # Run MSE-loss gradient descent
    g = ad.gradient_descent(f1,
                            Y_true,
                            loss='MSE',
                            beta=0.001,
                            max_iter=5000,
                            tol=0.05)

    assert g['loss_array'][-1] <= 0.05
Example #19
def test_gradient_descent_weightname():
    x = np.array([[2, 0], [5, 1]])  #Data
    w = ad.autodiff('t', [0.6, 0.4])  #Weights
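    # the weights are deliberately named 't' rather than 'w', which
    # gradient_descent is expected to reject with a ValueError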

    # Set up parameters for gradient descent
    max_iter = 40000
    beta = 0.00001
    f = 3 + w * x / 2.0
    y_act = [3, 4]
    tol = 0.2
    loss = "RMSE"
    with pytest.raises(ValueError):
        # Run gradient descent
        g = ad.gradient_descent(f,
                                y_act,
                                beta=beta,
                                loss=loss,
                                max_iter=max_iter,
                                tol=tol)
Example #20
def test_gradient_descent_MSE():
    x = np.array([[1, -2, 1], [3, 0, 4]])  #Data
    w = ad.autodiff('w', [3, -1, 0])  #Weights

    # Set up parameters for gradient descent
    max_iter = 5000
    beta = 0.005
    f = w * x
    y_act = [5.5, 9.5]
    tol = 1E-8
    loss = "MSE"

    # Run gradient descent
    g = ad.gradient_descent(f,
                            y_act,
                            beta=beta,
                            loss=loss,
                            max_iter=max_iter,
                            tol=tol)

    # Assert correct values within tolerance
    assert ((g['f'].val[0] - y_act[0])**2 +
            (g['f'].val[1] - y_act[1])**2) / len(y_act) <= tol
Example #21
def test_gradient_descent_MAE():
    x = np.array([[5, -2], [3, -4]])  #Data
    w = ad.autodiff('w', [3, 0.5])  #Weights

    # Set up parameters for gradient descent
    max_iter = 10000
    beta = 0.1
    f = 1 + admath.exp(-1 * w * x)
    y_act = [1.0, 1.05]
    tol = 1E-4
    loss = "MAE"

    # Run gradient descent
    g = ad.gradient_descent(f,
                            y_act,
                            beta=beta,
                            loss=loss,
                            max_iter=max_iter,
                            tol=tol)

    # Assert correct values within tolerance
    assert (np.absolute(g['f'].val[0] - y_act[0]) +
            np.absolute(g['f'].val[1] - y_act[1])) / len(y_act) <= tol
Example #22
def test_gradient_descent_RMSE():
    x = np.array([[2, 0], [5, 1]])  #Data
    w = ad.autodiff('w', [0.6, 0.4])  #Weights

    # Set up parameters for gradient descent
    max_iter = 40000
    beta = 0.00001
    f = 3 + w * x / 2.0
    y_act = [3, 4]
    tol = 0.2
    loss = "RMSE"

    # Run gradient descent
    g = ad.gradient_descent(f,
                            y_act,
                            beta=beta,
                            loss=loss,
                            max_iter=max_iter,
                            tol=tol)

    # Assert correct values within tolerance
    assert np.sqrt(((g['f'].val[0] - y_act[0])**2 +
                    (g['f'].val[1] - y_act[1])**2) / len(y_act)) <= tol
Example #23
def test_tan_result_single():
    x = ad.autodiff('x', 10)
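    # chain rule: d/dx tan(tan(x)) = sec(x)**2 * sec(tan(x))**2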
    assert admath.tan(admath.tan(x)).val == np.tan(np.tan(10))
    assert admath.tan(
        admath.tan(x)).der['x'] == 1 / np.cos(10)**2 * 1 / np.cos(
            np.tan(10))**2
Example #24
def test_cos_result_single():
    x = ad.autodiff('x', 10)
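    # chain rule: d/dx cos(cos(x)) = -sin(cos(x)) * -sin(x) = sin(x) * sin(cos(x))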
    assert admath.cos(admath.cos(x)).val == np.cos(np.cos(10))
    assert admath.cos(
        admath.cos(x)).der['x'] == np.sin(10) * np.sin(np.cos(10))
Example #25
def test_arctan_value():
    t = ad.autodiff('t', 0.3)
    m = ad.autodiff('m', 0.2)
    f2 = admath.arctan(t * m)
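    # d/dt arctan(t*m) = m / (1 + (t*m)**2); d/dm = t / (1 + (t*m)**2)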
    assert f2.val == np.arctan(0.3 * 0.2)
    assert f2.der == {'t': 0.19928258270227184, 'm': 0.2989238740534077}
Example #26
def test_arccos_dotproduct():
    x = np.array([[0.1, -0.2, 0.1], [0.3, 0, 0.4]])  #Data
    w = ad.autodiff('w', [0.1, 0.1, 0.1])  #Weights
    # Apply arccos elementwise to the dot products of w with the rows of x
    f1 = admath.arccos(w * x)
    assert f1 == f1.forwardprop()
Example #27
def test_arccos_error_out_of_domain():
    # arccos is only defined on [-1, 1], so val=2 must raise
    x = ad.autodiff('x', 2)
    with pytest.raises(ValueError):
        admath.arccos(x)
Example #28
def test_arccos_value():
    t = ad.autodiff('t', 0.3)
    m = ad.autodiff('m', 0.2)
    f2 = admath.arccos(t * m)
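    # d/dt arccos(t*m) = -m / sqrt(1 - (t*m)**2); d/dm = -t / sqrt(1 - (t*m)**2)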
    assert f2.val == np.arccos(0.3 * 0.2)
    assert f2.der == {'t': -0.2003609749252153, 'm': -0.3005414623878229}
Example #29
def test_logistic_dotproduct():
    x = np.array([[1, -2, 1], [3, 0, 4]])  #Data
    w = ad.autodiff('w', [3, -1, 0])  #Weights
    # Apply the logistic function elementwise to the dot products of w with the rows of x
    f1 = admath.logistic(w * x, A=2.0, k=1.5, x0=0.7)
    assert f1 != f1.forwardprop()
Example #30
def test_sqrt_error_nonpositive():
    x = ad.autodiff('x', -1)
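    # sqrt is undefined for negative values, so val=-1 must raise a ValueError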
    with pytest.raises(ValueError):
        admath.sqrt(x)