import numpy as np

from VorDiff.reverse_operator import ReverseOperator as rop

# `x` (a reverse-mode scalar variable) and `c` (a plain numeric constant)
# are assumed to be defined in this module's shared test fixtures.


def test_sinh():

    # Scalar
    f = rop.sinh(x)
    assert f._val == np.sinh(x._val)
    assert f.compute_gradient() == x.compute_gradient() * np.cosh(x._val)
    # Constant
    assert rop.sinh(c) == np.sinh(c)


def test_log():

    # Scalar
    f = rop.log(x)
    assert f._val == np.log(x._val)
    assert f.compute_gradient() == x.compute_gradient() / x._val

    # Constant
    assert rop.log(c) == np.log(c)


def test_arccos():

    # Scalar
    f = rop.arccos(x)
    assert f._val == np.arccos(x._val)
    assert f.compute_gradient() == -x.compute_gradient() / (1 - x._val**2)**.5

    # Constant
    assert rop.arccos(c) == np.arccos(c)


def test_arctan():

    # Scalar
    f = rop.arctan(x)
    assert f._val == np.arctan(x._val)
    assert f.compute_gradient() == x.compute_gradient() / (1 + x._val**2)

    # Constant
    assert rop.arctan(c) == np.arctan(c)


def test_tan():

    # Scalar
    f = rop.tan(x)
    assert f._val == np.tan(x._val)
    assert f.compute_gradient() == x.compute_gradient() / np.cos(x._val)**2

    # Constant
    assert rop.tan(c) == np.tan(c)


def test_cos():

    # Scalar
    f = rop.cos(x)
    assert f._val == np.cos(x._val)
    assert f.compute_gradient() == -np.sin(x._val) * x.compute_gradient()

    # Constant
    assert rop.cos(c) == np.cos(c)


def test_arcsin():

    # Scalar
    f = rop.arcsin(x)
    assert f._val == np.arcsin(x._val)
    # d/dx arcsin(x) = 1 / sqrt(1 - x**2), applied via the chain rule.
    assert f.compute_gradient() == x.compute_gradient() / (1 - x._val**2)**.5

    # Constant
    assert rop.arcsin(c) == np.arcsin(c)


def test_sqrt():

    # Scalar
    f = rop.sqrt(x)
    assert f._val == x._val**(0.5)
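    # round() guards against tiny floating-point differences between
    # equivalent formulations of d/dx sqrt(x) = x**(-1/2) / 2.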
    assert round(f.compute_gradient(),
                 5) == round(x.compute_gradient() * (x._val**(-1 / 2) / 2), 5)

    # Constant
    assert rop.sqrt(c) == c**0.5


def test_tanh():

    # Scalar
    f = rop.tanh(x)
    assert f._val == np.tanh(x._val)
    assert f.compute_gradient() == x.compute_gradient() * (1 -
                                                           np.tanh(x._val)**2)

    # Constant
    assert rop.tanh(c) == np.tanh(c)
Example #10
"""
@author: weiruchen
"""

from VorDiff.reverse_operator import ReverseOperator as rop
from VorDiff.reverse_autodiff import ReverseAutoDiff as rad


def create_reverse_vector(array):
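    # Unpack the two reverse-mode vector variables built from `array`.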
    x, y = rad.reverse_vector(array)
    return x, y


x, y = create_reverse_vector([[1, 2, 3], [1, 3, 6]])
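# x now holds the values [1, 2, 3] and y holds [1, 3, 6].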

# for scalar
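# d/dx [1/x + sin(1/x)] = -1/x**2 - cos(1/x)/x**2; at x = 2 this gives
# -(1 + cos(0.5)) / 4 ≈ -0.4693956404725932.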
f = 1 / (x[1]) + rop.sin(1 / x[1])
assert rad.partial_scalar(f) == -0.4693956404725932

# for vector
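# The variables are re-created before each partial below, presumably so the
# previously recorded computation graph does not carry over between examples.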
x, y = create_reverse_vector([[1, 2, 3], [1, 3, 6]])

a = x + 1
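# d/dx (x + 1) = 1 for every component, hence the all-ones gradient.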
assert (rad.partial_vector(a, x) == [1.0, 1.0, 1.0]).all()

x, y = create_reverse_vector([[1, 2, 3], [1, 3, 6]])

h = rop.sin(x)
partial_derivative = rad.partial_vector(h, x)
# d/dx sin(x) = cos(x), so the expected partials are cos(1), cos(2), cos(3).
answer = [0.54030231, -0.41614684, -0.9899925]
for i in range(len(partial_derivative)):
    assert round(partial_derivative[i], 3) == round(answer[i], 3)
Example #11
def F2(array):
    x, y = create_reverse_vector(array)
    return rop.sin(x) + 2 * rop.sin(y), x, y
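
A minimal usage sketch (not part of the original example), assuming the same
VorDiff API as above; since d/dx (sin(x) + 2*sin(y)) = cos(x):

g, x, y = F2([[1, 2, 3], [1, 3, 6]])
print(rad.partial_vector(g, x))  # expected ~ [cos(1), cos(2), cos(3)]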
Example #12
def F1(array):
    x, y = create_reverse_vector(array)
    return 3 * x + rop.cos(y)**2 + 1, x, y
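
A similar hypothetical sketch; since d/dx (3*x + cos(y)**2 + 1) = 3, the
partials with respect to x should be constant:

g, x, y = F1([[1, 2, 3], [1, 3, 6]])
print(rad.partial_vector(g, x))  # expected ~ [3.0, 3.0, 3.0]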
Example #13
"""
@author: weiruchen
"""

from VorDiff.reverse_operator import ReverseOperator as rop
from VorDiff.reverse_autodiff import ReverseAutoDiff as rad


def create_reverse_vector(array):
    x, y = rad.reverse_vector(array)
    return x, y


x, y = create_reverse_vector([[1, 2, 3], [1, 3, 6]])

# for scalar
f = 1 / (x[1]) + rop.sin(1 / x[1])
print(rad.partial_scalar(f))

# for vector
x, y = create_reverse_vector([[1, 2, 3], [1, 3, 6]])

a = x + 1
print(rad.partial_vector(a, x))

x, y = create_reverse_vector([[1, 2, 3], [1, 3, 6]])

h = rop.sin(x)
print(rad.partial_vector(h, x))

x, y = create_reverse_vector([[1, 2, 3], [1, 3, 6]])