def test_rtruediv():
    """scalar / Var applies the reflected quotient rule to the derivatives."""
    v = Var(3, np.array([1, 0]))
    c = 2
    expected = Var(pytest.approx(0.6666666666666666),
                   np.array([pytest.approx(-0.22222222), pytest.approx(-0.)]))
    assert c / v == expected
def test_two_variables():
    """Forward-mode AD of log(x)**sin(y) at (2, 2) matches the known value/gradient."""
    ad = AD(np.array([2, 2]), np.array([1, 1]))
    f1 = lambda x, y: Var.log(x) ** Var.sin(y)
    # Call auto_diff as a normal bound method instead of the awkward
    # `AD.auto_diff(self=ad, func=f1)` spelling — identical behavior, idiomatic.
    assert ad.auto_diff(f1) == Var(
        pytest.approx(0.7165772257590739),
        np.array([pytest.approx(0.47001694), pytest.approx(0.10929465)]))
def test_real_pow():
    """Var**scalar and scalar**Var both produce correct values and derivatives."""
    v = Var(3, np.array([1, 0]))
    c = 2
    assert v ** c == Var(9, np.array([6, 0]))
    assert c ** v == Var(
        8, np.array([pytest.approx(5.54517744), pytest.approx(0.)]))
def test_jac_matrix():
    """Jacobian of [f1, f2] at (4.12, 5.13) matches precomputed entries."""
    f1 = lambda x, y: Var.log(x) ** Var.sin(y)
    f2 = lambda x, y: Var.sqrt(x) / y
    ad = AD(np.array([4.12, 5.13]), np.array([1, 1]))
    expected = np.array([
        [pytest.approx(-0.11403015), pytest.approx(0.10263124)],
        [pytest.approx(0.048018), pytest.approx(-0.07712832)],
    ])
    assert np.array_equal(ad.jac_matrix([f1, f2]), expected)
def __init__(self, vals, ders, mode=AD_Mode.FORWARD):
    """
    Initialize the AD driver with starting values and seed derivatives.

    INPUT
    =======
    self: an AD class
    vals: a list of initial values, one per variable
    ders: a list of seed derivatives, one per variable (used in forward mode)
    mode: the mode for auto-diff, i.e., FORWARD or REVERSE

    RETURNS
    =======
    None; populates self.vars with Var (forward) or Rev_Var (reverse) objects

    EXAMPLES
    =======
    >>> ad = AD(np.array([2, 2]), np.array([1, 1]), AD_Mode.FORWARD)
    >>> print(vars(ad.vars[0]), vars(ad.vars[1]))
    {'val': 2, 'der': array([1, 0])} {'val': 2, 'der': array([0, 1])}
    >>> ad = AD(np.array([2, 2]), np.array([1, 1]), AD_Mode.REVERSE)
    >>> print(vars(ad.vars[0]), vars(ad.vars[1]))
    {'value': 2, 'children': [], 'grad_value': None} {'value': 2, 'children': [], 'grad_value': None}
    """
    self.mode = mode
    if self.mode == AD_Mode.FORWARD:
        assert len(vals) == len(ders), "vals and ders must have the same length"
        self.vars = []
        dimen = len(vals)
        for cnt, (val, der) in enumerate(zip(vals, ders)):
            # Seed vector: zeros everywhere except this variable's own slot.
            # Use the dtype of `ders` so float seeds are not silently
            # truncated — np.array([0, ...]) is an int array, and assigning
            # e.g. 1.5 into it would floor the derivative to 1.
            der_list = np.zeros(dimen, dtype=np.asarray(ders).dtype)
            der_list[cnt] = der
            self.vars.append(Var(val, der_list))
    else:
        # Reverse mode tracks values only; gradients come from the tape later.
        self.vals = vals
        self.vars = [Rev_Var(val) for val in self.vals]
def auto_diff(self, func):
    """
    Apply `func` to this AD object's variables and return the result as a
    Var carrying the value and the full derivative vector.

    INPUT
    =======
    func: a callable taking as many arguments as there are variables

    RETURNS
    =======
    the final Var object with val and der

    EXAMPLES
    =======
    >>> f1 = lambda x, y: Var.log(x) ** Var.sin(y)
    >>> ad = AD(np.array([2, 2]), np.array([1, 1]), AD_Mode.FORWARD)
    >>> print("Var.log(x) ** Var.sin(y): {}".format(vars(ad.auto_diff(f1))))
    Var.log(x) ** Var.sin(y): {'val': 0.7165772257590739, 'der': array([0.47001694, 0.10929465])}
    >>> f1 = lambda x: Var.log(x) ** 2
    >>> ad = AD(np.array([2]), np.array([1]), AD_Mode.FORWARD)
    >>> print("Var.log(x) ** 2: {}".format(vars(ad.auto_diff(f1))))
    Var.log(x) ** 2: {'val': 0.4804530139182014, 'der': array([0.69314718])}
    >>> f1 = lambda x, y: Rev_Var.log(x) ** Rev_Var.sin(y)
    >>> ad = AD(np.array([2, 2]), np.array([1, 1]), AD_Mode.REVERSE)
    >>> print("Rev_Var.log(x) ** Rev_Var.sin(y): {}".format(vars(ad.auto_diff(f1))))
    Rev_Var.log(x) ** Rev_Var.sin(y): {'val': 0.7165772257590739, 'der': array([0.47001694, 0.10929465])}
    >>> f1 = lambda x: Rev_Var.log(x) ** 2
    >>> ad = AD(np.array([2]), np.array([1]), AD_Mode.REVERSE)
    >>> print("Rev_Var.log(x) ** 2: {}".format(vars(ad.auto_diff(f1))))
    Rev_Var.log(x) ** 2: {'val': 0.4804530139182014, 'der': array([0.69314718])}
    """
    if self.mode == AD_Mode.FORWARD:
        # Forward mode: the Var arithmetic carries derivatives along.
        return func(*self.vars)
    # Reverse mode: reset the tape, run the forward pass, seed the output
    # adjoint, then pull each input's gradient back off the tape.
    self.clear()
    out = func(*self.vars)
    out.grad_value = 1.0
    grads = [v.grad() for v in self.vars]
    # Wrap in a forward-mode Var so both modes share one result interface.
    return Var(out.value, np.array(grads))
def test_var_sub():
    """Var - Var subtracts values and derivative vectors componentwise."""
    a = Var(3, np.array([1, 0]))
    b = Var(2, np.array([0, 1]))
    assert a - b == Var(1, np.array([1, -1]))
def test_real_mul():
    """scalar * Var scales both the value and the derivative vector."""
    c = 3
    v = Var(2, np.array([0, 1]))
    assert c * v == Var(6, np.array([0, 3]))
def test_notequal():
    """__ne__ distinguishes Vars by value/derivative, and a Var from a number."""
    a = Var(3, np.array([1, 0]))
    scalar = 2
    b = Var(2, np.array([0, 1]))
    assert a != b
    # Assert truthiness directly rather than the redundant `(x != y) == True`
    # (PEP 8 / E712 — comparison to True).
    assert a != scalar
def test_truediv():
    """Var / Var follows the quotient rule for the derivatives."""
    num = Var(3, np.array([1, 0]))
    den = Var(2, np.array([0, 1]))
    assert num / den == Var(1.5, np.array([0.5, -0.75]))
def test_tanh():
    """tanh handles a Var (value + derivative) and falls back on plain numbers."""
    v = Var(0.5, np.array([1]))
    s = 0.5
    expected = Var(pytest.approx(0.46211715726000985),
                   np.array([pytest.approx(0.78644773)]))
    assert Var.tanh(v) == expected
    assert Var.tanh(s) == (np.exp(s) - np.exp(-s)) / (np.exp(s) + np.exp(-s))
def test_var_mul():
    """Var * Var applies the product rule to the derivative vectors."""
    a = Var(3, np.array([1, 0]))
    b = Var(2, np.array([0, 1]))
    assert a * b == Var(6, np.array([2, 3]))
def test_cosh():
    """cosh handles a Var (value + derivative) and falls back on plain numbers."""
    v = Var(0.5, np.array([1]))
    s = 0.5
    expected = Var(pytest.approx(1.1276259652063807),
                   np.array([pytest.approx(0.52109531)]))
    assert Var.cosh(v) == expected
    assert Var.cosh(s) == (np.exp(s) + np.exp(-s)) / 2
def test_sinh():
    """sinh handles a Var (value + derivative) and falls back on plain numbers."""
    v = Var(0.5, np.array([1]))
    s = 0.5
    expected = Var(pytest.approx(0.5210953054937474),
                   np.array([pytest.approx(1.12762597)]))
    assert Var.sinh(v) == expected
    assert Var.sinh(s) == (np.exp(s) - np.exp(-s)) / 2
def test_sin():
    """sin handles a Var (value + derivative) and falls back on plain numbers."""
    v = Var(3, np.array([1]))
    s = 2
    expected = Var(pytest.approx(0.1411200080598672),
                   np.array([pytest.approx(-0.9899925)]))
    assert Var.sin(v) == expected
    assert Var.sin(s) == np.sin(s)
def test_sqrt():
    """sqrt handles a Var (value + derivative) and falls back on plain numbers."""
    v = Var(3, np.array([1]))
    s = 2
    expected = Var(pytest.approx(1.7320508075688772),
                   np.array([pytest.approx(0.28867513)]))
    assert Var.sqrt(v) == expected
    assert Var.sqrt(s) == np.sqrt(s)
def test_logistic():
    """logistic (sigmoid) handles a Var and falls back on plain numbers."""
    v = Var(3, np.array([1]))
    s = 3
    expected = Var(pytest.approx(0.9525741268224334),
                   np.array([pytest.approx(0.04517666)]))
    assert Var.logistic(v) == expected
    assert Var.logistic(s) == pytest.approx(0.9525741268224334)
def test_real_sub():
    """scalar - Var negates the Var's derivative vector."""
    c = 3
    v = Var(2, np.array([0, 1]))
    assert c - v == Var(1, np.array([0, -1]))
def test_var_pow():
    """Var**Var uses the generalized power rule (d/dx of x**y)."""
    base = Var(3, np.array([1, 0]))
    exponent = Var(2, np.array([0, 1]))
    expected = Var(9, np.array([pytest.approx(6.0), pytest.approx(9.8875106)]))
    assert base ** exponent == expected
def test_cos():
    """cos handles a Var (value + derivative) and falls back on plain numbers."""
    v = Var(3, np.array([1]))
    s = 2
    expected = Var(pytest.approx(-0.9899924966004454),
                   np.array([pytest.approx(-0.14112001)]))
    assert Var.cos(v) == expected
    assert Var.cos(s) == np.cos(s)
def test_var_add():
    """Var + Var adds values and derivative vectors componentwise."""
    a = Var(3, np.array([1, 0]))
    b = Var(2, np.array([0, 1]))
    assert a + b == Var(5, np.array([1, 1]))
def test_div():
    """Var / scalar divides both the value and the derivative vector."""
    v = Var(3, np.array([1, 0]))
    c = 2
    assert v / c == Var(1.5, np.array([0.5, 0.]))
def test_tan():
    """tan handles a Var (value + derivative) and falls back on plain numbers."""
    v = Var(3, np.array([1]))
    s = 2
    expected = Var(pytest.approx(-0.1425465430742778),
                   np.array([pytest.approx(1.02031952)]))
    assert Var.tan(v) == expected
    assert Var.tan(s) == np.tan(s)
def test_pos():
    """Unary plus leaves both value and derivative unchanged."""
    v = Var(3, np.array([1, 0]))
    assert +v == Var(3, np.array([1, 0]))
def test_neg():
    """Unary minus negates both value and derivative."""
    v = Var(3, np.array([1, 0]))
    assert -v == Var(-3, np.array([-1, 0]))
def test_arcsin():
    """arcsin handles a Var (value + derivative) and falls back on plain numbers."""
    v = Var(0.5, np.array([1]))
    s = 0.5
    expected = Var(pytest.approx(0.5235987755982988),
                   np.array([pytest.approx(1.15470054)]))
    assert Var.arcsin(v) == expected
    assert Var.arcsin(s) == np.arcsin(s)
def test_equal():
    """__eq__ matches Vars with equal value/derivative; a Var never equals a bare number."""
    a = Var(3, np.array([1, 0]))
    b = Var(3, np.array([1, 0]))
    scalar = 2
    assert a == b
    # Assert falsiness directly rather than the redundant `(x == y) == False`
    # (PEP 8 / E712 — comparison to False).
    assert not (a == scalar)
def test_arccos():
    """arccos handles a Var (value + derivative) and falls back on plain numbers."""
    v = Var(0.5, np.array([1]))
    s = 0.5
    expected = Var(pytest.approx(1.0471975511965976),
                   np.array([pytest.approx(-1.15470054)]))
    assert Var.arccos(v) == expected
    assert Var.arccos(s) == np.arccos(s)
from EasyDiff.ad import AD
from EasyDiff.var import Var
from EasyDiff.rev_var import Rev_Var
from EasyDiff.ad import AD_Mode
import numpy as np

# Demo: differentiate f(x, y) = log(x)**sin(y) at (2, 2) in both AD modes.

# test forward mode.
# give it a function of your choice (built from forward-mode Var ops)
func = lambda x, y: Var.log(x)**Var.sin(y)
# give the initial values to take the derivatives at; ders are the seed
# derivatives for each variable
ad = AD(vals=np.array([2, 2]), ders=np.array([1, 1]), mode=AD_Mode.FORWARD)
# calculate and print the derivatives (vars() exposes val/der attributes)
print("Var.log(x) ** Var.sin(y): {}".format(vars(ad.auto_diff(func))))

# test reverse mode: same function, expressed with reverse-mode Rev_Var ops
func = lambda x, y: Rev_Var.log(x)**Rev_Var.sin(y)
ad = AD(vals=np.array([2, 2]), ders=np.array([1, 1]), mode=AD_Mode.REVERSE)
print("Rev_Var.log(x) ** Rev_Var.sin(y): {}".format(vars(ad.auto_diff(func))))
def test_arctan():
    """arctan handles a Var (value + derivative) and falls back on plain numbers."""
    v = Var(0.5, np.array([1]))
    s = 0.5
    expected = Var(pytest.approx(0.46364760900080615),
                   np.array([pytest.approx(0.8)]))
    assert Var.arctan(v) == expected
    assert Var.arctan(s) == np.arctan(s)