def reference_func():
    # Rebind `np` in func's module to autograd's numpy so autograd can trace it.
    func.__globals__['np'] = ag_np
    if preserve_result:
        # Note: ag_value_and_grad returns (val, grad) but we need (grad, val).
        val, gradval = ag_value_and_grad(func)(*deepcopy(args))
        return gradval, val
    else:
        return ag_grad(func)(*deepcopy(args))
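# Hedged, runnable sketch of how such a reference closure is exercised: the
# enclosing harness (its shape is assumed here) captures `func`, `args`, and
# `preserve_result`; only autograd's grad / value_and_grad APIs are taken as
# given. `square` is an illustrative sample function, not from the repo.
from copy import deepcopy

import autograd.numpy as ag_np
import numpy as np
from autograd import grad as ag_grad
from autograd import value_and_grad as ag_value_and_grad


def square(x):
    return np.sum(x ** 2)


func, args, preserve_result = square, (np.arange(3.0),), True


def _reference_func():
    func.__globals__['np'] = ag_np
    val, gradval = ag_value_and_grad(func)(*deepcopy(args))
    return gradval, val  # reorder to (grad, val)


gradval, val = _reference_func()
assert np.allclose(gradval, [0.0, 2.0, 4.0])  # d/dx sum(x**2) = 2x
assert np.allclose(val, 5.0)                  # 0 + 1 + 4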
def test_nested_dict(motion, optimized):
    p = dict(i=dict(j=3.0, k=4.0))
    func = nested_dict
    df = tangent.grad(func, motion=motion, optimized=optimized, verbose=True)
    dx = df(p)

    df_ag = ag_grad(func)
    dx_ag = df_ag(p)
    for k in p['i']:
        assert np.allclose(dx['i'][k], dx_ag['i'][k])
def test_logistic_regression(motion, optimized):
    func = logistic_regression
    w = np.random.randn(3, 5)
    b = np.random.randn(5)
    input_ = np.random.rand(3)
    label = np.zeros(5)
    label[1] = 1

    # Differentiate with tangent against real numpy, wrt the weights and bias.
    func.__globals__['np'] = np
    df = tangent.grad(func, wrt=(2, 3), motion=motion, optimized=optimized,
                      verbose=True)
    dw, db = df(input_, label, w, b)

    # Reference gradients from autograd, one argument at a time.
    func.__globals__['np'] = ag_np
    ag_dw = ag_grad(func, argnum=2)(input_, label, w, b)
    ag_db = ag_grad(func, argnum=3)(input_, label, w, b)

    assert np.allclose(ag_dw, dw)
    assert np.allclose(ag_db, db)
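# Hedged sketch of a `logistic_regression` compatible with the test above:
# it must accept ((3,) inputs, (5,) one-hot label, (3, 5) weights, (5,) bias)
# and return a scalar loss. The repo's actual definition lives elsewhere;
# this softmax cross-entropy body is illustrative only.
import numpy as np  # the test swaps this binding for autograd.numpy


def logistic_regression(inputs, label, w, b):
    logits = np.dot(inputs, w) + b
    probs = np.exp(logits) / np.sum(np.exp(logits))  # softmax
    return -np.sum(label * np.log(probs))            # cross-entropy loss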
def test_inlining_contextmanager(motion, optimized, a):
    func = inlining_contextmanager
    func = tangent.tangent(func)
    func.__globals__['np'] = np
    df = tangent.grad(func, motion=motion, optimized=optimized, verbose=True)
    dx = df(a)

    # Smoke-check that autograd can also differentiate the transformed function.
    func.__globals__['np'] = ag_np
    df_ag = ag_grad(func)
    df_ag(a)

    assert np.allclose(dx, 2.9 * a ** 2)
def test_nested_dict(motion, optimized):
    p = dict(i=dict(j=3.0, k=4.0))
    func = nested_dict
    df = tangent.autodiff(
        func,
        motion=motion,
        optimized=optimized,
        verbose=True,
        input_derivative=INPUT_DERIVATIVE.DefaultOne)
    dx = df(p)

    df_ag = ag_grad(func)
    dx_ag = df_ag(p)
    for k in p['i']:
        assert np.allclose(dx['i'][k], dx_ag['i'][k])
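# Hedged sketch of a `nested_dict` function with the structure the two test
# variants above assume: it consumes p['i']['j'] and p['i']['k'] and returns
# a scalar, so the gradient mirrors the nested-dict input. The repo's actual
# body may differ; this one is illustrative.
def nested_dict(p):
    return p['i']['j'] * p['i']['k']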
def test_inlining_contextmanager(motion, optimized, a):
    func = inlining_contextmanager
    func = tangent.tangent(func)
    func.__globals__['np'] = np
    df = tangent.autodiff(
        func,
        motion=motion,
        optimized=optimized,
        verbose=True,
        input_derivative=INPUT_DERIVATIVE.DefaultOne)
    dx = df(a)

    func.__globals__['np'] = ag_np
    df_ag = ag_grad(func)
    df_ag(a)

    assert np.allclose(dx, 2.9 * a ** 2)
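# Hedged, conftest-style sketch of the parametrization these tests assume.
# Tangent exposes 'joint' and 'split' motion modes and an optimization
# toggle; the repo's actual fixture definitions (including the scalar `a`)
# may differ, so treat the parameter values below as assumptions.
import pytest


@pytest.fixture(params=['joint', 'split'])
def motion(request):
    return request.param


@pytest.fixture(params=[True, False])
def optimized(request):
    return request.param


@pytest.fixture(params=[2.0])
def a(request):
    return request.param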
def _setdiff(self):
    # Build and cache autograd derivative callables for the objective.
    self._subgradient = ag_grad(self.eval)
    self._jacobian = ag_jacobian(self.eval)
    self._hessian = ag_hessian(self.eval)
def _setbatchdiff(self):
    # Same caching as _setdiff, for the batched evaluation path.
    self._batch_subgradient = ag_grad(self._batch_eval)
    self._batch_jacobian = ag_jacobian(self._batch_eval)
    self._batch_hessian = ag_hessian(self._batch_eval)
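# Hedged sketch of the class these two methods belong to: derivative
# callables are built once with autograd and cached on the instance. The
# class name, `eval` body, and usage below are illustrative assumptions;
# only autograd's grad / jacobian / hessian APIs are taken as given.
import autograd.numpy as ag_np
from autograd import grad as ag_grad
from autograd import hessian as ag_hessian
from autograd import jacobian as ag_jacobian


class DifferentiableObjective:
    def eval(self, x):
        return ag_np.sum(x ** 2)

    def _setdiff(self):
        self._subgradient = ag_grad(self.eval)
        self._jacobian = ag_jacobian(self.eval)
        self._hessian = ag_hessian(self.eval)


f = DifferentiableObjective()
f._setdiff()
x = ag_np.array([1.0, 2.0])
assert ag_np.allclose(f._subgradient(x), 2 * x)          # gradient of sum(x**2)
assert ag_np.allclose(f._hessian(x), 2 * ag_np.eye(2))   # Hessian is 2I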
def reference_func():
    func.__globals__['np'] = ag_np
    # Nesting autograd's grad yields the second derivative of func.
    return ag_grad(ag_grad(func))(*args)
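# Hedged sketch showing why the nested call above is a second derivative:
# autograd's grad composes, so ag_grad(ag_grad(f)) differentiates f twice.
# The sample function `cube` is illustrative, not from the repo.
import autograd.numpy as ag_np
from autograd import grad as ag_grad


def cube(x):
    return x ** 3


assert ag_np.allclose(ag_grad(cube)(3.0), 27.0)           # 3 * x**2 at x=3
assert ag_np.allclose(ag_grad(ag_grad(cube))(3.0), 18.0)  # 6 * x at x=3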