def _initialize_reverse(self, x):
    # STEP 1: trace the function evaluation
    cg = algopy.CGraph()
    x = algopy.Function(x)
    # alternative: trace each component as its own independent Function:
    # x = np.array([algopy.Function(x[i]) for i in range(len(x))])
    y = self.fun(x)
    cg.trace_off()
    cg.independentFunctionList = [x]
    cg.dependentFunctionList = [y]
    self._cg = cg
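Presumably the traced graph is consumed later in the same class; a minimal sketch of such a consumer (the method name _gradient_reverse is hypothetical, and np is assumed to be numpy):

def _gradient_reverse(self, x):
    # reverse-mode gradient from the cached graph; gradient() returns one
    # array per independent Function, hence the [0]
    return self._cg.gradient([np.asarray(x, dtype=float)])[0]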
def d_f(x):
    """function"""
    return x[0] * x[1] * x[2] + exp(x[0]) * x[1]

# forward AD without building a computational graph
x = UTPM.init_jacobian([3, 5, 7])
y = d_f(x)
algopy_jacobian = UTPM.extract_jacobian(y)
print('jacobian = ', algopy_jacobian)

# reverse mode using a computational graph
# Step 1/2 - trace the function evaluation
cg = algopy.CGraph()
x = algopy.Function([1, 2, 3])
y = d_f(x)
cg.trace_off()
cg.independentFunctionList = [x]
cg.dependentFunctionList = [y]

# Step 2/2 - use the graph to evaluate derivatives
print('gradient =', cg.gradient([3., 5, 7]))
# Jacobian: the matrix of first-order partial derivatives of f w.r.t. x,
# evaluated at the given point (a single row here, since f is scalar-valued)
print('Jacobian =', cg.jacobian([3., 5, 7]))
# Hessian: the square, symmetric matrix of second-order partial derivatives;
# useful in optimisation for classifying critical points (min/max/saddle)
print('Hessian =', cg.hessian([3., 5., 7.]))
print('Hessian vector product =', cg.hess_vec([3., 5., 7.], [4, 5, 6]))
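As a sanity check (not part of the snippet above), the gradient of f(x) = x0*x1*x2 + exp(x0)*x1 can be written out by hand and compared with the reverse-mode result, continuing from the cg built above:

import numpy as np

def analytic_gradient(x):
    x0, x1, x2 = x
    return np.array([x1 * x2 + np.exp(x0) * x1,   # df/dx0
                     x0 * x2 + np.exp(x0),        # df/dx1
                     x0 * x1])                    # df/dx2

# should print True
print(np.allclose(cg.gradient([3., 5., 7.]), analytic_gradient([3., 5., 7.])))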
def cal_algopy_cgraph(eval_f, var_num):
    cg = algopy.CGraph()
    # trace at a float-valued dummy point; a bare range object would not
    # support the arithmetic recorded during tracing
    x = algopy.Function(numpy.arange(var_num, dtype=float))
    y = eval_f(x)
    cg.trace_off()
    cg.independentFunctionList = [x]
    cg.dependentFunctionList = [y]
    return cg
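A hypothetical use of cal_algopy_cgraph: trace once, then evaluate the Jacobian at arbitrary points without re-tracing (eval_f below is made up for illustration):

def eval_f(x):
    return x[0] * x[1] + algopy.exp(x[2])

cg = cal_algopy_cgraph(eval_f, 3)
print(cg.jacobian([1., 2., 3.]))
print(cg.jacobian([4., 5., 6.]))   # same graph, different point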
def test_function(x):
    x = numpy.array([1., 2., 3.])
    cg = algopy.CGraph()
    fx = algopy.Function(x)
    fy = 1. / fx
    cg.trace_off()
    cg.independentFunctionList = [fx]
    cg.dependentFunctionList = [fy]
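The test above asserts nothing; a self-contained variant that checks the expected result (the Jacobian of the elementwise reciprocal is diag(-1/x^2)) might look like this:

import numpy
import algopy

def test_reciprocal_jacobian():
    x0 = numpy.array([1., 2., 3.])
    cg = algopy.CGraph()
    fx = algopy.Function(x0)
    fy = 1. / fx
    cg.trace_off()
    cg.independentFunctionList = [fx]
    cg.dependentFunctionList = [fy]
    expected = numpy.diag(-1. / x0 ** 2)
    assert numpy.allclose(cg.jacobian(x0), expected)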
def trace_eval_g(self, x):
    cg2 = algopy.CGraph()
    x = algopy.Function(x)
    y = self.eval_g(x)
    cg2.trace_off()
    cg2.independentFunctionList = [x]
    cg2.dependentFunctionList = [y]
    self.cg2 = cg2
def __enter__(self):
    if self._entered:
        raise RuntimeError("cannot enter %r twice" % self)
    self._entered = True
    cg = algopy.CGraph()
    args = [algopy.Function(arg) for arg in self._args]
    cg.independentFunctionList = args
    self._cg = cg
    return (cg, ) + tuple(args)
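Only __enter__ is shown; the matching __exit__ would presumably stop the trace. A sketch, assuming the object is used as a context manager:

def __exit__(self, exc_type, exc_value, traceback):
    self._cg.trace_off()
    return False  # do not suppress exceptions

# hypothetical usage:
# with tracer as (cg, fx):
#     fy = model(fx)
#     cg.dependentFunctionList = [fy]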
def _trace_cons(self, x):
    "Trace the constraint function evaluation."
    cg = algopy.CGraph()
    x = algopy.Function(x)
    y = self.cons(x)
    cg.trace_off()
    cg.independentFunctionList = [x]
    cg.dependentFunctionList = [y]
    self._cg_cons = cg
def _trace_obj(self, x):
    "Trace the objective function evaluation."
    cg = algopy.CGraph()
    x = algopy.Function(x)
    y = self.obj(x)
    cg.trace_off()
    cg.independentFunctionList = [x]
    cg.dependentFunctionList = [y]
    self._cg_obj = cg
def _trace_cons(self, x):
    """Trace the constraints evaluation."""
    if self._cg_cons is not None or self.m == 0:
        return
    cg = algopy.CGraph()
    x = algopy.Function(x)
    y = self.cons(x)
    cg.trace_off()
    cg.independentFunctionList = [x]
    cg.dependentFunctionList = [y]
    self._cg_cons = cg
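Once _trace_cons has cached the graph, derivative queries reuse it. A sketch of a consumer method (jac is a hypothetical name, np is assumed to be numpy):

def jac(self, x):
    """Constraint Jacobian via the cached computational graph (sketch)."""
    self._trace_cons(x)
    if self._cg_cons is None:          # nothing was traced (self.m == 0)
        return np.empty((0, len(x)))
    return self._cg_cons.jacobian(x)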
def gauss_algopy(eval_f, x0, tol=10e-5):
    x0 = np.array(x0, dtype=float)
    cg = algopy.CGraph()
    x = algopy.Function(x0)
    y = eval_f(x)
    cg.trace_off()
    cg.independentFunctionList = [x]
    cg.dependentFunctionList = [y]
    sol = gauss_solver(eval_f, cg.jacobian, x0, tol)
    return sol
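gauss_solver is not shown above; a minimal Gauss-Newton iteration with a compatible signature might look like this (undamped, for illustration only):

import numpy as np

def gauss_solver(eval_f, jacobian, x0, tol, max_iter=100):
    x = np.array(x0, dtype=float)
    for _ in range(max_iter):
        r = np.atleast_1d(eval_f(x))      # residuals at the current iterate
        J = np.atleast_2d(jacobian(x))    # Jacobian from the traced graph
        step = np.linalg.lstsq(J, -r, rcond=None)[0]
        x += step
        if np.linalg.norm(step) < tol:
            break
    return x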
def __init__(self, f, x, test='f'):
    self.f = f
    self.x = x.copy()
    cg = algopy.CGraph()
    # trace each component as its own independent Function
    x = np.array([algopy.Function(x[i]) for i in range(len(x))])
    y = f(x)
    cg.trace_off()
    cg.independentFunctionList = x
    cg.dependentFunctionList = [y]
    self.cg = cg
def __init__(self, f, x, test='f'):
    self.f = f
    self.x = x.copy()
    if test not in ('fg', 'fh'):
        cg = algopy.CGraph()
        x = algopy.Function(x)
        y = f(x)
        cg.trace_off()
        cg.independentFunctionList = [x]
        cg.dependentFunctionList = [y]
        self.cg = cg
def computational_graph(self, x, *args, **kwds):
    if self._computational_graph is None:
        # STEP 1: trace the function evaluation
        cg = algopy.CGraph()
        x = algopy.Function(x)
        y = self.f(x, *args, **kwds)
        cg.trace_off()
        cg.independentFunctionList = [x]
        cg.dependentFunctionList = [y]
        self._computational_graph = cg
    return self._computational_graph
def computational_graph(self, x, *args, **kwds):
    if self._computational_graph is None:
        # STEP 1: trace the function evaluation
        cg = algopy.CGraph()
        tmp = algopy.Function(x)
        y = self.fun(tmp, *args, **kwds)
        cg.trace_off()
        cg.independentFunctionList = [tmp]
        cg.dependentFunctionList = [y]
        self._computational_graph = cg
    return self._computational_graph
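The point of the cache is that tracing happens once; a hypothetical caller (problem and sample_points are made-up names) can then reuse the graph for derivatives at many points:

# cg = problem.computational_graph(x0)
# for x in sample_points:
#     g = cg.gradient(x)      # no re-tracing on subsequent calls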
def eval_grad_reverse_mode(f, x):
    cg = algopy.CGraph()
    fx = algopy.Function(x)
    fy = f(fx)  # trace f on the wrapped Function, not the raw array
    cg.trace_off()
    cg.independentFunctionList = [fx]
    cg.dependentFunctionList = [fy]
    # cg.plot('omg.png')
    result = cg.jacobian(x)  # evaluate the Jacobian at the numeric point x
    print('reverse mode jacobian:')
    print(result)
    return result
def _trace_lag(self, x, z):
    """Trace the Lagrangian evaluation."""
    if self._cg_lag is not None:
        return
    self._trace_obj(x)
    self._trace_cons(x)
    unconstrained = self.m == 0 and self.nbounds == 0
    if unconstrained:
        self._cg_lag = self._cg_obj
        return
    cg = algopy.CGraph()
    xz = np.concatenate((x, z))
    xz = algopy.Function(xz)
    l = self.lag(xz[:self.nvar], xz[self.nvar:])
    cg.trace_off()
    cg.independentFunctionList = [xz]
    cg.dependentFunctionList = [l]
    self._cg_lag = cg
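With the Lagrangian traced over the concatenated (x, z) vector, second derivatives come straight from the graph. A sketch of a consumer (hess_lag is a hypothetical name):

def hess_lag(self, x, z):
    """Hessian of the Lagrangian w.r.t. x (sketch)."""
    self._trace_lag(x, z)
    if self._cg_lag is self._cg_obj:    # unconstrained shortcut taken above
        return self._cg_obj.hessian(x)
    H = self._cg_lag.hessian(np.concatenate((x, z)))
    return H[:self.nvar, :self.nvar]    # keep only the x-x block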
a = np.linspace(1, 10, 1000)
# xopt = spo.brenth(lambda x: gradient(x), 0, 20, xtol=10e-7, full_output=True)
h = gradient(a)
plt.plot(a, h)
plt.plot(a, f([a]))

# reverse mode using a computational graph
# ----------------------------------------
# STEP 1: trace the function evaluation
cg = algopy.CGraph()
x = algopy.Function([1, 2])
y = eval_f(x)
cg.trace_off()
cg.independentFunctionList = [x]
cg.dependentFunctionList = [y]

# STEP 2: use the computational graph to evaluate derivatives
print('gradient =', cg.gradient([10., 10]))
print('Jacobian =', cg.jacobian([10., 10]))
print('Hessian =', cg.hessian([10., 10.]))
print('Hessian vector product =', cg.hess_vec([3., 5.], [4, 5]))
print("")
def eval_f(x):
    """ some function """
    return x[0] * x[1] * x[2] + exp(x[0]) * x[1]

# forward mode without building the computational graph
# -----------------------------------------------------
x = UTPM.init_jacobian([3, 5, 7])
y = eval_f(x)
algopy_jacobian = UTPM.extract_jacobian(y)
print('jacobian = ', algopy_jacobian)

# reverse mode using a computational graph
# ----------------------------------------
# STEP 1: trace the function evaluation
cg = algopy.CGraph()
x = algopy.Function([1, 2, 3])
y = eval_f(x)
cg.trace_off()
cg.independentFunctionList = [x]
cg.dependentFunctionList = [y]

# STEP 2: use the computational graph to evaluate derivatives
print('gradient =', cg.gradient([3., 5, 7]))
print('Jacobian =', cg.jacobian([3., 5, 7]))
print('Hessian =', cg.hessian([3., 5., 7.]))
print('Hessian vector product =', cg.hess_vec([3., 5., 7.], [4, 5, 6]))
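For this particular f the Hessian is easy to write out by hand, which gives a quick check of cg.hessian (continuing from the snippet above):

import numpy as np

x0, x1, x2 = 3., 5., 7.
H_analytic = np.array([
    [np.exp(x0) * x1, x2 + np.exp(x0), x1],
    [x2 + np.exp(x0), 0.,              x0],
    [x1,              x0,              0.],
])
print(np.allclose(cg.hessian([3., 5., 7.]), H_analytic))   # should print True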
# INITIAL VALUES
M = 30
h = 1. / M
u = numpy.zeros((M, M), dtype=float)
u[0, :] = [numpy.sin(numpy.pi * j * h / 2.) for j in range(M)]
u[-1, :] = [
    numpy.exp(numpy.pi / 2) * numpy.sin(numpy.pi * j * h / 2.)
    for j in range(M)
]
u[:, 0] = 0
u[:, -1] = [numpy.exp(i * h * numpy.pi / 2.) for i in range(M)]

# trace the function evaluation and store it in cg
cg = algopy.CGraph()
Fu = algopy.Function(u)
Fy = O_tilde(Fu)
cg.trace_off()
cg.independentFunctionList = [Fu]
cg.dependentFunctionList = [Fy]

def dO_tilde(u):
    # use ALGOPY to compute the gradient
    g = cg.gradient([u])[0]

    # on the edges the analytical solution is fixed,
    # so the search direction must be zero on the boundary
    g[:, 0] = 0
    g[0, :] = 0
    g[:, -1] = 0
    g[-1, :] = 0

    return g
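A typical driver for this setup (hypothetical; O_tilde and the minimizer settings are placeholders) feeds the traced gradient to a SciPy optimizer over the flattened array:

import scipy.optimize

def objective(u_flat):
    return O_tilde(u_flat.reshape(M, M))

def gradient(u_flat):
    return dO_tilde(u_flat.reshape(M, M)).ravel()

u_opt = scipy.optimize.fmin_cg(objective, u.ravel(), fprime=gradient)
u_opt = u_opt.reshape(M, M)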