def test_expm(self):

    def f(x):
        x = x.reshape((2, 2))
        return sum(expm(x))

    x = numpy.random.random(2 * 2)

    # forward mode
    ax = UTPM.init_jacobian(x)
    ay = f(ax)
    g1 = UTPM.extract_jacobian(ay)

    # reverse mode
    cg = CGraph()
    ax = Function(x)
    ay = f(ax)
    cg.independentFunctionList = [ax]
    cg.dependentFunctionList = [ay]
    g2 = cg.gradient(x)

    assert_array_almost_equal(g1, g2)
def test_tracer_on_mixed_utpm_ndarray_mul(self):
    D, P = 1, 1
    A = numpy.arange(2 * 2, dtype=float).reshape(2, 2)
    x = UTPM(numpy.zeros((D, P, 2, 2)))

    def f(x):
        return sum(A * x)

    cg = CGraph()
    ax = Function(x)
    ay = f(ax)
    cg.independentFunctionList = [ax]
    cg.dependentFunctionList = [ay]

    # sum(A * X) is linear in X, so its gradient is A itself
    assert_array_almost_equal(A, cg.gradient(x))
""" This example shows that most computations can be performed by numpy functions on arrays of UTPM objects. Just bear in mind that is much faster use UTPM instances of matrices than numpy.ndarrays with UTPM elements. """ import numpy, os from algopy import CGraph, Function, UTPM, dot, qr, eigh, inv N, D, P = 2, 2, 1 cg = CGraph() x = numpy.array([Function(UTPM(numpy.random.rand(*(D, P)))) for n in range(N)]) A = numpy.outer(x, x) A = numpy.exp(A) y = numpy.dot(A, x) cg.independentFunctionList = list(x) cg.dependentFunctionList = list(y) cg.plot(os.path.join(os.path.dirname(__file__), 'numpy_dot_graph.svg'))
# M number of rows of A
# N number of cols of A
D, M, N = 2, 3, 3
P = M * N

# generate a badly conditioned matrix A
A = UTPM(numpy.zeros((D, P, M, N)))
A.data[0, :] = numpy.random.rand(*(M, N))

# seed one canonical direction E_{mn} per directional derivative
for m in range(M):
    for n in range(N):
        p = m * N + n
        A.data[1, p, m, n] = 1.

cg = CGraph()
A = Function(A)
y = trace(inv(A))
cg.trace_off()
cg.independentFunctionList = [A]
cg.dependentFunctionList = [y]

ybar = y.x.zeros_like()
ybar.data[0, :] = 1.
cg.pullback([ybar])

# check gradient
g_forward = numpy.zeros(N * N)
g_reverse = numpy.zeros(N * N)
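# How the comparison might be completed -- a hedged sketch, assuming the
# usual algopy layout: y.x.data[1, p] holds the p-th forward directional
# derivative (the directions seeded above are the canonical basis), and
# A.xbar.data[0, 0] holds the reverse-mode gradient after the pullback.
for p in range(P):
    g_forward[p] = y.x.data[1, p]
g_reverse[:] = A.xbar.data[0, 0].ravel()
assert numpy.allclose(g_forward, g_reverse)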
def eval_covariance_matrix_qr(J1, J2):
    M, N = J1.shape
    K, N = J2.shape
    Q, R = qr_full(J2.T)
    Q2 = Q[:, K:].T
    J1_tilde = dot(J1, Q2.T)
    Q, R = qr(J1_tilde)
    V = solve(R.T, Q2)
    return dot(V.T, V)

# dimensions of the involved matrices
D, P, M, N, K, Nx = 2, 1, 5, 3, 1, 1

# trace the function evaluation of METHOD 1: nullspace method
cg1 = CGraph()
J1 = Function(UTPM(numpy.random.rand(*(D, P, M, N))))
J2 = Function(UTPM(numpy.random.rand(*(D, P, K, N))))
C = eval_covariance_matrix_qr(J1, J2)
y = C[0, 0]
cg1.trace_off()
cg1.independentFunctionList = [J1, J2]
cg1.dependentFunctionList = [y]
print('covariance matrix: C =\n', C)

# trace the function evaluation of METHOD 2: naive method
# (potentially numerically unstable)
cg2 = CGraph()
J1 = Function(J1.x)
J2 = Function(J2.x)
C2 = eval_covariance_matrix_naive(J1, J2)
y = C2[0, 0]
import numpy
import algopy
from algopy import CGraph, UTPM, Function


def eval_g(x, y):
    """ some vector-valued function """
    retval = algopy.zeros(3, dtype=x)
    retval[0] = algopy.sin(x**2 + y)
    retval[1] = algopy.cos(x + y) - x
    retval[2] = algopy.sin(x)**2 + algopy.cos(x)**2
    return retval

# trace the function evaluation
# and store the computational graph in cg
cg = CGraph()
ax = 3.
ay = 5.
fx = Function(ax)
fy = Function(ay)
fz = eval_g(fx, fy)
cg.independentFunctionList = [fx, fy]
cg.dependentFunctionList = [fz]

# compute Taylor series
#
#  Jx( 1. + 2.*t + 3.*t**2 + 4.*t**3 + 5.*t**5,
#      6. + 7.*t + 8.*t**2 + 9.*t**3 + 10.*t**5 )
#  Jy( 1. + 2.*t + 3.*t**2 + 4.*t**3 + 5.*t**5,
#      6. + 7.*t + 8.*t**2 + 9.*t**3 + 10.*t**5 )
#
# where
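# A hedged sketch of pushing such polynomials through the traced graph
# (it assumes CGraph.pushforward accepts a list of UTPM instances; the
# zero at the t**4 slot matches the polynomials written above):
D, P = 6, 1
ax = UTPM(numpy.array([1., 2., 3., 4., 0., 5.]).reshape((D, P)))
ay = UTPM(numpy.array([6., 7., 8., 9., 0., 10.]).reshape((D, P)))
cg.pushforward([ax, ay])
az = cg.dependentFunctionList[0].x  # Taylor coefficients of eval_g(x(t), y(t))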
from algopy import CGraph, Function

cg = CGraph()
cg.trace_on()
x = Function(1)
y = Function(3)
z = x * y + x
cg.trace_off()
cg.independentFunctionList = [x, y]
cg.dependentFunctionList = [z]
print(cg)
cg.plot('example_tracer_cgraph.png')
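# Once traced, the graph can be re-evaluated and differentiated at new
# arguments -- a hedged sketch, assuming the CGraph convenience drivers
# cg.function and cg.gradient:
print(cg.function([1., 3.]))   # re-evaluate z = x*y + x at x=1, y=3
print(cg.gradient([1., 3.]))   # gradient w.r.t. (x, y), here (y+1, x)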
""" import numpy from algopy import CGraph, Function, UTPM, dot, qr, eigh, inv, solve # first order derivatives, one directional derivative # D - 1 is the degree of the Taylor polynomial # P directional derivatives at once # M number of rows of J1 # N number of cols of J1 # K number of rows of J2 (must be smaller than N) D,P,M,N,K,Nx = 2,1,100,3,1,1 # METHOD 1: nullspace method cg1 = CGraph() J1 = Function(UTPM(numpy.random.rand(*(D,P,M,N)))) J2 = Function(UTPM(numpy.random.rand(*(D,P,K,N)))) Q,R = Function.qr_full(J2.T) Q2 = Q[:,K:].T J1_tilde = dot(J1,Q2.T) Q,R = qr(J1_tilde) V = solve(R.T, Q2) C = dot(V.T,V) cg1.trace_off() cg1.independentFunctionList = [J1, J2]
# create an UTPM instance
D, N, M = 2, 3, 2
P = 2 * N
x = UTPM(numpy.zeros((D, P, 2 * N, 1)))
x.data[0, :] = numpy.random.rand(2 * N, 1)
x.data[1, :, :, 0] = numpy.eye(P)
y = x[N:]
x = x[:N]

# create a CGraph instance to store the computational trace, then wrap
# the UTPM instances in Function instances to trace all operations that
# have x or y as an argument
cg = CGraph().trace_on()
x = Function(x)
y = Function(y)
z = f(x, y)
cg.trace_off()

# define dependent and independent variables in the computational procedure
cg.independentFunctionList = [x, y]
cg.dependentFunctionList = [z]

# Since the UTPM instance is wrapped in a Function instance we have to
# access it by z.x. That means the Jacobian is
grad1 = z.x.data[1, :, 0]
print('forward gradient g(x) = \n', grad1)
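# A hedged sketch of the matching reverse sweep (it assumes the usual
# pullback pattern; the adjoints then appear in x.xbar and y.xbar):
zbar = z.x.zeros_like()
zbar.data[0, :] = 1.
cg.pullback([zbar])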
    def f(x,y):
        return dot(x,y) - x*(x-y)

We want to compute the Hessian of that function.
"""
import numpy
from algopy import CGraph, Function, UTPM, dot, qr, eigh, inv, zeros


def f(x, N):
    return dot(x[:N], x[N:]) * x[N:] - x[:N] * (x[:N] - x[N:])

# create a CGraph instance to store the computational trace
cg = CGraph()

# create an UTPM instance
D, N, M = 2, 3, 2
P = N
A = UTPM(numpy.zeros((D, P, M, N)))
x = UTPM(numpy.zeros((D, P, N, 1)))
x.data[0, :] = numpy.random.rand(N, 1)
A.data[0, :] = numpy.random.rand(M, N)
x.data[1, :, :, 0] = numpy.eye(P)
x = Function(x)
A = Function(A)
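# Aside: for scalar-valued functions, forward-mode Hessians are also
# available through convenience drivers -- a hedged sketch, assuming
# UTPM.init_hessian/extract_hessian behave as in the algopy docs:
x0 = numpy.random.rand(N)
ax = UTPM.init_hessian(x0)
ay = dot(ax, ax)                 # simple scalar test function x^T x
H = UTPM.extract_hessian(N, ay)  # should equal 2 * numpy.eye(N)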
sigma = 1.2
N = 35
x = numpy.random.normal(actual_mu, sigma, size=N)

mu = UTPM([[3.5], [1], [0]])  # unknown variable
print('function evaluation =\n', logp(x, 3.5, sigma))

# forward mode with ALGOPY
utp = logp(x, mu, sigma).data[:, 0]
print('function evaluation = %f\n'
      '1st directional derivative = %f\n'
      '2nd directional derivative = %f' % (utp[0], 1. * utp[1], 2. * utp[2]))

# finite differences solution:
print('finite differences derivative =\n',
      (logp(x, 3.5 + 10**-8, sigma) - logp(x, 3.5, sigma)) / 10**-8)

# trace function evaluation
cg = CGraph()
mu = Function(UTPM([[3.5], [1], [0]]))  # unknown variable
out = logp(x, mu, sigma)
cg.trace_off()
cg.independentFunctionList = [mu]
cg.dependentFunctionList = [out]
cg.plot(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                     'posterior_log_probability_cgraph.png'))

# reverse mode with ALGOPY
outbar = UTPM([[1.], [0], [0]])
cg.pullback([outbar])
gradient = mu.xbar.data[0, 0]
Hess_vec = mu.xbar.data[1, 0]
print('gradient = ', gradient)
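# A hedged finite-difference cross-check of the Hessian-vector product
# (central second difference; it assumes logp is smooth near mu = 3.5,
# and the seed direction above is v = 1, so Hess_vec is the scalar Hessian):
h = 1e-5
fd_hess = (logp(x, 3.5 + h, sigma) - 2. * logp(x, 3.5, sigma)
           + logp(x, 3.5 - h, sigma)) / h**2
print('finite differences Hessian = ', fd_hess)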
c = 0.4
Msat = 1.35e5

# input quantities
r = 2.0e-2    # m, radius
i_hat = 20.0  # A, current amplitude
f = 1000.0    # Hz, frequency
n = 3         # number of periods
n_p = 512     # data points per period

t = np.arange(n * n_p) / (n_p * f)  # time vector
current = i_hat * (np.sin(2 * np.pi * f * t)
                   + 0.7 * np.sin(6 * np.pi * f * t + 1))  # prescribed current
H = current / (2 * np.pi * r)  # resulting prescribed field

graph = CGraph()
graph.trace_on()
x = Function([alpha, a, k, c, Msat])  # parameter vector
p = {'alpha': x[0], 'a': x[1], 'k': x[2], 'c': x[3], 'm_sat': x[4]}
model = JilesAthertonModel.from_dict(p)
M = model.integrate_rk4(t, H)

H = H[::2]
t = t[::2]
B = mu_0 * (H + M)
dB_dt = np.zeros(np.size(B))
# first-order finite differences of B over t, padded with a leading zero
new = np.append([0.0], (B[1:] - B[0:-1]) / (t[1:] - t[0:-1]))