def test_tracer_on_mixed_utpm_ndarray_mul(self):
    """Reverse-mode gradient of sum(A * x) w.r.t. x must equal A elementwise."""
    D, P = 1, 1
    A = numpy.arange(2 * 2, dtype=float).reshape(2, 2)
    x = UTPM(numpy.zeros((D, P, 2, 2)))

    def f(v):
        # elementwise product with the constant ndarray A, then total sum
        return sum(A * v)

    cg = CGraph()
    fx = Function(x)
    fy = f(fx)
    cg.independentFunctionList = [fx]
    cg.dependentFunctionList = [fy]
    # d/dx sum(A * x) = A
    assert_array_almost_equal(A, cg.gradient(x))
def test_expm(self):
    """Forward-mode and reverse-mode gradients of sum(expm(x)) must agree."""

    def f(v):
        # matrix exponential of the 2x2 reshape of the flat input
        return sum(expm(v.reshape((2, 2))))

    x = numpy.random.random(2 * 2)

    # forward mode: Jacobian via UTPM seeding
    g1 = UTPM.extract_jacobian(f(UTPM.init_jacobian(x)))

    # reverse mode: trace the evaluation and pull back
    cg = CGraph()
    fx = Function(x)
    fy = f(fx)
    cg.independentFunctionList = [fx]
    cg.dependentFunctionList = [fy]
    g2 = cg.gradient(x)

    assert_array_almost_equal(g1, g2)
# create an UTPM instance D, N, M = 2, 3, 2 P = 2 * N x = UTPM(numpy.zeros((D, P, 2 * N, 1))) x.data[0, :] = numpy.random.rand(2 * N, 1) x.data[1, :, :, 0] = numpy.eye(P) y = x[N:] x = x[:N] # wrap the UTPM instance in a Function instance to trace all operations # that have x as an argument # create a CGraph instance that to store the computational trace cg = CGraph().trace_on() x = Function(x) y = Function(y) z = f(x, y) cg.trace_off() # define dependent and independent variables in the computational procedure cg.independentFunctionList = [x, y] cg.dependentFunctionList = [z] # Since the UTPM instrance is wrapped in a Function instance we have to access it # by y.x. That means the Jacobian is grad1 = z.x.data[1, :, 0] print('forward gradient g(x) = \n', grad1)
    return dot(x,y) - x*(x-y)

We want to compute the Hessian of that function.
"""
import numpy
from algopy import CGraph, Function, UTPM, dot, qr, eigh, inv, zeros


def f(x, N):
    """Test function pairing the first N entries of x with the last N.

    Returns dot(x[:N], x[N:]) * x[N:] - x[:N] * (x[:N] - x[N:]),
    a length-N vector-valued expression.
    """
    return dot(x[:N], x[N:]) * x[N:] - x[:N] * (x[:N] - x[N:])


# create a CGraph instance to store the computational trace
cg = CGraph()

# create an UTPM instance: D Taylor coefficients, P = N directions
D, N, M = 2, 3, 2
P = N
A = UTPM(numpy.zeros((D, P, M, N)))
x = UTPM(numpy.zeros((D, P, N, 1)))
x.data[0, :] = numpy.random.rand(N, 1)
A.data[0, :] = numpy.random.rand(M, N)
# seed first-order coefficients with the identity -> forward-mode derivatives
x.data[1, :, :, 0] = numpy.eye(P)

# wrap in Function instances so subsequent operations are traced
x = Function(x)
A = Function(A)
def eval_covariance_matrix_qr(J1, J2):
    """Covariance matrix of a constrained least-squares problem (nullspace method).

    J1 is the (M, N) Jacobian of the residuals, J2 the (K, N) Jacobian of the
    constraints. Projects J1 onto the nullspace of J2 via a full QR of J2.T,
    then solves with the triangular factor of the projected problem.
    """
    M, N = J1.shape
    K, N = J2.shape
    Q, R = qr_full(J2.T)
    # columns K: of Q span the nullspace of J2
    Q2 = Q[:, K:].T
    J1_tilde = dot(J1, Q2.T)
    Q, R = qr(J1_tilde)
    V = solve(R.T, Q2)
    return dot(V.T, V)


# dimensions of the involved matrices
D, P, M, N, K, Nx = 2, 1, 5, 3, 1, 1

# trace the function evaluation of METHOD 1: nullspace method
cg1 = CGraph()
J1 = Function(UTPM(numpy.random.rand(*(D, P, M, N))))
J2 = Function(UTPM(numpy.random.rand(*(D, P, K, N))))
C = eval_covariance_matrix_qr(J1, J2)
y = C[0, 0]
cg1.trace_off()
cg1.independentFunctionList = [J1, J2]
cg1.dependentFunctionList = [y]
print('covariance matrix: C =\n', C)

# trace the function evaluation of METHOD 2: naive method (potentially numerically unstable)
cg2 = CGraph()
# reuse the same underlying UTPM values (unwrapped via .x) for a fair comparison
J1 = Function(J1.x)
J2 = Function(J2.x)
C2 = eval_covariance_matrix_naive(J1, J2)
y = C2[0, 0]
c = 0.4
Msat = 1.35e5

# input quantities
r = 2.0e-2  # m, radius
i_hat = 20.0  # A, current amplitude
f = 1000.0  # Hz, frequency
n = 3  # number of periods
n_p = 512  # data points per period
t = np.arange(n * n_p) / (n_p * f)  # time vector
# prescribed current: fundamental plus a phase-shifted third harmonic
current = i_hat * (np.sin(2 * np.pi * f * t) + 0.7 * np.sin(6 * np.pi * f * t + 1))
# resulting prescribed magnetic field
H = current / (2 * np.pi * r)

# trace the model evaluation so derivatives w.r.t. the parameters are available
graph = CGraph()
graph.trace_on()
x = Function([alpha, a, k, c, Msat])  # parameter vector
p = {'alpha': x[0], 'a': x[1], 'k': x[2], 'c': x[3], 'm_sat': x[4]}
model = JilesAthertonModel.from_dict(p)
M = model.integrate_rk4(t, H)

# NOTE(review): the [::2] downsampling presumably matches the length of the
# RK4 output M (half-step integration) — confirm against integrate_rk4
H = H[::2]
t = t[::2]
B = mu_0 * (H + M)

# finite-difference time derivative of B, padded with 0.0 to keep the length
dB_dt = np.zeros(np.size(B))
new = np.append([0.0], (B[1:] - B[0:-1]) / (t[1:] - t[0:-1]))