def test_expm(self):
    def f(x):
        x = x.reshape((2, 2))
        return sum(expm(x))

    x = numpy.random.random(2 * 2)

    # forward mode
    ax = UTPM.init_jacobian(x)
    ay = f(ax)
    g1 = UTPM.extract_jacobian(ay)

    # reverse mode
    cg = CGraph()
    ax = Function(x)
    ay = f(ax)
    cg.independentFunctionList = [ax]
    cg.dependentFunctionList = [ay]
    g2 = cg.gradient(x)

    assert_array_almost_equal(g1, g2)
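# An independent way to sanity-check the gradients above is a central
# finite-difference approximation. This is a minimal sketch, not part of the
# test suite: `fd_gradient` is a hypothetical helper, and it assumes that `f`
# from test_expm is in scope and that `expm` also accepts plain numpy arrays.
def fd_gradient(func, x0, eps=1e-6):
    # central differences: (f(x + eps*e_i) - f(x - eps*e_i)) / (2*eps)
    g = numpy.zeros_like(x0)
    for i in range(x0.size):
        e = numpy.zeros_like(x0)
        e[i] = eps
        g[i] = (func(x0 + e) - func(x0 - e)) / (2 * eps)
    return g

# usage sketch: assert_array_almost_equal(g1, fd_gradient(f, x), decimal=4)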
def test_tracer_on_mixed_utpm_ndarray_mul(self):
    D, P = 1, 1
    A = numpy.arange(2 * 2, dtype=float).reshape(2, 2)
    x = UTPM(numpy.zeros((D, P, 2, 2)))

    def f(x):
        return sum(A * x)

    cg = CGraph()
    ax = Function(x)
    ay = f(ax)
    cg.independentFunctionList = [ax]
    cg.dependentFunctionList = [ay]

    assert_array_almost_equal(A, cg.gradient(x))
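# Why A itself is the expected gradient: sum(A * x) = sum_ij A_ij * x_ij, so
# d sum(A * x) / dx_ij = A_ij. A numpy-only sketch of the same identity via
# finite differences at x = 0 (illustrative helper, not part of the suite):
def fd_check_mixed_mul(A, eps=1e-7):
    g = numpy.zeros_like(A)
    for i in range(A.shape[0]):
        for j in range(A.shape[1]):
            e = numpy.zeros_like(A)
            e[i, j] = eps
            # derivative of sum(A * x) at x = 0 in direction e_ij
            g[i, j] = (numpy.sum(A * e) - numpy.sum(A * (-e))) / (2 * eps)
    return numpy.allclose(g, A)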
cg1.independentFunctionList = [J1, J2]
cg1.dependentFunctionList = [y]
print('covariance matrix: C =\n', C)

# trace the function evaluation of METHOD 2: naive method (potentially numerically unstable)
cg2 = CGraph()
J1 = Function(J1.x)
J2 = Function(J2.x)
C2 = eval_covariance_matrix_naive(J1, J2)
y = C2[0, 0]
cg2.trace_off()
cg2.independentFunctionList = [J1, J2]
cg2.dependentFunctionList = [y]
print('covariance matrix: C =\n', C2)

# check that both algorithms return the same result
print('difference between naive and nullspace method:\n', C - C2)

# compute the gradient for another value of J1 and J2
J1 = numpy.random.rand(M, N)
J2 = numpy.random.rand(K, N)
g1 = cg1.gradient([J1, J2])
g2 = cg2.gradient([J1, J2])
# note: cg1 traces METHOD 1 (nullspace), cg2 traces METHOD 2 (naive)
print('nullspace approach: dy/dJ1 = ', g1[0])
print('nullspace approach: dy/dJ2 = ', g1[1])
print('naive approach: dy/dJ1 = ', g2[0])
print('naive approach: dy/dJ2 = ', g2[1])
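# Instead of comparing the printed gradients by eye, the two methods can be
# checked against each other programmatically; a short sketch using
# numpy.testing (not in the original example):
from numpy.testing import assert_array_almost_equal
assert_array_almost_equal(g1[0], g2[0])
assert_array_almost_equal(g1[1], g2[1])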
n_p = 512  # data points per period
t = np.arange(n * n_p) / (n_p * f)  # time vector
current = i_hat * (np.sin(2 * np.pi * f * t)
                   + 0.7 * np.sin(6 * np.pi * f * t + 1))  # prescribed current
H = current / (2 * np.pi * r)  # resulting prescribed magnetic field

graph = CGraph()
graph.trace_on()
x = Function([alpha, a, k, c, Msat])  # parameter vector
p = {'alpha': x[0], 'a': x[1], 'k': x[2], 'c': x[3], 'm_sat': x[4]}
model = JilesAthertonModel.from_dict(p)
M = model.integrate_rk4(t, H)

H = H[::2]
t = t[::2]
B = mu_0 * (H + M)
# finite-difference approximation of dB/dt, padded with 0.0 to keep the length
dB_dt = np.append([0.0], (B[1:] - B[:-1]) / (t[1:] - t[:-1]))
P = np.sum(0.5 * H * dB_dt)
graph.trace_off()

graph.independentFunctionList = [x]
graph.dependentFunctionList = [P]
grad = graph.gradient([alpha, a, k, c, Msat])
print(grad)
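# Once traced, the graph can be re-evaluated at other parameter values without
# re-tracing, just as cg1 and cg2 are reused in the covariance example above.
# A sketch with mildly perturbed parameters (illustrative values, assuming the
# parameters are numeric scalars):
perturbed = [1.01 * alpha, 1.01 * a, 1.01 * k, 1.01 * c, 1.01 * Msat]
print(graph.gradient(perturbed))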