def test_rwms():
    """Read openQCD reweighting factors (versions 1.6 and 2.0) from the test
    data and check the recorded configuration index lists, reference values
    and the interplay with pe.reweight / extract_t0."""
    path = './tests//data/openqcd_test/'
    prefix = 'sfqcd'
    postfix = '.rwms'

    # sfqcd-1.6: Trajectories instead of confignumbers are printed to file.
    rwfo = pe.input.openQCD.read_rwms(path, prefix, version='1.6', postfix=postfix)
    repname = list(rwfo[0].idl.keys())[0]
    assert(rwfo[0].idl[repname] == range(1, 13))

    # Explicit r_start/r_stop windows must restrict the index list accordingly.
    rwfo = pe.input.openQCD.read_rwms(path, prefix, version='1.6', postfix=postfix, r_start=[1], r_stop=[12])
    assert(rwfo[0].idl[repname] == range(1, 13))
    rwfo = pe.input.openQCD.read_rwms(path, prefix, version='1.6', postfix=postfix, r_start=[3], r_stop=[8])
    assert(rwfo[0].idl[repname] == range(3, 9))
    rwfo = pe.input.openQCD.read_rwms(path, prefix, version='1.6', postfix=postfix, r_start=[2], r_stop=[6])
    assert(rwfo[0].idl[repname] == range(2, 7))

    # r_step thins the configuration list.
    rwfs = pe.input.openQCD.read_rwms(path, prefix, version='1.6', postfix=postfix, r_start=[1], r_stop=[12], r_step=2)
    assert(rwfs[0].idl[repname] == range(1, 12, 2))
    rwfs = pe.input.openQCD.read_rwms(path, prefix, version='1.6', postfix=postfix, r_start=[2], r_stop=[12], r_step=2)
    assert(rwfs[0].idl[repname] == range(2, 13, 2))

    rwfo = pe.input.openQCD.read_rwms(path, prefix, version='1.6', postfix=postfix)
    # Second delta of the full read and first delta of the stepped read both
    # belong to configuration 2, so the reconstructed samples must coincide.
    assert((rwfo[0].r_values[repname] + rwfo[0].deltas[repname][1]) == (rwfs[0].r_values[repname] + rwfs[0].deltas[repname][0]))

    # Reweighting must work for matching sample counts and index lists.
    o = pe.pseudo_Obs(1., .01, repname, samples=12)
    pe.reweight(rwfo[0], [o])
    o = pe.pseudo_Obs(1., .01, repname, samples=6)
    pe.reweight(rwfo[0], [o])
    o.idl[repname] = range(2, 13, 2)
    pe.reweight(rwfo[0], [o])
    pe.reweight(rwfs[0], [o])

    files = ['openqcd2r1.ms1.dat']
    names = ['openqcd2|r1']
    # TM with 2 Hasenbusch factors and 2 sources each + RHMC with one source, openQCD 2.0
    rwfo = pe.input.openQCD.read_rwms(path, prefix, version='2.0', files=files, names=names)
    assert(len(rwfo) == 2)
    # Reference values for this fixed data set.
    assert(rwfo[0].value == 0.9999974970236312)
    assert(rwfo[1].value == 1.184681251089919)
    repname = list(rwfo[0].idl.keys())[0]
    assert(rwfo[0].idl[repname] == range(1, 10))
    rwfo = pe.input.openQCD.read_rwms(path, prefix, version='2.0', files=files, names=names, r_start=[1], r_stop=[8], print_err=True)
    assert(rwfo[0].idl[repname] == range(1, 9))

    # t0
    prefix = 'openqcd'
    t0 = pe.input.openQCD.extract_t0(path, prefix, dtr_read=3, xmin=0, spatial_extent=4)

    files = ['openqcd2r1.ms.dat']
    names = ['openqcd2|r1']
    t0 = pe.input.openQCD.extract_t0(path, '', dtr_read=3, xmin=0, spatial_extent=4, files=files, names=names, fit_range=2)
    t0 = pe.input.openQCD.extract_t0(path, prefix, dtr_read=3, xmin=0, spatial_extent=4, r_start=[1])
    repname = list(rwfo[0].idl.keys())[0]
    assert(t0.idl[repname] == range(1, 10))
    t0 = pe.input.openQCD.extract_t0(path, prefix, dtr_read=3, xmin=0, spatial_extent=4, r_start=[2], r_stop=[8])
    repname = list(rwfo[0].idl.keys())[0]
    assert(t0.idl[repname] == range(2, 9))
    # Optional features: plaquette normalisation, thermalisation detection, fit plotting.
    t0 = pe.input.openQCD.extract_t0(path, prefix, dtr_read=3, xmin=0, spatial_extent=4, fit_range=2, plaquette=True, assume_thermalization=True)
    pe.input.openQCD.extract_t0(path, '', dtr_read=3, xmin=0, spatial_extent=4, files=files, names=names, fit_range=2, plot_fit=True)
def test_reweighting():
    """A freshly built correlator is not reweighted; reweight() produces one
    whose flag is set."""
    corr = pe.correlators.Corr(
        [pe.pseudo_Obs(10, 0.1, 't'), pe.pseudo_Obs(0, 0.05, 't')])
    assert corr.reweighted is False
    reweighted_corr = corr.reweight(pe.pseudo_Obs(1, 0.1, 't'))
    assert reweighted_corr.reweighted is True
def test_function_overloading():
    """Operator/ufunc overloading on Obs must agree with explicit
    derived_observable evaluation, and inverse pairs must round-trip."""
    a = pe.pseudo_Obs(17, 2.9, 'e1')
    b = pe.pseudo_Obs(4, 0.8, 'e1')

    functions = [
        lambda x: x[0] + x[1], lambda x: x[1] + x[0],
        lambda x: x[0] - x[1], lambda x: x[1] - x[0],
        lambda x: x[0] * x[1], lambda x: x[1] * x[0],
        lambda x: x[0] / x[1], lambda x: x[1] / x[0],
        lambda x: np.exp(x[0]), lambda x: np.sin(x[0]),
        lambda x: np.cos(x[0]), lambda x: np.tan(x[0]),
        lambda x: np.log(x[0]), lambda x: np.sqrt(np.abs(x[0])),
        lambda x: np.sinh(x[0]), lambda x: np.cosh(x[0]),
        lambda x: np.tanh(x[0]),
    ]

    for func in functions:
        via_overloading = func([a, b])
        via_derived = pe.derived_observable(func, [a, b])
        assert (via_derived - via_overloading).is_zero()

    # Inverse function pairs reproduce the input exactly.
    assert np.log(np.exp(b)) == b
    assert np.exp(np.log(b)) == b
    assert np.sqrt(b**2) == b
    assert np.sqrt(b)**2 == b

    # These only need to execute without raising.
    np.arcsin(1 / b)
    np.arccos(1 / b)
    np.arctan(1 / b)
    np.arctanh(1 / b)
    np.sinc(1 / b)
def test_corr_function_overloading():
    """Elementwise operator overloading on Corr objects matches applying the
    same function to the underlying Obs timeslice by timeslice.

    Fix: this function was previously also named ``test_function_overloading``,
    duplicating the Obs-level test of that name defined earlier in this file.
    The later definition shadowed the earlier one, so pytest only collected
    and ran a single test.  Renamed so both tests run.
    """
    corr_content_a = []
    corr_content_b = []
    for t in range(24):
        # One correlator with tiny values and one with huge values to
        # exercise very different numerical scales.
        corr_content_a.append(pe.pseudo_Obs(np.random.normal(1e-10, 1e-8), 1e-4, 't'))
        corr_content_b.append(pe.pseudo_Obs(np.random.normal(1e8, 1e10), 1e7, 't'))

    corr_a = pe.correlators.Corr(corr_content_a)
    corr_b = pe.correlators.Corr(corr_content_b)

    fs = [
        lambda x: x[0] + x[1], lambda x: x[1] + x[0],
        lambda x: x[0] - x[1], lambda x: x[1] - x[0],
        lambda x: x[0] * x[1], lambda x: x[1] * x[0],
        lambda x: x[0] / x[1], lambda x: x[1] / x[0],
        lambda x: np.exp(x[0]), lambda x: np.sin(x[0]),
        lambda x: np.cos(x[0]), lambda x: np.tan(x[0]),
        lambda x: np.log(x[0] + 0.1), lambda x: np.sqrt(np.abs(x[0])),
        lambda x: np.sinh(x[0]), lambda x: np.cosh(x[0]),
        lambda x: np.tanh(x[0]),
    ]

    for f in fs:
        corr_result = f([corr_a, corr_b])
        for o_a, o_b, con in zip(corr_content_a, corr_content_b, corr_result.content):
            obs_result = f([o_a, o_b])
            obs_result.gamma_method()
            # Value, error and per-configuration fluctuations must all agree.
            assert np.isclose(con[0].value, obs_result.value)
            assert np.isclose(con[0].dvalue, obs_result.dvalue)
            assert np.allclose(con[0].deltas['t'], obs_result.deltas['t'])
def test_odr_derivatives(n):
    """Derivatives from odr_fit must agree with the numerically
    differentiated fit_general result.

    NOTE(review): the parameter ``n`` is presumably supplied by a pytest
    fixture/parametrize outside this view and is not used directly here --
    confirm at the call site.  Fix: the loop variable was previously also
    named ``n`` and shadowed the parameter; renamed to ``xval``.
    """
    x = []
    y = []
    x_err = 0.01
    y_err = 0.01

    # Sample y = x**2 - 1 with Gaussian noise on both axes.
    for xval in np.arange(1, 9, 2):
        loc_xvalue = xval + np.random.normal(0.0, x_err)
        x.append(pe.pseudo_Obs(loc_xvalue, x_err, str(xval)))
        y.append(pe.pseudo_Obs(loc_xvalue**2 - 1 + np.random.normal(0.0, y_err), y_err, str(xval)))

    def func(a, x):
        return a[0] + a[1] * x**2

    fit1 = pe.fits.odr_fit(x, y, func)
    tfit = pe.fits.fit_general(x, y, func, base_step=0.1, step_ratio=1.1, num_steps=20)

    # Per-configuration fluctuations of the second parameter must coincide.
    assert np.abs(np.max(np.array(list(fit1[1].deltas.values())) - np.array(list(tfit[1].deltas.values())))) < 10e-8
def test_covariance_symmetry():
    """covariance(a, b) equals covariance(b, a) and is bounded by the product
    of the individual errors -- also for an Obs on a gappy ensemble."""
    def _random_obs(name):
        # Draw value and error, build the Obs and run the error analysis.
        val = np.random.normal(5, 10)
        err = np.abs(np.random.normal(0, 1))
        obs = pe.pseudo_Obs(val, err, name)
        obs.gamma_method()
        return obs

    test_obs1 = _random_obs('t')
    test_obs2 = _random_obs('t')
    eps = np.finfo(np.float64).eps

    cov_ab = pe.covariance(test_obs1, test_obs2)
    cov_ba = pe.covariance(test_obs2, test_obs1)
    assert np.abs(cov_ab - cov_ba) <= 10 * eps
    assert np.abs(cov_ab) < test_obs1.dvalue * test_obs2.dvalue * (1 + 10 * eps)

    # Build an observable that only lives on a random ~20% of configurations.
    N = 100
    arr = np.random.normal(1, .2, size=N)
    configs = np.ones_like(arr)
    for i in np.random.uniform(0, len(arr), size=int(.8 * N)):
        configs[int(i)] = 0
    zero_arr = [arr[i] for i in range(len(arr)) if not configs[i] == 0]
    idx = [i + 1 for i in range(len(configs)) if configs[i] == 1]
    a = pe.Obs([zero_arr], ['t'], idl=[idx])
    a.gamma_method()
    assert np.isclose(a.dvalue**2, pe.covariance(a, a), atol=100, rtol=1e-4)

    # Symmetry and the bound must also hold against the gappy observable.
    cov_ab = pe.covariance(test_obs1, a)
    cov_ba = pe.covariance(a, test_obs1)
    assert np.abs(cov_ab - cov_ba) <= 10 * eps
    assert np.abs(cov_ab) < test_obs1.dvalue * test_obs2.dvalue * (1 + 10 * eps)
def test_comparison():
    """Ordering of two Obs mirrors the ordering of their central values."""
    value1 = np.random.normal(0, 100)
    obs1 = pe.pseudo_Obs(value1, 0.1, 't')
    value2 = np.random.normal(0, 100)
    obs2 = pe.pseudo_Obs(value2, 0.1, 't')

    assert (obs1 > obs2) == (value1 > value2)
    assert (obs1 < obs2) == (value1 < value2)
def test_r_value_persistence():
    """Derived observables and fit results keep per-ensemble r_values
    consistent with the central value."""
    def f(a, x):
        return a[0] + a[1] * x

    a = pe.pseudo_Obs(1.1, .1, 'a')
    assert np.isclose(a.value, a.r_values['a'])

    a_2 = a**2
    assert np.isclose(a_2.value, a_2.r_values['a'])

    b = pe.pseudo_Obs(2.1, .2, 'b')
    y = [a, b]
    [o.gamma_method() for o in y]

    def _check_parameters(fitp):
        # Both fitted parameters must carry matching r_values on both ensembles.
        for idx in (0, 1):
            for ens in ('a', 'b'):
                assert np.isclose(fitp[idx].value, fitp[idx].r_values[ens])

    _check_parameters(pe.fits.least_squares([1, 2], y, f))
    _check_parameters(pe.fits.total_least_squares(y, y, f))
    _check_parameters(pe.fits.least_squares([1, 2], y, f, priors=y))
def test_json_corr_2d_io():
    """Round-trip a 2x2 matrix correlator through the json.gz format,
    covering tags, padding and plateau ranges."""
    # Four timeslices of 2x2 Obs matrices with zero off-diagonal entries.
    obs_list = [
        np.array([[
            pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test'),
            pe.pseudo_Obs(0.0, 0.1 * i, 'test')
        ], [
            pe.pseudo_Obs(0.0, 0.1 * i, 'test'),
            pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test')
        ]]) for i in range(4)
    ]
    for tag in [None, "test"]:
        obs_list[3][0, 1].tag = tag
        for padding in [0, 1]:
            for prange in [None, [3, 6]]:
                my_corr = pe.Corr(obs_list, padding=[padding, padding], prange=prange)
                my_corr.tag = tag
                pe.input.json.dump_to_json(my_corr, 'corr')
                recover = pe.input.json.load_json('corr')
                os.remove('corr.json.gz')
                # Every recovered entry must agree with the original;
                # padded (None) timeslices are filtered out before comparing.
                assert np.all([
                    np.all([o.is_zero() for o in q])
                    for q in [x.ravel() for x in (my_corr - recover) if x is not None]
                ])
                # Padding must survive the round trip as None entries.
                for index, entry in enumerate(my_corr):
                    if entry is None:
                        assert recover[index] is None
                # Metadata survives, too.
                assert my_corr.tag == recover.tag
                assert my_corr.prange == recover.prange
def test_correlate():
    """correlate() accepts another Corr or an Obs, but rejects plain numbers."""
    base = pe.correlators.Corr(
        [pe.pseudo_Obs(10, 0.1, 't'), pe.pseudo_Obs(0, 0.05, 't')])

    base.correlate(base)
    base.correlate(base[0])
    with pytest.raises(Exception):
        base.correlate(7.3)
def test_merge_obs_r_values():
    """Merging replica preserves each replicum mean and averages the total."""
    rep1 = pe.pseudo_Obs(1.1, .1, 'a|1')
    rep2 = pe.pseudo_Obs(1.2, .1, 'a|2')
    merged = pe.merge_obs([rep1, rep2])

    assert np.isclose(merged.r_values['a|1'], rep1.value)
    assert np.isclose(merged.r_values['a|2'], rep2.value)
    assert np.isclose(merged.value, np.mean([rep1.value, rep2.value]))
def test_plateau():
    """plateau() works with both extraction methods and requires an explicit
    range argument."""
    corr = pe.correlators.Corr([
        pe.pseudo_Obs(1.01324, 0.05, 't'),
        pe.pseudo_Obs(1.042345, 0.008, 't'),
    ])

    for method in ("fit", "mean"):
        corr.plateau([0, 1], method=method)
    with pytest.raises(Exception):
        corr.plateau()
def test_covariance_is_variance():
    """covariance(x, x) reproduces dvalue**2, also for a multi-ensemble Obs."""
    eps = np.finfo(np.float64).eps
    value = np.random.normal(5, 10)
    dvalue = np.abs(np.random.normal(0, 1))

    test_obs = pe.pseudo_Obs(value, dvalue, 't')
    test_obs.gamma_method()
    assert np.abs(test_obs.dvalue**2 - pe.covariance(test_obs, test_obs)) <= 10 * eps

    # Add a contribution from a second ensemble; the identity must still hold.
    test_obs = test_obs + pe.pseudo_Obs(value, dvalue, 'q', 200)
    test_obs.gamma_method()
    assert np.abs(test_obs.dvalue**2 - pe.covariance(test_obs, test_obs)) <= 10 * eps
def test_m_eff():
    """All effective-mass variants run on a decaying four-point correlator."""
    corr = pe.correlators.Corr([
        pe.pseudo_Obs(10, 0.1, 't'),
        pe.pseudo_Obs(9, 0.05, 't'),
        pe.pseudo_Obs(8, 0.1, 't'),
        pe.pseudo_Obs(7, 0.05, 't'),
    ])
    for variant in ('log', 'cosh', 'sinh', 'arccosh'):
        corr.m_eff(variant)
def test_fit_correlator():
    """A linear fit to a two-point correlator reproduces intercept and slope
    exactly."""
    corr = pe.correlators.Corr([
        pe.pseudo_Obs(1.01324, 0.05, 't'),
        pe.pseudo_Obs(2.042345, 0.0004, 't'),
    ])

    def linear(a, x):
        return a[0] + a[1] * x

    result = corr.fit(linear)
    # With x-values 0 and 1 the intercept is C(0) and the slope C(1) - C(0).
    assert result[0] == corr[0]
    assert result[1] == corr[1] - corr[0]
def test_covariance_symmetry_basic():
    """Symmetry and error bound of the covariance of two pseudo-Obs.

    Fix: this function was previously named ``test_covariance_symmetry``,
    duplicating an extended test of the same name defined earlier in this
    file; the later definition shadowed the earlier one, so pytest only ran
    one of them.  Renamed so both tests are collected.
    """
    value1 = np.random.normal(5, 10)
    dvalue1 = np.abs(np.random.normal(0, 1))
    test_obs1 = pe.pseudo_Obs(value1, dvalue1, 't')
    test_obs1.gamma_method()
    value2 = np.random.normal(5, 10)
    dvalue2 = np.abs(np.random.normal(0, 1))
    test_obs2 = pe.pseudo_Obs(value2, dvalue2, 't')
    test_obs2.gamma_method()

    cov_ab = pe.covariance(test_obs1, test_obs2)
    cov_ba = pe.covariance(test_obs2, test_obs1)
    # Covariance must be symmetric in its arguments ...
    assert np.abs(cov_ab - cov_ba) <= 10 * np.finfo(np.float64).eps
    # ... and bounded by the product of the individual errors.
    assert np.abs(cov_ab) < test_obs1.dvalue * test_obs2.dvalue * (1 + 10 * np.finfo(np.float64).eps)
def test_overloaded_functions():
    """Error propagation through overloaded numpy functions matches finite
    differences and the analytic derivative."""
    # Each entry pairs a function with its analytic derivative.
    pairs = [
        (np.exp, np.exp),
        (np.log, lambda x: 1 / x),
        (np.sin, np.cos),
        (np.cos, lambda x: -np.sin(x)),
        (np.tan, lambda x: 1 / np.cos(x)**2),
        (np.sinh, np.cosh),
        (np.cosh, np.sinh),
        (np.arcsinh, lambda x: 1 / np.sqrt(x**2 + 1)),
        (np.arccosh, lambda x: 1 / np.sqrt(x**2 - 1)),
    ]

    val = 3 + 0.5 * np.random.rand()
    dval = 0.3 + 0.4 * np.random.rand()
    test_obs = pe.pseudo_Obs(val, dval, 't', int(1000 * (1 + np.random.rand())))

    for func, dfunc in pairs:
        ad_obs = func(test_obs)
        fd_obs = pe.derived_observable(lambda x, **kwargs: func(x[0]), [test_obs], num_grad=True)
        ad_obs.gamma_method(S=0.01)
        # Per-configuration fluctuations agree with finite differences.
        assert np.max((ad_obs.deltas['t'] - fd_obs.deltas['t']) / ad_obs.deltas['t']) < 1e-8, func.__name__
        # Central value agrees with the plain function evaluation.
        assert np.abs((ad_obs.value - func(val)) / ad_obs.value) < 1e-10, func.__name__
        # Error agrees with linear propagation via the analytic derivative.
        assert np.abs(ad_obs.dvalue - dval * np.abs(dfunc(val))) < 1e-6, func.__name__
def test_derived_observables():
    """Autograd and numerical differentiation must agree for derived
    observables, and x/x must give exactly one with vanishing error."""
    # Construct pseudo Obs with random shape
    test_obs = pe.pseudo_Obs(2, 0.1 * (1 + np.random.rand()), 't',
                             int(1000 * (1 + np.random.rand())))

    # Check if autograd and numgrad give the same result
    d_Obs_ad = pe.derived_observable(
        lambda x, **kwargs: x[0] * x[1] * np.sin(x[0] * x[1]),
        [test_obs, test_obs])
    d_Obs_ad.gamma_method()
    d_Obs_fd = pe.derived_observable(
        lambda x, **kwargs: x[0] * x[1] * np.sin(x[0] * x[1]),
        [test_obs, test_obs], num_grad=True)
    d_Obs_fd.gamma_method()

    assert d_Obs_ad.value == d_Obs_fd.value
    # Central value: f(2, 2) = 4 * sin(4).
    assert np.abs(4.0 * np.sin(4.0) - d_Obs_ad.value) < 1000 * np.finfo(np.float64).eps * np.abs(d_Obs_ad.value)
    assert np.abs(d_Obs_ad.dvalue - d_Obs_fd.dvalue) < 1000 * np.finfo(np.float64).eps * d_Obs_ad.dvalue

    # Dividing an observable by itself must give exactly one with an error
    # at machine precision.
    i_am_one = pe.derived_observable(lambda x, **kwargs: x[0] / x[1],
                                     [d_Obs_ad, d_Obs_ad])
    i_am_one.gamma_method()

    assert i_am_one.value == 1.0
    assert i_am_one.dvalue < 2 * np.finfo(np.float64).eps
    assert i_am_one.e_dvalue['t'] <= 2 * np.finfo(np.float64).eps
    assert i_am_one.e_ddvalue['t'] <= 2 * np.finfo(np.float64).eps
def test_modify_correlator():
    """Symmetrisation of asymmetric data warns; derivatives work with padding
    and raise once all content has been cleared."""
    corr_content = []
    for t in range(24):
        exponent = np.random.normal(3, 5)
        corr_content.append(pe.pseudo_Obs(2 + 10**exponent, 10**(exponent - 1), 't'))

    corr = pe.Corr(corr_content)
    with pytest.warns(RuntimeWarning):
        corr.symmetric()
    with pytest.warns(RuntimeWarning):
        corr.anti_symmetric()

    for pad in [0, 2]:
        corr = pe.Corr(corr_content, padding=[pad, pad])
        corr.roll(np.random.randint(100))
        for variant in ("forward", "symmetric", "improved"):
            corr.deriv(variant=variant)
        corr.deriv().deriv()
        for variant in ("symmetric", "improved"):
            corr.second_deriv(variant=variant)
        corr.second_deriv().second_deriv()

    # Wipe the content; every derivative variant must now raise.
    for i in range(len(corr.content)):
        corr.content[i] = None
    for func in [pe.Corr.deriv, pe.Corr.second_deriv]:
        for variant in ["symmetric", "improved", "forward", "gibberish", None]:
            with pytest.raises(Exception):
                func(corr, variant=variant)
def test_utility():
    """Smoke-test printing, plotting and pickle/json round trips of a Corr."""
    corr_content = []
    for t in range(8):
        exponent = np.random.normal(3, 5)
        corr_content.append(pe.pseudo_Obs(2 + 10**exponent, 10**(exponent - 1), 't'))

    corr = pe.correlators.Corr(corr_content)
    corr.gamma_method()
    corr.print()
    corr.print([2, 4])
    corr.show()
    corr.show(comp=corr)

    def _check_round_trip(reloaded):
        # Value, error and fluctuations must survive serialization.
        reloaded.gamma_method()
        for o_a, o_b in zip(corr.content, reloaded.content):
            assert np.isclose(o_a[0].value, o_b[0].value)
            assert np.isclose(o_a[0].dvalue, o_b[0].dvalue)
            assert np.allclose(o_a[0].deltas['t'], o_b[0].deltas['t'])

    corr.dump('test_dump', datatype="pickle", path='.')
    corr.dump('test_dump', datatype="pickle")
    new_corr = pe.load_object('test_dump.p')
    os.remove('test_dump.p')
    _check_round_trip(new_corr)

    corr.dump('test_dump', datatype="json.gz", path='.')
    corr.dump('test_dump', datatype="json.gz")
    new_corr = pe.input.json.load_json('test_dump')
    os.remove('test_dump.json.gz')
    _check_round_trip(new_corr)
def test_renorm_deriv_of_corr(tmp_path):
    """A renormalised, derived Corr survives a json round trip unchanged."""
    corr = pe.Corr([pe.pseudo_Obs(i, .1, 'test') for i in range(10)])
    corr *= pe.cov_Obs(1., .1, '#ren')
    corr = corr.deriv()

    target = (tmp_path / 'test').as_posix()
    pe.input.json.dump_to_json(corr, target)
    reloaded = pe.input.json.load_json(target)
    # Compare away from the edges, which the derivative leaves undefined.
    assert np.all([o == 0 for o in (corr - reloaded)[1:-1]])
def test_error_band():
    """error_band requires gamma_method to have been run on the fit result."""
    def linear(a, x):
        return a[0] + a[1] * x

    obs_lo = pe.pseudo_Obs(0.0, 0.1, 'a')
    obs_hi = pe.pseudo_Obs(1.0, 0.2, 'a')
    x = [0, 1]
    y = [obs_lo, obs_hi]
    fitp = pe.fits.least_squares(x, y, linear)

    # Before the error analysis the band cannot be computed ...
    with pytest.raises(Exception):
        pe.fits.error_band(x, linear, fitp.fit_parameters)
    # ... afterwards it can.
    fitp.gamma_method()
    pe.fits.error_band(x, linear, fitp.fit_parameters)
def test_alternative_solvers():
    """All supported minimizers must converge to the same chisquare."""
    dim = 192
    x = np.arange(dim)
    y = 2 * np.exp(-0.06 * x) + np.random.normal(0.0, 0.15, dim)
    yerr = 0.1 + 0.1 * np.random.rand(dim)
    oy = [pe.pseudo_Obs(y[i], yerr[i], 'test') for i in range(dim)]

    def func(a, x):
        return a[0] * np.exp(-a[1] * x)

    chisquare_values = [
        pe.least_squares(x, oy, func, method=method).chisquare
        for method in ('migrad', 'Powell', 'Nelder-Mead', 'Levenberg-Marquardt')
    ]
    chisquare_values = np.array(chisquare_values)
    assert np.all(np.isclose(chisquare_values, chisquare_values[0]))
def test_covobs_name_collision():
    """Combining a cov_Obs with another observable that reuses the same
    ensemble name must raise."""
    covobs = pe.cov_Obs(0.5, 0.002, 'test')
    my_obs = pe.pseudo_Obs(2.3, 0.2, 'test')
    with pytest.raises(Exception):
        my_obs + covobs

    covobs2 = pe.cov_Obs(0.3, 0.001, 'test')
    with pytest.raises(Exception):
        covobs + covobs2
def test_mpm():
    """The matrix pencil method runs on a noisy exponential correlator."""
    corr_content = []
    for t in range(8):
        # Exponential signal with 1% relative noise and error.
        signal = 0.8 * np.exp(-0.4 * t)
        corr_content.append(pe.pseudo_Obs(np.random.normal(signal, 1e-2 * signal), 1e-2 * signal, 't'))

    pe.mpm.matrix_pencil_method(corr_content)
def test_dump():
    """A pickle round trip of an Obs preserves its deltas.

    Fix: the previous assertion compared ``a.deltas['t'].all() ==
    b.deltas['t'].all()``, which reduces each array to a single truthiness
    flag and therefore passes for almost any pair of arrays.  The arrays are
    now compared element-wise.
    """
    value = np.random.normal(5, 10)
    dvalue = np.abs(np.random.normal(0, 1))
    test_obs = pe.pseudo_Obs(value, dvalue, 't')
    test_obs.dump('test_dump')
    new_obs = pe.load_object('test_dump.p')
    os.remove('test_dump.p')
    # Pickle is lossless, so the fluctuations must match exactly.
    assert np.array_equal(test_obs.deltas['t'], new_obs.deltas['t'])
def test_thin():
    """A thinned, renormalised correlator stays analysable and fittable."""
    corr = pe.Corr([pe.pseudo_Obs(i, .1, 'test') for i in range(10)])
    corr *= pe.cov_Obs(1., .1, '#ren')

    thinned = corr.thin()
    thinned.gamma_method()
    thinned.fit(lambda a, x: a[0] * x)

    # Thinning with offsets and a custom stride must also work.
    corr.thin(offset=1)
    corr.thin(3, offset=1)
def test_matrix_functions(n):
    """Check pe.linalg operations (inverse, Cholesky, eigh) on a random
    matrix of Obs.

    NOTE(review): the parameter ``n`` is not used in the body; presumably it
    is injected by a pytest parametrize/fixture outside this view to repeat
    the test with fresh random data -- confirm at the call site.
    """
    # Random square matrix of Obs with dimension between 3 and 6.
    dim = 3 + int(4 * np.random.rand())
    print(dim)
    matrix = []
    for i in range(dim):
        row = []
        for j in range(dim):
            row.append(pe.pseudo_Obs(np.random.rand(), 0.2 + 0.1 * np.random.rand(), 'e1'))
        matrix.append(row)
    matrix = np.array(matrix) @ np.identity(dim)

    # Check inverse of matrix
    inv = pe.linalg.mat_mat_op(np.linalg.inv, matrix)
    check_inv = matrix @ inv

    # matrix @ inverse must be the identity, with vanishing errors.
    for (i, j), entry in np.ndenumerate(check_inv):
        entry.gamma_method()
        if (i == j):
            assert math.isclose(entry.value, 1.0, abs_tol=1e-9), 'value ' + str(i) + ',' + str(j) + ' ' + str(entry.value)
        else:
            assert math.isclose(entry.value, 0.0, abs_tol=1e-9), 'value ' + str(i) + ',' + str(j) + ' ' + str(entry.value)
        assert math.isclose(entry.dvalue, 0.0, abs_tol=1e-9), 'dvalue ' + str(i) + ',' + str(j) + ' ' + str(entry.dvalue)

    # Check Cholesky decomposition
    sym = np.dot(matrix, matrix.T)
    cholesky = pe.linalg.mat_mat_op(np.linalg.cholesky, sym)
    check = cholesky @ cholesky.T

    # L @ L.T must reproduce the symmetric matrix in value and error.
    for (i, j), entry in np.ndenumerate(check):
        diff = entry - sym[i, j]
        diff.gamma_method()
        assert math.isclose(diff.value, 0.0, abs_tol=1e-9), 'value ' + str(i) + ',' + str(j)
        assert math.isclose(diff.dvalue, 0.0, abs_tol=1e-9), 'dvalue ' + str(i) + ',' + str(j)

    # Check eigh
    e, v = pe.linalg.eigh(sym)
    # Each eigenpair must satisfy sym @ v = e * v componentwise.
    for i in range(dim):
        tmp = sym @ v[:, i] - v[:, i] * e[i]
        for j in range(dim):
            tmp[j].gamma_method()
            assert math.isclose(tmp[j].value, 0.0, abs_tol=1e-9), 'value ' + str(i) + ',' + str(j)
            assert math.isclose(tmp[j].dvalue, 0.0, abs_tol=1e-9), 'dvalue ' + str(i) + ',' + str(j)