def test_set(self):
    " set "
    new_defaults = dict(
        map=AdaptiveMap([[1, 2], [0, 1]]),
        neval=100,            # number of evaluations per iteration
        maxinc_axis=100,      # number of adaptive-map increments per axis
        nhcube_batch=10,      # number of h-cubes per batch
        max_nhcube=5e2,       # max number of h-cubes
        max_neval_hcube=1e1,  # max number of evaluations per h-cube
        nitn=100,             # number of iterations
        alpha=0.35,
        beta=0.25,
        adapt_to_errors=True,
        rtol=0.1,
        atol=0.2,
        analyzer=reporter(5),
    )
    I = Integrator([[1, 2]])
    old_defaults = I.set(**new_defaults)
    for k in new_defaults:
        if k == 'map':
            np_assert_allclose(
                [
                    [I.map.grid[0, 0], I.map.grid[0, -1]],
                    [I.map.grid[1, 0], I.map.grid[1, -1]],
                ],
                new_defaults['map'].grid,
            )
        else:
            self.assertEqual(getattr(I, k), new_defaults[k])

def test_ravgdict_unwgtd(self):
    " unweighted RAvgDict "
    # scalar
    mean_s = np.random.uniform(-10., 10.)
    sdev_s = 0.1
    x_s = gv.gvar(mean_s, sdev_s)
    # array
    mean_a = np.random.uniform(-10., 10., (2,))
    cov_a = np.array([[1., 0.5], [0.5, 2.]]) / 10.
    x_a = gv.gvar(mean_a, cov_a)
    N = 30
    r_a = gv.raniter(x_a, N)
    ravg = RAvgDict(dict(scalar=1.0, array=[[2., 3.]]), weighted=False)
    for ri in r_a:
        ravg.add(dict(
            scalar=gv.gvar(x_s(), sdev_s),
            array=[gv.gvar(ri, cov_a)],
        ))
    np_assert_allclose(ravg['scalar'].sdev, x_s.sdev / (N ** 0.5))
    self.assertLess(
        abs(ravg['scalar'].mean - mean_s), 5 * ravg['scalar'].sdev
    )
    np_assert_allclose(gv.evalcov(ravg['array'].flat), cov_a / N)
    for i in range(2):
        self.assertLess(
            abs(mean_a[i] - ravg['array'][0, i].mean),
            5 * ravg['array'][0, i].sdev,
        )
    self.assertEqual(ravg.dof, 2 * N - 2 + N - 1)
    self.assertGreater(ravg.Q, 1e-3)

def test_ravgdict_wgtd(self):
    " weighted RAvgDict "
    # scalar
    mean_s = np.random.uniform(-10., 10.)
    xbig_s = gv.gvar(mean_s, 1.)
    xsmall_s = gv.gvar(mean_s, 0.1)
    # array
    mean_a = np.random.uniform(-10., 10., (2,))
    cov_a = np.array([[1., 0.5], [0.5, 2.]])
    invcov = np.linalg.inv(cov_a)
    N = 30
    xbig_a = gv.gvar(mean_a, cov_a)
    rbig_a = gv.raniter(xbig_a, N)
    xsmall_a = gv.gvar(mean_a, cov_a / 10.)
    rsmall_a = gv.raniter(xsmall_a, N)
    ravg = RAvgDict(dict(scalar=1.0, array=[[2., 3.]]))
    for rb, rs in zip(rbig_a, rsmall_a):
        ravg.add(dict(
            scalar=gv.gvar(xbig_s(), 1.),
            array=[gv.gvar(rb, cov_a)],
        ))
        ravg.add(dict(
            scalar=gv.gvar(xsmall_s(), 0.1),
            array=[gv.gvar(rs, cov_a / 10.)],
        ))
    np_assert_allclose(
        ravg['scalar'].sdev,
        1. / (N * (1. / xbig_s.var + 1. / xsmall_s.var)) ** 0.5,
    )
    self.assertLess(
        abs(ravg['scalar'].mean - mean_s), 5 * ravg['scalar'].sdev
    )
    np_assert_allclose(
        gv.evalcov(ravg['array'].flat), cov_a / (10. + 1.) / N
    )
    for i in range(2):
        self.assertLess(
            abs(mean_a[i] - ravg['array'][0, i].mean),
            5 * ravg['array'][0, i].sdev,
        )
    self.assertEqual(ravg.dof, 4 * N - 2 + 2 * N - 1)
    self.assertGreater(ravg.Q, 0.5e-3)

def test_math(self):
    " test math involving GVars "
    x = gvar.gvar(2., 5.)
    cases = [
        (x + 10., 12., 5.),
        (10 + x, 12., 5.),
        (+x, 2., 5.),
        (x - 6., -4., 5.),
        (6 - x, 4., 5.),
        (-x, -2., 5.),
        (3 * x, 6., 15.),
        (x * 3., 6., 15.),
        (x / 4., 0.5, 1.25),
        (10. / x, 5., 12.5),
        (x ** 2, 4., 20.),
        (2 ** x, 4., math.log(2) * 20.),
        (np.log(x), math.log(2.), 2.5),
        (np.exp(x), math.exp(2.), math.exp(2.) * 5.),
        (np.exp(np.log(x)), 2., 5.),
        (np.sqrt(x), math.sqrt(2.), math.sqrt(2.) * 1.25),
        (np.sqrt(x ** 2), 2., 5.),
    ]
    for y, ymean, ysdev in cases:
        np_assert_allclose(y.mean, ymean)
        np_assert_allclose(y.sdev, ysdev)

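# Side note (not part of the original suite): a minimal sketch of where the
# expected sdevs above come from, assuming gvar's standard first-order error
# propagation, sdev_y = |dy/dx| * sdev_x for y = f(x).
import math

def _propagated_sdev(dydx, xsdev):
    """First-order (linearized) standard deviation of f(x)."""
    return abs(dydx) * xsdev

# x = 2 +- 5, as in test_math above:
assert math.isclose(_propagated_sdev(2 * 2., 5.), 20.)       # y = x**2, dy/dx = 2x
assert math.isclose(_propagated_sdev(1. / 2., 5.), 2.5)      # y = log(x), dy/dx = 1/x
assert math.isclose(
    _propagated_sdev(math.exp(2.), 5.), math.exp(2.) * 5.    # y = exp(x)
)
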
def test_make_sample_rmat_chi0_ome0(make_sample_rmat_impl, module_name):
    # when chi = 0.0 and ome = 0.0 the resulting sample rotation matrix
    # should be the identity
    chi = 0.0
    ome = 0.0
    result = make_sample_rmat_impl(chi, ome)
    np_assert_allclose(xf_cnst.identity_3x3, result)

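# Side note (an illustrative sketch, not taken from the library): one common
# convention builds the sample rotation from a tilt `chi` about the lab X axis
# composed with a rotation `ome` about the lab Y axis; whatever the exact
# convention, both factors reduce to the identity at zero angle, which is the
# property the test above checks.
import numpy as np

def _rot_x(a):
    c, s = np.cos(a), np.sin(a)
    return np.array([[1., 0., 0.], [0., c, -s], [0., s, c]])

def _rot_y(a):
    c, s = np.cos(a), np.sin(a)
    return np.array([[c, 0., s], [0., 1., 0.], [-s, 0., c]])

assert np.allclose(_rot_x(0.0) @ _rot_y(0.0), np.eye(3))
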
def test_pickle(self):
    " pickle AdaptiveMap "
    m1 = AdaptiveMap(grid=[[0, 1, 3], [-2, 0, 6]])
    with open('test_map.p', 'wb') as ofile:
        pickle.dump(m1, ofile)
    with open('test_map.p', 'rb') as ifile:
        m2 = pickle.load(ifile)
    np_assert_allclose(m2.grid, m1.grid)
    np_assert_allclose(m2.inc, m1.inc)

def test_gammaQ(self):
    " gammaQ(a, x) "
    cases = [
        (2.371, 5.243, 0.05371580082389009, 0.9266599665892222),
        (20.12, 20.3, 0.4544782602230986, 0.4864172139106905),
        (100.1, 105.2, 0.29649013488390663, 0.6818457585776236),
        (1004., 1006., 0.4706659307021259, 0.5209695379094582),
    ]
    for a, x, gax, gxa in cases:
        np_assert_allclose(gax, gvar.gammaQ(a, x), rtol=0.01)
        np_assert_allclose(gxa, gvar.gammaQ(x, a), rtol=0.01)

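# Side note (a cross-check sketch, on the assumption that gvar.gammaQ is the
# regularized incomplete gamma function Q(a, x) = Gamma(a, x) / Gamma(a),
# which scipy exposes as scipy.special.gammaincc):
from scipy.special import gammaincc

assert abs(gammaincc(2.371, 5.243) - 0.05371580082389009) < 1e-6
assert abs(gammaincc(5.243, 2.371) - 0.9266599665892222) < 1e-6
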
def test_adapt_to_samples(self):
    " adapt_to_samples(...) "
    m1 = AdaptiveMap([[0, 2], [0, 1]])
    x = np.random.normal(0, .1, (1000, 2))

    def F(x):
        return np.exp(-np.sum(x ** 2, axis=1) * 100 / 2)

    Fx = F(x)
    m1.adapt_to_samples(x, Fx, nitn=5)
    m1.adapt(ninc=2)
    np_assert_allclose(
        m1.grid, [[0., 0.071, 2.0], [0., 0.073, 1.]], rtol=0.4
    )

def test_ravg_unwgtd(self):
    " unweighted RAvg "
    if not have_gvar:
        return
    mean = np.random.uniform(-10., 10.)
    x = gv.gvar(mean, 0.1)
    ravg = RAvg(weighted=False)
    N = 30
    for i in range(N):
        ravg.add(gv.gvar(x(), x.sdev))
    np_assert_allclose(ravg.sdev, x.sdev / (N ** 0.5))
    self.assertLess(abs(ravg.mean - mean), 5 * ravg.sdev)
    self.assertGreater(ravg.Q, 1e-3)
    self.assertEqual(ravg.dof, N - 1)

def test_pickle(self):
    I1 = Integrator([[0., 1.], [-1., 1.]], neval=234, nitn=123)
    with open('test_integ.p', 'wb') as ofile:
        pickle.dump(I1, ofile)
    with open('test_integ.p', 'rb') as ifile:
        I2 = pickle.load(ifile)
    assert isinstance(I2, Integrator)
    for k in Integrator.defaults:
        if k == 'map':
            np_assert_allclose(I1.map.grid, I2.map.grid)
            np_assert_allclose(I1.map.inc, I2.map.inc)
        elif k in ['ran_array_generator']:
            continue
        else:
            self.assertEqual(getattr(I1, k), getattr(I2, k))

def test_ravgarray_unwgtd(self):
    " unweighted RAvgArray "
    mean = np.random.uniform(-10., 10., (2,))
    cov = np.array([[1., 0.5], [0.5, 2.]]) / 10.
    N = 30
    x = gv.gvar(mean, cov)
    r = gv.raniter(x, N)
    ravg = RAvgArray((1, 2), weighted=False)
    for ri in r:
        ravg.add([gv.gvar(ri, cov)])
    np_assert_allclose(gv.evalcov(ravg.flat), cov / N)
    for i in range(2):
        self.assertLess(
            abs(mean[i] - ravg[0, i].mean), 5 * ravg[0, i].sdev
        )
    self.assertEqual(ravg.dof, 2 * N - 2)
    self.assertGreater(ravg.Q, 1e-3)

def test_ravg_wgtd(self):
    " weighted RAvg "
    if not have_gvar:
        return
    mean = np.random.uniform(-10., 10.)
    xbig = gv.gvar(mean, 1.)
    xsmall = gv.gvar(mean, 0.1)
    ravg = RAvg()
    N = 30
    for i in range(N):
        ravg.add(gv.gvar(xbig(), xbig.sdev))
        ravg.add(gv.gvar(xsmall(), xsmall.sdev))
    np_assert_allclose(
        ravg.sdev, 1. / (N * (1. / xbig.var + 1. / xsmall.var)) ** 0.5
    )
    self.assertLess(abs(ravg.mean - mean), 5 * ravg.sdev)
    self.assertGreater(ravg.Q, 1e-3)
    self.assertEqual(ravg.dof, 2 * N - 1)

def assert_allclose(actual, desired, rtol=1e-05, atol=1e-05, ratio_tol=None):
    if not isinstance(actual, np.ndarray):
        actual = to_output_type(actual, 'numpy')
    if not isinstance(desired, np.ndarray):
        desired = to_output_type(desired, 'numpy')
    if ratio_tol:
        assert actual.shape == desired.shape
        diff_ratio = (actual != desired).sum() / actual.size
        assert diff_ratio <= ratio_tol
    else:
        return np_assert_allclose(actual, desired, rtol=rtol, atol=atol)

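# Usage sketch for the helper above (hypothetical arrays; `to_output_type`
# is assumed to convert non-numpy outputs, e.g. cuDF or cupy, to numpy, and
# is only hit for non-ndarray inputs):
import numpy as np

_a = np.array([1.0, 2.0, 3.000001])
_b = np.array([1.0, 2.0, 3.0])
assert_allclose(_a, _b)                 # element-wise atol/rtol check
assert_allclose(_a, _b, ratio_tol=0.5)  # pass if <= 50% of entries differ exactly
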
def test_map(self):
    " map(...) "
    m = AdaptiveMap(grid=[[0, 1, 3], [-2, 0, 6]])
    # 5 y values
    y = np.array(
        [[0, 0], [0.25, 0.25], [0.5, 0.5], [0.75, 0.75], [1.0, 1.0]]
    )
    x = np.empty(y.shape, float)
    jac = np.empty(y.shape[0], float)
    m.map(y, x, jac)
    np_assert_allclose(x, [[0, -2], [0.5, -1], [1, 0], [2, 3], [3, 6]])
    np_assert_allclose(jac, [8, 8, 48, 48, 48])
    np_assert_allclose(m(y), x)
    np_assert_allclose(m.jac(y), jac)

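# Side note (worked arithmetic for the expected Jacobians above, assuming the
# usual vegas convention jac = product over axes of increment_width * ninc):
# grid [[0, 1, 3], [-2, 0, 6]] has ninc = 2, with increment widths [1, 2] on
# axis 0 and [2, 6] on axis 1, so
#   first increment of both axes (y < 0.5):   (1 * 2) * (2 * 2) = 8
#   second increment of both axes (y >= 0.5): (2 * 2) * (6 * 2) = 48
assert (1 * 2) * (2 * 2) == 8
assert (2 * 2) * (6 * 2) == 48
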
def test_ravgarray_wgtd(self):
    " weighted RAvgArray "
    mean = np.random.uniform(-10., 10., (2,))
    cov = np.array([[1., 0.5], [0.5, 2.]])
    invcov = np.linalg.inv(cov)
    N = 30
    xbig = gv.gvar(mean, cov)
    rbig = gv.raniter(xbig, N)
    xsmall = gv.gvar(mean, cov / 10.)
    rsmall = gv.raniter(xsmall, N)
    ravg = RAvgArray((1, 2))
    for rb, rs in zip(rbig, rsmall):
        ravg.add([gv.gvar(rb, cov)])
        ravg.add([gv.gvar(rs, cov / 10.)])
    np_assert_allclose(gv.evalcov(ravg.flat), cov / (10. + 1.) / N)
    for i in range(2):
        self.assertLess(
            abs(mean[i] - ravg[0, i].mean), 5 * ravg[0, i].sdev
        )
    self.assertEqual(ravg.dof, 4 * N - 2)
    self.assertGreater(ravg.Q, 1e-3)

def test_ravgarray_wgtd(self):
    " weighted RAvgArray "
    if not have_gvar:
        return
    mean = np.random.uniform(-10., 10., (2,))
    cov = np.array([[1., 0.5], [0.5, 2.]])
    invcov = np.linalg.inv(cov)
    N = 30
    xbig = gv.gvar(mean, cov)
    rbig = gv.raniter(xbig, N)
    xsmall = gv.gvar(mean, cov / 10.)
    rsmall = gv.raniter(xsmall, N)
    ravg = RAvgArray(2)
    for rb, rs in zip(rbig, rsmall):
        ravg.add(gv.gvar(rb, cov))
        ravg.add(gv.gvar(rs, cov / 10.))
    np_assert_allclose(gv.evalcov(ravg), cov / (10. + 1.) / N)
    for i in range(2):
        self.assertLess(abs(mean[i] - ravg[i].mean), 5 * ravg[i].sdev)
    self.assertEqual(ravg.dof, 4 * N - 2)
    self.assertGreater(ravg.Q, 1e-3)

def test_invmap(self):
    " invmap(...) "
    m = AdaptiveMap(grid=[[0., 1., 3.], [-2., 0., 6.]])
    # 5 x values
    x = np.array([[0., -2.], [0.5, -1], [1, 0], [2, 3], [3, 6]])
    ytrue = np.array(
        [[0, 0], [0.25, 0.25], [0.5, 0.5], [0.75, 0.75], [1.0, 1.0]]
    )
    jactrue = np.array([8., 8, 48, 48, 48])
    y = np.empty(x.shape, float)
    jac = np.empty(x.shape[0], float)
    m.invmap(x, y, jac)
    np_assert_allclose(y, ytrue)
    np_assert_allclose(jac, jactrue)
    np_assert_allclose(m(y), x)
    np_assert_allclose(m.jac(y), jac)

def test_all(self):
    " RWavg "
    a = RAvg()
    a.add(gvar.gvar(1, 1))
    a.add(gvar.gvar(2, 2))
    a.add(gvar.gvar(3, 3))
    np_assert_allclose(a.mean, 1.346938775510204)
    np_assert_allclose(a.sdev, 0.8571428571428571)
    self.assertEqual(a.dof, 2)
    np_assert_allclose(a.chi2, 0.5306122448979592)
    np_assert_allclose(a.Q, 0.7669711269557102)
    self.assertEqual(str(a), '1.35(86)')
    s = [
        "itn integral wgt average chi2/dof Q",
        "-------------------------------------------------------",
        " 1 1.0(1.0) 1.0(1.0) 0.00 1.00",
        " 2 2.0(2.0) 1.20(89) 0.20 0.65",
        " 3 3.0(3.0) 1.35(86) 0.27 0.77",
        "",
    ]
    self.assertEqual(a.summary(), '\n'.join(s))

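# Side note (worked arithmetic for the expected values above, a sketch of the
# standard inverse-variance weighted average with weights w_i = 1/sdev_i**2):
#   w = [1, 1/4, 1/9],  sum(w) = 49/36
#   mean = (1*1 + 2/4 + 3/9) / (49/36) = 66/49 = 1.346938775510204
#   sdev = sum(w) ** -0.5 = 6/7 = 0.8571428571428571
_w = [1., 1. / 4., 1. / 9.]
assert abs((1. + 2. / 4. + 3. / 9.) / sum(_w) - 66. / 49.) < 1e-12
assert abs(sum(_w) ** -0.5 - 6. / 7.) < 1e-12
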
def test_array(self):
    " RAvgArray "
    a = RAvgArray((2,))
    a.add([gvar.gvar(1, 1), gvar.gvar(10, 10)])
    a.add([gvar.gvar(2, 2), gvar.gvar(20, 20)])
    a.add([gvar.gvar(3, 3), gvar.gvar(30, 30)])
    self.assertEqual(a.shape, (2,))
    np_assert_allclose(a[0].mean, 1.346938775510204)
    np_assert_allclose(a[0].sdev, 0.8571428571428571)
    self.assertEqual(a.dof, 4)
    np_assert_allclose(a.chi2, 2 * 0.5306122448979592)
    np_assert_allclose(a.Q, 0.900374555485)
    self.assertEqual(str(a[0]), '1.35(86)')
    self.assertEqual(str(a[1]), '13.5(8.6)')
    s = [
        "itn integral wgt average chi2/dof Q",
        "-------------------------------------------------------",
        " 1 1.0(1.0) 1.0(1.0) 0.00 1.00",
        " 2 2.0(2.0) 1.20(89) 0.20 0.82",
        " 3 3.0(3.0) 1.35(86) 0.27 0.90",
        "",
    ]
    self.assertEqual(a.summary(), '\n'.join(s))

def test_volume(self):
    " integrate constants "
    def f(x):
        return 2.
    I = Integrator([[-1, 1], [0, 4]])
    r = I(f)
    np_assert_allclose(r.mean, 16, rtol=1e-6)
    self.assertTrue(r.sdev < 1e-6)

    def f(x):
        return [-1., 2.]
    I = Integrator([[-1, 1], [0, 4]])
    r = I(f)
    np_assert_allclose(r[0].mean, -8, rtol=5e-2)
    self.assertTrue(r[0].sdev < 1e-6)
    np_assert_allclose(r[1].mean, 16, rtol=5e-2)
    self.assertTrue(r[1].sdev < 1e-6)

def test_array(self):
    " RAvgArray "
    a = RAvgArray((1, 2))
    a.add([[gvar.gvar(1, 1), gvar.gvar(10, 10)]])
    a.add([[gvar.gvar(2, 2), gvar.gvar(20, 20)]])
    a.add([[gvar.gvar(3, 3), gvar.gvar(30, 30)]])
    self.assertEqual(a.shape, (1, 2))
    np_assert_allclose(a[0, 0].mean, 1.346938775510204)
    np_assert_allclose(a[0, 0].sdev, 0.8571428571428571)
    self.assertEqual(a.dof, 4)
    np_assert_allclose(a.chi2, 2 * 0.5306122448979592)
    np_assert_allclose(a.Q, 0.900374555485)
    self.assertEqual(str(a[0, 0]), '1.35(86)')
    self.assertEqual(str(a[0, 1]), '13.5(8.6)')
    s = [
        "itn integral wgt average chi2/dof Q",
        "-------------------------------------------------------",
        " 1 1.0(1.0) 1.0(1.0) 0.00 1.00",
        " 2 2.0(2.0) 1.20(89) 0.20 0.82",
        " 3 3.0(3.0) 1.35(86) 0.27 0.90",
        "",
    ]
    self.assertEqual(a.summary(), '\n'.join(s))

def test_region(self):
    " region(...) "
    m = AdaptiveMap(grid=[[0, 1, 3], [-2, 0, 6]])
    np_assert_allclose(m.region(0), [0, 3])
    np_assert_allclose(m.region(1), [-2, 6])
    np_assert_allclose(m.region(), [[0, 3], [-2, 6]])

def test_training_data_adapt(self):
    "add_training_data(...) adapt(...) "
    # change ninc; no adaptation -- already adapted
    m = AdaptiveMap([[0, 2], [-1, 1]], ninc=2)
    y = np.array([[0.25, 0.25], [0.75, 0.75]])
    f = m.jac(y)
    m.add_training_data(y, f)
    m.adapt(alpha=1.5)
    np_assert_allclose(m.grid, [[0., 1., 2.], [-1, 0, 1]])

    # no adaptation -- alpha = 0
    m = AdaptiveMap([[0, 1.5, 2], [-1, 0, 1]])
    y = np.array([[0.25, 0.25], [0.75, 0.75]])
    f = m.jac(y)
    m.add_training_data(y, f)
    m.adapt(alpha=0.0)
    np_assert_allclose(m.grid, [[0, 1.5, 2], [-1, 0, 1]])

    # Adapt to functions:
    # Place y values at 2-pt Gaussian quadrature
    # abscissas so training is equivalent to
    # an integral for functions that are linear
    # or quadratic (or a superposition). This
    # simulates random y's spread uniformly
    # over space. ygauss below is for ninc=2,
    # with two abscissas per increment.
    g = 1. / 3. ** 0.5
    ygauss = [(1 - g) / 4., (1 + g) / 4., (3 - g) / 4., (3 + g) / 4.]

    m = AdaptiveMap([[0, 2]], ninc=2)
    y = np.array([[yi] for yi in ygauss])

    def F(x):
        return x[:, 0] ** 2

    for i in range(60):
        f = F(m(y)) * m.jac(y)
        m.add_training_data(y, f)
        m.adapt(alpha=2.)
    np_assert_allclose(m.grid, [[0, 2. / 2. ** (1. / 3.), 2.]])

    m = AdaptiveMap([[0, 2], [0, 4]], ninc=2)
    y = np.array([[yi, yj] for yi in ygauss for yj in ygauss])

    def F(x):
        return x[:, 0] * x[:, 1] ** 2

    for i in range(60):
        f = F(m(y)) * m.jac(y)
        m.add_training_data(y, f)
        m.adapt(alpha=2.)
    np_assert_allclose(
        m.grid, [[0, 2. * 2. ** (-0.5), 2.], [0, 4. * 2 ** (-1. / 3.), 4.]]
    )

    # same again, with no smoothing
    m = AdaptiveMap([[0, 2], [0, 4]], ninc=2)
    y = np.array([[yi, yj] for yi in ygauss for yj in ygauss])

    def F(x):
        return x[:, 0] * x[:, 1] ** 2

    for i in range(20):
        f = F(m(y)) * m.jac(y)
        m.add_training_data(y, f)
        m.adapt(alpha=-2.)
    np_assert_allclose(
        m.grid, [[0, 2. * 2. ** (-0.5), 2.], [0, 4. * 2 ** (-1. / 3.), 4.]]
    )

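# Side note (worked check of the expected grid points above, assuming the
# usual vegas criterion that an adapted grid assigns each increment an equal
# share of the integral of |f|): for f(x) = x**2 on [0, 2] with ninc = 2, the
# total integral is 8/3 and the interior grid point a satisfies
# a**3 / 3 = 4/3, i.e. a = 4**(1/3) = 2 / 2**(1/3), as asserted above.
_a = 4. ** (1. / 3.)
assert abs(_a - 2. / 2. ** (1. / 3.)) < 1e-12
assert abs(_a ** 3 / 3. - 4. / 3.) < 1e-12
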
def test_init(self):
    " AdaptiveMap(...) "
    m = AdaptiveMap(grid=[[0, 1], [2, 4]])
    np_assert_allclose(m.grid, [[0, 1], [2, 4]])
    np_assert_allclose(m.inc, [[1], [2]])
    m = AdaptiveMap(grid=[[0, 1], [-2, 4]], ninc=2)
    np_assert_allclose(m.grid, [[0, 0.5, 1.], [-2., 1., 4.]])
    np_assert_allclose(m.inc, [[0.5, 0.5], [3., 3.]])
    self.assertEqual(m.dim, 2)
    self.assertEqual(m.ninc, 2)
    m = AdaptiveMap([[0, 0.4, 1], [-2, 0., 4]], ninc=4)
    np_assert_allclose(
        m.grid, [[0, 0.2, 0.4, 0.7, 1.], [-2., -1., 0., 2., 4.]]
    )
    np_assert_allclose(m.inc, [[0.2, 0.2, 0.3, 0.3], [1, 1, 2, 2]])
    self.assertEqual(m.dim, 2)
    self.assertEqual(m.ninc, 4)

def assert_allclose(actual, desired, rtol=None, atol=0.0, equal_nan=True, err_msg="", verbose=True): """dtype-aware variant of numpy.testing.assert_allclose This variant introspects the least precise floating point dtype in the input argument and automatically sets the relative tolerance parameter to 1e-4 float32 and use 1e-7 otherwise (typically float64 in scikit-learn). `atol` is always left to 0. by default. It should be adjusted manually to an assertion-specific value in case there are null values expected in `desired`. The aggregate tolerance is `atol + rtol * abs(desired)`. Parameters ---------- actual : array_like Array obtained. desired : array_like Array desired. rtol : float, optional, default=None Relative tolerance. If None, it is set based on the provided arrays' dtypes. atol : float, optional, default=0. Absolute tolerance. equal_nan : bool, optional, default=True If True, NaNs will compare equal. err_msg : str, optional, default='' The error message to be printed in case of failure. verbose : bool, optional, default=True If True, the conflicting values are appended to the error message. Raises ------ AssertionError If actual and desired are not equal up to specified precision. See Also -------- numpy.testing.assert_allclose Examples -------- >>> import numpy as np >>> from sklearn.utils._testing import assert_allclose >>> x = [1e-5, 1e-3, 1e-1] >>> y = np.arccos(np.cos(x)) >>> assert_allclose(x, y, rtol=1e-5, atol=0) >>> a = np.full(shape=10, fill_value=1e-5, dtype=np.float32) >>> assert_allclose(a, 1e-5) """ dtypes = [] actual, desired = np.asanyarray(actual), np.asanyarray(desired) dtypes = [actual.dtype, desired.dtype] if rtol is None: rtols = [1e-4 if dtype == np.float32 else 1e-7 for dtype in dtypes] rtol = max(rtols) np_assert_allclose( actual, desired, rtol=rtol, atol=atol, equal_nan=equal_nan, err_msg=err_msg, verbose=verbose, )
def test_injection():
    comps = n.passive_branch_components
    np_assert_allclose(
        network_injection(n, branch_components=comps).values,
        n.buses_t.p.values,
        **tol_kwargs,
    )

def test_ravgdict(self):
    " RAvgDict "
    a = RAvgDict(dict(s=1.0, a=[[2.0, 3.0]]))
    a.add(dict(s=gv.gvar(1, 1), a=[[gv.gvar(1, 1), gv.gvar(10, 10)]]))
    a.add(dict(s=gv.gvar(2, 2), a=[[gv.gvar(2, 2), gv.gvar(20, 20)]]))
    a.add(dict(s=gv.gvar(3, 3), a=[[gv.gvar(3, 3), gv.gvar(30, 30)]]))
    self.assertEqual(a['a'].shape, (1, 2))
    np_assert_allclose(a['a'][0, 0].mean, 1.346938775510204)
    np_assert_allclose(a['a'][0, 0].sdev, 0.8571428571428571)
    self.assertEqual(str(a['a'][0, 0]), '1.35(86)')
    self.assertEqual(str(a['a'][0, 1]), '13.5(8.6)')
    np_assert_allclose(a['s'].mean, 1.346938775510204)
    np_assert_allclose(a['s'].sdev, 0.8571428571428571)
    self.assertEqual(str(a['s']), '1.35(86)')
    self.assertEqual(a.dof, 6)
    np_assert_allclose(a.chi2, 3 * 0.5306122448979592)
    np_assert_allclose(a.Q, 0.953162484587)
    s = [
        "itn integral wgt average chi2/dof Q",
        "-------------------------------------------------------",
        " 1 1.0(1.0) 1.0(1.0) 0.00 1.00",
        " 2 2.0(2.0) 1.20(89) 0.20 0.90",
        " 3 3.0(3.0) 1.35(86) 0.27 0.95",
        "",
    ]
    self.assertEqual(a.summary(), '\n'.join(s))

def test_transforms(self):
    # 8 camera poses, one 3x4 row-major block per pose, separated by </br>
    poses = """-0.999762 0.000000 -0.021799 1.370500
0.000000 1.000000 0.000000 1.517390
0.021799 0.000000 -0.999762 1.449630
</br>
-0.999742 0.004366 -0.022272 1.367762
0.004157 0.999947 0.009404 1.519902
0.022312 0.009309 -0.999707 1.449820
</br>
-0.999656 0.004837 -0.025762 1.366492
0.004688 0.999972 0.005846 1.513329
0.025790 0.005723 -0.999651 1.453128
</br>
-0.998973 0.011128 -0.043917 1.359956
0.010923 0.999928 0.004896 1.516617
0.043968 0.004411 -0.999023 1.452431
</br>
-0.992071 0.047180 -0.116485 1.349768
0.046773 0.998886 0.006226 1.521626
0.116649 0.000729 -0.993173 1.444061
</br>
-0.980396 0.089427 -0.175573 1.244141
0.087771 0.995992 0.017191 1.526088
0.176407 0.001444 -0.984316 1.457656
</br>
-0.954118 0.130523 -0.269486 1.223184
0.128578 0.991386 0.024936 1.532083
0.270419 -0.010858 -0.962681 1.461294
</br>
-0.916433 0.180477 -0.357180 1.186825
0.176638 0.983308 0.043642 1.537364
0.359094 -0.023096 -0.933015 1.480111"""
    poses = poses.split("</br>")
    for i, p in enumerate(poses):
        p = [
            list(map(float, l.split(" ")))
            for l in p.split("\n") if len(l) != 0
        ]
        p.append([0.0, 0.0, 0.0, 1.0])  # homogeneous bottom row
        pose = np.array(p, dtype=np.float32)
        if i == 0:
            p0_inv = np.linalg.inv(pose).copy()
        poses[i] = p0_inv @ pose  # normalize relative to the first pose

    # list of np.ndarray poses
    transforms = poses_to_transforms(poses)
    for i, t in enumerate(transforms):
        if i == 0:
            t_agg = t.copy()
        else:
            t_agg = np.matmul(t_agg, t)
        np_assert_allclose(poses[i], t_agg, rtol=1e-05, atol=1e-6)

    # np.ndarray poses
    poses = np.array(poses, dtype=np.float32)
    transforms = poses_to_transforms(poses)
    for i, t in enumerate(transforms):
        if i == 0:
            t_agg = t.copy()
        else:
            t_agg = np.matmul(t_agg, t)
        np_assert_allclose(poses[i], t_agg, rtol=1e-05, atol=1e-6)

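# Side note (a minimal sketch of the contract the test enforces, not
# necessarily the library's implementation): poses_to_transforms must return
# relative transforms whose running product reproduces the (normalized)
# poses, i.e. transform i is the motion between pose i-1 and pose i.
import numpy as np

def _poses_to_transforms_sketch(poses):
    transforms = [poses[0].copy()]  # identity after the p0_inv normalization
    for prev, curr in zip(poses[:-1], poses[1:]):
        transforms.append(np.linalg.inv(prev) @ curr)
    return transforms

# Self-check: identity pose followed by a unit translation along x.
_p0 = np.eye(4)
_p1 = np.eye(4)
_p1[0, 3] = 1.0
_t = _poses_to_transforms_sketch([_p0, _p1])
assert np.allclose(_t[0] @ _t[1], _p1)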