def test_args_kwds_are_used(self):
    # User-supplied fcn_args / fcn_kws must be forwarded into the user's
    # fit function, and the fit must still recover the true parameters.
    extra = [1., 2.]
    xvals = np.linspace(0, 10, 11)
    yvals = extra[0] + 1 + 2 * extra[1] * xvals

    par = Parameters()
    par.add('p0', 1.5)
    par.add('p1', 2.5)

    def fun(x, p, *args, **kwds):
        # the positional extras should arrive untouched
        assert_equal(args, extra)
        return args[0] + p['p0'] + p['p1'] * extra[1] * x

    fitter = CurveFitter(fun, (xvals, yvals), par, fcn_args=extra)
    res = fitter.fit()
    assert_almost_equal(values(res.params), [1., 2.])

    # now the same check for keyword extras
    kwd_dict = {'a': 1, 'b': 2}

    def fun(x, p, *args, **kwds):
        return kwds['a'] + p['p0'] + p['p1'] * kwds['b'] * x

    fitter = CurveFitter(fun, (xvals, yvals), par, fcn_kws=kwd_dict)
    res = fitter.fit()
    assert_almost_equal(values(res.params), [1., 2.])
def test_lnpost(self):
    # A user-supplied log-posterior identical to the default Gaussian
    # likelihood should reproduce the default calculator's value, and
    # seeded emcee sampling should give identical parameter estimates.
    data = (self.xvals, self.yvals, self.evals)

    default_calc = _parallel_likelihood_calculator(gauss, data_tuple=data)
    lnprob = default_calc(self.params)

    def lnpost(pars, generative, y, e):
        resid = y - generative
        resid /= e
        resid *= resid
        resid += np.log(2 * np.pi * e**2)
        return -0.5 * np.sum(resid)

    custom_calc = _parallel_likelihood_calculator(gauss, data_tuple=data,
                                                  lnpost=lnpost)
    lnprob2 = custom_calc(self.params)
    assert_equal(lnprob2, lnprob)

    # identical seeds -> identical chains -> identical parameter values
    pars_copy = deepcopy(self.params)
    fitter = CurveFitter(gauss, data, self.params)
    res = fitter.emcee(steps=10, burn=0, thin=1, seed=1)

    fitter2 = CurveFitter(gauss, data, pars_copy, lnpost=lnpost)
    res2 = fitter2.emcee(steps=10, burn=0, thin=1, seed=1)
    assert_almost_equal(values(res.params), values(res2.params))
def test_best_weighted(self):
    # A weighted fit should reach the reference parameter values, chisqr
    # and parameter uncertainties.
    fitter = CurveFitter(gauss,
                         (self.xvals, self.yvals, self.evals),
                         self.params)
    res = fitter.fit()

    fitted = list(res.params.valuesdict().values())
    assert_almost_equal(fitted, self.best_weighted, 4)
    assert_almost_equal(res.chisqr, self.best_weighted_chisqr)

    stderrs = [res.params['p%d' % i].stderr for i in range(4)]
    assert_almost_equal(stderrs, self.best_weighted_errors, 3)
def test_costfun(self):
    # A user-defined cost function equivalent to the built-in chi-squared
    # should converge to the same minimum.
    res = self.f.fit('nelder')

    def costfun(params, generative, y, e):
        # BUG FIX: the residual is (y - generative) / e.  The original
        # expression `y - generative / e` divided only the model by e
        # (precedence bug); the two coincide only when e == 1.
        return np.sum(((y - generative) / e) ** 2)

    g = CurveFitter(gauss, (self.xdata, self.ydata), self.params,
                    costfun=costfun)
    res2 = g.fit('nelder')
    assert_almost_equal(self.pvals(res.params), self.pvals(res2.params))
def test_reflectivity_fit(self):
    # Smoke test: a transformed reflectivity fit with differential
    # evolution should run to completion without raising.
    fitfunc = reflect.ReflectivityFitFunction()
    transform = reflect.Transform('logY')
    yt, et = transform.transform(self.qvals361,
                                 self.rvals361,
                                 self.evals361)
    fitter2 = CurveFitter(fitfunc,
                          (self.qvals361, yt, et),
                          self.params361,
                          fcn_kws={'transform': transform.transform},
                          kws={'seed': 2})
    fitter2.fit('differential_evolution')
def test_emcee_vs_lm(self):
    # MCMC parameter estimates should land within 1-sigma of the
    # reference (leastsq) values, with and without parameter bounds.
    if not HAS_EMCEE:
        return True

    f = CurveFitter(gauss, self.xvals, self.yvals, self.params,
                    edata=self.evals)
    np.random.seed(123456)
    out = f.emcee(nwalkers=100, steps=300, burn=100, thin=10)
    within_sigma(self.best_weighted, out.params)

    # same comparison with the upper bound on p1 removed
    self.params['p1'].max = None
    f = CurveFitter(gauss, self.xvals, self.yvals, self.params,
                    edata=self.evals)
    np.random.seed(123456)
    # BUG FIX: the sampler output was previously discarded and the stale
    # first-run `out` re-checked; capture the new result and check that.
    out = f.emcee(nwalkers=100, steps=300, burn=100, thin=5)
    within_sigma(self.best_weighted, out.params)

    # and with the lower bound on p1 removed as well
    self.params['p1'].min = None
    f = CurveFitter(gauss, self.xvals, self.yvals, self.params,
                    edata=self.evals)
    out = f.emcee(nwalkers=100, steps=300, burn=100, thin=5)
    within_sigma(self.best_weighted, out.params)
def test_reflectivity_emcee(self):
    # A short emcee run should stay close to the leastsq solution.
    transform = reflect.Transform('logY')
    yt, et = transform.transform(self.qvals361, self.rvals361,
                                 self.evals361)
    kws = {'transform': transform.transform}
    fitfunc = RFF(transform=transform.transform, dq=5.)

    fitter = CurveFitter(fitfunc, (self.qvals361, yt, et),
                         self.params361, fcn_kws=kws)
    res = fitter.fit()
    # BUG FIX: previously `res` and `res_em` were computed but never
    # compared, so this test asserted nothing.  Seed the sampler for
    # reproducibility and compare against the leastsq result (matching
    # the companion version of this test elsewhere in the suite).
    res_em = fitter.emcee(steps=10, seed=1)
    assert_allclose(values(res.params), values(res_em.params), rtol=1e-2)
def test_reflectivity_emcee(self):
    # A short, seeded emcee run should agree with the leastsq solution
    # to within a couple of percent.
    transform = reflect.Transform('logY')
    yt, et = transform.transform(self.qvals361, self.rvals361,
                                 self.evals361)
    fitfunc = RFF(transform=transform.transform, dq=5.)

    fitter = CurveFitter(fitfunc,
                         (self.qvals361, yt, et),
                         self.params361,
                         fcn_kws={'transform': transform.transform})
    best = fitter.fit()
    sampled = fitter.emcee(steps=10, seed=1)
    assert_allclose(values(best.params), values(sampled.params), rtol=1e-2)
def setUp(self):
    # Noise-free Gaussian data generated from the true parameters `p0`,
    # with the starting guess perturbed by +0.2 on every parameter.
    self.xdata = np.linspace(-4, 4, 100)
    self.p0 = np.array([0., 1., 0.0, 1.])
    self.bounds = [(-1, 1), (0, 2), (-1, 1.), (0.001, 2)]

    self.params = curvefitter.to_parameters(self.p0 + 0.2,
                                            bounds=self.bounds)
    self.final_params = curvefitter.to_parameters(self.p0,
                                                  bounds=self.bounds)

    self.ydata = gauss(self.xdata, self.final_params)
    self.f = CurveFitter(gauss, (self.xdata, self.ydata), self.params)
def test_smeared_reflectivity_fitter(self):
    # Compare the smeared reflectivity calculation with reference values
    # generated by Motofit.  The reference data were created with
    # quadrature precision order 13, so the same order is used here.
    theoretical = np.loadtxt(os.path.join(path, 'smeared_theoretical.txt'))
    qvals, rvals, dqvals = np.hsplit(theoretical, 3)

    params = curvefitter.to_parameters(self.coefs)
    fitfunc = RFF(quad_order=13)
    fitter = CurveFitter(fitfunc, (qvals, rvals), params,
                         fcn_kws={'dqvals': dqvals})
    assert_almost_equal(fitter.model(params), rvals)
def test_emcee_vs_lm(self):
    # MCMC parameter estimates (and their uncertainties) should agree
    # with the reference leastsq solution.
    f = CurveFitter(gauss, (self.xvals, self.yvals, self.evals),
                    self.params)
    np.random.seed(123456)
    out = f.emcee(nwalkers=100, steps=500, burn=250, thin=20)
    within_sigma(self.best_weighted, out.params)

    # the sigmas should also be similar (within 20 %)
    errs = np.array([out.params[par].stderr for par in out.params])
    assert_allclose(errs, self.best_weighted_errors, rtol=0.2)

    # now try with resampling MC
    out = f._resample_mc(500, params=self.params, method='leastsq')
    within_sigma(self.best_weighted, out.params)

    # test if the sigmas are similar as well (within 20 %)
    errs = np.array([out.params[par].stderr for par in out.params])
    assert_allclose(errs, self.best_weighted_errors, rtol=0.2)

    # repeat with the upper bound on p1 removed
    self.params['p1'].max = np.inf
    f = CurveFitter(gauss, (self.xvals, self.yvals, self.evals),
                    self.params)
    np.random.seed(123456)
    # BUG FIX: the sampler output was previously discarded and the stale
    # `out` from the resampling run was re-checked; capture the result.
    out = f.emcee(nwalkers=100, steps=300, burn=100, thin=5)
    within_sigma(self.best_weighted, out.params)

    # and with the lower bound on p1 removed as well
    self.params['p1'].min = -np.inf
    f = CurveFitter(gauss, (self.xvals, self.yvals, self.evals),
                    self.params)
    out = f.emcee(nwalkers=100, steps=300, burn=100, thin=5)
    within_sigma(self.best_weighted, out.params)
def test_reflectivity_fit(self):
    # Smoke test: both a plain and a log-transformed reflectivity fit
    # should run to completion without raising.
    params = curvefitter.to_parameters(self.coefs)
    params['p1'].value = 1.1

    fitfunc = reflect.ReflectivityFitFunction()
    fitter = CurveFitter(fitfunc, self.qvals, self.rvals, params)
    fitter.fit()

    transform = reflect.Transform('logY')
    yt, et = transform.transform(self.qvals361, self.rvals361,
                                 self.evals361)
    fitter2 = CurveFitter(fitfunc, self.qvals361, yt, self.params361,
                          edata=et,
                          fcn_kws={'transform': transform.transform})
    fitter2.fit('differential_evolution')
class TestFitter(unittest.TestCase):
    """Exercise CurveFitter on noise-free Gaussian data.

    setUp builds perfect data from the true parameters ``p0`` and a
    starting guess perturbed by +0.2, so fits should recover ``p0``
    exactly (chisqr ~ 0).
    """

    def setUp(self):
        self.xdata = np.linspace(-4, 4, 100)
        # true parameter values; the starting guess is p0 + 0.2
        self.p0 = np.array([0., 1., 0.0, 1.])
        self.bounds = [(-1, 1), (0, 2), (-1, 1.), (0.001, 2)]
        self.params = curvefitter.to_parameters(self.p0 + 0.2, bounds=self.bounds)
        self.final_params = curvefitter.to_parameters(self.p0, bounds=self.bounds)
        # perfect (noise-free) data generated from the true parameters
        self.ydata = gauss(self.xdata, self.final_params)
        self.f = CurveFitter(gauss, (self.xdata, self.ydata), self.params)

    def test_fitting(self):
        # the simplest test - a really simple gauss curve with perfect data
        res = self.f.fit()
        assert_almost_equal(values(res.params), self.p0)
        assert_almost_equal(res.chisqr, 0)

    def test_NIST(self):
        # Run all the NIST standard tests with leastsq
        for model in Models:
            try:
                NIST_runner(model)
            except Exception:
                # print which model failed before re-raising
                print(model)
                raise

    def test_model_returns_function(self):
        # model() evaluated at the true parameters reproduces the data
        ydata = gauss(self.xdata, self.final_params)
        model = self.f.model(self.final_params)
        assert_almost_equal(ydata, model)

    def test_residuals(self):
        # residuals at the true parameters should vanish (perfect data)
        resid = self.f.residuals(self.final_params)
        assert_almost_equal(np.sum(resid**2), 0)

    def test_cost(self):
        # cost (sum of squared residuals) at the true parameters is zero
        resid = self.f.residuals(self.final_params)
        assert_almost_equal(0, np.sum(resid**2))

    def test_leastsq(self):
        # test that a custom method can be used with scipy.optimize.minimize
        res = self.f.fit()
        assert_almost_equal(values(res.params), self.p0)

    def test_resid_length(self):
        # the residuals length should be equal to the data length
        resid = self.f.residuals(self.params)
        assert_equal(resid.size, self.f.dataset.y.size)

    def test_scalar_minimize(self):
        # the perturbed starting values are intact before the fit
        assert_equal(values(self.params), self.p0 + 0.2)
        res = self.f.fit(method='differential_evolution')
        assert_almost_equal(values(res.params), self.p0, 3)

    def test_holding_parameter(self):
        # holding parameters means that those parameters shouldn't change
        # during a fit
        self.params['p0'].vary = False
        res = self.f.fit()
        assert_almost_equal(self.p0[0] + 0.2, self.params['p0'].value)
        assert_almost_equal(res.params['p0'].value, self.params['p0'].value)

    def test_fit_returns_MinimizerResult(self):
        # fit() must return an lmfit MinimizerResult
        self.params['p0'].vary = False
        res = self.f.fit()
        assert_(isinstance(res, MinimizerResult))

    def test_costfun(self):
        # test user defined costfun
        res = self.f.fit('nelder')

        def costfun(params, generative, y, e):
            # NOTE(review): precedence looks off — `y - generative / e`
            # divides only the model by e; presumably ((y - generative) / e)
            # was intended.  Identical here only because e == 1 — confirm.
            return np.sum((y - generative / e) ** 2)

        g = CurveFitter(gauss, (self.xdata, self.ydata), self.params, costfun=costfun)
        res2 = g.fit('nelder')
        assert_almost_equal(values(res.params), values(res2.params))

    def test_args_kwds_are_used(self):
        # check that user defined args and kwds make their way into the user
        # function
        a = [1., 2.]
        x = np.linspace(0, 10, 11)
        y = a[0] + 1 + 2 * a[1] * x
        par = Parameters()
        par.add('p0', 1.5)
        par.add('p1', 2.5)

        def fun(x, p, *args, **kwds):
            # the positional extras should arrive untouched
            assert_equal(args, a)
            return args[0] + p['p0'] + p['p1'] * a[1] * x

        g = CurveFitter(fun, (x, y), par, fcn_args=a)
        res = g.fit()
        assert_almost_equal(values(res.params), [1., 2.])

        # same check for keyword extras
        d = {'a': 1, 'b': 2}

        def fun(x, p, *args, **kwds):
            return kwds['a'] + p['p0'] + p['p1'] * kwds['b'] * x

        g = CurveFitter(fun, (x, y), par, fcn_kws=d)
        res = g.fit()
        assert_almost_equal(values(res.params), [1., 2.])
def test_multipledataset_corefinement(self):
    # test corefinement of three reflectivity datasets with shared
    # (constrained) parameters
    e361 = np.loadtxt(os.path.join(CURDIR, 'e361r.txt'))
    e365 = np.loadtxt(os.path.join(CURDIR, 'e365r.txt'))
    e366 = np.loadtxt(os.path.join(CURDIR, 'e366r.txt'))

    coefs361 = np.zeros(16)
    coefs361[0] = 2
    coefs361[1] = 1.
    coefs361[2] = 2.07
    coefs361[4] = 6.36
    coefs361[6] = 2e-5
    coefs361[7] = 3
    coefs361[8] = 10
    coefs361[9] = 3.47
    coefs361[11] = 4
    coefs361[12] = 200
    coefs361[13] = 1
    coefs361[15] = 3

    coefs365 = np.copy(coefs361)
    coefs366 = np.copy(coefs361)
    coefs365[4] = 3.47
    coefs366[4] = -0.56

    qvals361, rvals361, evals361 = np.hsplit(e361, 3)
    qvals365, rvals365, evals365 = np.hsplit(e365, 3)
    qvals366, rvals366, evals366 = np.hsplit(e366, 3)

    lowlim = np.zeros(16)
    lowlim[4] = -0.8
    hilim = 2 * coefs361
    bounds = list(zip(lowlim, hilim))

    params361 = curvefitter.to_parameters(coefs361, bounds=bounds,
                                          varies=[False] * 16)
    params365 = curvefitter.to_parameters(coefs365, bounds=bounds,
                                          varies=[False] * 16)
    params366 = curvefitter.to_parameters(coefs366, bounds=bounds,
                                          varies=[False] * 16)

    # BUG FIX: assert_(len(x), 16) treats 16 as the failure *message*
    # and always passes because len(x) is truthy; assert_equal actually
    # verifies the length.
    assert_equal(len(params361), 16)
    assert_equal(len(params365), 16)
    assert_equal(len(params366), 16)

    fit = [1, 6, 8, 12, 13]
    for p in fit:
        params361['p%d' % p].vary = True
        params365['p%d' % p].vary = True
        params366['p%d' % p].vary = True

    a = CurveFitter(reflect_fitfunc, qvals361.flatten(),
                    np.log10(rvals361.flatten()), params361)
    b = CurveFitter(reflect_fitfunc, qvals365.flatten(),
                    np.log10(rvals365.flatten()), params365)
    c = CurveFitter(reflect_fitfunc, qvals366.flatten(),
                    np.log10(rvals366.flatten()), params366)

    g = GlobalFitter([a, b, c],
                     constraints=['d1:p8=d0:p8', 'd2:p8=d0:p8',
                                  'd1:p12=d0:p12', 'd2:p12 = d0:p12'],
                     kws={'seed': 1})

    # the global chisqr must equal the sum of the individual chisqrs
    indiv_chisqr = (a.residuals(a.params) ** 2
                    + b.residuals(b.params) ** 2
                    + c.residuals(c.params) ** 2)
    global_chisqr = g.residuals(g.params) ** 2
    assert_almost_equal(indiv_chisqr.sum(), global_chisqr.sum())

    res = g.fit('differential_evolution')
    # optional (slow) sampling check, kept for reference:
    # g.emcee(params=res.params, nwalkers=300, steps=500, workers=1)
    assert_almost_equal(res.chisqr, 0.774590447535, 4)
def test_multipledataset_corefinement(self):
    # test corefinement of three reflectivity datasets with shared
    # (constrained) parameters, plus constraint propagation afterwards
    e361 = np.loadtxt(os.path.join(CURDIR, 'e361r.txt'))
    e365 = np.loadtxt(os.path.join(CURDIR, 'e365r.txt'))
    e366 = np.loadtxt(os.path.join(CURDIR, 'e366r.txt'))

    coefs361 = np.zeros(16)
    coefs361[0] = 2
    coefs361[1] = 1.
    coefs361[2] = 2.07
    coefs361[4] = 6.36
    coefs361[6] = 2e-5
    coefs361[7] = 3
    coefs361[8] = 10
    coefs361[9] = 3.47
    coefs361[11] = 4
    coefs361[12] = 200
    coefs361[13] = 1
    coefs361[15] = 3

    coefs365 = np.copy(coefs361)
    coefs366 = np.copy(coefs361)
    coefs365[4] = 3.47
    coefs366[4] = -0.56

    qvals361, rvals361, evals361 = np.hsplit(e361, 3)
    qvals365, rvals365, evals365 = np.hsplit(e365, 3)
    qvals366, rvals366, evals366 = np.hsplit(e366, 3)

    lowlim = np.zeros(16)
    lowlim[4] = -0.8
    hilim = 2 * coefs361
    bounds = list(zip(lowlim, hilim))

    params361 = curvefitter.to_parameters(coefs361, bounds=bounds,
                                          varies=[False] * 16)
    params365 = curvefitter.to_parameters(coefs365, bounds=bounds,
                                          varies=[False] * 16)
    params366 = curvefitter.to_parameters(coefs366, bounds=bounds,
                                          varies=[False] * 16)

    # BUG FIX: assert_(len(x), 16) treats 16 as the failure *message*
    # and always passes because len(x) is truthy; assert_equal actually
    # verifies the length.
    assert_equal(len(params361), 16)
    assert_equal(len(params365), 16)
    assert_equal(len(params366), 16)

    fit = [1, 6, 8, 12, 13]
    for p in fit:
        params361['p%d' % p].vary = True
        params365['p%d' % p].vary = True
        params366['p%d' % p].vary = True

    a = CurveFitter(reflect_fitfunc,
                    (qvals361.flatten(), np.log10(rvals361.flatten())),
                    params361)
    b = CurveFitter(reflect_fitfunc,
                    (qvals365.flatten(), np.log10(rvals365.flatten())),
                    params365)
    c = CurveFitter(reflect_fitfunc,
                    (qvals366.flatten(), np.log10(rvals366.flatten())),
                    params366)

    g = GlobalFitter([a, b, c],
                     constraints=['d1:p8=d0:p8', 'd2:p8=d0:p8',
                                  'd1:p12=d0:p12', 'd2:p12 = d0:p12'],
                     kws={'seed': 1})

    # the global chisqr must equal the sum of the individual chisqrs
    indiv_chisqr = (a.residuals(a.params) ** 2
                    + b.residuals(b.params) ** 2
                    + c.residuals(c.params) ** 2)
    global_chisqr = g.residuals(g.params) ** 2
    assert_almost_equal(indiv_chisqr.sum(), global_chisqr.sum())

    res = g.fit('differential_evolution')
    # optional (slow) sampling check, kept for reference:
    # g.emcee(params=res.params, nwalkers=300, steps=500, workers=1)
    assert_almost_equal(res.chisqr, 0.774590447535, 4)

    # updating of constraints should happen during the fit
    assert_almost_equal(a.params['p12'].value, res.params['p12_d0'].value)
    assert_almost_equal(b.params['p12'].value, a.params['p12'].value)
    assert_almost_equal(c.params['p12'].value, a.params['p12'].value)

    g.params['p8_d0'].value = 10.123456
    # shouldn't need to call update constraints within the gfitter, that
    # happens when you retrieve a specific value
    assert_almost_equal(g.params['p8_d1'].value, g.params['p8_d0'].value)

    # However, you have to call model or residuals to redistribute the
    # parameters to the original fitters
    g.model()
    assert_almost_equal(a.params['p8'].value, 10.123456)
    assert_almost_equal(b.params['p8'].value, 10.123456)
class TestFitter(unittest.TestCase):
    """Exercise CurveFitter on noise-free Gaussian data.

    setUp builds perfect data from the true parameters ``p0`` and a
    starting guess perturbed by +0.2, so fits should recover ``p0``
    exactly (chisqr ~ 0).
    """

    def setUp(self):
        self.xdata = np.linspace(-4, 4, 100)
        # true parameter values; the starting guess is p0 + 0.2
        self.p0 = np.array([0., 1., 0.0, 1.])
        self.bounds = [(-1, 1), (0, 2), (-1, 1.), (0.001, 2)]
        self.params = curvefitter.to_parameters(self.p0 + 0.2, bounds=self.bounds)
        self.final_params = curvefitter.to_parameters(self.p0, bounds=self.bounds)
        # perfect (noise-free) data generated from the true parameters
        self.ydata = gauss(self.xdata, self.final_params)
        self.f = CurveFitter(gauss, (self.xdata, self.ydata), self.params)

    def pvals(self, params):
        # helper: parameter values as a float ndarray, in declaration order
        return np.asfarray(list(params.valuesdict().values()))

    def test_fitting(self):
        # the simplest test - a really simple gauss curve with perfect data
        res = self.f.fit()
        assert_almost_equal(self.pvals(res.params), self.p0)
        assert_almost_equal(res.chisqr, 0)

    def test_NIST(self):
        # Run all the NIST standard tests with leastsq
        for model in Models.keys():
            try:
                NIST_runner(model)
            except Exception:
                # print which model failed before re-raising
                print(model)
                raise

    def test_model_returns_function(self):
        # model() evaluated at the true parameters reproduces the data
        ydata = gauss(self.xdata, self.final_params)
        model = self.f.model(self.final_params)
        assert_almost_equal(ydata, model)

    def test_residuals(self):
        # residuals at the true parameters should vanish (perfect data)
        resid = self.f.residuals(self.final_params)
        assert_almost_equal(np.sum(resid**2), 0)

    def test_cost(self):
        # cost (sum of squared residuals) at the true parameters is zero
        resid = self.f.residuals(self.final_params)
        assert_almost_equal(0, np.sum(resid**2))

    def test_leastsq(self):
        # test that a custom method can be used with scipy.optimize.minimize
        res = self.f.fit()
        assert_almost_equal(self.pvals(res.params), self.p0)

    def test_resid_length(self):
        # the residuals length should be equal to the data length
        resid = self.f.residuals(self.params)
        assert_equal(resid.size, self.f.ydata.size)

    def test_scalar_minimize(self):
        # the perturbed starting values are intact before the fit
        assert_equal(self.pvals(self.params), self.p0 + 0.2)
        res = self.f.fit(method='differential_evolution')
        assert_almost_equal(self.pvals(res.params), self.p0, 3)

    def test_holding_parameter(self):
        # holding parameters means that those parameters shouldn't change
        # during a fit
        self.params['p0'].vary = False
        res = self.f.fit()
        assert_almost_equal(self.p0[0] + 0.2, self.params['p0'].value)

    def test_fit_returns_MinimizerResult(self):
        # fit() must return an lmfit MinimizerResult
        self.params['p0'].vary = False
        res = self.f.fit()
        assert_(isinstance(res, MinimizerResult))

    def test_costfun(self):
        # test user defined costfun
        res = self.f.fit('nelder')

        def costfun(params, generative, y, e):
            # NOTE(review): precedence looks off — `y - generative / e`
            # divides only the model by e; presumably ((y - generative) / e)
            # was intended.  Identical here only because e == 1 — confirm.
            return np.sum((y - generative / e) ** 2)

        g = CurveFitter(gauss, (self.xdata, self.ydata), self.params,
                        costfun=costfun)
        res2 = g.fit('nelder')
        assert_almost_equal(self.pvals(res.params), self.pvals(res2.params))