def fit(self, method='simplex'):
    """Initiate a fit of the model using Sherpa.

    :param method: Method to be used to fit the model (e.g. simplex, levmar, or moncar)
    """
    dummy_data = np.zeros(1)
    dummy_times = np.arange(1)
    ui.load_arrays(1, dummy_times, dummy_data)

    ui.set_method(method)
    ui.get_method().config.update(sherpa_configs.get(method, {}))

    ui.load_user_model(CalcModel(self.model, self.fit_logger), 'xijamod')  # sets global xijamod
    ui.add_user_pars('xijamod', self.model.parnames)
    ui.set_model(1, 'xijamod')

    calc_stat = CalcStat(self.model, self.fit_logger)
    ui.load_user_stat('xijastat', calc_stat, lambda x: np.ones_like(x))
    ui.set_stat(xijastat)

    # Set frozen, min, and max attributes for each xijamod parameter
    for par in self.model.pars:
        xijamod_par = getattr(xijamod, par.full_name)
        xijamod_par.val = par.val
        xijamod_par.frozen = par.frozen
        xijamod_par.min = par.min
        xijamod_par.max = par.max

    ui.fit(1)

    self.save_snapshot(fit_stat=calc_stat.min_fit_stat, method=method)
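# CalcModel and CalcStat are not shown in the snippet above.  The sketch below is a
# minimal, self-contained illustration of the call signatures that ui.load_user_model
# and ui.load_user_stat expect; the function names, parameters, and data here are
# made up for illustration and are not the xija implementations.
import numpy as np
import sherpa.ui as ui

def calc_model(pars, x):
    # User model: Sherpa calls it as func(pars, x) and expects an array matching x
    slope, offset = pars
    return slope * x + offset

def calc_stat(data, model, staterror=None, syserror=None, weight=None):
    # User statistic: return (total statistic, per-bin contributions)
    fvec = (data - model) ** 2
    return fvec.sum(), fvec

ui.load_arrays(1, np.arange(5.0), np.array([0.8, 3.1, 4.9, 7.2, 8.8]))
ui.load_user_model(calc_model, 'usermod')        # injects the global name 'usermod'
ui.add_user_pars('usermod', ['slope', 'offset'])
ui.set_model(1, 'usermod')
ui.load_user_stat('userstat', calc_stat, lambda x: np.ones_like(x))
ui.set_stat(userstat)                            # likewise 'userstat' becomes a global
ui.fit(1)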
def test_est_errors_works_single_parameter(mdlcls, method, getter, clean_ui):
    """This is issue #1397.

    Rather than require XSPEC, we create a subclass of the Parameter
    class to check it works. We are not too concerned with the actual
    results hence the relatively low tolerance on the numeric checks.
    """

    mdl = mdlcls()

    ui.load_arrays(1, [1, 2, 3, 4], [4, 2, 1, 3.5])
    ui.set_source(mdl)
    with SherpaVerbosity("ERROR"):
        ui.fit()

        # this is where #1397 fails with Const2
        method(mdl.con)

    atol = 1e-4
    assert ui.calc_stat() == pytest.approx(0.7651548418626658, abs=atol)

    results = getter()
    assert results.parnames == (f"{mdl.name}.con", )
    assert results.sigma == pytest.approx(1.0)

    assert results.parvals == pytest.approx((2.324060647544594, ), abs=atol)

    # The covar errors are -/+ 1.3704388763054511
    #  conf: -1.3704388763054511 / +1.3704388763054514
    #  proj: -1.3704388762971822 / +1.3704388763135826
    #
    err = 1.3704388763054511
    assert results.parmins == pytest.approx((-err, ), abs=atol)
    assert results.parmaxes == pytest.approx((err, ), abs=atol)
def fit_pix_values(t_ccd, esec, id=1):
    logger = logging.getLogger("sherpa")
    logger.setLevel(logging.WARN)
    data_id = id
    ui.clean()
    ui.set_method('simplex')
    ui.load_user_model(dark_scale_model, 'model')
    ui.add_user_pars('model', ['scale', 'dark_t_ref'])
    ui.set_model(data_id, 'model')
    ui.load_arrays(
        data_id,
        np.array(t_ccd),
        np.array(esec),
    )
    ui.set_staterror(data_id, 30 * np.ones(len(t_ccd)))
    model.scale.val = 0.588
    model.scale.min = 0.3
    model.scale.max = 1.0
    model.dark_t_ref.val = 500
    ui.freeze(model.scale)
    # If the temperature range spans more than 2 degrees, thaw and fit
    # for model.scale.  Else just use/return the fit of dark_t_ref.
    if np.max(t_ccd) - np.min(t_ccd) > 2:
        # Fit first for dark_t_ref
        ui.fit(data_id)
        ui.thaw(model.scale)
    ui.fit(data_id)
    return ui.get_fit_results(), ui.get_model(data_id)
def test_show_conf_basic(clean_ui):
    """Set up a very basic data/model/fit"""

    ui.load_arrays(1, [1, 2, 4], [3, 5, 5])
    ui.set_source(ui.scale1d.mdl)
    ui.fit()
    ui.conf()

    out = StringIO()
    ui.show_conf(outfile=out)
    got = out.getvalue().split('\n')
    assert len(got) == 12

    assert got[0] == "Confidence:Dataset = 1"
    assert got[1] == "Confidence Method = confidence"
    assert got[2] == "Iterative Fit Method = None"
    assert got[3] == "Fitting Method = levmar"
    assert got[4] == "Statistic = chi2gehrels"
    assert got[5] == "confidence 1-sigma (68.2689%) bounds:"
    assert got[6] == " Param Best-Fit Lower Bound Upper Bound"
    assert got[7] == " ----- -------- ----------- -----------"
    assert got[8] == " mdl.c0 4.19798 -1.85955 1.85955"
    assert got[9] == ""
    assert got[10] == ""
    assert got[11] == ""
def fit_sbp():
    ui.set_model(sbp)
    ui.thaw(sbp)
    ui.freeze(sbp.x_r)
    ui.freeze(sbp.gamma1)
    ui.fit()
def fit(self):
    dummy_data = np.zeros(1)
    dummy_times = np.arange(1)
    ui.load_arrays(1, dummy_times, dummy_data)

    ui.set_method(self.method)
    ui.get_method().config.update(sherpa_configs.get(self.method, {}))

    ui.load_user_model(CalcModel(self.model), 'xijamod')  # sets global xijamod
    ui.add_user_pars('xijamod', self.model.parnames)
    ui.set_model(1, 'xijamod')

    calc_stat = CalcStat(self.model, self.child_pipe)
    ui.load_user_stat('xijastat', calc_stat, lambda x: np.ones_like(x))
    ui.set_stat(xijastat)

    # Set frozen, min, and max attributes for each xijamod parameter
    for par in self.model.pars:
        xijamod_par = getattr(xijamod, par.full_name)
        xijamod_par.val = par.val
        xijamod_par.frozen = par.frozen
        xijamod_par.min = par.min
        xijamod_par.max = par.max

    if any(not par.frozen for par in self.model.pars):
        try:
            ui.fit(1)
            calc_stat.message['status'] = 'finished'
            logging.debug('Fit finished normally')
        except FitTerminated as err:
            calc_stat.message['status'] = 'terminated'
            logging.debug('Got FitTerminated exception {}'.format(err))

    self.child_pipe.send(calc_stat.message)
def test_show_all_basic(clean_ui):
    """Set up a very basic data/model/fit"""

    ui.load_arrays(1, [1, 2, 4], [3, 5, 5])
    ui.set_source(ui.scale1d.mdl)
    ui.fit()
    ui.conf()
    ui.proj()
    ui.covar()

    def get(value):
        out = StringIO()
        getattr(ui, f"show_{value}")(outfile=out)
        ans = out.getvalue()
        assert len(ans) > 1
        # trim the trailing "\n"
        return ans[:-1]

    # All we are really checking is that the show_all output is the
    # composite of the following. We are not checking that the
    # actual output makes sense for any command.
    #
    expected = get("data") + get("model") + get("fit") + get("conf") + \
        get("proj") + get("covar")

    got = get("all")
    assert expected == got
def test_err_estimate_single_parameter(strings, idval, otherids, clean_ui):
    """Ensure we can fit a single parameter with conf/proj/covar.

    Since this uses the same logic we only test the conf routine;
    ideally we'd use all of them, but that's harder to test.

    We use the same model as test_err_estimate_multi_ids but here we
    only want to evaluate the error for the mdl.c1 component.

    The fit and error analysis should be the same however the
    ordering is done.
    """

    # This is a bit ugly
    if strings:
        idval = str(idval)
        if type(otherids) == tuple:
            otherids = (str(otherids[0]), str(otherids[1]))
        else:
            otherids = [str(otherids[0]), str(otherids[1])]

    datasets = tuple([idval] + list(otherids))

    setup_err_estimate_multi_ids(strings=strings)
    ui.fit(idval, *otherids)

    # pick an odd ordering just to check we pick it up
    ui.conf(datasets[0], mdl.c1, datasets[1], datasets[2])
    res = ui.get_conf_results()

    assert res.datasets == datasets
    assert res.parnames == ("mdl.c1", )

    assert res.parmins == pytest.approx([ERR_EST_C1_MIN])
    assert res.parmaxes == pytest.approx([ERR_EST_C1_MAX])
def fit(self):
    dummy_data = np.zeros(1)
    dummy_times = np.arange(1)
    ui.load_arrays(1, dummy_times, dummy_data)

    ui.set_method(self.method)
    ui.get_method().config.update(sherpa_configs.get(self.method, {}))

    ui.load_user_model(CalcModel(self.model), 'xijamod')  # sets global xijamod
    ui.add_user_pars('xijamod', self.model.parnames)
    ui.set_model(1, 'xijamod')

    calc_stat = CalcStat(self.model, self.child_pipe, self.maxiter)
    ui.load_user_stat('xijastat', calc_stat, lambda x: np.ones_like(x))
    ui.set_stat(xijastat)

    # Set frozen, min, and max attributes for each xijamod parameter
    for par in self.model.pars:
        xijamod_par = getattr(xijamod, par.full_name)
        xijamod_par.val = par.val
        xijamod_par.frozen = par.frozen
        xijamod_par.min = par.min
        xijamod_par.max = par.max

    if any(not par.frozen for par in self.model.pars):
        try:
            ui.fit(1)
            calc_stat.message['status'] = 'finished'
            fit_logger.info('Fit finished normally')
        except FitTerminated as err:
            calc_stat.message['status'] = 'terminated'
            fit_logger.warning('Got FitTerminated exception {}'.format(err))

    self.child_pipe.send(calc_stat.message)
def test_err_estimate_model(strings, idval, otherids, clean_ui):
    """Ensure we can use model with conf/proj/covar.

    This is test_err_estimate_multi_ids but
      - added an extra model to each source (that evaluates to 0)
      - we include the model expression in the call.

    The fit and error analysis should be the same however the
    ordering is done.
    """

    # This is a bit ugly
    if strings:
        idval = str(idval)
        if type(otherids) == tuple:
            otherids = (str(otherids[0]), str(otherids[1]))
        else:
            otherids = [str(otherids[0]), str(otherids[1])]

    datasets = tuple([idval] + list(otherids))

    setup_err_estimate_multi_ids(strings=strings)

    zero = ui.create_model_component("scale1d", "zero")
    zero.c0 = 0
    zero.c0.freeze()

    for id in datasets:
        # In this case we have
        #   orig == mdl
        # but let's be explicit in case the code changes
        #
        orig = ui.get_source(id)
        ui.set_source(id, orig + zero)

    ui.fit(idval, *otherids)

    res = ui.get_fit_results()
    assert res.datasets == datasets
    assert res.numpoints == 10
    assert res.statval == pytest.approx(3.379367979541458)
    assert ui.calc_stat() == pytest.approx(4255.615602052843)
    assert mdl.c0.val == pytest.approx(46.046607302070015)
    assert mdl.c1.val == pytest.approx(-1.9783953989993386)

    # I wanted to have zero.c0 thawed at this stage, but then we can not
    # use the ERR_EST_C0/1_xxx values as the fit has changed (and mdl.c0
    # and zero.c0 are degenerate to boot).
    #
    ui.conf(*datasets, mdl)
    res = ui.get_conf_results()

    assert res.datasets == datasets
    assert res.parnames == ("mdl.c0", "mdl.c1")
    assert res.parmins == pytest.approx([ERR_EST_C0_MIN, ERR_EST_C1_MIN])
    assert res.parmaxes == pytest.approx([ERR_EST_C0_MAX, ERR_EST_C1_MAX])
def test_covar_as_argument(self):
    for stat in self.right_stats - {'wstat'}:
        ui.set_stat(stat)
        ui.fit()
        matrix = [[0.00064075, 0.01122127], [0.01122127, 0.20153251]]
        niter = 10
        stat, accept, params = ui.get_draws(niter=niter, covar_matrix=matrix)
        self.assertEqual(niter + 1, stat.size)
        self.assertEqual(niter + 1, accept.size)
        self.assertEqual((2, niter + 1), params.shape)
        self.assertTrue(numpy.any(accept))
def test_covar_as_none(self):
    for stat in self.right_stats - {'wstat'}:
        ui.set_stat(stat)
        ui.fit()
        ui.covar()
        niter = 10
        stat, accept, params = ui.get_draws(niter=niter)
        self.assertEqual(niter + 1, stat.size)
        self.assertEqual(niter + 1, accept.size)
        self.assertEqual((2, niter + 1), params.shape)
        self.assertTrue(numpy.any(accept))
def setUp(clean_ui, hide_logging):
    x = [-13, -5, -3, 2, 7, 12]
    y = [102.3, 16.7, -0.6, -6.7, -9.9, 33.2]
    dy = np.ones(6) * 5
    ui.load_arrays(1, x, y, dy)
    ui.set_source(ui.polynom1d.poly)
    poly.c1.thaw()
    poly.c2.thaw()
    ui.int_proj(poly.c0)
    ui.fit()
def test_user_model1d_fit():
    """Check can use in a fit."""

    mname = "test_model"
    ui.load_user_model(um_line, mname)
    ui.add_user_pars(mname, ["slope", "intercept"],
                     parvals=[1.0, 1.0])
    mdl = ui.get_model_component(mname)

    x = numpy.asarray([-2.4, 2.3, 5.4, 8.7, 12.3])

    # Set up the data to be scattered around y = -0.2 x + 2.8
    # Pick the deltas so that they sum to 0 (except for central
    # point)
    #
    slope = -0.2
    intercept = 2.8

    dy = numpy.asarray([0.1, -0.2, 0.14, -0.1, 0.2])
    ydata = x * slope + intercept + dy

    ui.load_arrays(1, x, ydata)

    ui.set_source(mname)
    ui.ignore(5.0, 6.0)  # drop the central bin

    ui.set_stat('leastsq')
    ui.set_method('simplex')
    ui.fit()

    fres = ui.get_fit_results()
    assert fres.succeeded
    assert fres.parnames == ('test_model.slope', 'test_model.intercept')
    assert fres.numpoints == 4
    assert fres.dof == 2

    # Tolerance has been adjusted to get the tests to pass on my
    # machine. It's really just to check that the values have changed
    # from their default values.
    #
    assert fres.parvals[0] == pytest.approx(slope, abs=0.01)
    assert fres.parvals[1] == pytest.approx(intercept, abs=0.05)

    # These should be the same values, so no need to use pytest.approx
    # (unless there's some internal translation between types done
    # somewhere?).
    #
    assert mdl.slope.val == fres.parvals[0]
    assert mdl.intercept.val == fres.parvals[1]
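# The um_line user model referenced above is not included in this snippet; based on
# the ["slope", "intercept"] parameter list, a minimal version (an assumption, not
# the original definition) would look like:
def um_line(pars, x):
    # Sherpa user model: called as func(pars, x); return the model evaluated at x
    slope, intercept = pars
    return slope * numpy.asarray(x) + intercept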
def test_covar_as_none(stat, clean_ui, setup_covar):
    ui.set_stat(stat)
    ui.fit()
    ui.covar()
    niter = 10
    stat, accept, params = ui.get_draws(niter=niter)
    n = niter + 1
    assert stat.size == n
    assert accept.size == n
    assert params.shape == (2, n)
    assert np.any(accept)
def test_covar_as_argument(stat, clean_ui, setup_covar):
    ui.set_stat(stat)
    ui.fit()
    matrix = [[0.00064075, 0.01122127], [0.01122127, 0.20153251]]
    niter = 10
    stat, accept, params = ui.get_draws(niter=niter, covar_matrix=matrix)
    n = niter + 1
    assert stat.size == n
    assert accept.size == n
    assert params.shape == (2, n)
    assert np.any(accept)
def tst_ui(self, thaw_c1):
    ui.load_arrays(1, self._x, self._y, self._e)
    ui.set_source(1, ui.polynom1d.mdl)
    if thaw_c1:
        ui.thaw(mdl.c1)
    ui.thaw(mdl.c2)
    mdl.c2 = 1
    ui.fit()
    if not thaw_c1:
        ui.thaw(mdl.c1)
        ui.fit()
    ui.conf()
    result = ui.get_conf_results()
    self.cmp_results(result)
def _fit_poly(fit_data, evt_times, degree, data_id=0):
    """
    Given event data transformed into Y or Z angle positions, and a degree of the
    desired fit polynomial, fit a polynomial to the data.

    :param fit_data: event y or z angle position data
    :param evt_times: times of event/fit_data
    :param degree: degree of polynomial to use for the fit model
    :param data_id: sherpa dataset id to use for the fit

    :returns: (sherpa model plot, sherpa model)
    """
    # Set initial value for fit data position error
    init_error = 1

    ui.clean()
    ui.load_arrays(data_id,
                   evt_times - evt_times[0],
                   fit_data,
                   np.zeros_like(fit_data) + init_error)
    v2("Fitting a line to the data to get reduced stat errors")
    # First just fit a line to get reduced errors on this set
    ui.polynom1d.line
    ui.set_model(data_id, 'line')
    ui.thaw('line.c1')
    ui.fit(data_id)
    fit = ui.get_fit_results()
    calc_error = init_error * np.sqrt(fit.rstat)
    ui.set_staterror(data_id, calc_error)
    # Then fit the specified model
    v2("Fitting a polynomial of degree {} to the data".format(degree))
    ui.polynom1d.fitpoly
    ui.freeze('fitpoly')
    # Thaw the coefficients requested by the degree of the desired polynomial
    ui.thaw('fitpoly.c0')
    fitpoly.c0.val = 0
    for deg in range(1, 1 + degree):
        ui.thaw("fitpoly.c{}".format(deg))
    ui.set_model(data_id, 'fitpoly')
    ui.fit(data_id)
    # Let's screw up Y on purpose
    if data_id == 0:
        fitpoly.c0.val = 0
        fitpoly.c1.val = 7.5e-05
        fitpoly.c2.val = -1.0e-09
        fitpoly.c3.val = 0
        fitpoly.c4.val = 0
    mp = ui.get_model_plot(data_id)
    model = ui.get_model(data_id)
    return mp, model
def setUp(self):
    # defensive programming (one of the tests has been seen to fail
    # when the whole test suite is run without this)
    ui.clean()

    self._old_logger_level = logger.getEffectiveLevel()
    logger.setLevel(logging.ERROR)

    x = [-13, -5, -3, 2, 7, 12]
    y = [102.3, 16.7, -0.6, -6.7, -9.9, 33.2]
    dy = np.ones(6) * 5
    ui.load_arrays(1, x, y, dy)
    ui.set_source(ui.polynom1d.poly)
    poly.c1.thaw()
    poly.c2.thaw()
    ui.int_proj(poly.c0)
    ui.fit()
def test_err_estimate_multi_ids(strings, idval, otherids, clean_ui):
    """Ensure we can use multiple ids with conf/proj/covar.

    Since this uses the same logic we only test the conf routine;
    ideally we'd use all of them, but that's harder to test.

    The fit and error analysis should be the same however the
    ordering is done.
    """

    # This is a bit ugly
    if strings:
        idval = str(idval)
        if type(otherids) == tuple:
            otherids = (str(otherids[0]), str(otherids[1]))
        else:
            otherids = [str(otherids[0]), str(otherids[1])]

    datasets = tuple([idval] + list(otherids))

    setup_err_estimate_multi_ids(strings=strings)
    ui.fit(idval, *otherids)

    # The "reduced statistic" is ~0.42 for the fit.
    #
    res = ui.get_fit_results()
    assert res.datasets == datasets
    assert res.numpoints == 10  # sum of datasets 1, 2, 3
    assert res.statval == pytest.approx(3.379367979541458)

    # since there's a model assigned to an unused dataset, the
    # overall statistic is not the same as res.statval.
    #
    assert ui.calc_stat() == pytest.approx(4255.615602052843)

    assert mdl.c0.val == pytest.approx(46.046607302070015)
    assert mdl.c1.val == pytest.approx(-1.9783953989993386)

    ui.conf(*datasets)
    res = ui.get_conf_results()

    assert res.datasets == datasets
    assert res.parnames == ("mdl.c0", "mdl.c1")
    assert res.parmins == pytest.approx([ERR_EST_C0_MIN, ERR_EST_C1_MIN])
    assert res.parmaxes == pytest.approx([ERR_EST_C0_MAX, ERR_EST_C1_MAX])
def ccd_bias(bias):
    """
    Calculate the mean and width of a gaussian fit to the bias histogram.
    `bias` is a numpy array.
    """
    import sherpa.ui as ui
    from numpy import histogram, arange

    values, bins = histogram(bias, bins=arange(bias.min(), bias.max() + 1))
    ui.load_arrays(1, bins[:-1], values)
    ui.set_model(ui.gauss1d.g1)
    g1.pos = bias.mean()
    g1.fwhm = bias.std()
    ui.fit()
    return g1
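# A short usage sketch for ccd_bias; the bias array here is synthetic, made up
# purely for illustration:
import numpy as np

bias = np.random.default_rng(0).normal(loc=400.0, scale=5.0, size=10000)
g1 = ccd_bias(bias)
print(g1.pos.val, g1.fwhm.val)  # fitted centroid and FWHM of the bias distribution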
def fit_model(
    model,
    comm=None,
    method='simplex',
    config=None,
    nofit=None,
    freeze_pars=freeze_pars,
    thaw_pars=[],
):
    dummy_data = np.zeros(1)
    dummy_times = np.arange(1)
    ui.load_arrays(1, dummy_times, dummy_data)

    ui.set_method(method)
    ui.get_method().config.update(config or sherpa_configs.get(method, {}))

    ui.load_user_model(CalcModel(model, comm), 'xijamod')
    ui.add_user_pars('xijamod', model.parnames)
    ui.set_model(1, 'xijamod')

    fit_parnames = set()
    for parname, parval in zip(model.parnames, model.parvals):
        getattr(xijamod, parname).val = parval
        fit_parnames.add(parname)
        if any([re.match(x + '$', parname) for x in freeze_pars]):
            fit_logger.info('Freezing ' + parname)
            ui.freeze(getattr(xijamod, parname))
            fit_parnames.remove(parname)
        if any([re.match(x + '$', parname) for x in thaw_pars]):
            fit_logger.info('Thawing ' + parname)
            ui.thaw(getattr(xijamod, parname))
            fit_parnames.add(parname)
            if 'tau' in parname:
                getattr(xijamod, parname).min = 0.1

    calc_stat = CalcStat(model, comm)
    ui.load_user_stat('xijastat', calc_stat, lambda x: np.ones_like(x))
    ui.set_stat(xijastat)

    if fit_parnames and not nofit:
        ui.fit(1)
    else:
        model.calc()
def tst_ui(thaw_c1, setUp, clean_ui):
    data, mdl = setUp

    ui.load_arrays(1, data.x, data.y, data.staterror)

    ui.set_source(1, ui.polynom1d.mdl)
    if thaw_c1:
        ui.thaw(mdl.c1)

    ui.thaw(mdl.c2)
    mdl.c2 = 1
    ui.fit()

    if not thaw_c1:
        ui.thaw(mdl.c1)
        ui.fit()

    ui.conf()

    result = ui.get_conf_results()
    cmp_results(result)
def test_electron_models():
    """ test import """

    from ..sherpa_models import InverseCompton, Synchrotron, Bremsstrahlung

    for modelclass in [InverseCompton, Synchrotron, Bremsstrahlung]:
        model = modelclass()

        model.ampl = 1e-8
        model.index = 2.1

        print(model)

        # point calc
        output = model.calc([p.val for p in model.pars], energies)

        # test as well ECPL
        model.cutoff = 100

        # integrated
        output = model.calc([p.val for p in model.pars], elo, xhi=ehi)

        if modelclass is InverseCompton:
            # Perform a fit to fake data
            ui.load_arrays(1, energies, test_spec_points, test_err_points)
            ui.set_model(model)
            ui.guess()
            ui.fit()

            # add FIR and NIR components and test verbose
            model.uNIR.set(1.0)
            model.uFIR.set(1.0)
            model.verbose.set(1)

            # test with integrated data
            ui.load_arrays(1, elo, ehi, test_spec_int, test_err_int, ui.Data1DInt)
            ui.set_model(model)
            ui.guess()
            ui.fit()
def fit_pix_values(t_ccd, esec, id=1):
    logger = logging.getLogger("sherpa")
    logger.setLevel(logging.WARN)
    data_id = id
    ui.clean()
    ui.set_method("simplex")
    ui.load_user_model(dark_scale_model, "model")
    ui.add_user_pars("model", ["scale", "dark_t_ref"])
    ui.set_model(data_id, "model")
    ui.load_arrays(data_id, np.array(t_ccd), np.array(esec), 0.1 * np.ones(len(t_ccd)))
    model.scale.val = 0.70
    model.dark_t_ref.val = 500
    ui.freeze(model.scale)
    # Fit dark_t_ref first with scale frozen, then thaw scale and fit both
    ui.fit(data_id)
    ui.thaw(model.scale)
    ui.fit(data_id)
    return ui.get_fit_results(), ui.get_model(data_id)
def _fit_poly(fit_data, evt_times, degree, data_id=0):
    """
    Given event data transformed into Y or Z angle positions, and a degree of the
    desired fit polynomial, fit a polynomial to the data.

    :param fit_data: event y or z angle position data
    :param evt_times: times of event/fit_data
    :param degree: degree of polynomial to use for the fit model
    :param data_id: sherpa dataset id to use for the fit

    :returns: (sherpa model plot, sherpa model)
    """
    # Set initial value for fit data position error
    init_error = 1

    ui.clean()
    ui.load_arrays(data_id,
                   evt_times - evt_times[0],
                   fit_data,
                   np.zeros_like(fit_data) + init_error)
    v2("Fitting a line to the data to get reduced stat errors")
    # First just fit a line to get reduced errors on this set
    ui.polynom1d.line
    ui.set_model(data_id, 'line')
    ui.thaw('line.c1')
    ui.fit(data_id)
    fit = ui.get_fit_results()
    calc_error = init_error * np.sqrt(fit.rstat)
    ui.set_staterror(data_id, calc_error)
    # Then fit the specified model
    v2("Fitting a polynomial of degree {} to the data".format(degree))
    ui.polynom1d.fitpoly
    ui.freeze('fitpoly')
    # Thaw the coefficients requested by the degree of the desired polynomial
    ui.thaw('fitpoly.c0')
    fitpoly.c0.val = 0
    for deg in range(1, 1 + degree):
        ui.thaw("fitpoly.c{}".format(deg))
    ui.set_model(data_id, 'fitpoly')
    ui.fit(data_id)
    mp = ui.get_model_plot(data_id)
    model = ui.get_model(data_id)
    return mp, model
def mwl_fit_high_level():
    """Use high-level Sherpa API.

    High-level = session and convenience functions

    Example: http://cxc.harvard.edu/sherpa/threads/simultaneous/
    Example: http://python4astronomers.github.io/fitting/spectrum.html
    """
    import sherpa.ui as ui

    fermi_data = FermiData()
    ui.load_arrays(fermi_data.name, fermi_data.x, fermi_data.y, fermi_data.staterror)

    ui.load_user_stat('fermi_stat', FermiStat.calc_stat, FermiStat.calc_staterror)
    # TODO: is there a good way to get the stat??
    # ui.get_stat('fermi_stat')
    # fermi_stat = ui._session._get_stat_by_name('fermi_stat')
    ui.set_stat(fermi_stat)
    # IPython.embed()

    iact_data = IACTData()
    ui.load_arrays(iact_data.name, iact_data.x, iact_data.y, iact_data.staterror)

    spec_model = ui.logparabola.spec_model
    spec_model.c1 = 0.5
    spec_model.c2 = 0.2
    spec_model.ampl = 5e-11

    ui.set_source(fermi_data.name, spec_model)
    ui.set_source(iact_data.name, spec_model)

    ui.notice(lo=1e-3, hi=None)

    # IPython.embed()
    ui.fit()

    return dict(results=ui.get_fit_results(), model=spec_model)
def mwl_fit_high_level():
    """Use high-level Sherpa API.

    High-level = session and convenience functions

    Example: http://cxc.harvard.edu/sherpa/threads/simultaneous/
    Example: http://python4astronomers.github.io/fitting/spectrum.html
    """
    import sherpa.ui as ui

    fermi_data = FermiData()
    ui.load_arrays(fermi_data.name, fermi_data.x, fermi_data.y, fermi_data.staterror)

    ui.load_user_stat('fermi_stat', FermiStat.calc_stat, FermiStat.calc_staterror)
    # TODO: is there a good way to get the stat??
    # ui.get_stat('fermi_stat')
    # fermi_stat = ui._session._get_stat_by_name('fermi_stat')
    ui.set_stat(fermi_stat)
    # IPython.embed()

    iact_data = IACTData()
    ui.load_arrays(iact_data.name, iact_data.x, iact_data.y, iact_data.staterror)

    spec_model = ui.logparabola.spec_model
    spec_model.c1 = 0.5
    spec_model.c2 = 0.2
    spec_model.ampl = 5e-11

    ui.set_source(fermi_data.name, spec_model)
    ui.set_source(iact_data.name, spec_model)

    ui.notice(lo=1e-3, hi=None)

    # IPython.embed()
    ui.fit()

    return Bunch(results=ui.get_fit_results(), model=spec_model)
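# FermiData, IACTData and FermiStat are defined elsewhere; the stand-ins below are an
# illustrative sketch of the interface mwl_fit_high_level() relies on.  The attribute
# values and the chi-squared form are assumptions, not the original classes:
import numpy as np

class FermiData:
    name = 'fermi'
    x = np.logspace(-4, -2, 10)      # assumed energy grid
    y = 1e-11 * x ** -2.0            # assumed flux points
    staterror = 0.1 * y

class FermiStat:
    @staticmethod
    def calc_stat(data, model, staterror=None, syserror=None, weight=None):
        # user statistic: return (total statistic, per-bin contributions)
        fvec = ((data - model) / staterror) ** 2
        return fvec.sum(), fvec

    @staticmethod
    def calc_staterror(data):
        return np.sqrt(np.abs(data))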
def fit_gauss_sbp():
    g1 = ui.gauss1d.g1
    ui.set_model(sbp + g1)
    ui.set_method('simplex')
    g1.fwhm = 5.0
    g1.pos = 7.0
    g1.ampl = 30000.
    ui.freeze(sbp.gamma1)
    ui.freeze(sbp.gamma2)
    ui.freeze(sbp.x_b)
    ui.freeze(sbp.x_r)
    ui.freeze(g1.fwhm)
    ui.freeze(g1.pos)
    ui.thaw(g1.ampl)
    ui.fit()
    ui.thaw(g1.fwhm)
    ui.thaw(g1.pos)
    ui.fit()
    ui.thaw(sbp)
    ui.freeze(sbp.x_r)
    ui.fit()
# coding: utf-8
import sherpa.ui as ui
from sherpa.models.template import KNNInterpolator

ui.load_data("custom_interp", "load_template_interpolator-bb_data.dat")
ui.load_template_interpolator('knn', KNNInterpolator, k=2, order=1)
ui.load_template_model('bb1', "bb_index.dat", template_interpolator_name='knn')
ui.set_model("custom_interp", "bb1")
ui.freeze("bb1.dummy")
ui.fit("custom_interp")
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling import models
import sherpa.ui as ui


class AstropyToSherpa(object):
    def __init__(self, model):
        self.model = model

    def __call__(self, pars, x):
        self.model.parameters[:] = pars
        return self.model(x)


ap_model = (models.Gaussian1D(amplitude=1.2, mean=0.9, stddev=0.5)
            + models.Gaussian1D(amplitude=2.0, mean=-0.9, stddev=0.75))

err = 0.02
x = np.arange(-3, 3, .1)
y = ap_model(x) + err * np.random.uniform(size=len(x))

sh_model = AstropyToSherpa(ap_model)

ui.load_arrays(1, x, y, err * np.ones_like(x))
ui.load_user_model(sh_model, 'sherpa_model')
ui.add_user_pars('sherpa_model', ap_model.param_names, ap_model.parameters)
ui.set_model(1, 'sherpa_model')

ui.fit(1)
ui.plot_fit(1)

print()
print('Params from astropy model: {}'.format(ap_model.parameters))
plt.show()
def run_fits(obsids, ax, user_pars=None,
             fixed_pars=None, guess_pars=None, label='model',
             per_obs_dir='per_obs_nfits',
             outdir=None, redo=False):

    if len(obsids) == 0:
        print "No obsids, nothing to fit"
        return None
    if user_pars is None:
        user_pars = USER_PARS

    if not os.path.exists(per_obs_dir):
        os.makedirs(per_obs_dir)

    obsfits = []
    for obsid in obsids:

        outdir = os.path.join(per_obs_dir, 'obs{:05d}'.format(obsid))
        if not os.path.exists(outdir):
            os.makedirs(outdir)

        model_file = os.path.join(outdir, '{}.pkl'.format(label))
        if os.path.exists(model_file) and not redo:
            #logger.warn('Using previous fit found in %s' % model_file)
            print model_file
            mod_pick = open(model_file, 'r')
            modelfit = cPickle.load(mod_pick)
            mod_pick.close()
            obsfits.append(modelfit)
            continue

        modelfit = {'label': obsid}

        ui.clean()
        data_id = 0
        obsdir = "%s/obs%05d" % (DATADIR, obsid)
        tf = open(os.path.join(obsdir, 'tilt.pkl'), 'r')
        tilt = cPickle.load(tf)
        tf.close()
        pf = open(os.path.join(obsdir, 'pos.pkl'), 'r')
        pos = cPickle.load(pf)
        pf.close()

        pos_data = pos[ax]
        point_error = 5
        pos_data_mean = np.mean(pos_data)
        ui.set_method('simplex')

        # Fit a line to get more reasonable errors
        init_staterror = np.zeros(len(pos_data)) + point_error
        ui.load_arrays(data_id,
                       pos['time'] - pos['time'][0],
                       pos_data - np.mean(pos_data),
                       init_staterror)
        ui.polynom1d.ypoly
        ui.set_model(data_id, 'ypoly')
        ui.thaw(ypoly.c0, ypoly.c1)
        ui.fit(data_id)
        fit = ui.get_fit_results()
        calc_staterror = init_staterror * np.sqrt(fit.rstat)
        ui.set_staterror(data_id, calc_staterror)
        # Confirm those errors
        ui.fit(data_id)
        fit = ui.get_fit_results()
        if (abs(fit.rstat - 1) > .2):
            raise ValueError('Reduced statistic not close to 1 for error calc')

        # Load up data to do the real model fit
        fit_times = pos['time']
        tm_func = tilt_model(tilt, fit_times, user_pars=user_pars)

        ui.get_data(data_id).name = str(obsid)
        ui.load_user_model(tm_func, 'tiltm%d' % data_id)
        ui.add_user_pars('tiltm%d' % data_id, user_pars)
        ui.set_method('simplex')
        ui.set_model(data_id, 'tiltm%d' % (data_id))
        ui.set_par('tiltm%d.diam' % data_id, 0)

        if fixed_pars is not None and ax in fixed_pars:
            for par in fixed_pars[ax]:
                ui.set_par('tiltm{}.{}'.format(0, par), fixed_pars[ax][par])
                ui.freeze('tiltm{}.{}'.format(0, par))

        if guess_pars is not None and ax in guess_pars:
            for par in guess_pars[ax]:
                ui.set_par('tiltm{}.{}'.format(0, par), guess_pars[ax][par])

        ui.show_all()
        # Fit the tilt model
        ui.fit(data_id)
        fitres = ui.get_fit_results()
        ui.confidence(data_id)
        myconf = ui.get_confidence_results()

        # save_fits(ax=ax, fit=fitres, conf=myconf, outdir=outdir)
        # plot_fits(ids,outdir=os.path.join(outdir,'fit_plots'))

        axmod = dict(fit=fitres, conf=myconf)
        for idx, modpar in enumerate(myconf.parnames):
            par = modpar.lstrip('tiltm0.')
            axmod[par] = ui.get_par('tiltm0.%s' % par).val
            axmod["{}_parmax".format(par)] = myconf.parmaxes[idx]
            axmod["{}_parmin".format(par)] = myconf.parmins[idx]
        modelfit[ax] = axmod

        mod_pick = open(model_file, 'w')
        cPickle.dump(modelfit, mod_pick)
        mod_pick.close()

        obsfits.append(modelfit)

        plot_fits([dict(obsid=obsid, data_id=data_id, ax=ax)],
                  posdir=obsdir,
                  outdir=outdir)

    return obsfits
data_id = fail_types[ftype]
ui.set_method('simplex')
ui.load_arrays(data_id, rates['time'], rates['rate'])
ui.set_staterror(data_id, rates['err'])
ftype_poly = ui.polynom1d(ftype)
ui.set_model(data_id, ftype_poly)
ui.thaw(ftype_poly.c0)
ui.thaw(ftype_poly.c1)
ui.notice(DateTime(trend_date_start).frac_year)
ui.fit(data_id)
ui.notice()
myfit = ui.get_fit_results()
axplot = ui.get_model_plot(data_id)
if myfit.succeeded:
    b = ftype_poly.c1.val * DateTime(trend_date_start).frac_year + ftype_poly.c0.val
    m = ftype_poly.c1.val
    rep_file = open('%s_fitfile.json' % ftype, 'w')
    rep_file.write(json.dumps(dict(time0=DateTime(trend_date_start).frac_year,
                                   datestart=trend_date_start,
                                   datestop=data_stop,
                                   bin=trend_type,
                                   m=m,
                                   b=b,
                                   comment="mx+b with b at time0 and m = (delta rate)/year"),
                              sort_keys=True,
# coding: utf-8
import sherpa.ui as ui

ui.load_data("default_interp", "bb_data.dat")
ui.load_template_model('bb1', "bb_index.dat")
ui.load_template_model('bb2', "bb_index.dat")
ui.set_model("default_interp", bb1 + bb2)
ui.freeze("bb1.dummy")
ui.freeze("bb2.dummy")
ui.fit("default_interp")
def fitne(ne_data, nemodeltype, tspec_data=None):

    '''
    Fits the gas number density profile according to the selected profile model.
    The fit is performed using python sherpa with the Levenberg-Marquardt
    method of minimizing chi-squared.

    Args:
    -----
    ne_data (astropy table): observed gas density profile in the form
        established by set_prof_data()
    nemodeltype (str): ne model type; one of the following:
        ['single_beta', 'cusped_beta', 'double_beta_tied', 'double_beta']
    tspec_data (astropy table): observed temperature profile in the form
        established by set_prof_data()

    Returns:
    --------
    nemodel (dictionary): stores relevant information about the model gas
        density profile

        nemodel['type']: ne model type; one of the following:
            ['single_beta', 'cusped_beta', 'double_beta_tied', 'double_beta']
        nemodel['parnames']: names of the stored ne model parameters
        nemodel['parvals']: parameter values of fitted gas density model
        nemodel['parmins']: lower error bound on parvals
        nemodel['parmaxes']: upper error bound on parvals
        nemodel['chisq']: chi-squared of fit
        nemodel['dof']: degrees of freedom
        nemodel['rchisq']: reduced chi-squared of fit
        nemodel['nefit']: ne model values at radial values matching
            tspec_data (the observed temperature profile)

    References:
    -----------
    python sherpa: https://github.com/sherpa/
    '''

    # remove any existing models and data
    ui.clean()

    # load data
    ui.load_arrays(1,
                   np.array(ne_data['radius']),
                   np.array(ne_data['ne']),
                   np.array(ne_data['ne_err']))

    # set guess and boundaries on params given selected model

    if nemodeltype == 'single_beta':

        # param estimate
        betaguess = 0.6
        rcguess = 20.  # units?????
        ne0guess = max(ne_data['ne'])

        # beta model
        ui.load_user_model(betamodel, "beta1d")
        ui.add_user_pars("beta1d", ["ne0", "rc", "beta"])
        ui.set_source(beta1d)  # creates model
        ui.set_full_model(beta1d)

        # set parameter values
        ui.set_par(beta1d.ne0, ne0guess, min=0, max=10. * max(ne_data['ne']))
        ui.set_par(beta1d.rc, rcguess, min=0.1, max=max(ne_data['radius']))
        ui.set_par(beta1d.beta, betaguess, min=0.1, max=1.)

    if nemodeltype == 'cusped_beta':

        # param estimate
        betaguess = 0.7
        rcguess = 5.  # [kpc]
        ne0guess = max(ne_data['ne'])
        alphaguess = 10.  # ????

        # cusped beta model
        ui.load_user_model(cuspedbetamodel, "cuspedbeta1d")
        ui.add_user_pars("cuspedbeta1d", ["ne0", "rc", "beta", "alpha"])
        ui.set_source(cuspedbeta1d)  # creates model
        ui.set_full_model(cuspedbeta1d)

        # set parameter values
        ui.set_par(cuspedbeta1d.ne0, ne0guess,
                   min=0.001 * max(ne_data['ne']), max=10. * max(ne_data['ne']))
        ui.set_par(cuspedbeta1d.rc, rcguess, min=0.1, max=max(ne_data['radius']))
        ui.set_par(cuspedbeta1d.beta, betaguess, min=0.1, max=1.)
        ui.set_par(cuspedbeta1d.alpha, alphaguess, min=0., max=100.)

    if nemodeltype == 'double_beta':

        # param estimate
        ne0guess1 = max(ne_data['ne'])  # [cm^-3]
        rcguess1 = 10.  # [kpc]
        betaguess1 = 0.6

        ne0guess2 = 0.01 * max(ne_data['ne'])  # [cm^-3]
        rcguess2 = 100.  # [kpc]
        betaguess2 = 0.6

        # double beta model
        ui.load_user_model(doublebetamodel, "doublebeta1d")
        ui.add_user_pars("doublebeta1d", ["ne01", "rc1", "beta1", "ne02", "rc2", "beta2"])
        ui.set_source(doublebeta1d)  # creates model
        ui.set_full_model(doublebeta1d)

        # set parameter values
        ui.set_par(doublebeta1d.ne01, ne0guess1,
                   min=0.0001 * max(ne_data['ne']), max=100. * max(ne_data['ne']))
        ui.set_par(doublebeta1d.rc1, rcguess1, min=0.1, max=max(ne_data['radius']))
        ui.set_par(doublebeta1d.beta1, betaguess1, min=0.1, max=1.)

        ui.set_par(doublebeta1d.ne02, ne0guess2,
                   min=0.0001 * max(ne_data['ne']), max=100. * max(ne_data['ne']))
        ui.set_par(doublebeta1d.rc2, rcguess2, min=10., max=max(ne_data['radius']))
        ui.set_par(doublebeta1d.beta2, betaguess2, min=0.1, max=1.)

    if nemodeltype == 'double_beta_tied':

        # param estimate
        ne0guess1 = max(ne_data['ne'])
        rcguess1 = 10.
        betaguess1 = 0.6

        ne0guess2 = 0.01 * max(ne_data['ne'])
        rcguess2 = 100.

        # double beta model
        ui.load_user_model(doublebetamodel_tied, "doublebeta1d_tied")
        ui.add_user_pars("doublebeta1d_tied", ["ne01", "rc1", "beta1", "ne02", "rc2"])
        ui.set_source(doublebeta1d_tied)  # creates model
        ui.set_full_model(doublebeta1d_tied)

        # set parameter values
        ui.set_par(doublebeta1d_tied.ne01, ne0guess1,
                   min=0.00001 * max(ne_data['ne']), max=100. * max(ne_data['ne']))
        ui.set_par(doublebeta1d_tied.rc1, rcguess1, min=0.1, max=max(ne_data['radius']))
        ui.set_par(doublebeta1d_tied.beta1, betaguess1, min=0.1, max=1.)

        ui.set_par(doublebeta1d_tied.ne02, ne0guess2,
                   min=0.00001 * max(ne_data['ne']), max=100. * max(ne_data['ne']))
        ui.set_par(doublebeta1d_tied.rc2, rcguess2, min=10., max=max(ne_data['radius']))

    # fit model
    ui.fit()

    # fit statistics
    chisq = ui.get_fit_results().statval
    dof = ui.get_fit_results().dof
    rchisq = ui.get_fit_results().rstat

    # error analysis
    ui.set_conf_opt("max_rstat", 1e9)
    ui.conf()

    parvals = np.array(ui.get_conf_results().parvals)
    parmins = np.array(ui.get_conf_results().parmins)
    parmaxes = np.array(ui.get_conf_results().parmaxes)

    parnames = [str(x).split('.')[1] for x in list(ui.get_conf_results().parnames)]

    # where errors are stuck on a hard limit, change error to Inf
    if None in list(parmins):
        ind = np.where(parmins == np.array(None))[0]
        parmins[ind] = float('Inf')

    if None in list(parmaxes):
        ind = np.where(parmaxes == np.array(None))[0]
        parmaxes[ind] = float('Inf')

    # set up a dictionary to contain useful results of fit
    nemodel = {}
    nemodel['type'] = nemodeltype
    nemodel['parnames'] = parnames
    nemodel['parvals'] = parvals
    nemodel['parmins'] = parmins
    nemodel['parmaxes'] = parmaxes
    nemodel['chisq'] = chisq
    nemodel['dof'] = dof
    nemodel['rchisq'] = rchisq

    # if tspec_data included, calculate value of ne model at the same radius
    # positions as temperature profile
    if tspec_data is not None:

        if nemodeltype == 'double_beta':
            nefit_arr = doublebetamodel(nemodel['parvals'],
                                        np.array(tspec_data['radius']))  # [cm-3]

        if nemodeltype == 'single_beta':
            nefit_arr = betamodel(nemodel['parvals'],
                                  np.array(tspec_data['radius']))  # [cm-3]

        if nemodeltype == 'cusped_beta':
            nefit_arr = cuspedbetamodel(nemodel['parvals'],
                                        np.array(tspec_data['radius']))  # [cm-3]

        if nemodeltype == 'double_beta_tied':
            nefit_arr = doublebetamodel_tied(nemodel['parvals'],
                                             np.array(tspec_data['radius']))  # [cm-3]

        nemodel['nefit'] = nefit_arr

    return nemodel
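# The profile functions (betamodel, cuspedbetamodel, doublebetamodel, ...) are defined
# elsewhere in the package.  For reference, a single-beta sketch consistent with the
# ["ne0", "rc", "beta"] parameter ordering used above; the functional form is the
# standard beta model, but the exact implementation here is an assumption:
import numpy as np

def betamodel(pars, r):
    # ne(r) = ne0 * (1 + (r / rc)^2) ** (-3 * beta / 2)
    ne0, rc, beta = pars
    r = np.asarray(r, dtype=float)
    return ne0 * (1.0 + (r / rc) ** 2) ** (-1.5 * beta)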
#ui.set_stat('leastsq')
#ui.load_user_stat("chi2custom", my_chi2, my_err)
#ui.set_stat(chi2custom)

ui.load_user_model(scaled_warm_frac, 'model')
ui.add_user_pars('model', ['scale', 'offset'])
ui.set_model(data_id, 'model')
ui.load_arrays(data_id,
               np.array(times),
               np.array(bad_frac))
fmod = ui.get_model_component('model')
fmod.scale.min = 1e-9
fmod.offset.val = 0
ui.freeze(fmod.offset)
max_err = np.max([err_high, err_low], axis=0)
ui.set_staterror(data_id, max_err)
ui.fit(data_id)
f = ui.get_fit_results()
scale = f.rstat ** .5
ui.set_staterror(data_id, max_err * scale)
ui.fit()
f = ui.get_fit_results()
if f.rstat > 3:
    raise ValueError
ui.confidence()
conf = ui.get_confidence_results()
fit_info[range_type][mag][ftype][limit] = dict(
    fit=str(f),
    conf=str(conf),
    fmod=fmod,
    fit_orig=f,
    conf_orig=conf,
    mag_mean=np.mean(data[range_type][mag][ok]['mag_mean']))
# coding: utf-8
import sherpa.ui as ui

ui.load_data("default_interp", "load_template_with_interpolation-bb_data.dat")
ui.load_template_model('bb1', "bb_index.dat")
ui.set_model("default_interp", bb1)
ui.set_method('gridsearch')
ui.set_method_opt('sequence', ui.get_model_component('bb1').parvals)
ui.fit("default_interp")
# coding: utf-8
import sherpa.ui as ui

ui.load_data("load_template_without_interpolation-bb_data.dat")
ui.load_template_model('bb1', "bb_index.dat", template_interpolator_name=None)
ui.set_source('bb1')
ui.fit()
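# After any of the template fits above, the usual Sherpa accessors apply; a short
# follow-up sketch (not part of the original scripts) for inspecting the result:
print(ui.get_fit_results().format())    # fit statistic, dof, best-fit parameters
print(ui.get_model_component('bb1'))    # current state of the template model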