def fit_pix_values(t_ccd, esec, id=1):
    logger = logging.getLogger("sherpa")
    logger.setLevel(logging.WARN)
    data_id = id
    ui.clean()
    ui.set_method('simplex')
    ui.load_user_model(dark_scale_model, 'model')
    ui.add_user_pars('model', ['scale', 'dark_t_ref'])
    ui.set_model(data_id, 'model')
    ui.load_arrays(data_id,
                   np.array(t_ccd),
                   np.array(esec))
    ui.set_staterror(data_id, 30 * np.ones(len(t_ccd)))
    model.scale.val = 0.588
    model.scale.min = 0.3
    model.scale.max = 1.0
    model.dark_t_ref.val = 500
    ui.freeze(model.scale)
    # If more than 2 degrees in the temperature range,
    # thaw and fit for model.scale.  Else just use/return
    # the fit of dark_t_ref
    if np.max(t_ccd) - np.min(t_ccd) > 2:
        # Fit first for dark_t_ref
        ui.fit(data_id)
        ui.thaw(model.scale)
    ui.fit(data_id)
    return ui.get_fit_results(), ui.get_model(data_id)
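# A minimal usage sketch (not from the original source): the data values
# below are made up, and it assumes `numpy as np`, `sherpa.ui as ui`,
# `logging`, and a user-model function dark_scale_model(pars, x) are
# already defined in the module.
#
# t_ccd = np.array([-15.0, -13.5, -12.0, -10.5, -9.0])  # CCD temps (degC)
# esec = np.array([150.0, 180.0, 220.0, 270.0, 330.0])  # dark current (e-/sec)
# fit_results, fit_model = fit_pix_values(t_ccd, esec, id=1)
# print(fit_results.parvals)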
def fit(self, method='simplex'):
    """Initiate a fit of the model using Sherpa.

    :param method: Method to be used to fit the model (e.g. simplex,
        levmar, or moncar)
    """
    dummy_data = np.zeros(1)
    dummy_times = np.arange(1)
    ui.load_arrays(1, dummy_times, dummy_data)
    ui.set_method(method)
    ui.get_method().config.update(sherpa_configs.get(method, {}))
    ui.load_user_model(CalcModel(self.model, self.fit_logger), 'xijamod')  # sets global xijamod
    ui.add_user_pars('xijamod', self.model.parnames)
    ui.set_model(1, 'xijamod')
    calc_stat = CalcStat(self.model, self.fit_logger)
    ui.load_user_stat('xijastat', calc_stat, lambda x: np.ones_like(x))
    ui.set_stat(xijastat)

    # Set frozen, min, and max attributes for each xijamod parameter
    for par in self.model.pars:
        xijamod_par = getattr(xijamod, par.full_name)
        xijamod_par.val = par.val
        xijamod_par.frozen = par.frozen
        xijamod_par.min = par.min
        xijamod_par.max = par.max

    ui.fit(1)
    self.save_snapshot(fit_stat=calc_stat.min_fit_stat, method=method)
def fit(self):
    dummy_data = np.zeros(1)
    dummy_times = np.arange(1)
    ui.load_arrays(1, dummy_times, dummy_data)
    ui.set_method(self.method)
    ui.get_method().config.update(sherpa_configs.get(self.method, {}))
    ui.load_user_model(CalcModel(self.model), 'xijamod')  # sets global xijamod
    ui.add_user_pars('xijamod', self.model.parnames)
    ui.set_model(1, 'xijamod')
    calc_stat = CalcStat(self.model, self.child_pipe)
    ui.load_user_stat('xijastat', calc_stat, lambda x: np.ones_like(x))
    ui.set_stat(xijastat)

    # Set frozen, min, and max attributes for each xijamod parameter
    for par in self.model.pars:
        xijamod_par = getattr(xijamod, par.full_name)
        xijamod_par.val = par.val
        xijamod_par.frozen = par.frozen
        xijamod_par.min = par.min
        xijamod_par.max = par.max

    if any(not par.frozen for par in self.model.pars):
        try:
            ui.fit(1)
            calc_stat.message['status'] = 'finished'
            logging.debug('Fit finished normally')
        except FitTerminated as err:
            calc_stat.message['status'] = 'terminated'
            logging.debug('Got FitTerminated exception {}'.format(err))

    self.child_pipe.send(calc_stat.message)
def fit(self):
    dummy_data = np.zeros(1)
    dummy_times = np.arange(1)
    ui.load_arrays(1, dummy_times, dummy_data)
    ui.set_method(self.method)
    ui.get_method().config.update(sherpa_configs.get(self.method, {}))
    ui.load_user_model(CalcModel(self.model), 'xijamod')  # sets global xijamod
    ui.add_user_pars('xijamod', self.model.parnames)
    ui.set_model(1, 'xijamod')
    calc_stat = CalcStat(self.model, self.child_pipe, self.maxiter)
    ui.load_user_stat('xijastat', calc_stat, lambda x: np.ones_like(x))
    ui.set_stat(xijastat)

    # Set frozen, min, and max attributes for each xijamod parameter
    for par in self.model.pars:
        xijamod_par = getattr(xijamod, par.full_name)
        xijamod_par.val = par.val
        xijamod_par.frozen = par.frozen
        xijamod_par.min = par.min
        xijamod_par.max = par.max

    if any(not par.frozen for par in self.model.pars):
        try:
            ui.fit(1)
            calc_stat.message['status'] = 'finished'
            fit_logger.info('Fit finished normally')
        except FitTerminated as err:
            calc_stat.message['status'] = 'terminated'
            fit_logger.warning('Got FitTerminated exception {}'.format(err))

    self.child_pipe.send(calc_stat.message)
def test_user_model1d_fit():
    """Check can use in a fit."""

    mname = "test_model"
    ui.load_user_model(um_line, mname)
    ui.add_user_pars(mname, ["slope", "intercept"],
                     parvals=[1.0, 1.0])

    mdl = ui.get_model_component(mname)

    x = numpy.asarray([-2.4, 2.3, 5.4, 8.7, 12.3])

    # Set up the data to be scattered around y = -0.2 x + 2.8
    # Pick the deltas so that they sum to 0 (except for central
    # point)
    #
    slope = -0.2
    intercept = 2.8

    dy = numpy.asarray([0.1, -0.2, 0.14, -0.1, 0.2])
    ydata = x * slope + intercept + dy

    ui.load_arrays(1, x, ydata)

    ui.set_source(mname)
    ui.ignore(5.0, 6.0)  # drop the central bin

    ui.set_stat('leastsq')
    ui.set_method('simplex')
    ui.fit()

    fres = ui.get_fit_results()
    assert fres.succeeded
    assert fres.parnames == ('test_model.slope', 'test_model.intercept')
    assert fres.numpoints == 4
    assert fres.dof == 2

    # Tolerance has been adjusted to get the tests to pass on my
    # machine. It's really just to check that the values have changed
    # from their default values.
    #
    assert fres.parvals[0] == pytest.approx(slope, abs=0.01)
    assert fres.parvals[1] == pytest.approx(intercept, abs=0.05)

    # These should be the same values, so no need to use pytest.approx
    # (unless there's some internal translation between types done
    # somewhere?).
    #
    assert mdl.slope.val == fres.parvals[0]
    assert mdl.intercept.val == fres.parvals[1]
def setup_err_estimate_multi_ids(strings=False):
    """Create the environment used in test_err_estimate_xxx tests.

    The model being fit is polynom1d with c0=50 c1=-2 and was evaluated
    and passed through sherpa.utils.poisson_noise to create the datasets.

    Since we can have string or integer ids we allow either, but do not
    try to mix them.
    """

    if strings:
        id1 = "1"
        id2 = "2"
        id3 = "3"
    else:
        id1 = 1
        id2 = 2
        id3 = 3

    ui.load_arrays(id1, [1, 3, 7, 12], [50, 40, 27, 20])
    ui.load_arrays(id2, [-3, 4, 5], [55, 34, 37])
    ui.load_arrays(id3, [10, 12, 20], [24, 26, 7])

    # NOTE: dataset "not-used" is not used in the fit and is not
    # drawn from the distribution used to create the other datasets.
    #
    ui.load_arrays("not-used", [2000, 2010, 2020], [10, 12, 14])

    mdl = ui.create_model_component("polynom1d", "mdl")
    mdl.c1.thaw()
    ui.set_source(id1, mdl)
    ui.set_source(id2, mdl)
    ui.set_source(id3, mdl)

    # apply the model to dataset not-used just so we can check we
    # don't end up using it
    mdl_not_used = ui.create_model_component("scale1d", "mdl_not_used")
    ui.set_source("not-used", mdl + mdl_not_used)

    # use cstat so we have an approximate goodness-of-fit just to
    # check we are getting sensible results.
    #
    ui.set_stat("cstat")
    ui.set_method("simplex")
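# A sketch of how this setup is typically consumed (an assumption, not part
# of the original source): fit the three datasets simultaneously against the
# shared model, then run the covariance-based error estimate.
#
# setup_err_estimate_multi_ids()
# ui.fit(1, 2, 3)
# ui.covar(1, 2, 3)
# res = ui.get_covar_results()
# print(res.parmins, res.parmaxes)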
def fit_model(model,
              comm=None,
              method='simplex',
              config=None,
              nofit=None,
              freeze_pars=freeze_pars,
              thaw_pars=[],
              ):
    dummy_data = np.zeros(1)
    dummy_times = np.arange(1)
    ui.load_arrays(1, dummy_times, dummy_data)

    ui.set_method(method)
    ui.get_method().config.update(config or sherpa_configs.get(method, {}))

    ui.load_user_model(CalcModel(model, comm), 'xijamod')
    ui.add_user_pars('xijamod', model.parnames)
    ui.set_model(1, 'xijamod')

    fit_parnames = set()
    for parname, parval in zip(model.parnames, model.parvals):
        getattr(xijamod, parname).val = parval
        fit_parnames.add(parname)
        if any([re.match(x + '$', parname) for x in freeze_pars]):
            fit_logger.info('Freezing ' + parname)
            ui.freeze(getattr(xijamod, parname))
            fit_parnames.remove(parname)
        if any([re.match(x + '$', parname) for x in thaw_pars]):
            fit_logger.info('Thawing ' + parname)
            ui.thaw(getattr(xijamod, parname))
            fit_parnames.add(parname)
        if 'tau' in parname:
            getattr(xijamod, parname).min = 0.1

    calc_stat = CalcStat(model, comm)
    ui.load_user_stat('xijastat', calc_stat, lambda x: np.ones_like(x))
    ui.set_stat(xijastat)

    if fit_parnames and not nofit:
        ui.fit(1)
    else:
        model.calc()
def fit_pix_values(t_ccd, esec, id=1):
    logger = logging.getLogger("sherpa")
    logger.setLevel(logging.WARN)
    data_id = id
    ui.clean()
    ui.set_method("simplex")
    ui.load_user_model(dark_scale_model, "model")
    ui.add_user_pars("model", ["scale", "dark_t_ref"])
    ui.set_model(data_id, "model")
    ui.load_arrays(data_id,
                   np.array(t_ccd),
                   np.array(esec),
                   0.1 * np.ones(len(t_ccd)))
    model.scale.val = 0.70
    model.dark_t_ref.val = 500
    ui.freeze(model.scale)
    # Fit first for dark_t_ref with scale frozen, then thaw scale
    # and fit both parameters together.
    ui.fit(data_id)
    ui.thaw(model.scale)
    ui.fit(data_id)
    return ui.get_fit_results(), ui.get_model(data_id)
def fit_gauss_sbp():
    g1 = ui.gauss1d.g1
    ui.set_model(sbp + g1)
    ui.set_method('simplex')

    g1.fwhm = 5.0
    g1.pos = 7.0
    g1.ampl = 30000.

    # Stage 1: fit only the gaussian amplitude
    ui.freeze(sbp.gamma1)
    ui.freeze(sbp.gamma2)
    ui.freeze(sbp.x_b)
    ui.freeze(sbp.x_r)
    ui.freeze(g1.fwhm)
    ui.freeze(g1.pos)
    ui.thaw(g1.ampl)
    ui.fit()

    # Stage 2: also fit the gaussian width and position
    ui.thaw(g1.fwhm)
    ui.thaw(g1.pos)
    ui.fit()

    # Stage 3: thaw the broken power-law component, keeping x_r frozen
    ui.thaw(sbp)
    ui.freeze(sbp.x_r)
    ui.fit()
# coding: utf-8
import sherpa.ui as ui

ui.load_data("default_interp", "load_template_with_interpolation-bb_data.dat")
ui.load_template_model('bb1', "bb_index.dat")
ui.set_model("default_interp", bb1)
ui.set_method('gridsearch')
ui.set_method_opt('sequence', ui.get_model_component('bb1').parvals)
ui.fit("default_interp")
# Assumes module-level imports (os, pickle, numpy as np, sherpa.ui as ui)
# and helpers (USER_PARS, DATADIR, tilt_model, plot_fits) defined elsewhere.
def run_fits(obsids, ax, user_pars=None,
             fixed_pars=None, guess_pars=None, label='model',
             per_obs_dir='per_obs_nfits',
             outdir=None, redo=False):

    if len(obsids) == 0:
        print("No obsids, nothing to fit")
        return None
    if user_pars is None:
        user_pars = USER_PARS

    if not os.path.exists(per_obs_dir):
        os.makedirs(per_obs_dir)

    obsfits = []
    for obsid in obsids:

        outdir = os.path.join(per_obs_dir, 'obs{:05d}'.format(obsid))
        if not os.path.exists(outdir):
            os.makedirs(outdir)

        model_file = os.path.join(outdir, '{}.pkl'.format(label))
        if os.path.exists(model_file) and not redo:
            #logger.warn('Using previous fit found in %s' % model_file)
            print(model_file)
            with open(model_file, 'rb') as mod_pick:
                modelfit = pickle.load(mod_pick)
            obsfits.append(modelfit)
            continue

        modelfit = {'label': obsid}

        ui.clean()
        data_id = 0
        obsdir = "%s/obs%05d" % (DATADIR, obsid)

        with open(os.path.join(obsdir, 'tilt.pkl'), 'rb') as tf:
            tilt = pickle.load(tf)
        with open(os.path.join(obsdir, 'pos.pkl'), 'rb') as pf:
            pos = pickle.load(pf)

        pos_data = pos[ax]
        point_error = 5
        pos_data_mean = np.mean(pos_data)
        ui.set_method('simplex')

        # Fit a line to get more reasonable errors
        init_staterror = np.zeros(len(pos_data)) + point_error
        ui.load_arrays(data_id,
                       pos['time'] - pos['time'][0],
                       pos_data - np.mean(pos_data),
                       init_staterror)
        ui.polynom1d.ypoly
        ui.set_model(data_id, 'ypoly')
        ui.thaw(ypoly.c0, ypoly.c1)
        ui.fit(data_id)
        fit = ui.get_fit_results()
        calc_staterror = init_staterror * np.sqrt(fit.rstat)
        ui.set_staterror(data_id, calc_staterror)
        # Confirm those errors
        ui.fit(data_id)
        fit = ui.get_fit_results()
        if abs(fit.rstat - 1) > .2:
            raise ValueError('Reduced statistic not close to 1 for error calc')

        # Load up data to do the real model fit
        fit_times = pos['time']
        tm_func = tilt_model(tilt, fit_times, user_pars=user_pars)

        ui.get_data(data_id).name = str(obsid)
        ui.load_user_model(tm_func, 'tiltm%d' % data_id)
        ui.add_user_pars('tiltm%d' % data_id, user_pars)
        ui.set_method('simplex')
        ui.set_model(data_id, 'tiltm%d' % (data_id))
        ui.set_par('tiltm%d.diam' % data_id, 0)

        if fixed_pars is not None and ax in fixed_pars:
            for par in fixed_pars[ax]:
                ui.set_par('tiltm{}.{}'.format(0, par), fixed_pars[ax][par])
                ui.freeze('tiltm{}.{}'.format(0, par))

        if guess_pars is not None and ax in guess_pars:
            for par in guess_pars[ax]:
                ui.set_par('tiltm{}.{}'.format(0, par), guess_pars[ax][par])

        ui.show_all()
        # Fit the tilt model
        ui.fit(data_id)
        fitres = ui.get_fit_results()
        ui.confidence(data_id)
        myconf = ui.get_confidence_results()

        # save_fits(ax=ax, fit=fitres, conf=myconf, outdir=outdir)
        # plot_fits(ids, outdir=os.path.join(outdir, 'fit_plots'))

        axmod = dict(fit=fitres, conf=myconf)
        for idx, modpar in enumerate(myconf.parnames):
            # Strip the 'tiltm0.' prefix by slicing (str.lstrip removes
            # characters, not a prefix, and would mangle some names)
            par = modpar[len('tiltm0.'):]
            axmod[par] = ui.get_par('tiltm0.%s' % par).val
            axmod["{}_parmax".format(par)] = myconf.parmaxes[idx]
            axmod["{}_parmin".format(par)] = myconf.parmins[idx]
        modelfit[ax] = axmod

        with open(model_file, 'wb') as mod_pick:
            pickle.dump(modelfit, mod_pick)

        obsfits.append(modelfit)

        plot_fits([dict(obsid=obsid, data_id=data_id, ax=ax)],
                  posdir=obsdir,
                  outdir=outdir)

    return obsfits
# return np.zeros_like(data)
for limit in [50, 75, 100, 125, 150, 200, 1000]:
    warm_frac = data[range_type][mag][ok]["n{}".format(limit)]
    print("range_type {}".format(range_type))
    print("mag {}".format(mag))
    print("limit is {}".format(limit))
    extent = np.max(warm_frac) - np.min(warm_frac)
    wp_min = np.min(warm_frac)
    warm_frac = warm_frac - wp_min

    def scaled_warm_frac(pars, x):
        scaled = pars[1] + warm_frac * pars[0]
        return scaled

    data_id = 1
    ui.set_method("simplex")
    ui.set_stat("chi2datavar")
    # ui.set_stat('leastsq')
    # ui.load_user_stat("chi2custom", my_chi2, my_err)
    # ui.set_stat(chi2custom)
    ui.load_user_model(scaled_warm_frac, "model")
    ui.add_user_pars("model", ["scale", "offset"])
    ui.set_model(data_id, "model")
    ui.load_arrays(data_id, np.array(times), np.array(bad_frac))
    fmod = ui.get_model_component("model")
    fmod.scale.min = 1e-9
    max_err = np.max([data[range_type][mag][ok]["err_high"],
                      data[range_type][mag][ok]["err_low"]], axis=0)
    ui.set_staterror(data_id, max_err)
    ui.fit(data_id)
    f = ui.get_fit_results()
    scale = f.rstat ** 0.5
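# A possible continuation (a sketch, not in the original fragment): rescale
# the errors by sqrt(reduced statistic) and refit, the same pattern run_fits()
# above uses to get errors consistent with rstat ~ 1.
#
# ui.set_staterror(data_id, max_err * scale)
# ui.fit(data_id)
# f = ui.get_fit_results()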
# coding: utf-8
import sherpa.ui as ui

ui.load_data("bb_data.dat")
ui.load_template_model('bb1', "bb_index.dat",
                       template_interpolator_name=None)
ui.set_method('gridsearch')
ui.set_method_opt('sequence', [[2234, 0], [3512, 0]])
ui.set_source('bb1')
ui.fit()
err_high[err_high == 0] = .0001
err_low[err_low == 0] = .0001

for limit in warm_limits:
    print("range type {}".format(range_type))
    print("mag {}".format(mag))
    print("limit is {}".format(limit))
    print("ftype {}".format(limit))
    warm_frac = data[range_type][mag][ok]['n{}'.format(limit)]
    extent = np.max(warm_frac) - np.min(warm_frac)
    wp_min = np.min(warm_frac)
    warm_frac = warm_frac - wp_min

    def scaled_warm_frac(pars, x):
        scaled = pars[1] + warm_frac * pars[0]
        return scaled

    data_id = 1
    ui.set_method('simplex')
    ui.set_stat('chi2datavar')
    #ui.set_stat('leastsq')
    #ui.load_user_stat("chi2custom", my_chi2, my_err)
    #ui.set_stat(chi2custom)
    ui.load_user_model(scaled_warm_frac, 'model')
    ui.add_user_pars('model', ['scale', 'offset'])
    ui.set_model(data_id, 'model')
    ui.load_arrays(data_id, np.array(times), np.array(bad_frac))
    fmod = ui.get_model_component('model')
    fmod.scale.min = 1e-9
    fmod.offset.val = 0
    ui.freeze(fmod.offset)
    max_err = np.max([err_high, err_low], axis=0)
trend_date_start = '2008:001:00:00:00.000'

fail_types = {'no_trak': 1,
              'bad_trak': 2,
              'obc_bad': 3}

ui.clean()
for ftype in fail_types:
    filename = "by%s_data_%s.txt" % (trend_type, ftype)
    rates = asciitable.read(filename)
    data_id = fail_types[ftype]
    ui.set_method('simplex')
    ui.load_arrays(data_id, rates['time'], rates['rate'])
    ui.set_staterror(data_id, rates['err'])
    ftype_poly = ui.polynom1d(ftype)
    ui.set_model(data_id, ftype_poly)
    ui.thaw(ftype_poly.c0)
    ui.thaw(ftype_poly.c1)
    ui.notice(DateTime(trend_date_start).frac_year)
    ui.fit(data_id)
    ui.notice()
    myfit = ui.get_fit_results()
    axplot = ui.get_model_plot(data_id)
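# A hedged follow-up sketch (assumes matplotlib is available; not part of the
# original snippet) showing how the model plot data captured above could be
# drawn for the last ftype in the loop:
#
# import matplotlib.pyplot as plt
# plt.errorbar(rates['time'], rates['rate'], yerr=rates['err'], fmt='.')
# plt.plot(axplot.x, axplot.y)
# plt.xlabel('Year')
# plt.ylabel('Rate')
# plt.savefig('{}_fit.png'.format(ftype))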