def fit(self, method='simplex'):
    """Initiate a fit of the model using Sherpa.

    :param method: Method to be used to fit the model (e.g. simplex, levmar, or moncar)
    """
    dummy_data = np.zeros(1)
    dummy_times = np.arange(1)
    ui.load_arrays(1, dummy_times, dummy_data)

    ui.set_method(method)
    ui.get_method().config.update(sherpa_configs.get(method, {}))

    ui.load_user_model(CalcModel(self.model, self.fit_logger), 'xijamod')  # sets global xijamod
    ui.add_user_pars('xijamod', self.model.parnames)
    ui.set_model(1, 'xijamod')

    calc_stat = CalcStat(self.model, self.fit_logger)
    ui.load_user_stat('xijastat', calc_stat, lambda x: np.ones_like(x))
    ui.set_stat(xijastat)

    # Set frozen, min, and max attributes for each xijamod parameter
    for par in self.model.pars:
        xijamod_par = getattr(xijamod, par.full_name)
        xijamod_par.val = par.val
        xijamod_par.frozen = par.frozen
        xijamod_par.min = par.min
        xijamod_par.max = par.max

    ui.fit(1)

    self.save_snapshot(fit_stat=calc_stat.min_fit_stat, method=method)
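# The fit above hands two callables to Sherpa: CalcModel (registered with
# ui.load_user_model) and CalcStat (registered with ui.load_user_stat).
# Below is a minimal, illustrative sketch of the call signatures Sherpa
# expects for those hooks; the class names and the model.calc()/calc_stat()
# calls are assumptions for illustration, not the actual xija classes.

class CalcModelSketch(object):
    """Callable for ui.load_user_model: Sherpa calls it as model(pars, x)."""

    def __init__(self, model):
        self.model = model

    def __call__(self, pars, x):
        # Push the trial parameter vector into the wrapped model; the real
        # evaluation happens in the statistic, so return dummy values here.
        self.model.parvals = pars
        return np.ones_like(x)


class CalcStatSketch(object):
    """Callable for ui.load_user_stat: Sherpa calls it as
    stat(data, model, staterror=None, syserror=None, weight=None) and
    expects a (statistic_value, per_bin_values) tuple in return."""

    def __init__(self, model):
        self.model = model

    def __call__(self, data, model, staterror=None, syserror=None, weight=None):
        self.model.calc()                  # evaluate the wrapped model (assumed API)
        fit_stat = self.model.calc_stat()  # scalar figure of merit (assumed API)
        return fit_stat, np.ones(1)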
def fit_pix_values(t_ccd, esec, id=1):
    logger = logging.getLogger("sherpa")
    logger.setLevel(logging.WARN)
    data_id = id
    ui.clean()
    ui.set_method('simplex')
    ui.load_user_model(dark_scale_model, 'model')
    ui.add_user_pars('model', ['scale', 'dark_t_ref'])
    ui.set_model(data_id, 'model')
    ui.load_arrays(
        data_id,
        np.array(t_ccd),
        np.array(esec),
    )
    ui.set_staterror(data_id, 30 * np.ones(len(t_ccd)))
    model.scale.val = 0.588
    model.scale.min = 0.3
    model.scale.max = 1.0
    model.dark_t_ref.val = 500
    ui.freeze(model.scale)
    # If the temperature range spans more than 2 degrees, first fit
    # dark_t_ref alone, then thaw model.scale and fit both parameters.
    # Otherwise just use/return the fit of dark_t_ref.
    if np.max(t_ccd) - np.min(t_ccd) > 2:
        # Fit first for dark_t_ref
        ui.fit(data_id)
        ui.thaw(model.scale)
    ui.fit(data_id)
    return ui.get_fit_results(), ui.get_model(data_id)
def fit(self):
    dummy_data = np.zeros(1)
    dummy_times = np.arange(1)
    ui.load_arrays(1, dummy_times, dummy_data)

    ui.set_method(self.method)
    ui.get_method().config.update(sherpa_configs.get(self.method, {}))

    ui.load_user_model(CalcModel(self.model), 'xijamod')  # sets global xijamod
    ui.add_user_pars('xijamod', self.model.parnames)
    ui.set_model(1, 'xijamod')

    calc_stat = CalcStat(self.model, self.child_pipe, self.maxiter)
    ui.load_user_stat('xijastat', calc_stat, lambda x: np.ones_like(x))
    ui.set_stat(xijastat)

    # Set frozen, min, and max attributes for each xijamod parameter
    for par in self.model.pars:
        xijamod_par = getattr(xijamod, par.full_name)
        xijamod_par.val = par.val
        xijamod_par.frozen = par.frozen
        xijamod_par.min = par.min
        xijamod_par.max = par.max

    if any(not par.frozen for par in self.model.pars):
        try:
            ui.fit(1)
            calc_stat.message['status'] = 'finished'
            fit_logger.info('Fit finished normally')
        except FitTerminated as err:
            calc_stat.message['status'] = 'terminated'
            fit_logger.warning('Got FitTerminated exception {}'.format(err))

    self.child_pipe.send(calc_stat.message)
def test_user_model_create_pars_names(clean_ui):
    mname = "test_model"
    ui.load_user_model(um_line, mname)

    mdl = ui.get_model_component(mname)
    assert len(mdl.pars) == 1

    # add_user_pars doesn't change the existing instance, you have
    # to "get" the new version to see the change
    #
    ui.add_user_pars(mname, ['X1', 'x'])
    mdl = ui.get_model_component(mname)
    assert len(mdl.pars) == 2

    p0 = mdl.pars[0]
    p1 = mdl.pars[1]

    assert p0.name == 'X1'
    assert p0.val == pytest.approx(0.0)
    assert p0.units == ''
    assert not p0.frozen
    assert p0.min == pytest.approx(-1 * hugeval)
    assert p0.max == pytest.approx(hugeval)

    assert p1.name == 'x'
    assert p1.val == pytest.approx(0.0)
    assert p1.units == ''
    assert not p1.frozen
    assert p1.min == pytest.approx(-1 * hugeval)
    assert p1.max == pytest.approx(hugeval)
def fit(self):
    dummy_data = np.zeros(1)
    dummy_times = np.arange(1)
    ui.load_arrays(1, dummy_times, dummy_data)

    ui.set_method(self.method)
    ui.get_method().config.update(sherpa_configs.get(self.method, {}))

    ui.load_user_model(CalcModel(self.model), 'xijamod')  # sets global xijamod
    ui.add_user_pars('xijamod', self.model.parnames)
    ui.set_model(1, 'xijamod')

    calc_stat = CalcStat(self.model, self.child_pipe)
    ui.load_user_stat('xijastat', calc_stat, lambda x: np.ones_like(x))
    ui.set_stat(xijastat)

    # Set frozen, min, and max attributes for each xijamod parameter
    for par in self.model.pars:
        xijamod_par = getattr(xijamod, par.full_name)
        xijamod_par.val = par.val
        xijamod_par.frozen = par.frozen
        xijamod_par.min = par.min
        xijamod_par.max = par.max

    if any(not par.frozen for par in self.model.pars):
        try:
            ui.fit(1)
            calc_stat.message['status'] = 'finished'
            logging.debug('Fit finished normally')
        except FitTerminated as err:
            calc_stat.message['status'] = 'terminated'
            logging.debug('Got FitTerminated exception {}'.format(err))

    self.child_pipe.send(calc_stat.message)
def test_user_model_create_pars_full(clean_ui):
    mname = "test_model"
    ui.load_user_model(um_line, mname)
    ui.add_user_pars(mname, ['pAr1', '_p'],
                     [23.2, 3.1e2],
                     parunits=['', 'cm^2 s'],
                     parfrozen=[True, False],
                     parmins=[0, -100],
                     parmaxs=[100, 1e5])

    mdl = ui.get_model_component(mname)
    assert len(mdl.pars) == 2

    p0 = mdl.pars[0]
    p1 = mdl.pars[1]

    assert p0.name == 'pAr1'
    assert p0.val == pytest.approx(23.2)
    assert p0.units == ''
    assert p0.frozen
    assert p0.min == pytest.approx(0)
    assert p0.max == pytest.approx(100)

    assert p1.name == '_p'
    assert p1.val == pytest.approx(3.1e2)
    assert p1.units == 'cm^2 s'
    assert not p1.frozen
    assert p1.min == pytest.approx(-100)
    assert p1.max == pytest.approx(1e5)
def test_user_model_create_pars_full():
    mname = "test_model"
    ui.load_user_model(um_line, mname)
    ui.add_user_pars(mname, ['pAr1', '_p'],
                     [23.2, 3.1e2],
                     parunits=['', 'cm^2 s'],
                     parfrozen=[True, False],
                     parmins=[0, -100],
                     parmaxs=[100, 1e5])

    mdl = ui.get_model_component(mname)
    assert len(mdl.pars) == 2

    p0 = mdl.pars[0]
    p1 = mdl.pars[1]

    assert p0.name == 'pAr1'
    assert p0.val == pytest.approx(23.2)
    assert p0.units == ''
    assert p0.frozen
    assert p0.min == pytest.approx(0)
    assert p0.max == pytest.approx(100)

    assert p1.name == '_p'
    assert p1.val == pytest.approx(3.1e2)
    assert p1.units == 'cm^2 s'
    assert not p1.frozen
    assert p1.min == pytest.approx(-100)
    assert p1.max == pytest.approx(1e5)
def test_user_model_create_pars_names():
    mname = "test_model"
    ui.load_user_model(um_line, mname)

    mdl = ui.get_model_component(mname)
    assert len(mdl.pars) == 1

    # add_user_pars doesn't change the existing instance, you have
    # to "get" the new version to see the change
    #
    ui.add_user_pars(mname, ['X1', 'x'])
    mdl = ui.get_model_component(mname)
    assert len(mdl.pars) == 2

    p0 = mdl.pars[0]
    p1 = mdl.pars[1]

    assert p0.name == 'X1'
    assert p0.val == pytest.approx(0.0)
    assert p0.units == ''
    assert not p0.frozen
    assert p0.min == pytest.approx(-1 * hugeval)
    assert p0.max == pytest.approx(hugeval)

    assert p1.name == 'x'
    assert p1.val == pytest.approx(0.0)
    assert p1.units == ''
    assert not p1.frozen
    assert p1.min == pytest.approx(-1 * hugeval)
    assert p1.max == pytest.approx(hugeval)
def test_user_model1d_fit():
    """Check can use in a fit."""

    mname = "test_model"
    ui.load_user_model(um_line, mname)
    ui.add_user_pars(mname, ["slope", "intercept"],
                     parvals=[1.0, 1.0])
    mdl = ui.get_model_component(mname)

    x = numpy.asarray([-2.4, 2.3, 5.4, 8.7, 12.3])

    # Set up the data to be scattered around y = -0.2 x + 2.8
    # Pick the deltas so that they sum to 0 (except for the central
    # point)
    #
    slope = -0.2
    intercept = 2.8

    dy = numpy.asarray([0.1, -0.2, 0.14, -0.1, 0.2])
    ydata = x * slope + intercept + dy

    ui.load_arrays(1, x, ydata)
    ui.set_source(mname)
    ui.ignore(5.0, 6.0)  # drop the central bin

    ui.set_stat('leastsq')
    ui.set_method('simplex')
    ui.fit()

    fres = ui.get_fit_results()
    assert fres.succeeded
    assert fres.parnames == ('test_model.slope', 'test_model.intercept')
    assert fres.numpoints == 4
    assert fres.dof == 2

    # Tolerance has been adjusted to get the tests to pass on my
    # machine. It's really just to check that the values have changed
    # from their default values.
    #
    assert fres.parvals[0] == pytest.approx(slope, abs=0.01)
    assert fres.parvals[1] == pytest.approx(intercept, abs=0.05)

    # These should be the same values, so no need to use pytest.approx
    # (unless there's some internal translation between types done
    # somewhere?).
    #
    assert mdl.slope.val == fres.parvals[0]
    assert mdl.intercept.val == fres.parvals[1]
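# The tests above all rely on a helper named um_line whose definition is not
# part of this listing.  A plausible sketch, assuming the straight-line form
# implied by the slope/intercept parameters used in test_user_model1d_fit:

def um_line(pars, x):
    # Sherpa user models are called as f(pars, x); pars follows the order
    # given to add_user_pars (here slope, then intercept).
    slope, intercept = pars
    return slope * x + intercept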
def fit_model(model,
              comm=None,
              method='simplex',
              config=None,
              nofit=None,
              freeze_pars=freeze_pars,
              thaw_pars=[],
              ):

    dummy_data = np.zeros(1)
    dummy_times = np.arange(1)
    ui.load_arrays(1, dummy_times, dummy_data)

    ui.set_method(method)
    ui.get_method().config.update(config or sherpa_configs.get(method, {}))

    ui.load_user_model(CalcModel(model, comm), 'xijamod')
    ui.add_user_pars('xijamod', model.parnames)
    ui.set_model(1, 'xijamod')

    fit_parnames = set()
    for parname, parval in zip(model.parnames, model.parvals):
        getattr(xijamod, parname).val = parval
        fit_parnames.add(parname)
        if any([re.match(x + '$', parname) for x in freeze_pars]):
            fit_logger.info('Freezing ' + parname)
            ui.freeze(getattr(xijamod, parname))
            fit_parnames.remove(parname)
        if any([re.match(x + '$', parname) for x in thaw_pars]):
            fit_logger.info('Thawing ' + parname)
            ui.thaw(getattr(xijamod, parname))
            fit_parnames.add(parname)
            if 'tau' in parname:
                getattr(xijamod, parname).min = 0.1

    calc_stat = CalcStat(model, comm)
    ui.load_user_stat('xijastat', calc_stat, lambda x: np.ones_like(x))
    ui.set_stat(xijastat)

    if fit_parnames and not nofit:
        ui.fit(1)
    else:
        model.calc()
def test_user_model_change_par(clean_ui):
    mname = "test_model"
    ui.load_user_model(um_line, mname)
    ui.add_user_pars(mname, ['xXx', 'Y2'])

    mdl = ui.get_model_component(mname)
    assert len(mdl.pars) == 2
    p0 = mdl.pars[0]
    p1 = mdl.pars[1]

    assert p0.name == 'xXx'
    assert p1.name == 'Y2'
    assert p0.val == pytest.approx(0.0)
    assert p1.val == pytest.approx(0.0)

    # Use the user-supplied names:
    #
    mdl.xXx = 2.0
    assert p0.val == pytest.approx(2.0)

    mdl.Y2 = 3.0
    assert p1.val == pytest.approx(3.0)

    # Now all lower case
    #
    mdl.xxx = 4.0
    assert p0.val == pytest.approx(4.0)

    mdl.y2 = 12.0
    assert p1.val == pytest.approx(12.0)

    # Try with the set_par function
    #
    ui.set_par('test_model.xxx', 12.2)
    assert p0.val == pytest.approx(12.2)

    ui.set_par('test_model.y2', 14.0, frozen=True)
    assert p1.val == pytest.approx(14.0)
    assert p1.frozen

    ui.clean()
def test_user_model_change_par():
    mname = "test_model"
    ui.load_user_model(um_line, mname)
    ui.add_user_pars(mname, ['xXx', 'Y2'])

    mdl = ui.get_model_component(mname)
    assert len(mdl.pars) == 2
    p0 = mdl.pars[0]
    p1 = mdl.pars[1]

    assert p0.name == 'xXx'
    assert p1.name == 'Y2'
    assert p0.val == pytest.approx(0.0)
    assert p1.val == pytest.approx(0.0)

    # Use the user-supplied names:
    #
    mdl.xXx = 2.0
    assert p0.val == pytest.approx(2.0)

    mdl.Y2 = 3.0
    assert p1.val == pytest.approx(3.0)

    # Now all lower case
    #
    mdl.xxx = 4.0
    assert p0.val == pytest.approx(4.0)

    mdl.y2 = 12.0
    assert p1.val == pytest.approx(12.0)

    # Try with the set_par function
    #
    ui.set_par('test_model.xxx', 12.2)
    assert p0.val == pytest.approx(12.2)

    ui.set_par('test_model.y2', 14.0, frozen=True)
    assert p1.val == pytest.approx(14.0)
    assert p1.frozen

    ui.clean()
def fit_pix_values(t_ccd, esec, id=1):
    logger = logging.getLogger("sherpa")
    logger.setLevel(logging.WARN)
    data_id = id
    ui.clean()
    ui.set_method("simplex")
    ui.load_user_model(dark_scale_model, "model")
    ui.add_user_pars("model", ["scale", "dark_t_ref"])
    ui.set_model(data_id, "model")
    ui.load_arrays(data_id, np.array(t_ccd), np.array(esec), 0.1 * np.ones(len(t_ccd)))
    model.scale.val = 0.70
    model.dark_t_ref.val = 500
    ui.freeze(model.scale)
    # Fit dark_t_ref first with the scale frozen, then thaw the scale
    # and fit both parameters.
    ui.fit(data_id)
    ui.thaw(model.scale)
    ui.fit(data_id)
    return ui.get_fit_results(), ui.get_model(data_id)
def test_user_model1d_eval(clean_ui):
    """Simple evaluation check for 1D case."""

    mname = "test_model"
    ui.load_user_model(um_line, mname)
    ui.add_user_pars(mname, ["slope", "intercept"])

    m = 2.1
    c = -4.8

    mdl = ui.get_model_component(mname)
    mdl.slope = m
    mdl.intercept = c

    x = np.asarray([2.3, 5.4, 8.7])
    y = mdl(x)
    yexp = x * m + c

    # This check requires pytest >= 3.2.0
    #
    assert y == pytest.approx(yexp)
def test_user_model1d_eval():
    """Simple evaluation check for 1D case."""

    mname = "test_model"
    ui.load_user_model(um_line, mname)
    ui.add_user_pars(mname, ["slope", "intercept"])

    m = 2.1
    c = -4.8

    mdl = ui.get_model_component(mname)
    mdl.slope = m
    mdl.intercept = c

    x = numpy.asarray([2.3, 5.4, 8.7])
    y = mdl(x)
    yexp = x * m + c

    # This check requires pytest >= 3.2.0
    #
    assert y == pytest.approx(yexp)
def lim_line(pars, x):
    line = pars[0] * x + pars[1]
    line[line <= 0] = 1e-7
    line[line >= 1] = 1 - 1e-7
    return line


#axplot = {}
#ftype = 'obc_bad'
for ftype in failures:
    fail_mask = failures[ftype]
    data_id = figmap[ftype]
    ui.set_method('simplex')
    ui.load_user_model(lim_line, '%s_mod' % ftype)
    ui.add_user_pars('%s_mod' % ftype, ['m', 'b'])
    ui.set_model(data_id, '%s_mod' % ftype)
    ui.load_arrays(data_id, times, failures[ftype])
    fmod = ui.get_model_component('%s_mod' % ftype)
    fmod.b.min = 0
    fmod.b.max = 1
    fmod.m.min = 0
    fmod.m.max = 0.5
    fmod.b.val = 1e-7
    ui.load_user_stat("loglike", llh, my_err)
    ui.set_stat(loglike)
    # the tricky part here is that the "model" is the probability polynomial
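# The loop above registers ui.load_user_stat("loglike", llh, my_err), but the
# two callables are not shown in this fragment.  A hedged sketch of what a
# binomial log-likelihood statistic could look like, assuming the data are
# 0/1 failure flags and the model is the clipped probability line from
# lim_line (llh and my_err here are illustrative, not the original code):

def llh(data, model, staterror=None, syserror=None, weight=None):
    # Sherpa user statistics return (statistic_value, per_bin_values).
    probs = np.clip(model, 1e-7, 1 - 1e-7)
    per_bin = -(data * np.log(probs) + (1 - data) * np.log(1 - probs))
    return per_bin.sum(), per_bin


def my_err(data):
    # Dummy per-point "errors"; load_user_stat requires an error function
    # even when the statistic never uses the values.
    return np.ones_like(data)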
def run_fits(obsids, ax, user_pars=None,
             fixed_pars=None, guess_pars=None, label='model',
             per_obs_dir='per_obs_nfits',
             outdir=None, redo=False):

    if len(obsids) == 0:
        print "No obsids, nothing to fit"
        return None
    if user_pars is None:
        user_pars = USER_PARS

    if not os.path.exists(per_obs_dir):
        os.makedirs(per_obs_dir)

    obsfits = []
    for obsid in obsids:

        outdir = os.path.join(per_obs_dir, 'obs{:05d}'.format(obsid))
        if not os.path.exists(outdir):
            os.makedirs(outdir)

        model_file = os.path.join(outdir, '{}.pkl'.format(label))
        if os.path.exists(model_file) and not redo:
            #logger.warn('Using previous fit found in %s' % model_file)
            print model_file
            mod_pick = open(model_file, 'r')
            modelfit = cPickle.load(mod_pick)
            mod_pick.close()
            obsfits.append(modelfit)
            continue

        modelfit = {'label': obsid}

        ui.clean()
        data_id = 0
        obsdir = "%s/obs%05d" % (DATADIR, obsid)
        tf = open(os.path.join(obsdir, 'tilt.pkl'), 'r')
        tilt = cPickle.load(tf)
        tf.close()
        pf = open(os.path.join(obsdir, 'pos.pkl'), 'r')
        pos = cPickle.load(pf)
        pf.close()

        pos_data = pos[ax]
        point_error = 5
        pos_data_mean = np.mean(pos_data)
        ui.set_method('simplex')

        # Fit a line to get more reasonable errors
        init_staterror = np.zeros(len(pos_data)) + point_error
        ui.load_arrays(data_id,
                       pos['time'] - pos['time'][0],
                       pos_data - np.mean(pos_data),
                       init_staterror)
        ui.polynom1d.ypoly  # create the 'ypoly' model component
        ui.set_model(data_id, 'ypoly')
        ui.thaw(ypoly.c0, ypoly.c1)
        ui.fit(data_id)
        fit = ui.get_fit_results()
        calc_staterror = init_staterror * np.sqrt(fit.rstat)
        ui.set_staterror(data_id, calc_staterror)
        # Confirm those errors
        ui.fit(data_id)
        fit = ui.get_fit_results()
        if abs(fit.rstat - 1) > .2:
            raise ValueError('Reduced statistic not close to 1 for error calc')

        # Load up data to do the real model fit
        fit_times = pos['time']
        tm_func = tilt_model(tilt, fit_times, user_pars=user_pars)

        ui.get_data(data_id).name = str(obsid)
        ui.load_user_model(tm_func, 'tiltm%d' % data_id)
        ui.add_user_pars('tiltm%d' % data_id, user_pars)
        ui.set_method('simplex')
        ui.set_model(data_id, 'tiltm%d' % (data_id))
        ui.set_par('tiltm%d.diam' % data_id, 0)

        if fixed_pars is not None and ax in fixed_pars:
            for par in fixed_pars[ax]:
                ui.set_par('tiltm{}.{}'.format(0, par), fixed_pars[ax][par])
                ui.freeze('tiltm{}.{}'.format(0, par))

        if guess_pars is not None and ax in guess_pars:
            for par in guess_pars[ax]:
                ui.set_par('tiltm{}.{}'.format(0, par), guess_pars[ax][par])

        ui.show_all()
        # Fit the tilt model
        ui.fit(data_id)
        fitres = ui.get_fit_results()
        ui.confidence(data_id)
        myconf = ui.get_confidence_results()

        # save_fits(ax=ax, fit=fitres, conf=myconf, outdir=outdir)
        # plot_fits(ids, outdir=os.path.join(outdir, 'fit_plots'))

        axmod = dict(fit=fitres, conf=myconf)
        for idx, modpar in enumerate(myconf.parnames):
            # Take the text after the 'tiltm0.' prefix as the bare parameter
            # name (str.lstrip strips characters, not a prefix).
            par = modpar.split('.', 1)[1]
            axmod[par] = ui.get_par('tiltm0.%s' % par).val
            axmod["{}_parmax".format(par)] = myconf.parmaxes[idx]
            axmod["{}_parmin".format(par)] = myconf.parmins[idx]
        modelfit[ax] = axmod

        mod_pick = open(model_file, 'w')
        cPickle.dump(modelfit, mod_pick)
        mod_pick.close()

        obsfits.append(modelfit)

        plot_fits([dict(obsid=obsid, data_id=data_id, ax=ax)],
                  posdir=obsdir,
                  outdir=outdir)

    return obsfits
print "ftype {}".format(limit) warm_frac = data[range_type][mag][ok]['n{}'.format(limit)] extent = np.max(warm_frac) - np.min(warm_frac) wp_min = np.min(warm_frac) warm_frac = warm_frac - wp_min def scaled_warm_frac(pars, x): scaled = pars[1] + warm_frac * pars[0] return scaled data_id = 1 ui.set_method('simplex') ui.set_stat('chi2datavar') #ui.set_stat('leastsq') #ui.load_user_stat("chi2custom", my_chi2, my_err) #ui.set_stat(chi2custom) ui.load_user_model(scaled_warm_frac, 'model') ui.add_user_pars('model', ['scale', 'offset']) ui.set_model(data_id, 'model') ui.load_arrays(data_id, np.array(times), np.array(bad_frac)) fmod = ui.get_model_component('model') fmod.scale.min = 1e-9 fmod.offset.val = 0 ui.freeze(fmod.offset) max_err = np.max([err_high, err_low], axis=0) ui.set_staterror(data_id, max_err) ui.fit(data_id) f = ui.get_fit_results() scale = f.rstat ** .5 ui.set_staterror(data_id, max_err * scale) ui.fit()
def fitne(ne_data, nemodeltype, tspec_data=None):
    '''
    Fits gas number density profile according to selected profile model.
    The fit is performed using python sherpa with the Levenberg-Marquardt
    method of minimizing chi-squared.

    Args:
    -----
    ne_data (astropy table): observed gas density profile in the form
        established by set_prof_data()
    tspec_data (astropy table): observed temperature profile in the form
        established by set_prof_data()

    Returns:
    --------
    nemodel (dictionary): stores relevant information about the model gas
        density profile

        nemodel['type']: ne model type; one of the following:
            ['single_beta', 'cusped_beta', 'double_beta_tied', 'double_beta']
        nemodel['parnames']: names of the stored ne model parameters
        nemodel['parvals']: parameter values of fitted gas density model
        nemodel['parmins']: lower error bound on parvals
        nemodel['parmaxes']: upper error bound on parvals
        nemodel['chisq']: chi-squared of fit
        nemodel['dof']: degrees of freedom
        nemodel['rchisq']: reduced chi-squared of fit
        nemodel['nefit']: ne model values at radial values matching
            tspec_data (the observed temperature profile)

    References:
    -----------
    python sherpa: https://github.com/sherpa/
    '''

    # remove any existing models and data
    ui.clean()

    # load data
    ui.load_arrays(1,
                   np.array(ne_data['radius']),
                   np.array(ne_data['ne']),
                   np.array(ne_data['ne_err']))

    # set guess and boundaries on params given selected model

    if nemodeltype == 'single_beta':
        # param estimate
        betaguess = 0.6
        rcguess = 20.  # units?????
        ne0guess = max(ne_data['ne'])

        # beta model
        ui.load_user_model(betamodel, "beta1d")
        ui.add_user_pars("beta1d", ["ne0", "rc", "beta"])
        ui.set_source(beta1d)  # creates model
        ui.set_full_model(beta1d)

        # set parameter values
        ui.set_par(beta1d.ne0, ne0guess, min=0, max=10. * max(ne_data['ne']))
        ui.set_par(beta1d.rc, rcguess, min=0.1, max=max(ne_data['radius']))
        ui.set_par(beta1d.beta, betaguess, min=0.1, max=1.)

    if nemodeltype == 'cusped_beta':
        # param estimate
        betaguess = 0.7
        rcguess = 5.  # [kpc]
        ne0guess = max(ne_data['ne'])
        alphaguess = 10.  # ????

        # cusped beta model
        ui.load_user_model(cuspedbetamodel, "cuspedbeta1d")
        ui.add_user_pars("cuspedbeta1d", ["ne0", "rc", "beta", "alpha"])
        ui.set_source(cuspedbeta1d)  # creates model
        ui.set_full_model(cuspedbeta1d)

        # set parameter values
        ui.set_par(cuspedbeta1d.ne0, ne0guess,
                   min=0.001 * max(ne_data['ne']),
                   max=10. * max(ne_data['ne']))
        ui.set_par(cuspedbeta1d.rc, rcguess, min=0.1, max=max(ne_data['radius']))
        ui.set_par(cuspedbeta1d.beta, betaguess, min=0.1, max=1.)
        ui.set_par(cuspedbeta1d.alpha, alphaguess, min=0., max=100.)

    if nemodeltype == 'double_beta':
        # param estimate
        ne0guess1 = max(ne_data['ne'])  # [cm^-3]
        rcguess1 = 10.  # [kpc]
        betaguess1 = 0.6

        ne0guess2 = 0.01 * max(ne_data['ne'])  # [cm^-3]
        rcguess2 = 100.  # [kpc]
        betaguess2 = 0.6

        # double beta model
        ui.load_user_model(doublebetamodel, "doublebeta1d")
        ui.add_user_pars("doublebeta1d",
                         ["ne01", "rc1", "beta1", "ne02", "rc2", "beta2"])
        ui.set_source(doublebeta1d)  # creates model
        ui.set_full_model(doublebeta1d)

        # set parameter values
        ui.set_par(doublebeta1d.ne01, ne0guess1,
                   min=0.0001 * max(ne_data['ne']),
                   max=100. * max(ne_data['ne']))
        ui.set_par(doublebeta1d.rc1, rcguess1, min=0.1, max=max(ne_data['radius']))
        ui.set_par(doublebeta1d.beta1, betaguess1, min=0.1, max=1.)

        ui.set_par(doublebeta1d.ne02, ne0guess2,
                   min=0.0001 * max(ne_data['ne']),
                   max=100. * max(ne_data['ne']))
        ui.set_par(doublebeta1d.rc2, rcguess2, min=10., max=max(ne_data['radius']))
        ui.set_par(doublebeta1d.beta2, betaguess2, min=0.1, max=1.)
    if nemodeltype == 'double_beta_tied':
        # param estimate
        ne0guess1 = max(ne_data['ne'])
        rcguess1 = 10.
        betaguess1 = 0.6

        ne0guess2 = 0.01 * max(ne_data['ne'])
        rcguess2 = 100.

        # double beta model with tied betas
        ui.load_user_model(doublebetamodel_tied, "doublebeta1d_tied")
        ui.add_user_pars("doublebeta1d_tied",
                         ["ne01", "rc1", "beta1", "ne02", "rc2"])
        ui.set_source(doublebeta1d_tied)  # creates model
        ui.set_full_model(doublebeta1d_tied)

        # set parameter values
        ui.set_par(doublebeta1d_tied.ne01, ne0guess1,
                   min=0.00001 * max(ne_data['ne']),
                   max=100. * max(ne_data['ne']))
        ui.set_par(doublebeta1d_tied.rc1, rcguess1,
                   min=0.1, max=max(ne_data['radius']))
        ui.set_par(doublebeta1d_tied.beta1, betaguess1, min=0.1, max=1.)

        ui.set_par(doublebeta1d_tied.ne02, ne0guess2,
                   min=0.00001 * max(ne_data['ne']),
                   max=100. * max(ne_data['ne']))
        ui.set_par(doublebeta1d_tied.rc2, rcguess2,
                   min=10., max=max(ne_data['radius']))

    # fit model
    ui.fit()

    # fit statistics
    chisq = ui.get_fit_results().statval
    dof = ui.get_fit_results().dof
    rchisq = ui.get_fit_results().rstat

    # error analysis
    ui.set_conf_opt("max_rstat", 1e9)
    ui.conf()

    parvals = np.array(ui.get_conf_results().parvals)
    parmins = np.array(ui.get_conf_results().parmins)
    parmaxes = np.array(ui.get_conf_results().parmaxes)

    parnames = [str(x).split('.')[1]
                for x in list(ui.get_conf_results().parnames)]

    # where errors are stuck on a hard limit, change error to Inf
    if None in list(parmins):
        ind = np.where(parmins == np.array(None))[0]
        parmins[ind] = float('Inf')

    if None in list(parmaxes):
        ind = np.where(parmaxes == np.array(None))[0]
        parmaxes[ind] = float('Inf')

    # set up a dictionary to contain useful results of fit
    nemodel = {}
    nemodel['type'] = nemodeltype
    nemodel['parnames'] = parnames
    nemodel['parvals'] = parvals
    nemodel['parmins'] = parmins
    nemodel['parmaxes'] = parmaxes
    nemodel['chisq'] = chisq
    nemodel['dof'] = dof
    nemodel['rchisq'] = rchisq

    # if tspec_data included, calculate value of ne model at the same radius
    # positions as temperature profile
    if tspec_data is not None:

        if nemodeltype == 'double_beta':
            nefit_arr = doublebetamodel(nemodel['parvals'],
                                        np.array(tspec_data['radius']))  # [cm-3]

        if nemodeltype == 'single_beta':
            nefit_arr = betamodel(nemodel['parvals'],
                                  np.array(tspec_data['radius']))  # [cm-3]

        if nemodeltype == 'cusped_beta':
            nefit_arr = cuspedbetamodel(nemodel['parvals'],
                                        np.array(tspec_data['radius']))  # [cm-3]

        if nemodeltype == 'double_beta_tied':
            nefit_arr = doublebetamodel_tied(nemodel['parvals'],
                                             np.array(tspec_data['radius']))  # [cm-3]

        nemodel['nefit'] = nefit_arr

    return nemodel
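# fitne loads user models named betamodel, cuspedbetamodel, doublebetamodel
# and doublebetamodel_tied that are defined elsewhere.  As a reference point,
# a sketch of the single-beta profile, assuming the standard parameterisation
# ne(r) = ne0 * [1 + (r/rc)^2]^(-3*beta/2) and the parameter order passed to
# add_user_pars above:

def betamodel(pars, x):
    ne0, rc, beta = pars
    return ne0 * (1.0 + (x / rc) ** 2) ** (-3.0 * beta / 2.0)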
import pickle

import matplotlib.pyplot as plt
import numpy as np
from sherpa import ui

import dark_models

sbp = None  # for pychecker
g1 = None

method = 'levmar'

ui.set_stat('cash')
ui.set_method('simplex')
ui.load_user_model(dark_models.smooth_broken_pow, 'sbp')
ui.add_user_pars('sbp', ('gamma1', 'gamma2', 'x_b', 'x_r', 'ampl1'))


def fit_gauss_sbp():
    g1 = ui.gauss1d.g1
    ui.set_model(sbp + g1)
    ui.set_method('simplex')

    g1.fwhm = 5.0
    g1.pos = 7.0
    g1.ampl = 30000.

    ui.freeze(sbp.gamma1)
    ui.freeze(sbp.gamma2)
    ui.freeze(sbp.x_b)
    ui.freeze(sbp.x_r)
    ui.freeze(g1.fwhm)
extent = np.max(warm_frac) - np.min(warm_frac)
wp_min = np.min(warm_frac)
warm_frac = warm_frac - wp_min

def scaled_warm_frac(pars, x):
    scaled = pars[1] + warm_frac * pars[0]
    return scaled

data_id = 1
ui.set_method("simplex")
ui.set_stat("chi2datavar")
# ui.set_stat('leastsq')
# ui.load_user_stat("chi2custom", my_chi2, my_err)
# ui.set_stat(chi2custom)
ui.load_user_model(scaled_warm_frac, "model")
ui.add_user_pars("model", ["scale", "offset"])
ui.set_model(data_id, "model")
ui.load_arrays(data_id, np.array(times), np.array(bad_frac))
fmod = ui.get_model_component("model")
fmod.scale.min = 1e-9
max_err = np.max(
    [data[range_type][mag][ok]["err_high"],
     data[range_type][mag][ok]["err_low"]],
    axis=0,
)
ui.set_staterror(data_id, max_err)
ui.fit(data_id)
f = ui.get_fit_results()
scale = f.rstat ** 0.5
ui.set_staterror(data_id, max_err * scale)
ui.fit()
f = ui.get_fit_results()
if f.rstat > 3:
    raise ValueError
ui.confidence()
class AstropyToSherpa(object):
    """Adapter that lets Sherpa fit an astropy model via load_user_model."""

    def __init__(self, model):
        self.model = model

    def __call__(self, pars, x):
        self.model.parameters[:] = pars
        return self.model(x)


ap_model = (models.Gaussian1D(amplitude=1.2, mean=0.9, stddev=0.5)
            + models.Gaussian1D(amplitude=2.0, mean=-0.9, stddev=0.75))

err = 0.02
x = np.arange(-3, 3, .1)
y = ap_model(x) + err * np.random.uniform(size=len(x))

sh_model = AstropyToSherpa(ap_model)

ui.load_arrays(1, x, y, err * np.ones_like(x))
ui.load_user_model(sh_model, 'sherpa_model')
ui.add_user_pars('sherpa_model', ap_model.param_names, ap_model.parameters)
ui.set_model(1, 'sherpa_model')
ui.fit(1)

ui.plot_fit(1)
print()
print('Params from astropy model: {}'.format(ap_model.parameters))
plt.show()
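# Note on the adapter's design: astropy models expose a flat `parameters`
# array covering every component of a compound model, in the same order as
# `param_names`, so assigning Sherpa's trial vector into that array in place
# updates the astropy model without rebuilding it.  After ui.fit() the
# best-fit values can therefore be read straight back from ap_model.parameters.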
def lim_line(pars, x):
    line = pars[0] * x + pars[1]
    line[line <= 0] = 1e-7
    line[line >= 1] = 1 - 1e-7
    return line


#axplot = {}
#ftype = 'obc_bad'
for ftype in failures:
    fail_mask = failures[ftype]
    data_id = figmap[ftype]
    ui.set_method('simplex')
    ui.load_user_model(lim_line, '%s_mod' % ftype)
    ui.add_user_pars('%s_mod' % ftype, ['m', 'b'])
    ui.set_model(data_id, '%s_mod' % ftype)
    ui.load_arrays(data_id, times, failures[ftype])
    fmod = ui.get_model_component('%s_mod' % ftype)
    fmod.b.min = 0
    fmod.b.max = 1
    fmod.m.min = 0
    fmod.m.max = 0.5
    fmod.b.val = 1e-7