def fit_pix_values(t_ccd, esec, id=1):
    logger = logging.getLogger("sherpa")
    logger.setLevel(logging.WARN)
    data_id = id
    ui.clean()
    ui.set_method('simplex')
    ui.load_user_model(dark_scale_model, 'model')
    ui.add_user_pars('model', ['scale', 'dark_t_ref'])
    ui.set_model(data_id, 'model')
    ui.load_arrays(data_id,
                   np.array(t_ccd),
                   np.array(esec))
    ui.set_staterror(data_id, 30 * np.ones(len(t_ccd)))
    model.scale.val = 0.588
    model.scale.min = 0.3
    model.scale.max = 1.0
    model.dark_t_ref.val = 500
    ui.freeze(model.scale)
    # If more than 2 degrees in the temperature range,
    # thaw and fit for model.scale.  Else just use/return
    # the fit of dark_t_ref
    if np.max(t_ccd) - np.min(t_ccd) > 2:
        # Fit first for dark_t_ref
        ui.fit(data_id)
        ui.thaw(model.scale)
    ui.fit(data_id)
    return ui.get_fit_results(), ui.get_model(data_id)
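For context, `dark_scale_model` is defined outside this snippet. `ui.load_user_model` registers a plain Python function with the signature `(pars, x)` as a fit model, and `ui.add_user_pars` names its parameters in the order they will be passed in. The sketch below shows that mechanism end to end with a hypothetical linear model (`my_line`, dataset id 1, made-up data); it is not the dark-current model used above.

import numpy as np
from sherpa import ui


def my_line(pars, x):
    """Hypothetical user model: pars arrive in the order given to add_user_pars."""
    slope, offset = pars
    return slope * np.asarray(x) + offset


x = np.linspace(0.0, 10.0, 20)
y = 2.0 * x + 1.0 + np.random.normal(scale=0.5, size=x.size)

ui.clean()
ui.load_arrays(1, x, y)
ui.set_staterror(1, 0.5 * np.ones_like(x))      # per-point statistical errors
ui.load_user_model(my_line, 'mdl')
ui.add_user_pars('mdl', ['slope', 'offset'])
ui.set_model(1, 'mdl')
ui.fit(1)
print(ui.get_fit_results())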
def test_set_staterror_array(clean_ui):
    """What happens when we set the staterror to an array?"""

    staterror = 0.1 * np.ones(3)
    syserror = 0.5 * np.ones(3)
    combo = np.sqrt(0.01 + 0.25) * np.ones(3)

    y = np.asarray([2, 3, 4])
    ui.load_arrays(1, np.arange(3), y, None, syserror)
    ui.set_stat('cstat')

    ui.set_staterror(staterror)

    assert ui.get_staterror() == pytest.approx(staterror)
    assert ui.get_syserror() == pytest.approx(syserror)
    assert ui.get_error() == pytest.approx(combo)
def _fit_poly(fit_data, evt_times, degree, data_id=0):
    """
    Given event data transformed into Y or Z angle positions, and a degree of the
    desired fit polynomial, fit a polynomial to the data.

    :param fit_data: event y or z angle position data
    :param evt_times: times of event/fit_data
    :param degree: degree of polynomial to use for the fit model
    :param data_id: sherpa dataset id to use for the fit
    :returns: (sherpa model plot, sherpa model)
    """
    # Set initial value for fit data position error
    init_error = 1

    ui.clean()
    ui.load_arrays(data_id,
                   evt_times - evt_times[0],
                   fit_data,
                   np.zeros_like(fit_data) + init_error)
    v2("Fitting a line to the data to get reduced stat errors")
    # First just fit a line to get reduced errors on this set
    ui.polynom1d.line
    ui.set_model(data_id, 'line')
    ui.thaw('line.c1')
    ui.fit(data_id)
    fit = ui.get_fit_results()
    calc_error = init_error * np.sqrt(fit.rstat)
    ui.set_staterror(data_id, calc_error)

    # Then fit the specified model
    v2("Fitting a polynomial of degree {} to the data".format(degree))
    ui.polynom1d.fitpoly
    ui.freeze('fitpoly')
    # Thaw the coefficients requested by the degree of the desired polynomial
    ui.thaw('fitpoly.c0')
    fitpoly.c0.val = 0
    for deg in range(1, 1 + degree):
        ui.thaw("fitpoly.c{}".format(deg))
    ui.set_model(data_id, 'fitpoly')
    ui.fit(data_id)

    # Let's screw up Y on purpose
    if data_id == 0:
        fitpoly.c0.val = 0
        fitpoly.c1.val = 7.5e-05
        fitpoly.c2.val = -1.0e-09
        fitpoly.c3.val = 0
        fitpoly.c4.val = 0

    mp = ui.get_model_plot(data_id)
    model = ui.get_model(data_id)
    return mp, model
def test_set_staterror_scalar_no_fractional(clean_ui):
    """What happens when we set the staterror to a scalar fractional=False?"""

    staterror = 0.1 * np.ones(3)
    syserror = 0.5 * np.ones(3)
    combo = np.sqrt(0.01 + 0.25) * np.ones(3)

    ui.load_arrays(1, np.arange(3), np.ones(3), staterror, syserror)
    ui.set_stat('cstat')

    ui.set_staterror(3)

    assert ui.get_staterror() == pytest.approx(3 * np.ones(3))
    assert ui.get_syserror() == pytest.approx(syserror)

    combo = np.sqrt(9 + 0.25) * np.ones(3)
    assert ui.get_error() == pytest.approx(combo)
def test_set_staterror_scalar_fractional(clean_ui):
    """What happens when we set the staterror to a scalar fractional=True?"""

    staterror = 0.1 * np.ones(3)
    syserror = 0.5 * np.ones(3)
    combo = np.sqrt(0.01 + 0.25) * np.ones(3)

    y = np.asarray([2, 3, 4])
    ui.load_arrays(1, np.arange(3), y, staterror, syserror)
    ui.set_stat('cstat')

    ui.set_staterror(0.4, fractional=True)

    assert ui.get_staterror() == pytest.approx(0.4 * y)
    assert ui.get_syserror() == pytest.approx(syserror)

    combo = np.sqrt(0.16 * y * y + 0.25)
    assert ui.get_error() == pytest.approx(combo)
def _fit_poly(fit_data, evt_times, degree, data_id=0):
    """
    Given event data transformed into Y or Z angle positions, and a degree of the
    desired fit polynomial, fit a polynomial to the data.

    :param fit_data: event y or z angle position data
    :param evt_times: times of event/fit_data
    :param degree: degree of polynomial to use for the fit model
    :param data_id: sherpa dataset id to use for the fit
    :returns: (sherpa model plot, sherpa model)
    """
    # Set initial value for fit data position error
    init_error = 1

    ui.clean()
    ui.load_arrays(data_id,
                   evt_times - evt_times[0],
                   fit_data,
                   np.zeros_like(fit_data) + init_error)
    v2("Fitting a line to the data to get reduced stat errors")
    # First just fit a line to get reduced errors on this set
    ui.polynom1d.line
    ui.set_model(data_id, 'line')
    ui.thaw('line.c1')
    ui.fit(data_id)
    fit = ui.get_fit_results()
    calc_error = init_error * np.sqrt(fit.rstat)
    ui.set_staterror(data_id, calc_error)

    # Then fit the specified model
    v2("Fitting a polynomial of degree {} to the data".format(degree))
    ui.polynom1d.fitpoly
    ui.freeze('fitpoly')
    # Thaw the coefficients requested by the degree of the desired polynomial
    ui.thaw('fitpoly.c0')
    fitpoly.c0.val = 0
    for deg in range(1, 1 + degree):
        ui.thaw("fitpoly.c{}".format(deg))
    ui.set_model(data_id, 'fitpoly')
    ui.fit(data_id)

    mp = ui.get_model_plot(data_id)
    model = ui.get_model(data_id)
    return mp, model
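A recurring idiom in these snippets is the `init_error * np.sqrt(fit.rstat)` step: fit with a guessed per-point error, then rescale the errors so the reduced statistic of that preliminary fit becomes ~1 before fitting the real model. A standalone sketch of just that step, using made-up data and a plain `polynom1d` line (names and values here are illustrative only):

import numpy as np
from sherpa import ui

x = np.linspace(0.0, 100.0, 50)
y = 0.5 * x + np.random.normal(scale=3.0, size=x.size)

ui.clean()
init_error = 1.0                                    # arbitrary starting per-point error
ui.load_arrays(1, x, y, np.full_like(y, init_error))
ui.polynom1d.line                                   # create a polynomial component named 'line'
ui.set_model(1, 'line')
ui.thaw('line.c1')                                  # c0 + c1*x, i.e. a straight line
ui.fit(1)

# Rescale the guessed errors so the reduced statistic of this fit is ~1,
# then refit (or fit a more complex model) with the calibrated errors.
rstat = ui.get_fit_results().rstat
ui.set_staterror(1, init_error * np.sqrt(rstat))
ui.fit(1)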
def test_set_staterror_none(clean_ui):
    """What happens when we set the staterror to None?"""

    staterror = 0.1 * np.ones(3)
    syserror = 0.5 * np.ones(3)
    combo = np.sqrt(0.01 + 0.25) * np.ones(3)

    ui.load_arrays(1, np.arange(3), np.ones(3), staterror, syserror)
    ui.set_stat('cstat')

    assert ui.get_staterror() == pytest.approx(staterror)
    assert ui.get_syserror() == pytest.approx(syserror)
    assert ui.get_error() == pytest.approx(combo)

    # Removing the statistical error means that the statistic is used;
    # for the likelihood stats we just get 1's.
    ui.set_staterror(None)

    assert ui.get_staterror() == pytest.approx(np.ones(3))
    assert ui.get_syserror() == pytest.approx(syserror)

    combo = np.sqrt(1 + 0.25) * np.ones(3)
    assert ui.get_error() == pytest.approx(combo)
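These tests all rely on `ui.get_error` returning the quadrature sum of the statistical and systematic errors (hence the `np.sqrt(0.01 + 0.25)` style expressions). A quick standalone check of that relation, using the same arrays as the tests above:

import numpy as np
from sherpa import ui

staterror = 0.1 * np.ones(3)
syserror = 0.5 * np.ones(3)

ui.clean()
ui.load_arrays(1, np.arange(3), np.ones(3), staterror, syserror)
ui.set_stat('cstat')

# get_error() combines the statistical and systematic errors in quadrature
assert np.allclose(ui.get_error(), np.sqrt(staterror ** 2 + syserror ** 2))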
ui.set_stat('chi2datavar')
#ui.set_stat('leastsq')
#ui.load_user_stat("chi2custom", my_chi2, my_err)
#ui.set_stat(chi2custom)

ui.load_user_model(scaled_warm_frac, 'model')
ui.add_user_pars('model', ['scale', 'offset'])
ui.set_model(data_id, 'model')
ui.load_arrays(data_id,
               np.array(times),
               np.array(bad_frac))
fmod = ui.get_model_component('model')
fmod.scale.min = 1e-9
fmod.offset.val = 0
ui.freeze(fmod.offset)

max_err = np.max([err_high, err_low], axis=0)
ui.set_staterror(data_id, max_err)
ui.fit(data_id)
f = ui.get_fit_results()
scale = f.rstat ** .5
ui.set_staterror(data_id, max_err * scale)
ui.fit()
f = ui.get_fit_results()
if f.rstat > 3:
    raise ValueError

ui.confidence()
conf = ui.get_confidence_results()
fit_info[range_type][mag][ftype][limit] = dict(fit=str(f),
                                               conf=str(conf),
                                               fmod=fmod,
                                               fit_orig=f,
                                               conf_orig=conf,
              'bad_trak': 2,
              'obc_bad': 3}

ui.clean()
for ftype in fail_types:
    filename = "by%s_data_%s.txt" % (trend_type, ftype)
    rates = asciitable.read(filename)
    data_id = fail_types[ftype]
    ui.set_method('simplex')
    ui.load_arrays(data_id, rates['time'], rates['rate'])
    ui.set_staterror(data_id, rates['err'])
    ftype_poly = ui.polynom1d(ftype)
    ui.set_model(data_id, ftype_poly)
    ui.thaw(ftype_poly.c0)
    ui.thaw(ftype_poly.c1)
    ui.notice(DateTime(trend_date_start).frac_year)
    ui.fit(data_id)
    ui.notice()
    myfit = ui.get_fit_results()
    axplot = ui.get_model_plot(data_id)
    if myfit.succeeded:
        b = ftype_poly.c1.val * DateTime(trend_date_start).frac_year + ftype_poly.c0.val
        m = ftype_poly.c1.val
        rep_file = open('%s_fitfile.json' % ftype, 'w')
        rep_file.write(json.dumps(dict(time0=DateTime(trend_date_start).frac_year,
def run_fits(obsids, ax, user_pars=None,
             fixed_pars=None, guess_pars=None, label='model',
             per_obs_dir='per_obs_nfits',
             outdir=None, redo=False):
    if len(obsids) == 0:
        print "No obsids, nothing to fit"
        return None
    if user_pars is None:
        user_pars = USER_PARS

    if not os.path.exists(per_obs_dir):
        os.makedirs(per_obs_dir)

    obsfits = []
    for obsid in obsids:

        outdir = os.path.join(per_obs_dir, 'obs{:05d}'.format(obsid))
        if not os.path.exists(outdir):
            os.makedirs(outdir)

        model_file = os.path.join(outdir, '{}.pkl'.format(label))
        if os.path.exists(model_file) and not redo:
            #logger.warn('Using previous fit found in %s' % model_file)
            print model_file
            mod_pick = open(model_file, 'r')
            modelfit = cPickle.load(mod_pick)
            mod_pick.close()
            obsfits.append(modelfit)
            continue

        modelfit = {'label': obsid}

        ui.clean()
        data_id = 0
        obsdir = "%s/obs%05d" % (DATADIR, obsid)
        tf = open(os.path.join(obsdir, 'tilt.pkl'), 'r')
        tilt = cPickle.load(tf)
        tf.close()
        pf = open(os.path.join(obsdir, 'pos.pkl'), 'r')
        pos = cPickle.load(pf)
        pf.close()

        pos_data = pos[ax]
        point_error = 5
        pos_data_mean = np.mean(pos_data)
        ui.set_method('simplex')

        # Fit a line to get more reasonable errors
        init_staterror = np.zeros(len(pos_data)) + point_error
        ui.load_arrays(data_id,
                       pos['time'] - pos['time'][0],
                       pos_data - np.mean(pos_data),
                       init_staterror)
        ui.polynom1d.ypoly
        ui.set_model(data_id, 'ypoly')
        ui.thaw(ypoly.c0, ypoly.c1)
        ui.fit(data_id)
        fit = ui.get_fit_results()
        calc_staterror = init_staterror * np.sqrt(fit.rstat)
        ui.set_staterror(data_id, calc_staterror)
        # Confirm those errors
        ui.fit(data_id)
        fit = ui.get_fit_results()
        if (abs(fit.rstat - 1) > .2):
            raise ValueError('Reduced statistic not close to 1 for error calc')

        # Load up data to do the real model fit
        fit_times = pos['time']
        tm_func = tilt_model(tilt, fit_times, user_pars=user_pars)

        ui.get_data(data_id).name = str(obsid)
        ui.load_user_model(tm_func, 'tiltm%d' % data_id)
        ui.add_user_pars('tiltm%d' % data_id, user_pars)
        ui.set_method('simplex')
        ui.set_model(data_id, 'tiltm%d' % (data_id))
        ui.set_par('tiltm%d.diam' % data_id, 0)

        if fixed_pars is not None and ax in fixed_pars:
            for par in fixed_pars[ax]:
                ui.set_par('tiltm{}.{}'.format(0, par), fixed_pars[ax][par])
                ui.freeze('tiltm{}.{}'.format(0, par))

        if guess_pars is not None and ax in guess_pars:
            for par in guess_pars[ax]:
                ui.set_par('tiltm{}.{}'.format(0, par), guess_pars[ax][par])

        ui.show_all()
        # Fit the tilt model
        ui.fit(data_id)
        fitres = ui.get_fit_results()
        ui.confidence(data_id)
        myconf = ui.get_confidence_results()

        # save_fits(ax=ax, fit=fitres, conf=myconf, outdir=outdir)
        # plot_fits(ids, outdir=os.path.join(outdir, 'fit_plots'))

        axmod = dict(fit=fitres, conf=myconf)
        for idx, modpar in enumerate(myconf.parnames):
            par = modpar.lstrip('tiltm0.')
            axmod[par] = ui.get_par('tiltm0.%s' % par).val
            axmod["{}_parmax".format(par)] = myconf.parmaxes[idx]
            axmod["{}_parmin".format(par)] = myconf.parmins[idx]
        modelfit[ax] = axmod

        mod_pick = open(model_file, 'w')
        cPickle.dump(modelfit, mod_pick)
        mod_pick.close()

        obsfits.append(modelfit)

        plot_fits([dict(obsid=obsid, data_id=data_id, ax=ax)],
                  posdir=obsdir,
                  outdir=outdir)

    return obsfits