def fit(self, conf=False):
    """Fit all H2 and non-H2 lines in [self.start, self.stop]; optionally run conf()."""
    ui.ignore(None, None)
    ui.notice(self.start, self.stop)
    self.set_source()
    ui.fit(1)
    if conf:
        ui.conf()
    res = ui.get_fit_results()
    for line in (self.H2lines + self.nonH2lines):
        sourcename = line['source'].split('.')[1]
        print(sourcename)
        for p in ['pos', 'fwhm', 'ampl']:
            n = '{0}.{1}'.format(sourcename, p)
            _place_val(line, p, ui.get_par(n).val)
    self.const = ui.get_par('c1.c0').val
    self.redchi2 = res.rstat

    if conf:
        res = ui.get_conf_results()
        for line in (self.H2lines + self.nonH2lines):
            sourcename = line['source'].split('.')[1]
            for p in ['pos', 'fwhm', 'ampl']:
                n = '{0}.{1}'.format(sourcename, p)
                parmin, parmax = _parminmax(res, n)
                line[p + '_max'] = parmax
                line[p + '_min'] = parmin
        # deal with the error on the constant component
        parmin, parmax = _parminmax(res, 'c1.c0')
        self.const_min = parmin
        self.const_max = parmax
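# Minimal sketches of the _place_val and _parminmax helpers used above, which
# are not shown in this snippet. These are assumptions inferred from the call
# sites: line entries behave like dicts, and Sherpa conf results expose the
# parnames, parmins, and parmaxes attributes. The actual helpers may differ.
import numpy as np

def _place_val(line, key, val):
    """Store a fitted parameter value in a line's dict entry."""
    line[key] = val

def _parminmax(res, parname):
    """Return the (parmin, parmax) confidence bounds for ``parname``."""
    i = list(res.parnames).index(parname)
    parmin = res.parmins[i] if res.parmins[i] is not None else np.nan
    parmax = res.parmaxes[i] if res.parmaxes[i] is not None else np.nan
    return parmin, parmax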
def fit(self, do_covar=False, do_conf=False):
    """Perform fit using profile likelihood technique for background
    estimation and subtraction."""
    listnames = self.get_noticed_list()  # [ids.name for ids in self.listids[self.noticed_ids]]
    if len(listnames) > 0:
        wfit(listnames)
        print_fit()
        if do_covar:
            sau.covar(*listnames)
        if do_conf:
            sau.set_conf_opt('max_rstat', 10000)
            sau.conf(*listnames)
            print_conf()
    else:
        print("Empty noticed runs list. No fit")
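# Minimal sketches of the print_fit and print_conf helpers called above; they
# are not shown in this snippet. The assumption here is that they simply echo
# the most recent Sherpa results; the real helpers may apply extra formatting.
import sherpa.astro.ui as sau

def print_fit():
    print(sau.get_fit_results())

def print_conf():
    print(sau.get_conf_results())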
def wfit(dataids=None):
    """Fit the given datasets (all loaded datasets by default) with the
    W statistic and run conf() on the thawed parameters."""
    if dataids is None:
        listids = sau.list_data_ids()
    else:
        listids = dataids
    wstat = w_statistic(listids)
    sau.load_user_stat("mystat", wstat, wstat.CATstat_err_LV)
    sau.set_stat("mystat")  # refer to the registered statistic by name
    sau.set_method("neldermead")  # set_method("moncar")
    # Raise the reduced-statistic ceiling so conf() is effectively unbounded:
    # we don't expect the cstat value to be anywhere near the large-number
    # (chi-square) limit, so no specific maximum is needed in practice.
    sau.set_conf_opt("max_rstat", 1000)
    sau.fit(*listids)
    sau.conf()
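# A minimal sketch of the user statistic consumed by wfit above, following the
# Sherpa load_user_stat() interface: a callable returning (total_stat, per-bin
# contributions) plus a per-bin error estimator. The real w_statistic (a
# profiled-background W statistic) is substantially more involved; the
# Cash-like form below is only a placeholder assumption.
import numpy as np

class w_statistic:
    def __init__(self, dataids):
        self.dataids = dataids

    def __call__(self, data, model, staterror=None, syserror=None, weight=None):
        # Placeholder Cash-like likelihood: 2 * sum(model - data * ln(model))
        fvec = 2.0 * (model - data * np.log(np.clip(model, 1e-25, None)))
        return fvec.sum(), fvec

    def CATstat_err_LV(self, data):
        # Per-bin error estimate used when Sherpa plots the data
        return np.sqrt(np.abs(data))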
def fit_multiplets(multipletlist, id=None, outpath=None, plot=False, delta_lam=.2):
    n_lines = np.sum([len(mult['wave']) for mult in multipletlist])
    result = np.zeros(n_lines,
                      dtype={'names': ['multname', 'linename', 'wave', 'flux',
                                       'errup', 'errdown', 'photons', 'photonflux'],
                             'formats': ['S30', 'S30', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4']})
    currentline = 0

    for mult in multipletlist:
        if outpath is not None:
            # Build a filesystem-safe file name from the multiplet name
            outfile = os.path.join(outpath, ''.join(filter(str.isalnum, mult['name'])))
        else:
            outfile = None
        fit_lines(mult, id, delta_lam=delta_lam, plot=plot, outfile=outfile)

        ui.conf(id)
        conf_res = ui.get_conf_results()
        source = ui.get_source(id)
        # set all ampl to 0 and only for 1 line to the real value
        set_all_val('c0', 0., id)
        for lname, lfili, lwave in zip(mult['linename'], mult['fililiname'], mult['wave']):
            print('Fitting line ' + str(currentline + 1) + '/' + str(n_lines))
            par = ui.get_model_component(lfili)
            # index of this line's amplitude in the conf results (first match)
            indconf_res = (np.array(conf_res.parnames) == lfili + '.ampl').nonzero()[0][0]
            set_all_val('ampl', 0., id)
            par.ampl.val = conf_res.parvals[indconf_res]
            counts = ui.calc_model_sum(None, None, id)
            # print(lname, counts)
            photonflux = ui.calc_photon_flux(None, None, id)
            # determine scaling between ampl and flux
            par.ampl.val = 1
            amp2flux = ui.calc_energy_flux(None, None, id)
            par.ampl.val = conf_res.parvals[indconf_res]

            val = conf_res.parvals[indconf_res] * amp2flux
            errdown = conf_res.parmins[indconf_res] * amp2flux if conf_res.parmins[indconf_res] else np.nan
            errup = conf_res.parmaxes[indconf_res] * amp2flux if conf_res.parmaxes[indconf_res] else np.nan

            result[currentline] = (mult['name'], lname, lwave, val, errup,
                                   errdown, counts, photonflux)
            currentline += 1
    return result
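# A hypothetical example of the multipletlist structure expected by
# fit_multiplets, inferred from the keys accessed above ('name', 'wave',
# 'linename', 'fililiname'). All names and wavelengths below are illustrative
# placeholders, not values from the original code.
multiplets = [
    {'name': 'C III 1175',
     'wave': [1174.93, 1175.26, 1175.71],
     'linename': ['C III 1174.93', 'C III 1175.26', 'C III 1175.71'],
     'fililiname': ['ciii1174', 'ciii1175a', 'ciii1175b']},
]
# results = fit_multiplets(multiplets, id=1, outpath='plots', plot=True)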
def conf(self):
    """
    Run conf on each of the model parameters using the "onion-peeling" method:

    - First conf the outside shell model using the outer annulus spectrum
    - Freeze the model parameters for the outside shell
    - Get confidences for the next inward shell / annulus and freeze those parameters
    - Repeat until all datasets have been conf()-ed and all shell-by-shell
      error parameters determined
    - Return model parameters to their original thawed/frozen status

    WARNING: This ignores the correlations between parameters.

    :rtype: list of conf results, one per annulus (innermost first)
    """
    thawed = []  # Parameter objects that are not already frozen
    conf_results = []
    for annulus in reversed(range(self.nshell)):
        dataids = [x['id'] for x in self.datasets if x['annulus'] == annulus]
        print('Getting shell-by-shell confidence for dataset', dataids)
        SherpaUI.conf(*dataids)
        this_conf_result = SherpaUI.get_conf_results()
        conf_results.insert(0, this_conf_result)
        for model_comp in self.model_comps:
            name = model_comp['name']
            if model_comp['shell'] == annulus:
                # Remember parameters that are currently thawed
                for par in [SherpaUI.get_par('%s.%s' % (name, x))
                            for x in SherpaUI.get_model_pars(name)]:
                    if not par.frozen:
                        thawed.append(par)
                print('Freezing', model_comp['name'])
                SherpaUI.freeze(model_comp['name'])

    # Restore the original thawed/frozen status
    for par in thawed:
        print('Thawing', par.fullname)
        par.thaw()

    return conf_results
import sherpa.astro.ui as shp

# Restore the saved fit session; chxe and blob are model components
# defined in that session
shp.restore('20-40 keV/fits/chxepwnblob.sav')

shp.set_conf_opt('numcores', 3)
shp.set_conf_opt('maxiters', 50)
shp.set_conf_opt('fast', True)
shp.set_conf_opt('remin', 10000.0)
shp.set_conf_opt('soft_limits', True)
shp.set_conf_opt('verbose', True)

# Only the CHXE amplitude stays free; the blob parameters are conf()-ed below
shp.freeze(chxe)
shp.thaw(chxe.ampl)

print(shp.get_model())

shp.conf(blob.xpos, blob.ypos, blob.ellip, blob.fwhm, blob.theta, blob.ampl)

shp.save('20-40 keV/fits/chxepwnblobconf.sav')