def test_xspecvar_no_grouping_comparison_xspec(make_data_path, l, h, ndp, ndof, statval):
    """Compare chi2xspecvar values for a data set to XSPEC.

    The data set has a background.

    See test_xspecvar_no_grouping_no_bg_comparison_xspec
    The XSPEC version used was 12.9.0o.
    """
    data = create_xspec_comparison_dataset(make_data_path,
                                           keep_background=True)

    # Being lazy here: the offset is applied to every channel, the
    # "bad" ones included.
    data.counts += 5
    data.get_background().counts += 3

    ui.clean()
    ui.set_data(data)
    ui.subtract()
    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.ampl', 5e-4)
    ui.set_stat('chi2xspecvar')
    ui.set_analysis('energy')

    validate_xspec_result(l, h, ndp, ndof, statval)
    ui.clean()
def setup_model(make_data_path):
    """Set up a model that is reasonably close to the data.

    Returns the expected statistic values for various filters.
    """
    ui.clean()
    ui.load_pha(make_data_path('q1127_src1_grp30.pi'))
    ui.subtract()
    ui.set_stat('chi2datavar')
    ui.set_source(ui.powlaw1d.pl)

    pl = ui.get_model_component('pl')
    pl.ampl = 5.28e-4
    pl.gamma = 1.04

    # Reference values computed with CIAO 4.9 on an Ubuntu machine.
    # The quality=2 channels all lie at high energies (above ~10 keV
    # or so), so a 0.5-8.0 keV filter gives the same answer whether or
    # not ignore_bad is used.
    #
    return {'all': 2716.7086246284807,
            'bad': 2716.682482792285,
            '0.5-8.0': 1127.7165108405597}
def test_ARFModelPHA(self):
    """Fit 3c120 with an ARF-only response (regression for #699)."""
    from sherpa.astro import ui
    ui.load_pha(self.make_path("3c120_meg_1.pha"))

    # Drop the RMF so that this is an ARF-only analysis, which is the
    # configuration needed to trigger the bug that led to #699.
    ui.get_data().set_rmf(None)

    ui.group_counts(20)
    ui.notice(0.5, 6)
    ui.subtract()

    ui.set_model(ui.xsphabs.abs1 * (ui.xsapec.bubble + ui.powlaw1d.p1))
    ui.set_xsabund('angr')
    ui.set_xsxsect('vern')

    abs1.nh = 0.163
    abs1.nh.freeze()
    p1.ampl = 0.017
    p1.gamma = 1.9
    bubble.kt = 0.5
    bubble.norm = 4.2e-5

    ui.set_method_opt('ftol', 1.0e-2)
    ui.fit()

    fitres = ui.get_fit_results()
    assert fitres.numpoints == self._fit_using_ARFModelPHA['numpoints']
    assert fitres.dof == self._fit_using_ARFModelPHA['dof']
def test_load_multi_arfsrmfs(make_data_path, clean_astro_ui):
    """Added in #728 to ensure cache parameter is sent along by
    MultiResponseSumModel (fix #717).

    This has since been simplified to switch from xsapec to powlaw1d as
    it drops the need for XSPEC and is a simpler model, so is less
    affected by changes in the model code.

    A fit of the Sherpa powerlaw-model to 3c273.pi with a single
    response in CIAO 4.11 (background subtracted, 0.5-7 keV) returns
    gamma = 1.9298, ampl = 1.73862e-4 so doubling the response should
    halve the amplitude but leave the gamma value the same when using
    two responses, as below. This is with chi2datavar.
    """

    infile = make_data_path("3c273.pi")
    arf = make_data_path("3c273.arf")
    rmf = make_data_path("3c273.rmf")

    for idval in [1, 2]:
        ui.load_pha(idval, infile)

    for idval in [1, 2]:
        ui.load_multi_arfs(idval, [arf, arf], [1, 2])

    for idval in [1, 2]:
        ui.load_multi_rmfs(idval, [rmf, rmf], [1, 2])

    ui.notice(0.5, 7)
    for idval in [1, 2]:
        ui.subtract(idval)

    src = ui.create_model_component('powlaw1d', 'src')
    for idval in [1, 2]:
        ui.set_model(idval, src)

    # A fixed statistic and optimizer keep the test repeatable.
    #
    ui.set_method('levmar')
    ui.set_stat('chi2datavar')

    # The real concern for #717 is that fit does not error out, but it
    # is also useful to know the fit moved the parameters away from
    # their starting values (both were 1 before the fit).
    #
    ui.fit()
    fr = ui.get_fit_results()
    assert fr.succeeded
    assert fr.datasets == (1, 2)

    assert src.gamma.val == pytest.approx(1.9298, rel=1.0e-4)
    assert src.ampl.val == pytest.approx(1.73862e-4 / 2, rel=1.0e-4)
def basic_pha1(make_data_path):
    """Create a basic PHA-1 data set/setup"""

    ui.set_default_id('tst')
    ui.load_pha(make_data_path('3c273.pi'))
    ui.subtract()
    ui.notice(0.5, 7)
    ui.set_source(ui.powlaw1d.pl)

    # Start close to the best-fit values for this data set.
    powlaw = ui.get_model_component('pl')
    powlaw.gamma = 1.93
    powlaw.ampl = 1.74e-4
def subtract(spectrum):
    """Subtract the background from a data set.

    When the data set has no associated background this is a no-op
    apart from a message printed to stdout.

    Parameters
    ----------
    spectrum : int
        Dataset identifier.
    """
    # EAFP: attempt the subtraction and let Sherpa tell us when there
    # is no background, rather than checking up front.
    try:
        ui.subtract(spectrum)
    except DataErr:
        print(f"Dataset {spectrum} has no background data!")
def test_dataspace1d_datapha_bkg(clean_astro_ui):
    """Explicitly test dataspace1d for DataPHA (background)"""

    # list_bkg_ids will error out until the dataset exists, so start by
    # checking there are no data sets.
    assert ui.list_data_ids() == []

    # numbins has been given, so the grid range and step size are not
    # used.
    ui.dataspace1d(20, 30, step=2.5, numbins=10, id='x',
                   dstype=ui.DataPHA)
    assert ui.list_data_ids() == ['x']
    assert ui.list_bkg_ids('x') == []

    ui.dataspace1d(20, 30, step=2.5, numbins=10, id='x', bkg_id=2,
                   dstype=ui.DataPHA)
    assert ui.list_data_ids() == ['x']
    assert ui.list_bkg_ids('x') == [2]

    assert ui.get_data('x').name == 'dataspace1d'

    # The default background identifier was explicitly not chosen
    # above, so asking for it must fail.
    with pytest.raises(IdentifierErr):
        ui.get_bkg('x')

    assert ui.get_bkg('x', 2).name == 'bkg_dataspace1d'

    indep = ui.get_indep('x', bkg_id=2)
    assert len(indep) == 1
    assert indep[0] == pytest.approx(numpy.arange(1, 11))

    dep = ui.get_dep('x', bkg_id=2)
    assert dep == pytest.approx(numpy.zeros(10))

    assert ui.get_exposure('x', bkg_id=2) is None
    assert ui.get_grouping('x', bkg_id=2) is None
    assert ui.get_quality('x', bkg_id=2) is None

    assert ui.get_bkg('x', bkg_id=2).subtracted is False

    # Check we can subtract the dataset; the data is all zeros so the
    # result is not checked.
    #
    ui.subtract('x')
def test_subtract_already_subtracted(idval):
    """Does subtract still work if the data is already subtracted?"""

    chans = [1, 2, 3]
    counts = [0, 4, 3]
    ui.load_arrays('bgnd', chans, counts, ui.DataPHA)
    bkg = ui.get_data('bgnd')

    if idval is None:
        ui.load_arrays(1, chans, counts, ui.DataPHA)
        ui.set_bkg(bkg)
    else:
        ui.load_arrays(idval, chans, counts, ui.DataPHA)
        ui.set_bkg(idval, bkg)

    data = ui.get_data(idval)
    assert not data.subtracted

    ui.subtract(idval)
    assert data.subtracted

    # The second subtract call should be a no-op, not an error, and
    # since source == background here the result is all zeros.
    ui.subtract(idval)

    assert ui.get_dep(idval) == pytest.approx([0, 0, 0])
def test_pileup_model(make_data_path, clean_astro_ui):
    """Basic check of setting a pileup model.

    It is more to check we can set a pileup model, not to check the
    model works.
    """

    ui.load_pha('pileup', make_data_path('3c273.pi'))
    ui.subtract('pileup')
    ui.notice(0.3, 7)
    ui.set_stat('chi2datavar')

    # xswabs is picked as it is unlikely to change with XSPEC
    ui.set_source('pileup', ui.xswabs.amdl * ui.powlaw1d.pl)

    # parameters close to the best fit (no need to actually fit)
    pl.ampl = 1.82e-4
    pl.gamma = 1.97
    amdl.nh = 0.012

    stat0 = ui.calc_stat('pileup')

    # To compare the data to the pileup model the higher-energy bins
    # should really be included, but that is not relevant here where
    # only the statistic value is being checked.
    ui.set_pileup_model('pileup', ui.jdpileup.jdp)

    # values chosen so the model changes the data
    jdp.ftime = 3.2
    jdp.fracexp = 1
    jdp.alpha = 0.95
    jdp.f = 0.91

    # The pileup component should now appear in get_model.
    #
    lines = str(ui.get_model('pileup')).split('\n')
    assert lines[0] == 'apply_rmf(jdpileup.jdp((xswabs.amdl * powlaw1d.pl)))'
    assert lines[3].strip() == 'jdp.alpha thawed 0.95 0 1'
    assert lines[5].strip() == 'jdp.f thawed 0.91 0.9 1'
    assert lines[11].strip() == 'pl.gamma thawed 1.97 -10 10'

    # The statistic should have got worse (technically it could get
    # better, but not for this case).
    #
    stat1 = ui.calc_stat('pileup')
    assert stat1 > stat0

    # Regression check of the actual statistic values (evaluated with
    # XSPEC 12.11.0 and Sherpa with the master branch 2020/07/29, on
    # Linux).
    #
    assert stat0 == pytest.approx(35.99899827358692)
    assert stat1 == pytest.approx(36.58791181460404)

    # Can we remove the pileup model?
    #
    ui.delete_pileup_model('pileup')

    # Check pileup is no longer in get_model.
    #
    lines = str(ui.get_model('pileup')).split('\n')

    # The numeric display of the exposure time depends on Python and/or
    # NumPy (an integer exposure time would have avoided this), so the
    # line is checked token-by-token rather than as the full string
    #
    #   'apply_rmf(apply_arf((38564.608926889 * (xswabs.amdl * powlaw1d.pl))))'
    #
    toks = lines[0].split()
    assert len(toks) == 5
    assert toks[0].startswith('apply_rmf(apply_arf((38564.608')
    assert toks[1] == '*'
    assert toks[2] == '(xswabs.amdl'
    assert toks[3] == '*'
    assert toks[4] == 'powlaw1d.pl))))'

    assert lines[4].strip() == 'pl.gamma thawed 1.97 -10 10'

    stat2 = ui.calc_stat('pileup')
    assert stat2 == stat0
def test_xspec_con_ui_shift_regrid(make_data_path, clean_astro_ui,
                                   restore_xspec_settings):
    """Check shifted models from the UI layer with a response and regrid.

    Unlike test_xspec_con_ui_shift, the convolution model is run on an
    extended grid compared to the RMF.
    """

    from sherpa.astro import xspec

    ui.load_pha(make_data_path('3c273.pi'))
    ui.subtract()
    ui.ignore(None, 0.5)
    ui.ignore(7, None)

    # The regrid must contain the RMF grid (0.1-11 keV). Really emax
    # should exceed 11 * (1 + z), but a smaller maximum is purposefully
    # picked to check we get 0 values in the output.
    #
    egrid = np.arange(0.1, 20, 0.01)
    elo = egrid[:-1]
    ehi = egrid[1:]

    msource = ui.box1d.box + ui.const1d.bgnd
    csource = ui.xszashift.zsh(msource)
    ui.set_source(ui.xsphabs.gal * csource.regrid(elo, ehi))

    mdl = ui.get_source()

    # Check the string representation of the combined model.
    #
    assert mdl.name == '(xsphabs.gal * regrid1d(xszashift.zsh((box1d.box + const1d.bgnd))))'

    assert len(mdl.pars) == 6
    expected_names = ['gal.nH', 'zsh.Redshift', 'box.xlow', 'box.xhi',
                      'box.ampl', 'bgnd.c0']
    for par, expected in zip(mdl.pars, expected_names):
        assert par.fullname == expected

    assert isinstance(mdl.lhs, xspec.XSphabs)
    assert isinstance(mdl.rhs, RegridWrappedModel)

    gal = ui.get_model_component('gal')
    zsh = ui.get_model_component('zsh')
    box = ui.get_model_component('box')
    bgnd = ui.get_model_component('bgnd')
    assert isinstance(gal, xspec.XSphabs)
    assert isinstance(zsh, xspec.XSzashift)
    assert isinstance(box, Box1D)
    assert isinstance(bgnd, Const1D)

    zsh.redshift = 1

    # Absorption is turned off to make the comparison easier.
    gal.nh = 0

    # An energy range that exceeds the RMF maximum energy (11 keV).
    box.xlow = 10
    box.xhi = 13
    box.ampl = 0.5

    bgnd.c0 = 0.001
    bgnd.integrate = False

    mplot = ui.get_source_plot()

    # Expected values, given z = 1:
    #
    #   0.1 for E < 5 keV or 6.5 - 10 keV
    #   0.6 for 5 - 6.5 keV
    #   0   above 10 keV
    #
    idx1 = (mplot.xhi <= 5) | ((mplot.xlo >= 6.5) & (mplot.xhi <= 10))
    idx2 = (mplot.xlo >= 5) & (mplot.xhi <= 6.5)
    idx3 = mplot.xlo >= 10

    # Ensure the "expected" ranges were picked (there are 1090 bins in
    # the RMF energy grid).
    assert idx1.sum() == 840
    assert idx2.sum() == 150
    assert idx3.sum() == 100

    # The tolerance has to be relatively large otherwise things fail.
    #
    # It appears that the very-last bin of idx1 is actually ~ 0.05, so
    # it is separated out here. It is the last bin of the "valid"
    # array, and so at z=1 may only have been half-filled by the
    # convolution.
    #
    assert mplot.y[idx1][:-1] == pytest.approx(0.1, rel=1e-5)
    assert mplot.y[idx1][-1] == pytest.approx(0.05, rel=3e-5)

    assert mplot.y[idx2] == pytest.approx(0.6, rel=3e-5)
    assert mplot.y[idx3] == pytest.approx(0)
def test_xspec_con_ui_shift(make_data_path, clean_astro_ui,
                            restore_xspec_settings):
    """Check shifted models from the UI layer with a response.

    There is no regrid here, so we see the issue with the upper edge of
    the RMF cutting off the source.
    """

    from sherpa.astro import xspec

    ui.load_pha(make_data_path('3c273.pi'))
    ui.subtract()
    ui.ignore(None, 0.5)
    ui.ignore(7, None)

    msource = ui.box1d.box + ui.const1d.bgnd
    ui.set_source(ui.xsphabs.gal * ui.xszashift.zsh(msource))

    mdl = ui.get_source()
    assert mdl.name == '(xsphabs.gal * xszashift.zsh((box1d.box + const1d.bgnd)))'

    assert len(mdl.pars) == 6
    expected_names = ['gal.nH', 'zsh.Redshift', 'box.xlow', 'box.xhi',
                      'box.ampl', 'bgnd.c0']
    for par, expected in zip(mdl.pars, expected_names):
        assert par.fullname == expected

    assert isinstance(mdl.lhs, xspec.XSphabs)
    assert isinstance(mdl.rhs, xspec.XSConvolutionModel)

    gal = ui.get_model_component('gal')
    zsh = ui.get_model_component('zsh')
    box = ui.get_model_component('box')
    bgnd = ui.get_model_component('bgnd')
    assert isinstance(gal, xspec.XSphabs)
    assert isinstance(zsh, xspec.XSzashift)
    assert isinstance(box, Box1D)
    assert isinstance(bgnd, Const1D)

    zsh.redshift = 1

    # Absorption is turned off to make the comparison easier.
    gal.nh = 0

    # An energy range that exceeds the RMF maximum energy (11 keV).
    box.xlow = 10
    box.xhi = 13
    box.ampl = 0.5

    bgnd.c0 = 0.001
    bgnd.integrate = False

    mplot = ui.get_source_plot()

    # Expected values, given z = 1:
    #
    #   0.1 for E < 5 keV    (10 / (1+z))
    #   0   for E > 5.5 keV  (11 / (1+z)) due to RMF cut off
    #   0.6 for 5 - 5.5 keV
    #
    idx1 = mplot.xhi <= 5
    idx2 = mplot.xlo >= 5.5

    # Ensure the "expected" ranges were picked.
    assert idx1.sum() == 490
    assert idx2.sum() == 550
    assert mplot.xhi[idx1].max() == pytest.approx(5)
    assert mplot.xlo[idx2].min() == pytest.approx(5.5)

    # The inverse of the two index arrays guarantees that no bins are
    # missed.
    #
    idx3 = ~(idx1 | idx2)

    # The tolerance has to be relatively large otherwise things fail.
    assert mplot.y[idx1] == pytest.approx(0.1, rel=3e-5)
    assert mplot.y[idx2] == pytest.approx(0)
    assert mplot.y[idx3] == pytest.approx(0.6, rel=1e-5)
def test_xspec_con_ui_cflux(make_data_path, clean_astro_ui,
                            restore_xspec_settings):
    """Check cflux from the UI layer with a response."""

    from sherpa.astro import xspec

    ui.load_pha('random', make_data_path('3c273.pi'))
    ui.subtract('random')
    ui.ignore(None, 0.5)
    ui.ignore(7, None)

    ui.set_source('random', 'xsphabs.gal * xscflux.sflux(powlaw1d.pl)')
    mdl = ui.get_source('random')

    assert mdl.name == '(xsphabs.gal * xscflux.sflux(powlaw1d.pl))'
    assert len(mdl.pars) == 7
    expected_names = ['gal.nH', 'sflux.Emin', 'sflux.Emax',
                      'sflux.lg10Flux', 'pl.gamma', 'pl.ref', 'pl.ampl']
    for par, expected in zip(mdl.pars, expected_names):
        assert par.fullname == expected

    assert isinstance(mdl.lhs, xspec.XSphabs)
    assert isinstance(mdl.rhs, xspec.XSConvolutionModel)

    gal = ui.get_model_component('gal')
    sflux = ui.get_model_component('sflux')
    pl = ui.get_model_component('pl')
    assert isinstance(gal, xspec.XSphabs)
    assert isinstance(sflux, xspec.XScflux)
    assert isinstance(pl, PowLaw1D)

    # The convolution model needs the normalization to be fixed (not
    # required for this example, as there is no fit, but done anyway
    # for reference).
    pl.ampl.frozen = True

    sflux.emin = 1
    sflux.emax = 5
    sflux.lg10Flux = -12.3027

    pl.gamma = 2.03
    gal.nh = 0.039

    ui.set_xsabund('angr')
    ui.set_xsxsect('vern')

    # Regression test: check we get the "expected" statistic.
    #
    ui.set_stat('chi2gehrels')
    sinfo = ui.get_stat_info()
    assert len(sinfo) == 1
    sinfo = sinfo[0]
    assert sinfo.numpoints == 40
    assert sinfo.dof == 37
    assert sinfo.statval == pytest.approx(21.25762265234619)

    # Does Sherpa's calc_energy_flux report the same flux?
    #
    cflux = ui.calc_energy_flux(id='random', model=sflux(pl), lo=1, hi=5)
    lcflux = np.log10(cflux)
    assert lcflux == pytest.approx(sflux.lg10Flux.val)
def fit_sherpa(obsid_list, redshift, nH_Gal, energy, min_counts=25,
               kT_guess=3, Ab_guess=1, fix_nH_Gal=True, fix_abund=False,
               find_errors=True):
    """Fit a wabs*mekal model to per-region spectra of one or more obsids.

    Parameters
    ----------
    obsid_list : list of str
        Observation identifiers; spectra are located as 'xaf_*_<obs>.pi'.
    redshift : float
        Source redshift (frozen during the fit).
    nH_Gal : float
        Galactic column density used for the absorption component.
    energy : sequence of two floats
        Low and high energy bounds used to filter the data.
    min_counts : int, optional
        Minimum total filtered counts required before a region is fit.
    kT_guess, Ab_guess : number, optional
        Starting values for the temperature and abundance.
    fix_nH_Gal, fix_abund : bool, optional
        Freeze the absorption column / abundance during the fit?
    find_errors : bool, optional
        Run covariance to estimate parameter errors?

    Results are written to 'spectra_wabs_mekal.dat'; regions that are
    skipped (too few counts) get a row of zeros.
    """
    spectra = []
    for obs in obsid_list:
        # Per-region spectra for this observation; after the sort below,
        # spectra[j][i] is the file for region i of observation j.
        temp = glob.glob('xaf_*_' + obs + '.pi')
        spectra.append(temp)
    spectra.sort()
    num_obs = len(obsid_list)
    # NOTE(review): assumes every obsid produced the same number of
    # region spectra (this uses the glob result of the *last* obsid) --
    # confirm against how the spectra are generated.
    num_reg = len(temp)
    filename = 'spectra_wabs_mekal.dat'
    # A context manager guarantees the results file is closed even if a
    # fit raises part-way through (the original leaked the handle).
    with open(filename, "w") as results_file:
        results_file.write('# Fit results for wabs*mekal (zeros indicate that no fitting was performed)\n')
        results_file.write('# Reg_no. kT kT_loerr kT_hierr Z Z_loerr Z_hierr norm norm_loerr norm_hierr nH_Gal nH_loerr nH_hierr red_chisq total_counts num_bins\n')
        for i in range(num_reg):
            sherpa.clean()  # start each region from a clean session
            # filtered counts per observation
            cnts = numpy.zeros(num_obs)
            max_rate = numpy.zeros(num_obs)  # max count rate [counts/s/keV]
            data_set = 0  # data set number
            good_src_ids = numpy.zeros(num_obs, dtype=int) - 1
            for j in range(num_obs):
                # load xaf_#_obs_####.pi and the associated .arf/.rmf
                sherpa.load_pha(data_set, spectra[j][i])
                sherpa.ignore_id(data_set, 0.0, energy[0])
                sherpa.ignore_id(data_set, energy[1], None)
                cnts[j] = sherpa.calc_data_sum(energy[0], energy[1], data_set)
                cnt_rate = sherpa.get_rate(data_set, filter=True)
                if len(cnt_rate) == 0:
                    # when few counts (<50), get_rate can return a
                    # zero-length array
                    max_rate[j] = 0.0
                else:
                    max_rate[j] = numpy.max(cnt_rate)
                sherpa.subtract(data_set)  # subtract background
                # 1 temperature mekal model fit
                sherpa.set_source(data_set,
                                  sherpa.xswabs.abs1 * sherpa.xsmekal.plsm1)
                good_src_ids[j] = data_set
                data_set += 1  # same region but different obs
            # Filter out ignored obs
            good_src_ids_indx = numpy.where(good_src_ids >= 0)
            good_src_ids = good_src_ids[good_src_ids_indx]
            max_rate = max_rate[good_src_ids_indx]
            cnts = cnts[good_src_ids_indx]
            totcnts = numpy.sum(cnts)
            if totcnts >= min_counts:
                print('Fitting spectra in region: ' + str(i))
                abs1.nH = nH_Gal
                abs1.cache = 0
                if fix_nH_Gal:
                    sherpa.freeze(abs1.nH)
                else:
                    sherpa.thaw(abs1.nH)
                plsm1.kt = kT_guess
                sherpa.thaw(plsm1.kt)
                plsm1.Abundanc = Ab_guess
                if fix_abund:
                    sherpa.freeze(plsm1.Abundanc)
                else:
                    sherpa.thaw(plsm1.Abundanc)
                plsm1.redshift = redshift
                sherpa.freeze(plsm1.redshift)
                plsm1.cache = 0
                sherpa.fit()
                fit_result = sherpa.get_fit_results()
                red_chi2 = fit_result.rstat
                num_bins = fit_result.numpoints
                # The ordering of parvals depends on which parameters
                # were left thawed.
                if fix_nH_Gal:
                    nH = nH_Gal
                    kT = fit_result.parvals[0]
                    if fix_abund:
                        Z = Ab_guess
                        norm = fit_result.parvals[1]
                    else:
                        Z = fit_result.parvals[1]
                        norm = fit_result.parvals[2]
                else:
                    nH = fit_result.parvals[0]
                    kT = fit_result.parvals[1]
                    if fix_abund:
                        Z = Ab_guess
                        norm = fit_result.parvals[2]
                    else:
                        Z = fit_result.parvals[2]
                        norm = fit_result.parvals[3]
                del fit_result
                if find_errors:
                    sherpa.covar()
                    covar_result = sherpa.get_covar_results()
                    if fix_nH_Gal:
                        nH_loerr = 0.0
                        nH_hierr = 0.0
                        kT_loerr = covar_result.parmins[0]
                        kT_hierr = covar_result.parmaxes[0]
                        if fix_abund:
                            Z_loerr = 0.0
                            Z_hierr = 0.0
                            norm_loerr = covar_result.parmins[1]
                            norm_hierr = covar_result.parmaxes[1]
                        else:
                            Z_loerr = covar_result.parmins[1]
                            Z_hierr = covar_result.parmaxes[1]
                            norm_loerr = covar_result.parmins[2]
                            norm_hierr = covar_result.parmaxes[2]
                    else:
                        nH_loerr = covar_result.parmins[0]
                        nH_hierr = covar_result.parmaxes[0]
                        kT_loerr = covar_result.parmins[1]
                        kT_hierr = covar_result.parmaxes[1]
                        if fix_abund:
                            Z_loerr = 0.0
                            Z_hierr = 0.0
                            norm_loerr = covar_result.parmins[2]
                            norm_hierr = covar_result.parmaxes[2]
                        else:
                            Z_loerr = covar_result.parmins[2]
                            Z_hierr = covar_result.parmaxes[2]
                            norm_loerr = covar_result.parmins[3]
                            norm_hierr = covar_result.parmaxes[3]
                    del covar_result
                    # Check for failed errors (= None) and set them to
                    # +/- the best-fit value
                    if not fix_nH_Gal:
                        if nH_loerr is None:
                            nH_loerr = -nH
                        if nH_hierr is None:
                            nH_hierr = nH
                    if kT_loerr is None:
                        kT_loerr = -kT
                    if kT_hierr is None:
                        kT_hierr = kT
                    if not fix_abund:
                        if Z_loerr is None:
                            Z_loerr = -Z
                        if Z_hierr is None:
                            Z_hierr = Z
                    if norm_loerr is None:
                        norm_loerr = -norm
                    if norm_hierr is None:
                        norm_hierr = norm
                else:
                    kT_loerr = 0.0
                    Z_loerr = 0.0
                    nH_loerr = 0.0
                    norm_loerr = 0.0
                    kT_hierr = 0.0
                    Z_hierr = 0.0
                    nH_hierr = 0.0
                    norm_hierr = 0.0
            else:
                # if total counts < min_counts, just write zeros
                # (message typo "for for" fixed)
                print('\n Warning: no fit performed for region: ' + str(i))
                print('\n Spectra have insufficient counts after filtering or do not exist.')
                print('\n --> All parameters for this region set to 0.0.')
                kT = 0.0
                Z = 0.0
                nH = 0.0
                norm = 0.0
                kT_loerr = 0.0
                Z_loerr = 0.0
                nH_loerr = 0.0
                norm_loerr = 0.0
                kT_hierr = 0.0
                Z_hierr = 0.0
                nH_hierr = 0.0
                norm_hierr = 0.0
                red_chi2 = 0.0
                num_bins = 0
            # Split on every underscore so that the region number (the
            # piece after 'xaf') can be accessed as reg_id[1].
            reg_id = spectra[0][i].split('_')
            # Write all data for this region as one row.
            results_file.write('%7r %7.4f %7.4f %7.4f %7.4f %7.4f %7.4f %6.4e %6.4e %6.4e %7.4f %7.4f %7.4f %7.4f %8.1f %8r\n' % (int(reg_id[1]), kT, kT_loerr, kT_hierr, Z, Z_loerr, Z_hierr, norm, norm_loerr, norm_hierr, nH, nH_loerr, nH_hierr, red_chi2, totcnts, num_bins))
def fit_sherpa(obsid_list, redshift, nH_Gal, energy, min_counts=25,
               kT_guess=3, Ab_guess=1, fix_nH_Gal=True, fix_abund=False,
               find_errors=True):
    """Fit a wabs*mekal model to per-region spectra of one or more obsids.

    Parameters
    ----------
    obsid_list : list of str
        Observation identifiers; spectra are located as 'xaf_*_<obs>.pi'.
    redshift : float
        Source redshift (frozen during the fit).
    nH_Gal : float
        Galactic column density used for the absorption component.
    energy : sequence of two floats
        Low and high energy bounds used to filter the data.
    min_counts : int, optional
        Minimum total filtered counts required before a region is fit.
    kT_guess, Ab_guess : number, optional
        Starting values for the temperature and abundance.
    fix_nH_Gal, fix_abund : bool, optional
        Freeze the absorption column / abundance during the fit?
    find_errors : bool, optional
        Run covariance to estimate parameter errors?

    Results are written to 'spectra_wabs_mekal.dat'; regions that are
    skipped (too few counts) get a row of zeros.
    """
    spectra = []
    for obs in obsid_list:
        # Per-region spectra for this observation; after the sort below,
        # spectra[j][i] is the file for region i of observation j.
        temp = glob.glob('xaf_*_' + obs + '.pi')
        spectra.append(temp)
    spectra.sort()
    num_obs = len(obsid_list)
    # NOTE(review): assumes every obsid produced the same number of
    # region spectra (this uses the glob result of the *last* obsid) --
    # confirm against how the spectra are generated.
    num_reg = len(temp)
    filename = 'spectra_wabs_mekal.dat'
    # A context manager guarantees the results file is closed even if a
    # fit raises part-way through (the original leaked the handle).
    with open(filename, "w") as results_file:
        results_file.write('# Fit results for wabs*mekal (zeros indicate that no fitting was performed)\n')
        results_file.write('# Reg_no. kT kT_loerr kT_hierr Z Z_loerr Z_hierr norm norm_loerr norm_hierr nH_Gal nH_loerr nH_hierr red_chisq total_counts num_bins\n')
        for i in range(num_reg):
            sherpa.clean()  # start each region from a clean session
            # filtered counts per observation
            cnts = numpy.zeros(num_obs)
            max_rate = numpy.zeros(num_obs)  # max count rate [counts/s/keV]
            data_set = 0  # data set number
            good_src_ids = numpy.zeros(num_obs, dtype=int) - 1
            for j in range(num_obs):
                # load xaf_#_obs_####.pi and the associated .arf/.rmf
                sherpa.load_pha(data_set, spectra[j][i])
                sherpa.ignore_id(data_set, 0.0, energy[0])
                sherpa.ignore_id(data_set, energy[1], None)
                cnts[j] = sherpa.calc_data_sum(energy[0], energy[1], data_set)
                cnt_rate = sherpa.get_rate(data_set, filter=True)
                if len(cnt_rate) == 0:
                    # when few counts (<50), get_rate can return a
                    # zero-length array
                    max_rate[j] = 0.0
                else:
                    max_rate[j] = numpy.max(cnt_rate)
                sherpa.subtract(data_set)  # subtract background
                # 1 temperature mekal model fit
                sherpa.set_source(data_set,
                                  sherpa.xswabs.abs1 * sherpa.xsmekal.plsm1)
                good_src_ids[j] = data_set
                data_set += 1  # same region but different obs
            # Filter out ignored obs
            good_src_ids_indx = numpy.where(good_src_ids >= 0)
            good_src_ids = good_src_ids[good_src_ids_indx]
            max_rate = max_rate[good_src_ids_indx]
            cnts = cnts[good_src_ids_indx]
            totcnts = numpy.sum(cnts)
            if totcnts >= min_counts:
                print('Fitting spectra in region: ' + str(i))
                abs1.nH = nH_Gal
                abs1.cache = 0
                if fix_nH_Gal:
                    sherpa.freeze(abs1.nH)
                else:
                    sherpa.thaw(abs1.nH)
                plsm1.kt = kT_guess
                sherpa.thaw(plsm1.kt)
                plsm1.Abundanc = Ab_guess
                if fix_abund:
                    sherpa.freeze(plsm1.Abundanc)
                else:
                    sherpa.thaw(plsm1.Abundanc)
                plsm1.redshift = redshift
                sherpa.freeze(plsm1.redshift)
                plsm1.cache = 0
                sherpa.fit()
                fit_result = sherpa.get_fit_results()
                red_chi2 = fit_result.rstat
                num_bins = fit_result.numpoints
                # The ordering of parvals depends on which parameters
                # were left thawed.
                if fix_nH_Gal:
                    nH = nH_Gal
                    kT = fit_result.parvals[0]
                    if fix_abund:
                        Z = Ab_guess
                        norm = fit_result.parvals[1]
                    else:
                        Z = fit_result.parvals[1]
                        norm = fit_result.parvals[2]
                else:
                    nH = fit_result.parvals[0]
                    kT = fit_result.parvals[1]
                    if fix_abund:
                        Z = Ab_guess
                        norm = fit_result.parvals[2]
                    else:
                        Z = fit_result.parvals[2]
                        norm = fit_result.parvals[3]
                del fit_result
                if find_errors:
                    sherpa.covar()
                    covar_result = sherpa.get_covar_results()
                    if fix_nH_Gal:
                        nH_loerr = 0.0
                        nH_hierr = 0.0
                        kT_loerr = covar_result.parmins[0]
                        kT_hierr = covar_result.parmaxes[0]
                        if fix_abund:
                            Z_loerr = 0.0
                            Z_hierr = 0.0
                            norm_loerr = covar_result.parmins[1]
                            norm_hierr = covar_result.parmaxes[1]
                        else:
                            Z_loerr = covar_result.parmins[1]
                            Z_hierr = covar_result.parmaxes[1]
                            norm_loerr = covar_result.parmins[2]
                            norm_hierr = covar_result.parmaxes[2]
                    else:
                        nH_loerr = covar_result.parmins[0]
                        nH_hierr = covar_result.parmaxes[0]
                        kT_loerr = covar_result.parmins[1]
                        kT_hierr = covar_result.parmaxes[1]
                        if fix_abund:
                            Z_loerr = 0.0
                            Z_hierr = 0.0
                            norm_loerr = covar_result.parmins[2]
                            norm_hierr = covar_result.parmaxes[2]
                        else:
                            Z_loerr = covar_result.parmins[2]
                            Z_hierr = covar_result.parmaxes[2]
                            norm_loerr = covar_result.parmins[3]
                            norm_hierr = covar_result.parmaxes[3]
                    del covar_result
                    # Check for failed errors (= None) and set them to
                    # +/- the best-fit value
                    if not fix_nH_Gal:
                        if nH_loerr is None:
                            nH_loerr = -nH
                        if nH_hierr is None:
                            nH_hierr = nH
                    if kT_loerr is None:
                        kT_loerr = -kT
                    if kT_hierr is None:
                        kT_hierr = kT
                    if not fix_abund:
                        if Z_loerr is None:
                            Z_loerr = -Z
                        if Z_hierr is None:
                            Z_hierr = Z
                    if norm_loerr is None:
                        norm_loerr = -norm
                    if norm_hierr is None:
                        norm_hierr = norm
                else:
                    kT_loerr = 0.0
                    Z_loerr = 0.0
                    nH_loerr = 0.0
                    norm_loerr = 0.0
                    kT_hierr = 0.0
                    Z_hierr = 0.0
                    nH_hierr = 0.0
                    norm_hierr = 0.0
            else:
                # if total counts < min_counts, just write zeros
                # (message typo "for for" fixed)
                print('\n Warning: no fit performed for region: ' + str(i))
                print('\n Spectra have insufficient counts after filtering or do not exist.')
                print('\n --> All parameters for this region set to 0.0.')
                kT = 0.0
                Z = 0.0
                nH = 0.0
                norm = 0.0
                kT_loerr = 0.0
                Z_loerr = 0.0
                nH_loerr = 0.0
                norm_loerr = 0.0
                kT_hierr = 0.0
                Z_hierr = 0.0
                nH_hierr = 0.0
                norm_hierr = 0.0
                red_chi2 = 0.0
                num_bins = 0
            # Split on every underscore so that the region number (the
            # piece after 'xaf') can be accessed as reg_id[1].
            reg_id = spectra[0][i].split('_')
            # Write all data for this region as one row.
            results_file.write('%7r %7.4f %7.4f %7.4f %7.4f %7.4f %7.4f %6.4e %6.4e %6.4e %7.4f %7.4f %7.4f %7.4f %8.1f %8r\n' % (int(reg_id[1]), kT, kT_loerr, kT_hierr, Z, Z_loerr, Z_hierr, norm, norm_loerr, norm_hierr, nH, nH_loerr, nH_hierr, red_chi2, totcnts, num_bins))