def validate_pha(idval):
    """Check that the PHA dataset in id=idval is as expected."""

    def check(obj, cls):
        # Each component must have been read in as the expected class.
        assert isinstance(obj, cls)

    assert ui.list_data_ids() == [idval]

    pha = ui.get_data(idval)
    check(pha, DataPHA)

    arf = ui.get_arf(idval)
    check(arf, ARF1D)

    rmf = ui.get_rmf(idval)
    check(rmf, RMF1D)

    bpha = ui.get_bkg(idval, bkg_id=1)
    check(bpha, DataPHA)

    barf = ui.get_arf(idval, bkg_id=1)
    check(barf, ARF1D)

    brmf = ui.get_rmf(idval, bkg_id=1)
    check(brmf, RMF1D)

    # normally the background data set would have a different name,
    # but this is a PHA Type 3 file.
    #
    assert pha.name == bpha.name
    assert arf.name == barf.name
    assert rmf.name == brmf.name
def validate_pha(self, idval):
    """Check that the PHA dataset in id=idval is as expected.
    """
    # Only the dataset under test should be present in the session.
    self.assertEqual(ui.list_data_ids(), [idval])

    # Source components: data, ARF, RMF.
    pha = ui.get_data(idval)
    self.assertIsInstance(pha, DataPHA)

    arf = ui.get_arf(idval)
    self.assertIsInstance(arf, ARF1D)

    rmf = ui.get_rmf(idval)
    self.assertIsInstance(rmf, RMF1D)

    # Background components (bkg_id=1).
    bpha = ui.get_bkg(idval, bkg_id=1)
    self.assertIsInstance(bpha, DataPHA)

    barf = ui.get_arf(idval, bkg_id=1)
    self.assertIsInstance(barf, ARF1D)

    brmf = ui.get_rmf(idval, bkg_id=1)
    self.assertIsInstance(brmf, RMF1D)

    # normally the background data set would have a different name,
    # but this is a PHA Type 3 file.
    #
    self.assertEqual(pha.name, bpha.name)
    self.assertEqual(arf.name, barf.name)
    self.assertEqual(rmf.name, brmf.name)
def test_xmm2(self):
    """Regression checks on the data and responses loaded by the
    xmm2 thread."""
    self.run_thread('xmm2')

    self.assertEqualWithinTol(ui.get_data().channel[0], 1.0, 1e-4)

    # Fetch the responses once rather than per assertion.
    rmf = ui.get_rmf()
    arf = ui.get_arf()

    self.assertEqual(rmf.detchans, 800)
    self.assertEqual(rmf.offset, 0)

    # The energy axes of the RMF and ARF have the same size.
    for attr in ("energ_lo", "energ_hi"):
        self.assertEqual(len(getattr(rmf, attr)), 2400)
        self.assertEqual(len(getattr(arf, attr)), 2400)

    self.assertEqual(len(rmf.n_grp), 2400)
    self.assertEqual(len(rmf.f_chan), 2394)
    self.assertEqual(len(rmf.n_chan), 2394)
    self.assertEqual(len(rmf.matrix), 1281216)
    self.assertEqual(len(rmf.e_min), 800)
    self.assertEqual(len(rmf.e_max), 800)
    self.assertEqual(len(arf.specresp), 2400)
def set_minimalArea(self, areamin):
    """Define threshold using minimal area.

    Extract true energy value from arf file

    To be implemented
    """
    # NOTE(review): stub — the ARF is looked up but ``areamin`` is not
    # yet used; the threshold computation remains to be written.
    my_arf = sau.get_arf(self.name)
def get_identity_response(i):
    """Return a factory for an identity response for dataset ``i``.

    The number of channels is taken from the background of dataset
    ``i``. When an ARF is associated with the dataset the factory
    wraps the model in ``IdentityResponse`` (ARF + RMF); otherwise it
    falls back to ``IdentityRMF`` (RMF only).

    Parameters
    ----------
    i : int or str
        The Sherpa dataset identifier.

    Returns
    -------
    callable
        A function taking a model and returning the wrapped model.
    """
    n = ui.get_bkg(i).counts.size
    rmf = ui.get_rmf(i)

    # Only the get_arf call is expected to fail (no ARF associated
    # with the dataset). Catch Exception, not a bare except, so that
    # KeyboardInterrupt/SystemExit still propagate.
    try:
        arf = ui.get_arf(i)
    except Exception:
        return lambda model: IdentityRMF(n, model, rmf=rmf)

    return lambda model: IdentityResponse(n, model, arf=arf, rmf=rmf)
def test_xmm2(run_thread, fix_xspec):
    """Regression test of the xmm2 thread: replacement warnings and
    the sizes of the loaded ARF/RMF components."""
    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        run_thread('xmm2')

    # NOTE: if this test is run on its own it can generate three warnings,
    # with the first being a RuntimeWarning about numpy.ndarray size
    # changed. We filter this out as it's not at all clear what is going
    # on and we have filters in conftest to remove similar warnings
    #
    ws = [w for w in ws
          if not (w.category == RuntimeWarning and
                  str(w.message).startswith(
                      'numpy.ndarray size changed, may indicate binary incompatibility.'))]

    assert len(ws) == 2
    cats = set([w.category for w in ws])
    assert cats == set([UserWarning])

    # The order of reading the ARF and RMF is not guaranteed,
    # so do not force it here when testing the two warning
    # messages.
    #
    arffile = 'MNLup_2138_0670580101_EMOS1_S001_spec.arf'
    rmffile = 'MNLup_2138_0670580101_EMOS1_S001_spec.rmf'
    emsg_arf = "The minimum ENERG_LO in the ARF " + \
               "'{}' ".format(arffile) + \
               "was 0 and has been replaced by {}".format(EMIN)
    emsg_rmf = "The minimum ENERG_LO in the RMF " + \
               "'{}' ".format(rmffile) + \
               "was 0 and has been replaced by {}".format(EMIN)

    emsgs = set([emsg_arf, emsg_rmf])
    wmsgs = set([str(w.message) for w in ws])
    assert wmsgs == emsgs

    assert ui.get_data().channel[0] == approx(1.0, rel=1e-4)

    # Regression checks on the response sizes.
    rmf = ui.get_rmf()
    arf = ui.get_arf()
    assert rmf.detchans == 800
    assert len(rmf.energ_lo) == 2400
    assert len(rmf.energ_hi) == 2400
    assert len(rmf.n_grp) == 2400
    assert len(rmf.f_chan) == 2394
    assert len(rmf.n_chan) == 2394
    assert len(rmf.matrix) == 1281216
    assert rmf.offset == 0
    assert len(rmf.e_min) == 800
    assert len(rmf.e_max) == 800
    assert len(arf.energ_lo) == 2400
    assert len(arf.energ_hi) == 2400
    assert len(arf.specresp) == 2400

    # The first bin was replaced by EMIN (see the warnings above).
    etol = EMIN / 100.0
    assert rmf.energ_lo[0] == approx(EMIN, rel=etol)
    assert arf.energ_lo[0] == approx(EMIN, rel=etol)
def test_sherpa(self, tmpdir, extraction):
    """Same as above for files to be used with sherpa"""
    extraction.run(outdir=tmpdir, use_sherpa=True)

    import sherpa.astro.ui as sau

    pha_path = tmpdir / 'ogip_data' / 'pha_obs23523.fits'
    sau.load_pha(str(pha_path))

    # Compare the ARF read back by sherpa with the effective area
    # used by the extraction.
    specresp = sau.get_arf()._arf._specresp
    reference = extraction.observations[0].aeff.data.data.value
    assert_allclose(specresp, reference)
def setup_order_plot(make_data_path):
    """Set up a faked dataset with multiple orders."""
    ui.load_pha(make_data_path('3c273.pi'))

    # The load has already associated one response; scale it down.
    first_arf = ui.get_arf(resp_id=1)
    first_arf.specresp *= 0.5

    # Add two more "orders" that reuse the same ARF/RMF pair but
    # with smaller effective areas.
    for resp, frac in zip((2, 3), (0.4, 0.25)):
        ui.load_arf(make_data_path('3c273.arf'), resp_id=resp)
        ui.load_rmf(make_data_path('3c273.rmf'), resp_id=resp)
        ui.get_arf(resp_id=resp).specresp *= frac

    ui.set_source(ui.powlaw1d.pl)
    ui.notice(0.5, 7)
    ui.ignore(3, 4)
def test_sherpa(self, tmpdir, extraction):
    """Same as above for files to be used with sherpa"""
    import sherpa.astro.ui as sau

    extraction.run()
    extraction.write(outdir=tmpdir, use_sherpa=True, overwrite=True)

    sau.load_pha(str(tmpdir / "ogip_data" / "pha_obs23523.fits"))

    # The ARF written for sherpa must match the extraction's
    # effective area (in cm2).
    measured = sau.get_arf()._arf._specresp
    expected = extraction.spectrum_observations[0].aeff.data.data.to_value("cm2")
    assert_allclose(measured, expected)
def test_sherpa(self, tmpdir, extraction):
    """Same as above for files to be used with sherpa"""
    import sherpa.astro.ui as sau

    extraction.run()
    extraction.write(outdir=tmpdir, use_sherpa=True, overwrite=True)

    pha_file = tmpdir / "ogip_data" / "pha_obs23523.fits"
    sau.load_pha(str(pha_file))

    # The ARF written for sherpa must match the extraction's
    # effective area values.
    arf = sau.get_arf()
    assert_allclose(arf._arf._specresp,
                    extraction.spectrum_observations[0].aeff.data.data.value)
def test_xmm2(self):
    """Check the warnings and responses created by the xmm2 thread."""
    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        self.run_thread('xmm2')

    # Two replacement warnings are expected, one each for the ARF
    # and the RMF.
    assert len(ws) == 2
    assert set(w.category for w in ws) == set([UserWarning])

    # The order of reading the ARF and RMF is not guaranteed,
    # so do not force it here when testing the two warning
    # messages.
    #
    arffile = 'MNLup_2138_0670580101_EMOS1_S001_spec.arf'
    rmffile = 'MNLup_2138_0670580101_EMOS1_S001_spec.rmf'
    emsg_arf = ("The minimum ENERG_LO in the ARF " +
                "'{}' ".format(arffile) +
                "was 0 and has been replaced by {}".format(EMIN))
    emsg_rmf = ("The minimum ENERG_LO in the RMF " +
                "'{}' ".format(rmffile) +
                "was 0 and has been replaced by {}".format(EMIN))

    assert set(str(w.message) for w in ws) == set([emsg_arf, emsg_rmf])

    assert ui.get_data().channel[0] == approx(1.0, rel=1e-4)

    # Regression checks on the response sizes.
    rmf = ui.get_rmf()
    arf = ui.get_arf()
    assert rmf.detchans == 800
    assert len(rmf.energ_lo) == 2400
    assert len(rmf.energ_hi) == 2400
    assert len(rmf.n_grp) == 2400
    assert len(rmf.f_chan) == 2394
    assert len(rmf.n_chan) == 2394
    assert len(rmf.matrix) == 1281216
    assert rmf.offset == 0
    assert len(rmf.e_min) == 800
    assert len(rmf.e_max) == 800
    assert len(arf.energ_lo) == 2400
    assert len(arf.energ_hi) == 2400
    assert len(arf.specresp) == 2400

    # The first bin was replaced by EMIN (see the warnings above).
    etol = EMIN / 100.0
    assert rmf.energ_lo[0] == approx(EMIN, rel=etol)
    assert arf.energ_lo[0] == approx(EMIN, rel=etol)
def estimate_expmap(self, *args):
    """Estimate the exposure map given an ARF.

    If no arguments are supplied then the ARF of the Sherpa
    dataset associated with this object is used (``self.id``).

    Although the arguments are listed with parameter names below,
    the function **does not** accept named arguments. It uses
    positional arguments and type checks to determine the
    parameters.

    Parameters
    ----------
    crate
        A TABLECrate, containing ``energ_lo``, ``energ_hi``,
        and ``specresp`` columns.
    filename : string
        The name of an ARF file
    xlo, xhi, y : arrays of numbers
        The arrays taken to be the ``energ_lo``, ``energ_hi``,
        and ``specresp`` columns

    Returns
    -------
    expmap : number
        An estimate of the exposure map at the position of the
        source, and has units of cm^2 count / self.fluxtype

    Notes
    -----
    The ARF is linearly interpolated onto the energy grid of the
    dataset and the weighted sum calculated.

    The ARF is assumed to have units of cm^2 count / photon, and
    be defined on a grid given in keV.
    """

    nargs = len(args)
    if nargs == 0:
        # No arguments: fall back on the ARF associated with the
        # Sherpa dataset for this object.
        darf = ui.get_arf(self.id)
        return self._estimate_expmap(darf.energ_lo, darf.energ_hi,
                                     darf.specresp)
    else:
        # Delegate the crate/filename/array handling to the helper.
        return self._estimate_expmap(*args)
def test_can_use_swift_data(make_data_path, clean_astro_ui):
    """A basic check that we can read in and use the Swift data.

    Unlike the previous tests, that directly access the io module,
    this uses the ui interface.
    """

    # The Swift PHA file does not have the ANCRFILE/RESPFILE keywords
    # set up, so the responses have to be manually added.
    #
    ui.load_pha(make_data_path(PHAFILE))

    # Loading the responses replaces the 0-valued ENERG_LO bin,
    # which is reported via a warning.
    rmffile = make_data_path(RMFFILE)
    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        ui.load_rmf(rmffile)

    validate_replacement_warning(ws, 'RMF', rmffile)

    arffile = make_data_path(ARFFILE)
    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        ui.load_arf(arffile)

    validate_replacement_warning(ws, 'ARF', arffile)

    assert ui.get_analysis() == 'energy'

    arf = ui.get_arf()
    rmf = ui.get_rmf()
    assert arf.energ_lo[0] == EMIN
    assert rmf.energ_lo[0] == EMIN
    assert rmf.e_min[0] == 0.0

    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.ampl', 0.0003)

    stat = ui.calc_stat()

    # This check is purely a regression test, so the value has
    # not been externally validated.
    #
    assert_allclose(stat, 58.2813692358182)

    # Pick an energy range which isn't affected by the first
    # bin.
    #
    # Unfortunately, using a range of 0.3-8.0 gives 771 bins
    # in XSPEC - channels 30 to 800 - but 770 bins in Sherpa,
    # channels 31 to 800.
    #
    # Note that the channel numbering starts at 0:
    # % dmlist target_sr.pha header,clean,raw | grep TLMIN
    # TLMIN1       = 0    / Lowest legal channel number
    #
    # and so it's not clear when XSPEC says 30-800 what it
    # means. From
    # https://github.com/sherpa/sherpa/issues/1211#issuecomment-881647128
    # we have that the first bin it is using is
    #     0.29-0.30
    # and the last bin is
    #     7.99-8.00
    # and I've checked with iplot that it has renumbered the
    # channels to 1-1024 from 0-1023
    #
    # % dmlist swxpc0to12s6_20130101v014.rmf.gz"[ebounds][channel=28:31]" data,clean
    #  CHANNEL    E_MIN                E_MAX
    #         28     0.28000000119209     0.28999999165535
    #         29     0.28999999165535     0.30000001192093
    #         30     0.30000001192093     0.31000000238419
    #         31     0.31000000238419     0.31999999284744
    # % dmlist swxpc0to12s6_20130101v014.rmf.gz"[ebounds][channel=798:801]" data,clean
    #  CHANNEL    E_MIN                E_MAX
    #        798         7.9800000191         7.9899997711
    #        799         7.9899997711                  8.0
    #        800                  8.0         8.0100002289
    #        801         8.0100002289         8.0200004578
    #
    # If I use ignore(None, 0.3); ignore(8.0, None) instead then the
    # result is 771 bins (channels 31 to 800). This is because the
    # e_min/max of the RMF has channel widths of 0.01 keV, starting at
    # 0, so both 0.3 and 8.0 fall on a bin boundary. So, it's either a
    # difference in < or <= (or > vs >=), or a rounding issue due to
    # floating-point conversion leading to one bin boundary being
    # slightly different in Sherpa vs XSPEC).
    #
    # When using ui.notice(0.3, 8.0); ui.get_indep(filter=True)
    # returns 770 channels, 31 to 800.
    #
    # Using ui.notice(0.3, 7.995) selects channels 31 to 800.
    # Using ui.notice(0.299, 8.0) selects channels 30 to 800.
    # Using ui.notice(0.299, 7.995) selects channels 30 to 800.
    #
    ui.notice(0.299, 8.0)

    # Check the selected range
    pha = ui.get_data()
    expected = np.zeros(1024, dtype=bool)
    expected[29:800] = True
    assert pha.mask == pytest.approx(expected)
    assert pha.get_mask() == pytest.approx(expected)

    # XSPEC 12.9.1b calculation of the statistic:
    #   chi sq = 203.88 from 771 bins with 769 dof
    #   cstat  = 568.52
    #
    # There are known differences between XSPEC and Sherpa
    # with chi2xspecvar. This only affects data sets where
    # there is background subtraction, which is not the case
    # here. See https://github.com/sherpa/sherpa/issues/356
    #
    ui.set_stat('chi2xspecvar')
    stat_xvar = ui.get_stat_info()

    assert len(stat_xvar) == 1
    stat_xvar = stat_xvar[0]
    assert stat_xvar.numpoints == 771
    assert stat_xvar.dof == 769
    assert_allclose(stat_xvar.statval, 203.88, rtol=0, atol=0.005)

    ui.set_stat('cstat')
    stat_cstat = ui.get_stat_info()

    assert len(stat_cstat) == 1
    stat_cstat = stat_cstat[0]
    assert stat_cstat.numpoints == 771
    assert stat_cstat.dof == 769
    assert_allclose(stat_cstat.statval, 568.52, rtol=0, atol=0.005)
def test_load_multi_arfsrmfs(make_data_path, clean_astro_ui):
    """Added in #728 to ensure cache parameter is sent along by
    MultiResponseSumModel (fix #717).

    This has since been simplified to switch from xsapec to
    powlaw1d as it drops the need for XSPEC and is a simpler
    model, so is less affected by changes in the model code.

    A fit of the Sherpa powerlaw-model to 3c273.pi with a
    single response in CIAO 4.11 (background subtracted,
    0.5-7 keV) returns gamma = 1.9298, ampl = 1.73862e-4
    so doubling the response should halve the amplitude but
    leave the gamma value the same when using two responses,
    as below. This is with chi2datavar.
    """

    pha_pi = make_data_path("3c273.pi")
    ui.load_pha(1, pha_pi)
    ui.load_pha(2, pha_pi)

    arf = make_data_path("3c273.arf")
    rmf = make_data_path("3c273.rmf")

    # Attach the same ARF/RMF pair twice (response ids 1 and 2) to
    # both datasets so the summed response is doubled.
    ui.load_multi_arfs(1, [arf, arf], [1, 2])
    ui.load_multi_arfs(2, [arf, arf], [1, 2])
    ui.load_multi_rmfs(1, [rmf, rmf], [1, 2])
    ui.load_multi_rmfs(2, [rmf, rmf], [1, 2])

    # Check multiple responses have been loaded
    #
    d1 = ui.get_data(1)
    d2 = ui.get_data(2)
    assert d1.response_ids == [1, 2]
    assert d2.response_ids == [1, 2]

    # Unfortunately we load the same response so it's hard
    # to tell the difference here!
    #
    assert ui.get_arf(resp_id=1).name == arf
    assert ui.get_arf(resp_id=2).name == arf
    assert ui.get_rmf(2, resp_id=1).name == rmf
    assert ui.get_rmf(2, resp_id=2).name == rmf

    ui.notice(0.5, 7)
    ui.subtract(1)
    ui.subtract(2)

    # Fit a single shared model to both datasets.
    src = ui.create_model_component('powlaw1d', 'src')
    ui.set_model(1, src)
    ui.set_model(2, src)

    # ensure the test is repeatable by running with a known
    # statistic and method
    #
    ui.set_method('levmar')
    ui.set_stat('chi2datavar')

    # Really what we care about for fixing #717 is that
    # fit does not error out, but it's useful to know that
    # the fit has changed the parameter values (which were
    # both 1 before the fit).
    #
    ui.fit()
    fr = ui.get_fit_results()
    assert fr.succeeded
    assert fr.datasets == (1, 2)

    # Doubled response: same gamma, half the single-response ampl.
    assert src.gamma.val == pytest.approx(1.9298, rel=1.0e-4)
    assert src.ampl.val == pytest.approx(1.73862e-4 / 2, rel=1.0e-4)
def test_can_use_swift_data(make_data_path): """A basic check that we can read in and use the Swift data. Unlike the previous tests, that directly access the io module, this uses the ui interface. """ # QUS are there pytest fixtures that ensure the state is # clean on entry and exit? ui.clean() # The Swift PHA file does not have the ANCRFILE/RESPFILE keywords # set up, so the responses have to be manually added. # ui.load_pha(make_data_path(PHAFILE)) ui.load_rmf(make_data_path(RMFFILE)) ui.load_arf(make_data_path(ARFFILE)) assert ui.get_analysis() == 'energy' ui.set_source(ui.powlaw1d.pl) ui.set_par('pl.ampl', 0.0003) # The responses have the first bin start at an energy of 0, # which causes issues for Sherpa. There should be a # RuntimeWarning due to a divide by zero. # with pytest.warns(RuntimeWarning) as record: stat = ui.calc_stat() # The exact form of the message depends on the Python version; # this could be checked, but it feels excessive for this # particular test, which is just a regression check, so use a # more lax approach. # assert len(record) == 1 assert record[0].message.args[0] in \ ['divide by zero encountered in divide', 'divide by zero encountered in true_divide'] # The stat value depends on what power-law model is used. With # xspowerlaw it is NaN, but with powlaw1d it is finite. # # This check is purely a regression test, so the value has # not been externally validated. # # assert np.isnan(stat) assert_allclose(stat, 58.2813692358182) # Manually adjust the first bin to avoid this problem. # Add in asserts just in case this gets "fixed" in the # I/O layer (as XSPEC does). # arf = ui.get_arf() rmf = ui.get_rmf() assert arf.energ_lo[0] == 0.0 assert rmf.energ_lo[0] == 0.0 assert rmf.e_min[0] == 0.0 # The bin widths are ~ 0.005 or ~ 0.01 keV, so pick a value # smaller than this. # ethresh = 1e-6 arf.energ_lo[0] = ethresh rmf.energ_lo[0] = ethresh rmf.e_min[0] = ethresh # Pick an energy range which isn't affected by the first # bin. 
# # Unfortunately, using a range of 0.3-8.0 gives 771 bins # in XSPEC - channels 30 to 800 - but 772 bins in Sherpa. # If I use ignore(None, 0.3); ignore(8.0, None) instead # then the result is 771 bins. This is because the e_min/max # of the RMF has channel widths of 0.01 keV, starting at 0, # so both 0.3 and 8.0 fall on a bin boundary. So, it's either # a difference in < or <= (or > vs >=), or a rounding issue # due to floating-point conversion leading to one bin boundary # being slightly different in Sherpa vs XSPEC). # # When using ui.notice(0.3, 8.0); ui.get_indep(filter=True) # returns 772 channels, 30 to 801. # # Using ui.notice(0.3, 7.995) selects channels 30 to 800. So # this range is used. Alternatively, channel 801 could have been # excluded explicitly. # # ui.notice(0.3, 8.0) ui.notice(0.3, 7.995) # XSPEC 12.9.1b calculation of the statistic: # chi sq = 203.88 from 771 bins with 769 dof # cstat = 568.52 # # There are known differences between XSPEC and Sherpa # with chi2xspecvar. This only affects data sets where # there is background subtraction, which is not the case # here. See https://github.com/sherpa/sherpa/issues/356 # ui.set_stat('chi2xspecvar') stat_xvar = ui.get_stat_info() assert len(stat_xvar) == 1 stat_xvar = stat_xvar[0] assert stat_xvar.numpoints == 771 assert stat_xvar.dof == 769 assert_allclose(stat_xvar.statval, 203.88, rtol=0, atol=0.005) ui.set_stat('cstat') stat_cstat = ui.get_stat_info() assert len(stat_cstat) == 1 stat_cstat = stat_cstat[0] assert stat_cstat.numpoints == 771 assert stat_cstat.dof == 769 assert_allclose(stat_cstat.statval, 568.52, rtol=0, atol=0.005) ui.clean()
def test_plot_order_multi(make_data_path, clean_astro_ui):
    """Rather than fake data, use a known dataset.

    Here we pretend we have three orders but with the same
    response (except that the ARF is 0.5, 0.4, 0.25 of the
    normal ARF).
    """

    pha = make_data_path('3c273.pi')
    ui.load_pha(pha)

    # It has already loaded in one response
    arf = ui.get_arf(resp_id=1)
    arf.specresp *= 0.5

    # Add the "extra orders" (response ids 2 and 3).
    for order, scale in enumerate([0.4, 0.25], 2):
        ui.load_arf(make_data_path('3c273.arf'), resp_id=order)
        ui.load_rmf(make_data_path('3c273.rmf'), resp_id=order)

        arf = ui.get_arf(resp_id=order)
        arf.specresp *= scale

    ui.set_source(ui.powlaw1d.pl)

    ui.notice(0.5, 7)
    ui.ignore(3, 4)

    fplot = ui.get_fit_plot()
    oplot = ui.get_order_plot()

    # The idea is to compare the X range of plot_fit to plot_order
    # (but accessed just using the plot objects rather than creating
    # an actual plot).
    #
    # First some safety checks
    assert fplot.dataplot.xlo == pytest.approx(fplot.modelplot.xlo)
    assert fplot.dataplot.xhi == pytest.approx(fplot.modelplot.xhi)

    assert len(oplot.xlo) == 3
    assert len(oplot.xhi) == 3
    assert len(oplot.y) == 3

    # All orders share the same X axis.
    assert oplot.xlo[1] == pytest.approx(oplot.xlo[0])
    assert oplot.xlo[2] == pytest.approx(oplot.xlo[0])
    assert oplot.xhi[1] == pytest.approx(oplot.xhi[0])
    assert oplot.xhi[2] == pytest.approx(oplot.xhi[0])

    # We know the y values are 0.5, 0.4, 0.25 times the original arf
    # so we can compare them.
    #
    assert oplot.y[1] == pytest.approx(oplot.y[0] * 0.4 / 0.5)
    assert oplot.y[2] == pytest.approx(oplot.y[0] * 0.25 / 0.5)

    xlo = oplot.xlo[0]
    xhi = oplot.xhi[0]
    assert len(xlo) == 564
    assert xlo[0] == pytest.approx(0.46720001101493835)
    assert xhi[-1] == pytest.approx(9.869600296020508)

    # The model plot is technically drawn the same way as the order
    # plot (ungrouped) but it uses different code
    # (sherpa.astro.plot.ModelHistogram) so let's compare.
    #
    mplot = ui.get_model_plot()
    assert mplot.xlo[0] == pytest.approx(0.46720001101493835)
    assert mplot.xhi[-1] == pytest.approx(9.869600296020508)

    # Also compare to the fit plot (which is grouped)
    #
    assert fplot.modelplot.xlo[0] == pytest.approx(0.46720001101493835)
    assert fplot.modelplot.xhi[-1] == pytest.approx(9.869600296020508)
def test_fake_pha_issue_1209(make_data_path, clean_astro_ui, tmp_path):
    """Check issue #1209.

    See also sherpa/astro/tests/test_fake_pha.py for

        test_fake_pha_has_valid_ogip_keywords_all_fake
        test_fake_pha_has_valid_ogip_keywords_from_real

    The session fake_pha includes quite a lot of logic which
    makes that the test case for #1209 should be done at this
    level, to complement the tests mentioned above.
    """

    infile = make_data_path("acisf01575_001N001_r0085_pha3.fits.gz")
    ui.load_pha(infile)
    ui.set_source(ui.powlaw1d.pl)

    # NOTE(review): ``pl`` is not assigned locally; it appears to be the
    # component created by the ui.powlaw1d.pl access above (Sherpa's
    # model auto-registration) — confirm this is available at module
    # scope in the test file.
    pl.gamma = 1.8
    pl.ampl = 1e-4

    arf = ui.get_arf()
    rmf = ui.get_rmf()

    # check the TOTCTS setting in the input file
    d1 = ui.get_data()
    assert d1.header["TOTCTS"] == 855
    assert d1.counts.sum() == 855

    ui.set_source("newid", pl)
    ui.fake_pha("newid",
                exposure=ui.get_exposure(),
                bkg=ui.get_bkg(),
                rmf=rmf,
                arf=arf,
                backscal=ui.get_backscal())

    stat = ui.calc_stat("newid")

    outfile = tmp_path / "sim.pha"
    ui.save_pha("newid", str(outfile))

    ui.load_pha(3, str(outfile))
    d3 = ui.get_data(3)
    assert isinstance(d3, ui.DataPHA)

    assert d3.exposure == pytest.approx(37664.157219191)
    assert d3.areascal == pytest.approx(1.0)
    assert d3.backscal == pytest.approx(2.2426552620567e-06)

    # The responses and background are not written out with the file.
    assert d3.background_ids == []
    assert d3.response_ids == []

    # check the header
    hdr = d3.header
    assert hdr["TELESCOP"] == "CHANDRA"
    assert hdr["INSTRUME"] == "ACIS"
    assert hdr["FILTER"] == "none"

    # check some other values related to #1209 and #488 (OGIP)
    #
    assert "TOTCTS" not in hdr
    assert hdr["GROUPING"] == 0
    assert hdr["QUALITY"] == 0
    assert hdr["SYS_ERR"] == 0

    # We should get the same value - the responses are not written
    # to the temporary directory and so we need to load them
    # directly.
    #
    ui.set_rmf(3, rmf)
    ui.set_arf(3, arf)
    ui.set_source(3, pl)
    assert ui.calc_stat(3) == pytest.approx(stat)
def _get_chart_spectrum(id=None, elow=None, ehigh=None, ewidth=None, norm=None): """Helper routine for *_chart_spectrum.""" # What source expression are we using? # get_model/source will throw an IdentifierErr if the expression # is not defined; we do not, at present catch/re-throw this # if id is None: id = s.get_default_id() mdl = s.get_source(id) # What energy grid to use? Since we do not want to restrict users # to only using PHA datasets (i.e. if I just want to create # something simple) then we have to look for a range of errors # from get_arf # if elow is None or ehigh is None or ewidth is None: try: arf = s.get_arf(id) except (IdentifierErr, ArgumentErr): # a) PHA dataset, no ARF # b) Assume this means the dataset is not derived from the # PHA class arf = None if arf is None: emsg = "No ARF found for dataset {} ".format(repr(id)) + \ "so unable to create energy grid" raise TypeError(emsg) if elow is None: elow = arf.energ_lo[0] if ehigh is None: ehigh = arf.energ_hi[-1] if ewidth is None: # Assume constant grid spacing in the ARF de = arf.energ_hi[-1] - arf.energ_lo[0] nelem = np.size(arf.energ_lo) ewidth = de * 1.0 / nelem if elow >= ehigh: emsg = "elow is >= ehigh: " + \ "elow={} ehigh={}".format(elow, ehigh) raise TypeError(emsg) if ewidth <= 0.0: raise TypeError("ewidth is <= 0.0: ewidth={0}".format(ewidth)) # The following is wasteful if we have an ARF and the user # supplies no elow, ehigh, or ewidth arguments. # # Should I check that nbins is a sensible number (e.g. >= 2)? # nbins = 1 + np.rint((ehigh - elow) / ewidth) erange = elow + ewidth * np.arange(nbins) elo = erange[:-1] ehi = erange[1:] flux = mdl(elo, ehi) emid = 0.5 * (ehi + elo) # do we want to renormalize? if norm is not None: flux *= norm return { "x": emid, "xlo": elo, "xhi": ehi, "y": flux, "id": id, "model": mdl.name }
def test_can_use_swift_data(make_data_path):
    """A basic check that we can read in and use the Swift data.

    Unlike the previous tests, that directly access the io module,
    this uses the ui interface.
    """

    # QUS are there pytest fixtures that ensure the state is
    # clean on entry and exit?
    ui.clean()

    # The Swift PHA file does not have the ANCRFILE/RESPFILE keywords
    # set up, so the responses have to be manually added.
    #
    ui.load_pha(make_data_path(PHAFILE))

    # Loading each response replaces the 0-valued ENERG_LO bin,
    # which is reported via a warning.
    rmffile = make_data_path(RMFFILE)
    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        ui.load_rmf(rmffile)

    validate_replacement_warning(ws, 'RMF', rmffile)

    arffile = make_data_path(ARFFILE)
    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        ui.load_arf(arffile)

    validate_replacement_warning(ws, 'ARF', arffile)

    assert ui.get_analysis() == 'energy'

    arf = ui.get_arf()
    rmf = ui.get_rmf()
    assert arf.energ_lo[0] == EMIN
    assert rmf.energ_lo[0] == EMIN
    assert rmf.e_min[0] == 0.0

    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.ampl', 0.0003)

    stat = ui.calc_stat()

    # This check is purely a regression test, so the value has
    # not been externally validated.
    #
    assert_allclose(stat, 58.2813692358182)

    # Pick an energy range which isn't affected by the first
    # bin.
    #
    # Unfortunately, using a range of 0.3-8.0 gives 771 bins
    # in XSPEC - channels 30 to 800 - but 772 bins in Sherpa.
    # If I use ignore(None, 0.3); ignore(8.0, None) instead
    # then the result is 771 bins. This is because the e_min/max
    # of the RMF has channel widths of 0.01 keV, starting at 0,
    # so both 0.3 and 8.0 fall on a bin boundary. So, it's either
    # a difference in < or <= (or > vs >=), or a rounding issue
    # due to floating-point conversion leading to one bin boundary
    # being slightly different in Sherpa vs XSPEC).
    #
    # When using ui.notice(0.3, 8.0); ui.get_indep(filter=True)
    # returns 772 channels, 30 to 801.
    #
    # Using ui.notice(0.3, 7.995) selects channels 30 to 800. So
    # this range is used. Alternatively, channel 801 could have been
    # excluded explicitly.
    #
    # ui.notice(0.3, 8.0)
    ui.notice(0.3, 7.995)

    # XSPEC 12.9.1b calculation of the statistic:
    #   chi sq = 203.88 from 771 bins with 769 dof
    #   cstat  = 568.52
    #
    # There are known differences between XSPEC and Sherpa
    # with chi2xspecvar. This only affects data sets where
    # there is background subtraction, which is not the case
    # here. See https://github.com/sherpa/sherpa/issues/356
    #
    ui.set_stat('chi2xspecvar')
    stat_xvar = ui.get_stat_info()

    assert len(stat_xvar) == 1
    stat_xvar = stat_xvar[0]
    assert stat_xvar.numpoints == 771
    assert stat_xvar.dof == 769
    assert_allclose(stat_xvar.statval, 203.88, rtol=0, atol=0.005)

    ui.set_stat('cstat')
    stat_cstat = ui.get_stat_info()

    assert len(stat_cstat) == 1
    stat_cstat = stat_cstat[0]
    assert stat_cstat.numpoints == 771
    assert stat_cstat.dof == 769
    assert_allclose(stat_cstat.statval, 568.52, rtol=0, atol=0.005)

    ui.clean()
                    type=str, help="Spectra files (*.pi, *.pha)")
args = parser.parse_args()

from sherpa.astro.ui import load_pha, get_rmf, get_arf, get_fit_plot, load_table_model, set_xsabund, set_xsxsect, ignore, notice, set_xlog, set_ylog
from sherpa.astro.ui import xsapec, set_full_model, get_bkg_model, get_bkg_scale, set_stat, get_bkg, group_adapt, set_analysis, calc_stat, get_data, set_model, get_response, get_model, calc_energy_flux

id = 1

filename = args.filenames[0]
elo, ehi = args.energyrange.split(':')
elo, ehi = float(elo), float(ehi)
load_pha(id, filename)

# Sanity-check the loaded dataset: the responses must not start at
# zero energy and there must be some background counts.
# NOTE(review): the bare ``except:`` also swallows SystemExit and
# KeyboardInterrupt; ``except Exception:`` would be safer here.
try:
    assert get_rmf(id).energ_lo[0] > 0
    assert get_arf(id).energ_lo[0] > 0
    assert (get_bkg(id).counts > 0).sum() > 0
except:
    traceback.print_exc()
    sys.exit(0)

# Configure plotting and the fit statistic / XSPEC settings.
set_xlog()
set_ylog()
set_stat('cstat')
set_xsabund('wilm')
set_xsxsect('vern')
set_analysis(id, 'ener', 'counts')

# Restrict the fit to the requested energy range.
ignore(None, elo)
ignore(ehi, None)
notice(elo, ehi)
def test_can_use_swift_data(make_data_path, is_known_warning): """A basic check that we can read in and use the Swift data. Unlike the previous tests, that directly access the io module, this uses the ui interface. """ # QUS are there pytest fixtures that ensure the state is # clean on entry and exit? ui.clean() # The Swift PHA file does not have the ANCRFILE/RESPFILE keywords # set up, so the responses have to be manually added. # ui.load_pha(make_data_path(PHAFILE)) rmffile = make_data_path(RMFFILE) with warnings.catch_warnings(record=True) as ws: warnings.simplefilter("always") ui.load_rmf(rmffile) validate_replacement_warning(ws, 'RMF', rmffile, is_known_warning) arffile = make_data_path(ARFFILE) with warnings.catch_warnings(record=True) as ws: warnings.simplefilter("always") ui.load_arf(arffile) validate_replacement_warning(ws, 'ARF', arffile, is_known_warning) assert ui.get_analysis() == 'energy' arf = ui.get_arf() rmf = ui.get_rmf() assert arf.energ_lo[0] == EMIN assert rmf.energ_lo[0] == EMIN assert rmf.e_min[0] == 0.0 ui.set_source(ui.powlaw1d.pl) ui.set_par('pl.ampl', 0.0003) stat = ui.calc_stat() # This check is purely a regression test, so the value has # not been externally validated. # assert_allclose(stat, 58.2813692358182) # Pick an energy range which isn't affected by the first # bin. # # Unfortunately, using a range of 0.3-8.0 gives 771 bins # in XSPEC - channels 30 to 800 - but 772 bins in Sherpa. # If I use ignore(None, 0.3); ignore(8.0, None) instead # then the result is 771 bins. This is because the e_min/max # of the RMF has channel widths of 0.01 keV, starting at 0, # so both 0.3 and 8.0 fall on a bin boundary. So, it's either # a difference in < or <= (or > vs >=), or a rounding issue # due to floating-point conversion leading to one bin boundary # being slightly different in Sherpa vs XSPEC). # # When using ui.notice(0.3, 8.0); ui.get_indep(filter=True) # returns 772 channels, 30 to 801. 
# # Using ui.notice(0.3, 7.995) selects channels 30 to 800. So # this range is used. Alternatively, channel 801 could have been # excluded explicitly. # # ui.notice(0.3, 8.0) ui.notice(0.3, 7.995) # XSPEC 12.9.1b calculation of the statistic: # chi sq = 203.88 from 771 bins with 769 dof # cstat = 568.52 # # There are known differences between XSPEC and Sherpa # with chi2xspecvar. This only affects data sets where # there is background subtraction, which is not the case # here. See https://github.com/sherpa/sherpa/issues/356 # ui.set_stat('chi2xspecvar') stat_xvar = ui.get_stat_info() assert len(stat_xvar) == 1 stat_xvar = stat_xvar[0] assert stat_xvar.numpoints == 771 assert stat_xvar.dof == 769 assert_allclose(stat_xvar.statval, 203.88, rtol=0, atol=0.005) ui.set_stat('cstat') stat_cstat = ui.get_stat_info() assert len(stat_cstat) == 1 stat_cstat = stat_cstat[0] assert stat_cstat.numpoints == 771 assert stat_cstat.dof == 769 assert_allclose(stat_cstat.statval, 568.52, rtol=0, atol=0.005) ui.clean()
# Script: fit two ACIS PHA datasets with a shared source model plus an
# explicit instrumental-background model, using the datastack package.
import datastack
from acis_bkg_model import acis_bkg_model

import sherpa.astro.ui as ui

ds = datastack.DataStack()

ds[1].load_pha("acisf04938_000N002_r0043_pha3.fits")
ds[2].load_pha("acisf07867_000N001_r0002_pha3.fits")

detnam = "acis2i"

for dataset in ds.datasets:
    id_ = dataset["id"]

    # Reuse the source responses for the background model, but with a
    # flat 100 cm^2 "ARF" so the background shape is set by
    # acis_bkg_model alone.
    rmf = ui.get_rmf(id_)
    arf = ui.get_arf(id_)
    ui.load_bkg_arf(id_, arf.name)
    bkg_arf = ui.get_bkg_arf(id_)
    bkg_rmf = ui.get_bkg_rmf(id_)
    bkg_arf.specresp = bkg_arf.specresp * 0 + 100.0

    # Scale factor between the source and background extraction
    # regions/exposures.
    bkg_scale = ui.get_data(id_).sum_background_data(lambda x, y: 1)

    # NOTE(review): the "ID" suffix in the component names below is
    # presumably expanded per-dataset by datastack's model templating —
    # confirm against the datastack documentation.
    bkg_model = bkg_rmf(bkg_arf(ui.const1d.bkg_constID * acis_bkg_model(detnam)))
    src_model = rmf(arf(ui.const1d.src_constID * ui.powlaw1d.powlaw))

    ds[id_].set_full_model(src_model + bkg_scale * bkg_model)
    ds[id_].set_bkg_full_model(bkg_model)

# Fix the source normalization constant for the first dataset so the
# per-dataset constants are measured relative to it.
ds[1].set_par("src_const.c0", 1.0)
ds[1].freeze("src_const")

# Fit the 0.5-7 keV range.
ds.ignore(None, 0.5)
ds.ignore(7, None)

ds.fit()