def test_save_source_pha_fits(clean_astro_ui, tmp_path):
    """Can we write out data for save_source? DataPHA and FITS"""
    from sherpa.astro.io import read_table_blocks

    ui.load_arrays(1, [1, 2], [5, 10], ui.DataPHA)

    # A response is needed: a delta RMF plus a non-flat ARF.
    edges = np.asarray([0.1, 0.2, 0.4])
    lo, hi = edges[:-1], edges[1:]
    ui.set_rmf(create_delta_rmf(lo, hi, e_min=lo, e_max=hi))
    ui.set_arf(create_arf(lo, hi, np.asarray([10, 20])))

    ui.set_source(ui.const1d.cmdl)
    cmdl.c0 = 2

    outfile = str(tmp_path / 'model.dat')
    ui.save_source(outfile)

    # read_table_blocks returns (filename, blocks, headers); the
    # blocks are indexed starting at 1, with the table in block 2.
    blocks = read_table_blocks(outfile)[1]
    assert len(blocks) == 2
    check_table(blocks[2],
                {'XLO': [0.1, 0.2],
                 'XHI': [0.2, 0.4],
                 'SOURCE': [2, 2]})
def test_default_background_issue_fit(clean_astro_ui):
    """Test issue #943 with fit

    See https://github.com/sherpa/sherpa/issues/943#issuecomment-696119982
    """
    ui.set_default_id('x')

    # Least-squares is used since the fit quality is irrelevant here.
    ui.set_stat('leastsq')

    channels = np.asarray([1, 2, 3, 4])
    ui.load_arrays('x', [1, 2, 3, 4], [5, 4, 3, 4], ui.DataPHA)

    background = ui.DataPHA('bkg', channels, [1, 1, 0, 1])
    arf = ui.create_arf(np.asarray([0.1, 0.2, 0.3, 0.4]),
                        np.asarray([0.2, 0.3, 0.4, 0.5]))
    ui.set_arf(arf)
    background.set_arf(arf)
    ui.set_bkg(background)

    # The model being fitted is a constant to 1,1,0,1 for
    # the background, so that should be 0.75 / 0.1 (as the
    # bin width is constant), and for the source it is
    # 5,4,3,4 - <0.75> [here ignoring the bin-width],
    # so [4.25,3.25,2.25,3.25] -> 13 / 4 -> 3.25
    #
    ui.set_source(ui.const1d.mdl1)
    ui.set_bkg_source(ui.const1d.mdl2)

    # Prior to #943 this would give a confusing error.
    #
    ui.fit()

    assert mdl1.c0.val == pytest.approx(3.25 / 0.1)
    assert mdl2.c0.val == pytest.approx(0.75 / 0.1)
def test_save_model_pha_ascii(clean_astro_ui, tmp_path):
    """Can we write out data for save_model? DataPHA and ASCII"""
    ui.load_arrays(1, [1, 2], [5, 10], ui.DataPHA)

    # Attach a response: delta RMF plus an ARF of [10, 20].
    edges = np.asarray([0.1, 0.2, 0.4])
    lo, hi = edges[:-1], edges[1:]
    ui.set_rmf(create_delta_rmf(lo, hi, e_min=lo, e_max=hi))
    ui.set_arf(create_arf(lo, hi, np.asarray([10, 20])))

    ui.set_source(ui.const1d.cmdl)
    cmdl.c0 = 2

    out = tmp_path / 'model.dat'
    ui.save_model(str(out), ascii=True)

    # The model is the constant folded through the ARF: 2 * [10, 20].
    check_output(out.read_text(),
                 ['XLO', 'XHI', 'MODEL'],
                 [[0.1, 0.2, 20], [0.2, 0.4, 40]])
def test_save_resid_datapha_fits(clean_astro_ui, tmp_path):
    """Residual, DataPHA, FITS

    Fix: the clean_astro_ui fixture was missing (unlike the sibling
    save_* tests), so the dataset, response, and cmdl component
    leaked into the session state seen by later tests.
    """
    from sherpa.astro.io import read_table_blocks

    ui.load_arrays(1, [1, 2], [5, 10], ui.DataPHA)

    # we need a response
    egrid = np.asarray([0.1, 0.2, 0.4])
    elo = egrid[:-1]
    ehi = egrid[1:]
    rmf = create_delta_rmf(elo, ehi, e_min=elo, e_max=ehi)
    ui.set_rmf(rmf)

    yarf = np.asarray([10, 20])
    arf = create_arf(elo, ehi, yarf)
    ui.set_arf(arf)

    ui.set_source(ui.const1d.cmdl)
    cmdl.c0 = 2

    out = tmp_path / 'resid.out'
    outfile = str(out)
    ui.save_resid(outfile)

    # Data rates are [50, 50] and the folded model is [20, 40],
    # giving residuals of [30, 10] at the bin mid-points.
    ans = read_table_blocks(outfile)
    blocks = ans[1]
    assert len(blocks) == 2
    check_table(blocks[2],
                {'X': [0.15, 0.3],
                 'RESID': [30, 10]})
def test_save_resid_datapha(clean_astro_ui, tmp_path):
    """Residual, DataPHA, ASCII

    Fix: the clean_astro_ui fixture was missing (unlike the sibling
    save_* tests), so the dataset, response, and cmdl component
    leaked into the session state seen by later tests.
    """
    ui.load_arrays(1, [1, 2], [5, 10], ui.DataPHA)

    # we need a response
    egrid = np.asarray([0.1, 0.2, 0.4])
    elo = egrid[:-1]
    ehi = egrid[1:]
    rmf = create_delta_rmf(elo, ehi, e_min=elo, e_max=ehi)
    ui.set_rmf(rmf)

    yarf = np.asarray([10, 20])
    arf = create_arf(elo, ehi, yarf)
    ui.set_arf(arf)

    ui.set_source(ui.const1d.cmdl)
    cmdl.c0 = 2

    out = tmp_path / 'resid.out'
    outfile = str(out)
    ui.save_resid(outfile, ascii=True)

    # Data rates are [50, 50] and the folded model is [20, 40],
    # giving residuals of [30, 10] at the bin mid-points.
    cts = out.read_text()
    check_output(cts, ['X', 'RESID'], [[0.15, 30], [0.3, 10]])
def test_fake_pha_background_model(clean_astro_ui, reset_seed):
    """Check we can add a background component.

    See also test_fake_pha_basic.

    For simplicity we use perfect responses.
    """

    # Fixed seed so the Poisson draws below are reproducible.
    np.random.seed(27347)

    # NOTE: 'id' shadows the builtin; left as-is to keep the test
    # byte-identical.
    id = 'qwerty'
    channels = np.arange(1, 4, dtype=np.int16)
    counts = np.ones(3, dtype=np.int16)
    bcounts = 100 * counts

    ui.load_arrays(id, channels, counts, ui.DataPHA)
    ui.set_exposure(id, 100)
    ui.set_backscal(id, 0.1)

    # Background has a different exposure/backscal so the scaling
    # applied by fake_pha is exercised.
    bkg = ui.DataPHA('bkg', channels, bcounts, exposure=200, backscal=0.4)

    ebins = np.asarray([1.1, 1.2, 1.4, 1.6])
    elo = ebins[:-1]
    ehi = ebins[1:]
    arf = ui.create_arf(elo, ehi)
    rmf = ui.create_rmf(elo, ehi, e_min=elo, e_max=ehi)

    mdl = ui.create_model_component('const1d', 'mdl')
    mdl.c0 = 0
    # NOTE(review): this second component re-uses the session name
    # 'mdl' rather than, say, 'bkgmdl'. The local variables still
    # refer to two distinct objects (mdl.c0 stays 0), but the session
    # registry entry for 'mdl' is replaced — confirm this is intended.
    bkgmdl = ui.create_model_component('const1d', 'mdl')
    bkgmdl.c0 = 2
    ui.set_source(id, mdl)
    ui.set_bkg(id, bkg)
    ui.set_bkg_source(id, bkgmdl)
    ui.set_arf(id, arf, bkg_id=1)
    ui.set_rmf(id, rmf, bkg_id=1)

    # bkg='model' asks fake_pha to include the background model
    # contribution in the simulated counts.
    ui.fake_pha(id, arf, rmf, 1000.0, bkg='model')

    faked = ui.get_data(id)
    assert faked.exposure == pytest.approx(1000.0)
    assert (faked.channel == channels).all()

    # check we've faked counts (the scaling is such that it is
    # very improbable that this condition will fail)
    assert (faked.counts > counts).all()

    # For reference the predicted source signal is
    #    [200, 400, 400]
    # and the background signal is
    #    [125, 125, 125]
    # so, even with randomly drawn values, the following
    # checks should be robust.
    #
    predicted_by_source = 1000 * mdl(elo, ehi)
    predicted_by_bkg = (1000 / 200) * (0.1 / 0.4) * bcounts
    assert (faked.counts > predicted_by_source).all()
    assert (faked.counts > predicted_by_bkg).all()
def test_eval_multi_arf(clean_astro_ui):
    """See also test_eval_multi_arf_reorder"""
    first_arf, second_arf, dset = make_arf2()

    ui.set_data(1, dset)
    ui.set_arf(id=1, arf=first_arf, resp_id=1)
    ui.set_arf(id=1, arf=second_arf, resp_id=2)

    check_eval_multi_arf()
def test_eval_multi_arf_reorder(clean_astro_ui):
    """Change the order of setting the ARFs

    Should be the same as test_eval_multi_arf
    """
    first_arf, second_arf, dset = make_arf2()

    ui.set_data(1, dset)
    # Assign the second response slot before the first.
    ui.set_arf(id=1, arf=second_arf, resp_id=2)
    ui.set_arf(id=1, arf=first_arf, resp_id=1)

    check_eval_multi_arf()
def test_set_multi_arfs_reorder(id, clean_astro_ui):
    """Check response_ids tracks ARFs added out of order."""
    arf1, arf2, dset = make_arf2()

    ui.set_data(id, dset)
    data = ui.get_data(id=id)
    assert data.response_ids == []

    # Add the responses with resp_id=2 first: the ids are reported
    # in insertion order.
    ui.set_arf(id=id, arf=arf2, resp_id=2)
    assert data.response_ids == [2]

    ui.set_arf(id=id, arf=arf1, resp_id=1)
    assert data.response_ids == [2, 1]
def test_get_axes_datapha_arf():
    """ARF only"""
    ui.load_arrays(1, [1, 2, 3], [1, 2, 3], ui.DataPHA)

    edges = np.asarray([0.1, 0.2, 0.4, 0.8])
    ui.set_arf(ui.create_arf(edges[:-1], edges[1:]))

    # With just an ARF the axes come back in energy units.
    axes = ui.get_axes()
    assert len(axes) == 2
    assert axes[0] == pytest.approx([0.1, 0.2, 0.4])
    assert axes[1] == pytest.approx([0.2, 0.4, 0.8])
def test_fake_pha_basic_arfrmf_set_in_advance(clean_astro_ui, reset_seed):
    """Similar to test_fake_pha_basic but instead of passing in
    the RMF, we set it before. The result should be the same, so we
    don't have to go through all the parameterization of that test.
    """
    np.random.seed(20348)

    channels = np.arange(1, 4, dtype=np.int16)
    counts = np.ones(3, dtype=np.int16)

    ui.load_arrays('id123', channels, counts, ui.DataPHA)
    ui.set_exposure('id123', 100)

    edges = np.asarray([1.1, 1.2, 1.4, 1.6])
    lo, hi = edges[:-1], edges[1:]
    ui.set_rmf('id123', ui.create_rmf(lo, hi, e_min=lo, e_max=hi))
    ui.set_arf('id123', ui.create_arf(lo, hi))

    mdl = ui.create_model_component('const1d', 'mdl')
    mdl.c0 = 2
    ui.set_source('id123', mdl)

    # Responses were attached above, so None is passed here.
    ui.fake_pha('id123', None, None, 1000.0)

    faked = ui.get_data('id123')
    assert faked.exposure == pytest.approx(1000.0)
    assert (faked.channel == channels).all()

    assert faked.name == 'faked'
    assert faked.background_ids == []

    # check we've faked counts (the scaling is such that it is
    # very improbable that this condition will fail)
    assert (faked.counts > counts).all()

    # For reference the predicted source signal is
    #    [200, 400, 400]
    #
    # What we'd like to say is that the predicted counts are
    # similar, but this is not easy to do. What we can try
    # is summing the counts (to average over the randomness)
    # and then a simple check
    #
    total = faked.counts.sum()
    assert (total > 200) and (total < 3000)

    # This is more likely to fail by chance, but still very unlikely
    assert faked.counts[1] > faked.counts[0]
def test_get_axes_datapha_rsp():
    """Let's have a RMF and ARF for fun"""
    ui.load_arrays(1, [1, 2, 3], [1, 2, 3], ui.DataPHA)

    edges = np.asarray([0.1, 0.2, 0.4, 0.8])
    lo, hi = edges[:-1], edges[1:]
    ui.set_arf(ui.create_arf(lo, hi))
    ui.set_rmf(ui.create_rmf(lo, hi, e_min=lo, e_max=hi))

    # The full response should still report the energy grid.
    axes = ui.get_axes()
    assert len(axes) == 2
    assert axes[0] == pytest.approx([0.1, 0.2, 0.4])
    assert axes[1] == pytest.approx([0.2, 0.4, 0.8])
def test_grouped_pha_all_bad_response(arf, rmf, chantype, exp_counts, exp_xlo, exp_xhi, lo1, hi1, lo2, hi2, clean_astro_ui):
    """Helpdesk ticket: low-count data had no valid bins after grouping #790

    A simple PHA dataset is created, which has no "good" grouped data
    (1 group, but with a quality of 2). Several checks are made to
    ensure we can filter/notice/plot the data even when it is empty.

    Checks are done for
      - arf-only
      - rmf-only
      - arf+rmf
    analysis in case there's a difference in the code paths
    """
    channels = numpy.arange(1, 6, dtype=numpy.int16)
    counts = numpy.asarray([0, 1, 2, 0, 1], dtype=numpy.int16)
    grouping = numpy.asarray([1, -1, -1, -1, -1], dtype=numpy.int16)
    quality = numpy.full(5, 2, dtype=numpy.int16)
    ui.set_data(ui.DataPHA('low', channels, counts,
                           grouping=grouping, quality=quality))

    egrid = numpy.asarray([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
    elo, ehi = egrid[:-1], egrid[1:]

    # it is required that at least one of arf or rmf is set but this
    # is not enforced
    #
    if arf:
        ui.set_arf(ui.create_arf(elo, ehi))

    if rmf:
        # NOTE: need to set e_min/max otherwise get a 'noenergybins'
        #       error from sherpa.astro.data.DataPHA._get_ebins
        #
        ui.set_rmf(ui.create_rmf(elo, ehi, e_min=elo, e_max=ehi))

    # plot units depend on analysis type;
    #
    ui.set_analysis(chantype)

    # Run tests
    check_bad_grouping(exp_xlo, exp_xhi, exp_counts, lo1, hi1, lo2, hi2)
def setUp(self):
    # Create several 1D data spaces. Ids 1 and "tst" are the ones the
    # tests mostly exercise; 'arf1' and 'flatarf' are DataPHA sets so
    # responses can be attached to them.
    ui.dataspace1d(0.2, 10, 0.01, id=1)
    ui.dataspace1d(2, 5, 0.1, id="tst")
    ui.dataspace1d(0.1, 1, 0.1, id="not-used")
    ui.dataspace1d(0.1, 1, 0.1, id="no-arf")
    ui.dataspace1d(0.1, 11, 0.01, id='arf1', dstype=DataPHA)
    ui.dataspace1d(0.2, 10, 0.01, id='flatarf', dstype=DataPHA)

    # self.nbins = {}
    # for idval in [1, 'tst']:
    #     self.nbins[idval] = ui.get_data(1).xlo.size

    # Expected bin counts per dataset; 'arf1-arf' refers to the grid
    # of the ARF attached to 'arf1' (489 bins of width 0.02) rather
    # than the dataset's own 0.01-width grid.
    self.nbins = {1: 980, 'tst': 30, 'arf1': 1090, 'arf1-arf': 489}
    # Expected (lo, hi, step) per dataset, mirroring the dataspace1d
    # calls above plus the ARF grid built below.
    self.grid = {
        1: (0.2, 10, 0.01),
        'tst': (2.0, 5.0, 0.1),
        'arf1': (0.1, 11, 0.01),
        'arf1-arf': (0.2, 9.98, 0.02)  # note: ehigh is not 10.0
    }

    # set_source with ui.powlaw1d.pl1 creates the component and
    # injects 'pl1'/'pltst' as names usable below.
    ui.set_source(1, ui.powlaw1d.pl1)
    ui.set_source("tst", ui.powlaw1d.pltst)
    ui.set_source('no-arf', pl1)
    ui.set_source('arf1', pltst)
    ui.set_source('flatarf', pltst)
    # NOTE(review): no dataspace1d was created for 'no-arf-flat'
    # above; set_source still accepts the id — confirm intended.
    ui.set_source('no-arf-flat', ui.const1d.c1)

    pl1.gamma = 0.0
    pl1.ampl = 1.2
    pltst.gamma = -1.0
    pltst.ampl = 2.1

    # Build two ARFs on a 0.02-width grid: one flat (10.1) and one
    # peaked (a downward parabola centred at 4.5 keV).
    arfgrid = np.arange(0.2, 10, 0.02)
    arflo = arfgrid[:-1]
    arfhi = arfgrid[1:]
    amid = (arflo + arfhi) / 2.0

    flatarf = DataARF('flat', energ_lo=arflo, energ_hi=arfhi,
                      specresp=arflo * 0 + 10.1)
    arf = DataARF('arf', energ_lo=arflo, energ_hi=arfhi,
                  specresp=20 - (4.5 - amid)**2)
    ui.set_arf('arf1', arf)
    ui.set_arf('flatarf', flatarf)
def test_cache_copy(self):
    """Plot caches should give repeatable results on a fake PHA set."""
    nchan = 10
    channels = numpy.arange(1, nchan + 1, dtype=numpy.int8)
    counts = numpy.ones(nchan)

    # bin width is not 0.1 but something slightly different
    edges = numpy.linspace(0.1, 1.2, num=nchan + 1)
    lo = edges[:-1]
    hi = edges[1:]

    dset = ui.DataPHA('test', channels, counts)
    # make sure ARF isn't 1 to make sure it's being applied
    arf = ui.create_arf(lo, hi, specresp=0.7 * numpy.ones(nchan))
    rmf = ui.create_rmf(lo, hi, e_min=lo, e_max=hi)

    ui.set_data(1, dset)
    ui.set_arf(1, arf)
    ui.set_rmf(1, rmf)

    ui.set_source(ui.const1d.mdl)
    mdl.c0 = 8  # again, not 1

    # Copy the values from the plot structures, since get_xxx_plot
    # returns the same object so m1.y == m2.y will not note a difference.
    #
    d1y = ui.get_data_plot().y.copy()
    m1y = ui.get_model_plot().y.copy()
    s1y = ui.get_source_plot().y.copy()

    d2y = ui.get_data_plot().y.copy()
    m2y = ui.get_model_plot().y.copy()
    s2y = ui.get_source_plot().y.copy()

    tol = 1.0e-4
    numpy.testing.assert_allclose(d1y, d2y, tol, tol)
    numpy.testing.assert_allclose(m1y, m2y, tol, tol)
    numpy.testing.assert_allclose(s1y, s2y, tol, tol)
def test_fake_pha_issue_1209(make_data_path, clean_astro_ui, tmp_path):
    """Check issue #1209.

    See also sherpa/astro/tests/test_fake_pha.py for

        test_fake_pha_has_valid_ogip_keywords_all_fake
        test_fake_pha_has_valid_ogip_keywords_from_real

    The session fake_pha includes quite a lot of logic which
    makes that the test case for #1209 should be done at this
    level, to complement the tests mentioned above.

    """
    infile = make_data_path("acisf01575_001N001_r0085_pha3.fits.gz")
    ui.load_pha(infile)
    # set_source injects 'pl' as a usable name below.
    ui.set_source(ui.powlaw1d.pl)
    pl.gamma = 1.8
    pl.ampl = 1e-4

    arf = ui.get_arf()
    rmf = ui.get_rmf()

    # check the TOTCTS setting in the input file
    d1 = ui.get_data()
    assert d1.header["TOTCTS"] == 855
    assert d1.counts.sum() == 855

    # Simulate into a new dataset, re-using the real exposure,
    # background, responses, and backscal.
    ui.set_source("newid", pl)
    ui.fake_pha("newid", exposure=ui.get_exposure(), bkg=ui.get_bkg(), rmf=rmf, arf=arf, backscal=ui.get_backscal())

    stat = ui.calc_stat("newid")

    outfile = tmp_path / "sim.pha"
    ui.save_pha("newid", str(outfile))

    # Reload the saved file and verify the header/metadata round-trip.
    ui.load_pha(3, str(outfile))
    d3 = ui.get_data(3)
    assert isinstance(d3, ui.DataPHA)

    assert d3.exposure == pytest.approx(37664.157219191)
    assert d3.areascal == pytest.approx(1.0)
    assert d3.backscal == pytest.approx(2.2426552620567e-06)

    # The saved file carries neither background nor responses.
    assert d3.background_ids == []
    assert d3.response_ids == []

    # check the header
    hdr = d3.header
    assert hdr["TELESCOP"] == "CHANDRA"
    assert hdr["INSTRUME"] == "ACIS"
    assert hdr["FILTER"] == "none"

    # check some other values related to #1209 and #488 (OGIP)
    #
    assert "TOTCTS" not in hdr
    assert hdr["GROUPING"] == 0
    assert hdr["QUALITY"] == 0
    assert hdr["SYS_ERR"] == 0

    # We should get the same value - the responses are not written
    # to the temporary directory and so we need to load them
    # directly.
    #
    ui.set_rmf(3, rmf)
    ui.set_arf(3, arf)
    ui.set_source(3, pl)
    assert ui.calc_stat(3) == pytest.approx(stat)