def test_dataspace1d_datapha(clean_astro_ui):
    """Explicitly test dataspace1d for DataPHA"""

    assert ui.list_data_ids() == []

    # Note the grid is ignored, other than the number of bins
    ui.dataspace1d(20, 30, step=2.5, id='x', dstype=ui.DataPHA)

    assert ui.list_data_ids() == ['x']
    assert ui.get_data('x').name == 'dataspace1d'

    grid = ui.get_indep('x')
    assert len(grid) == 1

    expected = numpy.asarray([1, 2, 3, 4, 5])
    assert grid[0] == pytest.approx(expected)

    y = ui.get_dep('x')
    assert y == pytest.approx(numpy.zeros(5))

    assert ui.get_exposure('x') is None
    assert ui.get_grouping('x') is None
    assert ui.get_quality('x') is None

    assert ui.get_data('x').subtracted is False

    with pytest.raises(IdentifierErr):
        ui.get_bkg('x')
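# A minimal sketch (not part of the test suite) contrasting the DataPHA
# behaviour checked above with the Data1DInt case tested below: for a PHA
# dataset the requested grid is ignored apart from the number of bins, and
# the independent axis becomes channels numbered from 1, whereas the
# integrated dataset keeps the requested bin edges. Assumes a clean Sherpa
# session with `from sherpa.astro import ui`.
ui.dataspace1d(20, 30, step=2.5, id='pha', dstype=ui.DataPHA)
ui.dataspace1d(20, 30, step=2.5, id='int', dstype=ui.Data1DInt)
print(ui.get_indep('pha'))  # channels 1 to 5
print(ui.get_indep('int'))  # lo edges [20, 22.5, 25, 27.5], hi edges [22.5, 25, 27.5, 30]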
def test_load_pha2(make_data_path, caplog):
    """Basic test that a pha2 file can be read in."""

    basename = '3c120_pha2'

    orig_ids = ui.list_data_ids()
    assert orig_ids == []

    # The file is stored gzip-encoded
    infile = make_data_path(basename)
    ui.load_pha(infile)

    pha_ids = ui.list_data_ids()
    assert len(pha_ids) == 12

    # list_data_ids doesn't guarantee an order
    # Do an explicit check, rather than via a set (testing
    # all at once) to make it easier to see what is missing
    # (if any)
    #
    for i in range(1, 13):
        assert i in pha_ids

    for i in range(1, 13):
        d = ui.get_data(i)
        validate_pha(d, bkg=True)

        # There is no indication of what "part" this data set
        # represents in the file name
        #
        assert d.name == infile

        b = ui.get_bkg(i, bkg_id=1)
        validate_pha(b, bkg=False)
        assert b.name == infile

        b = ui.get_bkg(i, bkg_id=2)
        validate_pha(b, bkg=False)
        assert b.name == infile

    # Test Log messages
    msg_one = "systematic errors were not found in file '{}'".format(infile)
    msg_two = """statistical errors were found in file '{}' 
but not used; to use them, re-read with use_errors=True""".format(infile)
    msg_three = "read background_up into a dataset from file {}".format(infile)
    msg_four = "read background_down into a dataset from file {}".format(infile)
    msg_five = "Multiple data sets have been input: 1-12"

    assert caplog.record_tuples == [
        ('sherpa.astro.io', logging.WARNING, msg_one),
        ('sherpa.astro.io', logging.INFO, msg_two),
        ('sherpa.astro.io', logging.INFO, msg_three),
        ('sherpa.astro.io', logging.INFO, msg_four),
        ('sherpa.astro.ui.utils', logging.INFO, msg_five),
    ]
def test_image_with_id(make_data_path, clean_astro_ui):
    """Call load_image with an identifier"""

    img = make_data_path('img.fits')

    assert ui.list_data_ids() == []
    ui.load_image('ix', img)
    assert ui.list_data_ids() == ['ix']

    d = ui.get_data('ix')
    assert isinstance(d, ui.DataIMG)
    assert d.name.endswith('img.fits')
def test_dataspace1d_datapha_bkg(clean_astro_ui):
    """Explicitly test dataspace1d for DataPHA (background)"""

    # list_bkg_ids will error out until the dataset exists
    assert ui.list_data_ids() == []

    # We don't use the grid range or step size since numbins has been
    # given.
    ui.dataspace1d(20, 30, step=2.5, numbins=10, id='x',
                   dstype=ui.DataPHA)

    assert ui.list_data_ids() == ['x']
    assert ui.list_bkg_ids('x') == []

    ui.dataspace1d(20, 30, step=2.5, numbins=10, id='x', bkg_id=2,
                   dstype=ui.DataPHA)

    assert ui.list_data_ids() == ['x']
    assert ui.list_bkg_ids('x') == [2]

    assert ui.get_data('x').name == 'dataspace1d'

    # I've explicitly not chosen the default background identifier
    with pytest.raises(IdentifierErr):
        ui.get_bkg('x')

    assert ui.get_bkg('x', 2).name == 'bkg_dataspace1d'

    grid = ui.get_indep('x', bkg_id=2)
    assert len(grid) == 1

    expected = numpy.asarray([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
    assert grid[0] == pytest.approx(expected)

    y = ui.get_dep('x', bkg_id=2)
    assert y == pytest.approx(numpy.zeros(10))

    assert ui.get_exposure('x', bkg_id=2) is None
    assert ui.get_grouping('x', bkg_id=2) is None
    assert ui.get_quality('x', bkg_id=2) is None

    assert ui.get_bkg('x', bkg_id=2).subtracted is False

    # check we can subtract the dataset; as the data is all zeros
    # we don't bother checking the result.
    #
    ui.subtract('x')
def test_ui_ascii_noarg(setup_files, clean_astro_ui):
    """Don't give a dataset id

    It also lets us actually check the results of the load_ascii
    call.
    """
    assert ui.list_data_ids() == []
    ui.load_ascii(setup_files.ascii)

    assert ui.list_data_ids() == [1]
    d = ui.get_data()
    assert d.name.endswith('sim.poisson.1.dat')
    assert isinstance(d, ui.Data1D)
    assert len(d.x) == 50
    assert d.x[0:5] == pytest.approx([0.5, 1, 1.5, 2, 2.5])
    assert d.y[0:5] == pytest.approx([27, 27, 20, 28, 27])
def test_ui_table_noarg(setup_files, clean_astro_ui):
    """Don't give a dataset id

    It also lets us actually check the results of the load_table
    call.
    """
    assert ui.list_data_ids() == []
    ui.load_table(setup_files.fits, colkeys=['RMID', 'COUNTS'])

    assert ui.list_data_ids() == [1]
    d = ui.get_data()
    assert d.name.endswith('1838_rprofile_rmid.fits')
    assert isinstance(d, ui.Data1D)
    assert len(d.x) == 38
    assert d.x[0:5] == pytest.approx([12.5, 17.5, 22.5, 27.5, 32.5])
    assert d.y[0:5] == pytest.approx([1529, 2014, 2385, 2158, 2013])
def validate_pha(self, idval):
    """Check that the PHA dataset in id=idval is as expected.
    """

    self.assertEqual(ui.list_data_ids(), [idval])

    pha = ui.get_data(idval)
    self.assertIsInstance(pha, DataPHA)

    arf = ui.get_arf(idval)
    self.assertIsInstance(arf, ARF1D)

    rmf = ui.get_rmf(idval)
    self.assertIsInstance(rmf, RMF1D)

    bpha = ui.get_bkg(idval, bkg_id=1)
    self.assertIsInstance(bpha, DataPHA)

    barf = ui.get_arf(idval, bkg_id=1)
    self.assertIsInstance(barf, ARF1D)

    brmf = ui.get_rmf(idval, bkg_id=1)
    self.assertIsInstance(brmf, RMF1D)

    # normally the background data set would have a different name,
    # but this is a PHA Type 3 file.
    #
    self.assertEqual(pha.name, bpha.name)
    self.assertEqual(arf.name, barf.name)
    self.assertEqual(rmf.name, brmf.name)
def validate_pha(idval):
    """Check that the PHA dataset in id=idval is as expected.
    """

    assert ui.list_data_ids() == [idval]

    pha = ui.get_data(idval)
    assert isinstance(pha, DataPHA)

    arf = ui.get_arf(idval)
    assert isinstance(arf, ARF1D)

    rmf = ui.get_rmf(idval)
    assert isinstance(rmf, RMF1D)

    bpha = ui.get_bkg(idval, bkg_id=1)
    assert isinstance(bpha, DataPHA)

    barf = ui.get_arf(idval, bkg_id=1)
    assert isinstance(barf, ARF1D)

    brmf = ui.get_rmf(idval, bkg_id=1)
    assert isinstance(brmf, RMF1D)

    # normally the background data set would have a different name,
    # but this is a PHA Type 3 file.
    #
    assert pha.name == bpha.name
    assert arf.name == barf.name
    assert rmf.name == brmf.name
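# Hedged usage sketch for validate_pha: load a PHA "type 3" file (where
# the source, background, and responses are all read from the one file,
# so the name-equality checks hold) and validate the session contents.
# The file name below is a placeholder, not a real test-data file.
def test_pha3_contents(make_data_path, clean_astro_ui):
    ui.load_pha('p3', make_data_path('source_type3.pha'))  # hypothetical file
    validate_pha('p3')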
def test_dataspace1d_data1dint(clean_astro_ui):
    """Explicitly test dataspace1d for Data1DInt"""

    assert ui.list_data_ids() == []

    ui.dataspace1d(20, 30, step=2.5, id='x', dstype=ui.Data1DInt)

    assert ui.list_data_ids() == ['x']
    assert ui.get_data('x').name == 'dataspace1d'

    grid = ui.get_indep('x')
    assert len(grid) == 2

    expected = numpy.asarray([20, 22.5, 25, 27.5, 30.0])
    assert grid[0] == pytest.approx(expected[:-1])
    assert grid[1] == pytest.approx(expected[1:])

    y = ui.get_dep('x')
    assert y == pytest.approx(numpy.zeros(4))
def test_load_data(loader, make_data_path, clean_astro_ui, caplog):
    """Ensure that loading a single file to a non-integer id works.

    This is just to make sure that the support for PHA2 files in
    both load_data and load_pha does not change the single-file
    case.
    """

    infile = make_data_path('3c273.pi')
    bgfile = make_data_path('3c273_bg.pi')
    arf = make_data_path('3c273.arf')
    rmf = make_data_path('3c273.rmf')

    assert ui.list_data_ids() == []

    with SherpaVerbosity('INFO'):
        loader('foo', infile)

    assert ui.list_data_ids() == ['foo']

    msg1 = f"systematic errors were not found in file '{infile}'"
    msg2 = f"statistical errors were found in file '{infile}' \n" + \
        "but not used; to use them, re-read with use_errors=True"
    msg3 = f"read ARF file {arf}"
    msg4 = f"read RMF file {rmf}"

    msg5 = f"systematic errors were not found in file '{bgfile}'"
    msg6 = f"statistical errors were found in file '{bgfile}' \n" + \
        "but not used; to use them, re-read with use_errors=True"
    msg7 = f"read background file {bgfile}"

    assert caplog.record_tuples[0] == ('sherpa.astro.io', logging.WARNING, msg1)
    assert caplog.record_tuples[1] == ('sherpa.astro.io', logging.INFO, msg2)
    assert caplog.record_tuples[2] == ('sherpa.astro.io', logging.INFO, msg3)
    assert caplog.record_tuples[3] == ('sherpa.astro.io', logging.INFO, msg4)
    assert caplog.record_tuples[4] == ('sherpa.astro.io', logging.WARNING, msg5)
    assert caplog.record_tuples[5] == ('sherpa.astro.io', logging.INFO, msg6)
    assert caplog.record_tuples[6] == ('sherpa.astro.io', logging.INFO, msg7)

    assert len(caplog.records) == 7
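# A short sketch of the SherpaVerbosity helper used above (assumed to be
# sherpa.utils.logging.SherpaVerbosity): it temporarily overrides the
# Sherpa logging level inside the with block, which is why the test wraps
# the load call to guarantee the INFO messages reach caplog.
from sherpa.utils.logging import SherpaVerbosity

with SherpaVerbosity('WARN'):
    ui.load_pha('quiet', '3c273.pi')  # only WARNING and above logged here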
def load_stack_data(obsid, detid, spectra_path):
    ds = dsmod.DataStack()

    # Create stack file for existing spectra in the observation
    with NamedTemporaryFile("w", dir=".") as temp:
        stack_tempfile = _write_spec_files(temp, obsid, detid, spectra_path)
        ds.load_pha(f"@{stack_tempfile}", use_errors=True)

    ids = shp.list_data_ids()
    for id in ids:
        dsmod.ignore_bad(id=id)

    return ds, ids
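# Hypothetical usage of load_stack_data (the obsid/detid values and the
# spectra path are placeholders; _write_spec_files is the project-internal
# helper the function already assumes). DataStack.set_source and
# DataStack.fit are part of sherpa.astro.datastack.
ds, ids = load_stack_data(obsid=1234, detid=0, spectra_path="spectra/")
ds.set_source("powlaw1d.pl")  # one model expression shared by every dataset
ds.fit()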
def wfit(dataids=None):
    listids = ()
    if dataids is None:
        listids = sau.list_data_ids()
    else:
        listids = dataids

    wstat = w_statistic(listids)
    sau.load_user_stat("mystat", wstat, wstat.CATstat_err_LV)
    sau.set_stat(mystat)
    sau.set_method("neldermead")  # set_method("moncar")
    sau.set_conf_opt("max_rstat", 1000)
    # We don't use a specific maximum reduced statistic value
    # since we don't expect the cstat to be anywhere near the
    # large number limit
    sau.fit(*listids)
    sau.conf()
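# Hedged usage sketch: wfit depends on w_statistic and its CATstat_err_LV
# error estimator from the surrounding module, and on load_user_stat
# injecting the `mystat` name into the session namespace before set_stat
# is called.
wfit()            # joint fit of every dataset in sau.list_data_ids()
wfit([1, 2, 3])   # or restrict the fit to explicit identifiers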
def test_ui_filter_ascii_with_id(setup_files):
    ui.load_filter(1, setup_files.filter_single_int_ascii)
    assert ui.list_data_ids() == [1]
    f = ui.get_filter()
    assert f == '2.0000,4.0000,6.0000,8.0000,10.0000:250.0000,751.0000:1000.0000'
def test_load_pha2_compare_meg_order1(make_data_path):
    """Do we read in the MEG +/-1 orders?"""

    # The MEG -1 order is dataset 9
    # The MEG +1 order is dataset 10
    #
    pha2file = make_data_path('3c120_pha2')
    meg_p1file = make_data_path('3c120_meg_1.pha')
    meg_m1file = make_data_path('3c120_meg_-1.pha')

    ui.load_pha('meg_p1', meg_p1file)
    ui.load_pha('meg_m1', meg_m1file)

    orig_ids = set(ui.list_data_ids())
    assert 'meg_p1' in orig_ids
    assert 'meg_m1' in orig_ids

    ui.load_pha(pha2file)

    for n, lbl in zip([9, 10], ["-1", "1"]):
        h = '3c120_meg_{}'.format(lbl)
        ui.load_arf(n, make_data_path(h + '.arf'))
        ui.load_rmf(n, make_data_path(h + '.rmf'))

    # check that loading the pha2 file doesn't overwrite existing
    # data
    new_ids = set(ui.list_data_ids())

    for i in range(1, 13):
        orig_ids.add(i)

    assert orig_ids == new_ids

    # Check that the same model gives the same statistic
    # value; this should check that the data and response are
    # read in, that grouping and filtering work, and that
    # model evaluation is the same, without having to
    # check these steps individually.
    #
    # The model is not meant to be physically meaningful,
    # just one that reasonably represents the data and
    # can be evaluated without requiring XSPEC.
    #
    pmdl = ui.create_model_component('powlaw1d', 'pmdl')
    pmdl.gamma = 0.318
    pmdl.ampl = 2.52e-3

    ncts = 20
    for i in [9, 10, "meg_m1", "meg_p1"]:
        ui.set_analysis(i, 'wave')
        ui.group_counts(i, ncts)
        ui.notice_id(i, 2, 12)
        ui.set_source(i, pmdl)

    ui.set_stat('chi2datavar')
    s9 = ui.calc_stat(9)
    s10 = ui.calc_stat(10)
    sm1 = ui.calc_stat('meg_m1')
    sp1 = ui.calc_stat('meg_p1')

    # Since these should be the same, we use an equality test
    # rather than approximation. At least until it becomes
    # a problem.
    #
    assert s9 == sm1
    assert s10 == sp1

    # The values were calculated using CIAO 4.9, Linux64, with
    # Python 3.5.
    #
    assert s9 == pytest.approx(1005.4378559390879)
    assert s10 == pytest.approx(1119.980439489647)
def test_load_pha2(loader, id0, ids, make_data_path, caplog, clean_astro_ui):
    """Basic test that a pha2 file can be read in."""

    basename = '3c120_pha2'

    orig_ids = ui.list_data_ids()
    assert orig_ids == []

    # The file is stored gzip-encoded
    infile = make_data_path(basename)

    if id0 is None:
        loader(infile)
    else:
        loader(id0, infile)

    pha_ids = ui.list_data_ids()
    assert len(pha_ids) == 12

    # list_data_ids doesn't guarantee an order
    # Do an explicit check, rather than via a set (testing
    # all at once) to make it easier to see what is missing
    # (if any)
    #
    for i in ids:
        assert i in pha_ids

        d = ui.get_data(i)
        validate_pha(d, bkg=True)

        # There is no indication of what "part" this data set
        # represents in the file name
        #
        assert d.name == infile

        b = ui.get_bkg(i, bkg_id=1)
        validate_pha(b, bkg=False)
        assert b.name == infile

        b = ui.get_bkg(i, bkg_id=2)
        validate_pha(b, bkg=False)
        assert b.name == infile

    # Test Log messages
    msg_one = "systematic errors were not found in file '{}'".format(infile)

    # Editors can remove trailing spaces from lines, so split into
    # separate lines so the space after the file name is included.
    # Perhaps this space should be removed from the warning message?
    #
    msg_two = "statistical errors were found in file '{}' \n".format(infile) + \
        "but not used; to use them, re-read with use_errors=True"

    msg_three = "read background_up into a dataset from file {}".format(infile)
    msg_four = "read background_down into a dataset from file {}".format(infile)

    msg_five = "Multiple data sets have been input: " + \
        "{}-{}".format(ids[0], ids[11])

    assert caplog.record_tuples == [
        ('sherpa.astro.io', logging.WARNING, msg_one),
        ('sherpa.astro.io', logging.INFO, msg_two),
        ('sherpa.astro.io', logging.INFO, msg_three),
        ('sherpa.astro.io', logging.INFO, msg_four),
        ('sherpa.astro.ui.utils', logging.INFO, msg_five),
    ]
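# The (loader, id0, ids) arguments above imply pytest parametrization
# along these lines; this is a hedged sketch, and the decorators in the
# real test module may differ:
#
# @pytest.mark.parametrize("loader", [ui.load_pha, ui.load_data])
# @pytest.mark.parametrize("id0,ids",
#                          [(None, list(range(1, 13))),
#                           (1, list(range(1, 13)))])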