def test_dataspace1d_datapha_bkg_nopha(clean_astro_ui):
    """We need a PHA to create a background dataset"""

    # Requesting a background component for a data set that has
    # never been created must fail with an identifier error.
    with pytest.raises(IdentifierErr) as exc:
        ui.dataspace1d(20, 30, step=2.5, id='x', bkg_id=2,
                       dstype=ui.DataPHA)

    emsg = str(exc.value)
    assert emsg == 'data set x has not been set'
def setup_files(make_data_path):
    """Collect the data-file paths used by the tests.

    Returns a simple attribute holder with the resolved file names
    and sets up a default 1-1000 Data1D data space in the session.

    Note: the attributes are assigned on the namedtuple *class*
    object itself (no instance is ever created), so the field list
    mostly serves as documentation of the available names.
    """
    # BUG FIX: the original field list was missing a comma between
    # 'img' and 'singledat', so implicit string concatenation
    # silently produced a single bogus 'imgsingledat' field.
    out = namedtuple('store1', ['ascii', 'fits', 'img',
                                'singledat', 'singletbl',
                                'doubledat', 'doubletbl',
                                'filter_single_int_ascii',
                                'filter_single_int_table',
                                'filter_single_log_table'])
    out.ascii = make_data_path('sim.poisson.1.dat')
    out.fits = make_data_path('1838_rprofile_rmid.fits')
    out.singledat = make_data_path('single.dat')
    out.singletbl = make_data_path('single.fits')
    out.doubledat = make_data_path('double.dat')
    out.doubletbl = make_data_path('double.fits')
    out.img = make_data_path('img.fits')
    out.filter_single_int_ascii = make_data_path(
        'filter_single_integer.dat')
    out.filter_single_int_table = make_data_path(
        'filter_single_integer.fits')
    out.filter_single_log_table = make_data_path(
        'filter_single_logical.fits')

    ui.dataspace1d(1, 1000, dstype=ui.Data1D)
    return out
def _test_can_evaluate_thcompc():
    """Does this redistribute some emission?

    It does not test the result is actually meaningful, but does
    check it's done something
    """
    ui.clean()
    for idval in ['unconv', 'conv']:
        ui.dataspace1d(0.1, 10, 0.01, id=idval)

    conv_mdl = ui.create_model_component('xsthcompc', 'conv')
    ui.set_source('conv', conv_mdl(ui.xsgaussian.m1))

    gauss = ui.get_model_component('m1')
    ui.set_source('unconv', gauss)
    gauss.lineE = 5.0
    gauss.Sigma = 1.0

    y_base = ui.get_model_plot('unconv').y.copy()
    y_conv = ui.get_model_plot('conv').y.copy()

    assert (y_base > 0).any()
    assert (y_conv > 0).any()

    # not guaranteed the peak will be reduced (depends on what
    # the convolution is doing), and I would hope that flux
    # is at best conserved (ie not created), and that we don't
    # have to worry about numerical artifacts here.
    #
    assert y_base.sum() >= y_conv.sum()
def test_dataspace1d_datapha(clean_astro_ui):
    """Explicitly test dataspace1d for DataPHA"""

    assert ui.list_data_ids() == []

    # Note the grid is ignored, other than the number of bins
    ui.dataspace1d(20, 30, step=2.5, id='x', dstype=ui.DataPHA)

    assert ui.list_data_ids() == ['x']
    assert ui.get_data('x').name == 'dataspace1d'

    # A DataPHA data set uses channel numbers starting at 1,
    # whatever grid was requested.
    grid = ui.get_indep('x')
    assert len(grid) == 1
    assert grid[0] == pytest.approx(numpy.arange(1, 6))

    counts = ui.get_dep('x')
    assert counts == pytest.approx(numpy.zeros(5))

    assert ui.get_exposure('x') is None
    assert ui.get_grouping('x') is None
    assert ui.get_quality('x') is None

    assert ui.get_data('x').subtracted is False

    # No background component was created.
    with pytest.raises(IdentifierErr):
        ui.get_bkg('x')
def test_psf_model1d(self):
    """Check the centre of each 1D PSF model kernel.

    On failure the offending model name is printed before the
    exception is re-raised, to ease debugging.
    """
    ui.dataspace1d(1, 10)
    for model in self.models1d:
        try:
            ui.load_psf("psf1d", model + ".mdl")
            ui.set_psf("psf1d")
            mdl = ui.get_model_component("mdl")
            # Use assertTrue rather than the deprecated assert_ alias.
            self.assertTrue((numpy.array(mdl.get_center()) ==
                             numpy.array([4])).all())
        except Exception:
            # Python 3 print function (the original used the
            # Python 2 'print model' statement, a SyntaxError
            # under Python 3).
            print(model)
            raise
def test_psf_model1d(self):
    """Check the centre of each 1D PSF model kernel.

    On failure the offending model name is printed before the
    exception is re-raised, to ease debugging.
    """
    ui.dataspace1d(1, 10)
    for model in self.models1d:
        try:
            ui.load_psf('psf1d', model + '.mdl')
            ui.set_psf('psf1d')
            mdl = ui.get_model_component('mdl')
            self.assertTrue((numpy.array(mdl.get_center()) ==
                             numpy.array([4])).all())
        except Exception:
            # Python 3 print function (the original used the
            # Python 2 'print model' statement, a SyntaxError
            # under Python 3).
            print(model)
            raise
def test_psf_model1d(self):
    """Check the centre of each 1D PSF model kernel.

    On failure the offending model name is printed before the
    exception is re-raised, to ease debugging.
    """
    ui.dataspace1d(1, 10)
    for model in self.models1d:
        try:
            ui.load_psf('psf1d', model + '.mdl')
            ui.set_psf('psf1d')
            mdl = ui.get_model_component('mdl')
            self.assertTrue(
                (numpy.array(mdl.get_center()) ==
                 numpy.array([4])).all())
        except Exception:
            # Python 3 print function (the original used the
            # Python 2 'print model' statement, a SyntaxError
            # under Python 3).
            print(model)
            raise
def test_dataspace1d_datapha_bkg(clean_astro_ui):
    """Explicitly test dataspace1d for DataPHA (background)"""

    # list_bkg_ids will error out until the dataset exists
    assert ui.list_data_ids() == []

    # The grid range and step size are irrelevant here since
    # numbins has been given.
    ui.dataspace1d(20, 30, step=2.5, numbins=10, id='x',
                   dstype=ui.DataPHA)

    assert ui.list_data_ids() == ['x']
    assert ui.list_bkg_ids('x') == []

    ui.dataspace1d(20, 30, step=2.5, numbins=10, id='x',
                   bkg_id=2, dstype=ui.DataPHA)

    assert ui.list_data_ids() == ['x']
    assert ui.list_bkg_ids('x') == [2]

    assert ui.get_data('x').name == 'dataspace1d'

    # I've explicitly not chosen the default background identifier
    with pytest.raises(IdentifierErr):
        ui.get_bkg('x')

    assert ui.get_bkg('x', 2).name == 'bkg_dataspace1d'

    # The background grid is channel numbers 1 to numbins.
    grid = ui.get_indep('x', bkg_id=2)
    assert len(grid) == 1
    assert grid[0] == pytest.approx(numpy.arange(1, 11))

    counts = ui.get_dep('x', bkg_id=2)
    assert counts == pytest.approx(numpy.zeros(10))

    assert ui.get_exposure('x', bkg_id=2) is None
    assert ui.get_grouping('x', bkg_id=2) is None
    assert ui.get_quality('x', bkg_id=2) is None

    assert ui.get_bkg('x', bkg_id=2).subtracted is False

    # check we can subtract the dataset; as the data is all zeros
    # we don't bother checking the result.
    #
    ui.subtract('x')
def setUp(self):
    """Record the data-file locations and create a default data set."""
    dd = self.datadir
    self.ascii = dd + "/threads/ascii_table/sim.poisson.1.dat"
    self.fits = dd + "/1838_rprofile_rmid.fits"
    self.singledat = dd + "/single.dat"
    self.singletbl = dd + "/single.fits"
    self.doubledat = dd + "/double.dat"
    self.doubletbl = dd + "/double.fits"
    self.img = dd + "/img.fits"
    self.filter_single_int_ascii = dd + "/filter_single_integer.dat"
    self.filter_single_int_table = dd + "/filter_single_integer.fits"
    self.filter_single_log_table = dd + "/filter_single_logical.fits"

    # identity function used by tests that need a no-op transform
    self.func = lambda x: x

    ui.dataspace1d(1, 1000, dstype=ui.Data1D)
def setUp(self):
    """Resolve the data-file paths and create a default data set."""
    files = {'ascii': 'threads/ascii_table/sim.poisson.1.dat',
             'fits': '1838_rprofile_rmid.fits',
             'singledat': 'single.dat',
             'singletbl': 'single.fits',
             'doubledat': 'double.dat',
             'doubletbl': 'double.fits',
             'img': 'img.fits',
             'filter_single_int_ascii': 'filter_single_integer.dat',
             'filter_single_int_table': 'filter_single_integer.fits',
             'filter_single_log_table': 'filter_single_logical.fits'}
    for attr, fname in files.items():
        setattr(self, attr, self.make_path(fname))

    # identity function used by tests that need a no-op transform
    self.func = lambda x: x

    ui.dataspace1d(1, 1000, dstype=ui.Data1D)
def setUp(self):
    """Record the data-file locations and create a default data set."""
    files = {'ascii': 'threads/ascii_table/sim.poisson.1.dat',
             'fits': '1838_rprofile_rmid.fits',
             'singledat': 'single.dat',
             'singletbl': 'single.fits',
             'doubledat': 'double.dat',
             'doubletbl': 'double.fits',
             'img': 'img.fits',
             'filter_single_int_ascii': 'filter_single_integer.dat',
             'filter_single_int_table': 'filter_single_integer.fits',
             'filter_single_log_table': 'filter_single_logical.fits'}
    for attr, fname in files.items():
        setattr(self, attr, self.datadir + '/' + fname)

    # identity function used by tests that need a no-op transform
    self.func = lambda x: x

    ui.dataspace1d(1, 1000, dstype=ui.Data1D)
def test_ui_regrid1d_non_overlapping_not_allowed():
    """Integrated data space must not overlap"""

    ui.dataspace1d(1, 100, 2, dstype=Data1DInt)
    box = Box1D()
    ui.set_model(box)
    box.xlow = 10
    box.xhi = 80
    box.ampl.max = 100

    # The lo/hi arrays are offset by one unit, so the requested
    # grid does not form a consistent set of integrated bins.
    lo = np.linspace(1, 100, 600)
    hi = np.linspace(2, 101, 600)

    with pytest.raises(ModelErr) as excinfo:
        box.regrid(lo, hi)

    assert ModelErr.dict['needsint'] in str(excinfo.value)
def test_dataspace1d_data1dint(clean_astro_ui):
    """Explicitly test dataspace1d for Data1DInt"""

    assert ui.list_data_ids() == []

    ui.dataspace1d(20, 30, step=2.5, id='x', dstype=ui.Data1DInt)

    assert ui.list_data_ids() == ['x']
    assert ui.get_data('x').name == 'dataspace1d'

    # The bin edges run from 20 to 30 in steps of 2.5, giving
    # four integrated bins.
    edges = numpy.arange(20, 32.5, 2.5)
    grid = ui.get_indep('x')
    assert len(grid) == 2
    assert grid[0] == pytest.approx(edges[:-1])
    assert grid[1] == pytest.approx(edges[1:])

    counts = ui.get_dep('x')
    assert counts == pytest.approx(numpy.zeros(4))
def setUp(self):
    """Quieten the logger, resolve data paths, make a default data set."""
    # remember the level so tearDown can restore it
    self._old_logger_level = logger.getEffectiveLevel()
    logger.setLevel(logging.ERROR)

    files = {'ascii': 'sim.poisson.1.dat',
             'fits': '1838_rprofile_rmid.fits',
             'singledat': 'single.dat',
             'singletbl': 'single.fits',
             'doubledat': 'double.dat',
             'doubletbl': 'double.fits',
             'img': 'img.fits',
             'filter_single_int_ascii': 'filter_single_integer.dat',
             'filter_single_int_table': 'filter_single_integer.fits',
             'filter_single_log_table': 'filter_single_logical.fits'}
    for attr, fname in files.items():
        setattr(self, attr, self.make_path(fname))

    # identity function used by tests that need a no-op transform
    self.func = lambda x: x

    ui.dataspace1d(1, 1000, dstype=ui.Data1D)
def setUp(self):
    """Create the data sets, power-law sources, and ARF arrays."""
    ui.dataspace1d(0.01, 11, 0.01, id=1)
    ui.dataspace1d(2, 5, 0.1, id="tst")
    ui.dataspace1d(0.1, 1, 0.1, id="not-used")

    # Hard-coded grid sizes, rather than querying each data set
    # for its xlo size, to keep the expected values explicit.
    self.nbins = {1: 1099, 'tst': 30}

    ui.set_source(1, ui.powlaw1d.pl1)
    ui.set_source("tst", ui.powlaw1d.pltst)

    # when gamma=0, weight is the same for each bin (when equally
    # spaced)
    pl1.gamma = 0.0
    pl1.ampl = 1.2
    pltst.gamma = -1.0
    pltst.ampl = 2.1

    egrid = np.arange(0.5, 5, 0.02)
    self.arflo = egrid[:-1]
    self.arfhi = egrid[1:]
    # constant response, and one peaked at 3 keV
    self.flatarf = self.arflo * 0 + 10.1
    emid = (self.arflo + self.arfhi) / 2.0
    self.arf = 10 - (3.0 - emid)**2
def test_can_evaluate_additive_models(mname):
    """Does this create some emission?

    It does not test the result is actually meaningful, and relies
    on the slightly-more-involved tests in test_xspeclmodels.py
    for the model evaluation.
    """
    ui.clean()
    addmdl = ui.create_model_component(mname, 'm1')

    # test out a combined model; not really needed but this is
    # closer to how people will be using it.
    #
    ui.dataspace1d(0.1, 10, 0.01)
    ui.set_source(ui.xsphabs.m2 * addmdl)

    # rely on test_xspeclmodels.py for a more-complete test of
    # the model calling
    y = ui.get_model_plot().y.copy()

    # Assume there is some emission
    assert (y > 0).any()
def test_psf_model1d(model, center, clean_astro_ui):
    """Check the reported centre of a 1D PSF kernel model."""
    ui.dataspace1d(1, 10)
    ui.load_psf('psf1d', model + '.mdl')
    ui.set_psf('psf1d')

    kernel = ui.get_model_component('mdl')
    assert kernel.get_center() == (center, )
def setUp(self):
    """Create the data sets, sources, and ARFs used by the tests.

    Several 1D data spaces are created; the 'arf1' and 'flatarf'
    sets are DataPHA so they can be given ARFs (one peaked, one
    flat).
    """
    ui.dataspace1d(0.2, 10, 0.01, id=1)
    ui.dataspace1d(2, 5, 0.1, id="tst")
    ui.dataspace1d(0.1, 1, 0.1, id="not-used")
    ui.dataspace1d(0.1, 1, 0.1, id="no-arf")
    ui.dataspace1d(0.1, 11, 0.01, id='arf1', dstype=DataPHA)
    ui.dataspace1d(0.2, 10, 0.01, id='flatarf', dstype=DataPHA)

    # self.nbins = {}
    # for idval in [1, 'tst']:
    #     self.nbins[idval] = ui.get_data(1).xlo.size

    # Expected number of bins per data set (hard-coded rather than
    # derived from the data, per the commented-out code above).
    self.nbins = {1: 980, 'tst': 30, 'arf1': 1090, 'arf1-arf': 489}

    # Expected (elow, ehigh, step) grid for each data set.
    self.grid = {
        1: (0.2, 10, 0.01),
        'tst': (2.0, 5.0, 0.1),
        'arf1': (0.1, 11, 0.01),
        'arf1-arf': (0.2, 9.98, 0.02)  # note: ehigh is not 10.0
    }

    ui.set_source(1, ui.powlaw1d.pl1)
    ui.set_source("tst", ui.powlaw1d.pltst)
    # pl1/pltst are referenced as bare names below; they are made
    # available by the ui.powlaw1d.<name> accesses above
    # (presumably Sherpa's model auto-assignment — confirm).
    ui.set_source('no-arf', pl1)
    ui.set_source('arf1', pltst)
    ui.set_source('flatarf', pltst)
    ui.set_source('no-arf-flat', ui.const1d.c1)

    pl1.gamma = 0.0
    pl1.ampl = 1.2
    pltst.gamma = -1.0
    pltst.ampl = 2.1

    # Build the ARFs on a grid matching data set 1: one flat
    # response and one peaked at 4.5 keV.
    arfgrid = np.arange(0.2, 10, 0.02)
    arflo = arfgrid[:-1]
    arfhi = arfgrid[1:]
    amid = (arflo + arfhi) / 2.0

    flatarf = DataARF('flat', energ_lo=arflo, energ_hi=arfhi,
                      specresp=arflo * 0 + 10.1)
    arf = DataARF('arf', energ_lo=arflo, energ_hi=arfhi,
                  specresp=20 - (4.5 - amid)**2)
    ui.set_arf('arf1', arf)
    ui.set_arf('flatarf', flatarf)