def clean_astro_ui():
    """Call sherpa.astro.ui.clean before AND after the test runs.

    The XSPEC state (when XSPEC support is available) is saved on
    entry and restored on exit.

    See Also
    --------
    clean_ui

    Notes
    -----
    It does NOT change the logging level; perhaps it should, but the
    screen output is useful for debugging at this time.
    """
    from sherpa.astro import ui

    saved_xspec_state = xspec.get_xsstate() if has_xspec else None

    ui.clean()
    yield
    ui.clean()

    if saved_xspec_state is not None:
        xspec.set_xsstate(saved_xspec_state)
def check_integrals():
    """Check that Sherpa normed models integrate to 1.

    Each model's total flux is represented by its ``ampl`` parameter
    (42 here), so summing the model image over all pixels should
    recover that value.
    """
    from sherpa.astro import ui
    from sherpa.astro.ui import normgauss2d
    from models import normdisk2d, normshell2d

    ui.clean()

    g = normgauss2d('g')
    g.xpos, g.ypos, g.ampl, g.fwhm = 100, 100, 42, 5
    d = normdisk2d('d')
    d.xpos, d.ypos, d.ampl, d.r0 = 100, 100, 42, 50
    s = normshell2d('s')
    s.xpos, s.ypos, s.ampl, s.r0, s.width = 100, 100, 42, 30, 20

    models = [g, d, s]

    ui.dataspace2d((200, 200))
    for model in models:
        ui.set_model(model)
        # In sherpa normed model values are flux per pixel area.
        # So to get the total flux (represented by the `ampl` parameter)
        # one can simply sum over all pixels, because a pixel has area 1 pix^2.
        # :-)
        integral = ui.get_model_image().y.sum()
        # %-formatting keeps the output identical under Python 2 and 3
        # (the original used a Python-2-only print statement).
        print("%s %s" % (model.name, integral))
def tearDown(self):
    """Reset the Sherpa session and restore the saved logging level."""
    ui.clean()
    try:
        logger.setLevel(self._old_logger_level)
    except AttributeError:
        # setUp may not have run far enough to record the old level.
        pass
def test_xspecvar_no_grouping_no_bg_comparison_xspec(make_data_path, l, h, ndp, ndof, statval):
    """Compare chi2xspecvar values for a data set to XSPEC.

    The data set has no background. See test_cstat_comparison_xspec.
    Note that at present Sherpa and XSPEC treat bins with 0 values in
    them differently: see
    https://github.com/sherpa/sherpa/issues/356
    so for this test all bins are forced to have at least one count
    in them (5 is added per source channel; there is no background
    here, so no adjustment is needed for it).

    The XSPEC version used was 12.9.0o.
    """
    dset = create_xspec_comparison_dataset(make_data_path,
                                           keep_background=False)

    # Lazy, so add it to "bad" channels too
    dset.counts += 5

    ui.clean()
    ui.set_data(dset)
    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.ampl', 5e-4)

    ui.set_stat('chi2xspecvar')
    ui.set_analysis('energy')

    validate_xspec_result(l, h, ndp, ndof, statval)
    ui.clean()
def setUp(self):
    """Reset the datastack/Sherpa state and silence the logger."""
    datastack.clear_stack()
    ui.clean()
    datastack.set_template_id("__ID")

    # Directory containing this test module.
    self._this_dir = os.path.dirname(sys.modules[self.__module__].__file__)

    self.loggingLevel = logger.getEffectiveLevel()
    logger.setLevel(logging.ERROR)
def setup_model(make_data_path):
    """Set up a model that is reasonably close to the data.

    Returns
    -------
    expected : dict
        The expected statistic values for various filters.
    """
    ui.clean()
    ui.load_pha(make_data_path('q1127_src1_grp30.pi'))
    ui.subtract()
    ui.set_stat('chi2datavar')
    ui.set_source(ui.powlaw1d.pl)

    pl = ui.get_model_component('pl')
    pl.ampl = 5.28e-4
    pl.gamma = 1.04

    # These statistic values were created using CIAO 4.9 on a
    # Ubuntu machine. The quality=2 values are for high energies
    # (above ~ 10 keV or so), and so a filter of 0.5-8.0 keV should
    # give the same answer with or without ignore_bad.
    #
    return {'all': 2716.7086246284807,
            'bad': 2716.682482792285,
            '0.5-8.0': 1127.7165108405597}
def setUp(self):
    """Clear the datastack and Sherpa state and quieten the logger."""
    datastack.clear_stack()
    datastack.set_template_id("__ID")
    ui.clean()
    self.ds = datastack.DataStack()

    self.loggingLevel = logger.getEffectiveLevel()
    logger.setLevel(logging.ERROR)
def test_cstat_comparison_xspec(make_data_path, l, h, ndp, ndof, statval):
    """Compare CSTAT values for a data set to XSPEC.

    This checks that the "UI layer" works, although ideally there
    should be a file that can be read in rather than having to
    manipulate it (the advantage here is that it means there is no
    messing around with adding a file to the test data set).

    The XSPEC version used was 12.9.0o.
    """
    data = create_xspec_comparison_dataset(make_data_path,
                                           keep_background=False)

    ui.clean()
    ui.set_data(data)

    # use powlaw1d rather than xspowerlaw so do not need XSPEC
    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.ampl', 1e-4)

    ui.set_stat('cstat')
    ui.set_analysis('channel')

    validate_xspec_result(l, h, ndp, ndof, statval)
    ui.clean()
def setup(request):
    """Clean the Sherpa session before the test and register a
    finalizer so it is cleaned again afterwards."""
    ui.clean()

    def cleanup():
        ui.clean()

    request.addfinalizer(cleanup)
def ds_setup_object(): """Setup and teardown code for each test. Could try and be clever and re-use ds_setup here, but just repeat it to be simpler. """ # Setup # ds = datastack.DataStack() datastack.clear_stack() ui.clean() loggingLevel = logger.getEffectiveLevel() logger.setLevel(logging.ERROR) datastack.set_stack_verbosity(logging.ERROR) datastack.set_template_id("__ID") # Run test, returning the stack object # yield ds # Cleanup # ds.clear_stack() datastack.clear_stack() ui.clean() datastack.set_template_id("__ID") logger.setLevel(loggingLevel)
def test_xspecvar_no_grouping_comparison_xspec(make_data_path, l, h, ndp, ndof, statval):
    """Compare chi2xspecvar values for a data set to XSPEC.

    The data set has a background. See
    test_xspecvar_no_grouping_no_bg_comparison_xspec

    The XSPEC version used was 12.9.0o.
    """
    data = create_xspec_comparison_dataset(make_data_path,
                                           keep_background=True)

    # Lazy, so add it to "bad" channels too
    data.counts += 5
    data.get_background().counts += 3

    ui.clean()
    ui.set_data(data)
    ui.subtract()

    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.ampl', 5e-4)

    ui.set_stat('chi2xspecvar')
    ui.set_analysis('energy')

    validate_xspec_result(l, h, ndp, ndof, statval)
    ui.clean()
def _test_can_evaluate_thcompc():
    """Does this redistribute some emission?

    It does not test that the result is actually meaningful, but
    does check it's done something.
    """
    ui.clean()

    ui.dataspace1d(0.1, 10, 0.01, id='unconv')
    ui.dataspace1d(0.1, 10, 0.01, id='conv')

    kernel = ui.create_model_component('xsthcompc', 'conv')
    ui.set_source('conv', kernel(ui.xsgaussian.m1))

    gauss = ui.get_model_component('m1')
    ui.set_source('unconv', gauss)
    gauss.lineE = 5.0
    gauss.Sigma = 1.0

    y_unconv = ui.get_model_plot('unconv').y.copy()
    y_conv = ui.get_model_plot('conv').y.copy()

    assert (y_unconv > 0).any()
    assert (y_conv > 0).any()

    # not guaranteed the peak will be reduced (depends on what
    # the convolution is doing), and I would hope that flux
    # is at best conserved (ie not created), and that we don't
    # have to worry about numerical artifacts here.
    #
    assert y_unconv.max() > y_conv.max()
    assert y_unconv.sum() >= y_conv.sum()
def setUp(self):
    """Quieten the logger, build the test arrays, and reset Sherpa."""
    self._old_level = logger.getEffectiveLevel()
    logger.setLevel(logging.ERROR)

    self.x = np.asarray([1, 2, 3])
    self.x2 = self.x + 1
    self.y = np.asarray([1, 2, 3])

    ui.clean()
def cleanup_astro_session(request):
    """Ensure sherpa.astro.ui is cleaned before and after the test."""
    ui.clean()

    def _cleanup():
        ui.clean()

    request.addfinalizer(_cleanup)
def setUp(self):
    """Silence file-I/O warnings and reset the Sherpa session."""
    # hide warning messages from file I/O
    self._old_logger_level = logger.level
    logger.setLevel(logging.ERROR)

    ui.clean()

    self.head = self.make_path('acisf01575_001N001_r0085')
def tearDown(self):
    """Clear the datastack/Sherpa state and remove temporary files.

    The logging level is restored in a ``finally`` block so that a
    failure (for example a temporary file that was never created)
    cannot leak the ERROR level into later tests.
    """
    try:
        datastack.clear_stack()
        ui.clean()
        datastack.set_template_id("__ID")

        # Remove the temporary files created by the test; any failure
        # here still propagates, but no longer skips the restore below.
        os.remove(self.lisname)
        os.remove(self.name1)
        os.remove(self.name2)

        datastack.set_stack_verbose(False)
    finally:
        logger.setLevel(self.loggingLevel)
def run_thread(self, name, scriptname='fit.py'):
    """Run a Sherpa script in its thread directory.

    Parameters
    ----------
    name : str
        The thread sub-directory (under 'ciao4.3').
    scriptname : str, optional
        The script to execute; the namespace it creates is stored
        in ``self.locals``.
    """
    ui.clean()
    ui.set_model_autoassign_func(self.assign_model)
    self.locals = {}
    cwd = os.getcwd()
    os.chdir(self.make_path('ciao4.3', name))
    try:
        # execfile() does not exist in Python 3; compile + exec is
        # equivalent and the `with` block also closes the file promptly.
        with open(scriptname) as fh:
            exec(compile(fh.read(), scriptname, 'exec'), {}, self.locals)
    finally:
        os.chdir(cwd)
def set_sherpa_env(stat=None):
    """Reset Sherpa and the datastack, then apply common fit options.

    Parameters
    ----------
    stat : str or None, optional
        When given, the statistic to select via ``set_stat``.
    """
    shp.clean()
    dsmod.clean()

    # Same sigma/numcores settings for both confidence and projection.
    for setter in (shp.set_conf_opt, shp.set_proj_opt):
        setter("sigma", 1.6)
        setter("numcores", 4)

    if stat:
        shp.set_stat(stat)
def tearDown(self):
    """Reset the Sherpa session.

    There's an issue in CIAO 4.8 with delete_model and
    delete_model_component, so just call clean rather than::

        for idval in [1, "tst"]:
            ui.delete_model(id=idval)
            ui.delete_model_component("pl{}".format(idval))
            ui.delete_data(id=idval)
        ui.delete_data(id="not-used")
    """
    ui.clean()
def test_load_xstable_model_fails_with_dir(tmpdir):
    """Check that the function fails with invalid input: directory

    The pytest ``tmpdir`` fixture supplies the directory. (The
    original version referenced an undefined ``tmpdir`` name, so the
    test raised a NameError instead of exercising the load call.)
    """
    ui.clean()
    assert ui.list_model_components() == []

    with pytest.raises(IOError):
        ui.load_xstable_model('tmpdir', str(tmpdir))

    # The failed load must not leave a partial component behind.
    assert ui.list_model_components() == []
def test_create_thcompc():
    """Can we create a thcompc instance?"""
    ui.clean()

    # equivalent to: mdl = ui.xsthcompc.conv
    mdl = ui.create_model_component('xsthcompc', 'conv')

    assert isinstance(mdl, XSConvolutionKernel)
    assert mdl.type == 'xsthcompc'
    assert mdl.name == 'xsthcompc.conv'

    pars = mdl.pars
    assert len(pars) == 3
    assert pars[0].name == 'gamma_tau'
    assert pars[1].units == 'keV'
    assert pars[1].val == pytest.approx(50)
def test_load_table_model_fails_with_dev_null():
    """Check that load_table_model fails with invalid input: null device

    This simulates an empty file. ``os.devnull`` is used instead of a
    hard-coded '/dev/null' so the test is not tied to POSIX naming
    (the value is identical on POSIX systems).
    """
    ui.clean()
    assert ui.list_model_components() == []

    # The error depends on the load function
    with pytest.raises(ValueError):
        ui.load_table_model('devnull', os.devnull)

    # The failed load must not leave a partial component behind.
    assert ui.list_model_components() == []
def test_load_xstable_model_fails_with_dir(tmp_path):
    """Check that the function fails with invalid input: directory

    The temporary directory is used for this.
    """
    baddir = tmp_path / 'load_xstable_model'
    baddir.mkdir()

    ui.clean()
    assert ui.list_model_components() == []

    with pytest.raises(IOError):
        ui.load_xstable_model('tmpdir', str(baddir))

    assert ui.list_model_components() == []
def clean():
    """Remove the models and data from the data stack and Sherpa.

    This function clears out the models and data set up in the data
    stack and in the Sherpa session.

    See Also
    --------
    clear_models, clear_stack
    sherpa.astro.ui.clean
    """
    DATASTACK.clear_models()
    DATASTACK.clear_stack()
    ui.clean()
    logger.warning("clean() will invalidate any existing DataStack "
                   "instances by removing all the datasets from the "
                   "Sherpa session")
def run_hspec_fit(self, model, thres_low, thres_high):
    """Run the gammapy.hspec fit

    Parameters
    ----------
    model : str
        Sherpa model
    thres_low : `~gammapy.spectrum.Energy`
        Lower threshold of the spectral fit
    thres_high : `~gammapy.spectrum.Energy`
        Upper threshold of the spectral fit
    """
    log.info("Starting HSPEC")
    import sherpa.astro.ui as sau
    from ..hspec import wstat
    from sherpa.models import PowLaw1D

    # Only a power-law model is supported at the moment.
    if model == 'PL':
        p1 = PowLaw1D('p1')
        p1.gamma = 2.2
        p1.ref = 1e9
        p1.ampl = 6e-19
    else:
        raise ValueError('Desired Model is not defined')

    # Fit thresholds converted to keV, the energy unit used by Sherpa here.
    thres = thres_low.to('keV').value
    emax = thres_high.to('keV').value

    sau.freeze(p1.ref)
    sau.set_conf_opt("max_rstat", 100)

    # Load each observation into its own Sherpa dataset, restricted to
    # the requested energy range, all sharing the same source model.
    list_data = []
    for obs in self.observations:
        # Dataset id derived from the PHA file name.
        datid = obs.phafile.parts[-1][7:12]
        sau.load_data(datid, str(obs.phafile))
        sau.notice_id(datid, thres, emax)
        sau.set_source(datid, p1)
        list_data.append(datid)

    # Joint WSTAT fit over all datasets, then covariance-based errors.
    wstat.wfit(list_data)
    sau.covar()
    fit_val = sau.get_covar_results()
    fit_attrs = ('parnames', 'parvals', 'parmins', 'parmaxes')
    fit = dict((attr, getattr(fit_val, attr)) for attr in fit_attrs)
    fit = self.apply_containment(fit)
    # Reset the Sherpa session before storing the result on the object.
    sau.clean()
    self.fit = fit
def test_user_model_stat_docs():
    """
    This test reproduces the documentation shown at:
    http://cxc.harvard.edu/sherpa4.4/statistics/#userstat

    and:
    http://cxc.harvard.edu/sherpa/threads/user_model/

    I tried to be as faithful as possible to the original, although
    the examples in the docs are not completely self-contained, so
    some changes were necessary. I changed the numpy reference, as
    it is imported as `np` here, and added a clean up of the
    environment before doing anything.

    For the model, the difference is that I am not importing the
    function from an external module, plus the dataset is different.
    Also, the stats docs do not perform a fit.
    """
    def my_stat_func(data, model, staterror, syserror=None, weight=None):
        # A simple function to replicate χ2
        fvec = ((data - model) / staterror)**2
        stat = fvec.sum()
        return (stat, fvec)

    def my_staterr_func(data):
        # A simple staterror function
        return np.sqrt(data)

    def myline(pars, x):
        # Straight line: y = m*x + b.
        return pars[0]*x + pars[1]

    x = [1, 2, 3]
    y = [4, 5, 6.01]

    ui.clean()
    ui.load_arrays(1, x, y)
    ui.load_user_stat("mystat", my_stat_func, my_staterr_func)
    # eval() is used, as in the original docs, to pick up the names
    # that load_user_stat/load_user_model inject into the namespace.
    ui.set_stat(eval('mystat'))
    ui.load_user_model(myline, "myl")
    ui.add_user_pars("myl", ["m", "b"])
    ui.set_model(eval('myl'))

    ui.fit()

    assert ui.get_par("myl.m").val == approx(1, abs=0.01)
    assert ui.get_par("myl.b").val == approx(3, abs=0.01)
def clean():
    """Remove the models and data from the data stack and Sherpa.

    Clears out all models and datasets held by the data stack and by
    the underlying Sherpa session.

    See Also
    --------
    clear_models, clear_stack
    sherpa.astro.ui.clean
    """
    msg = ("clean() will invalidate any existing DataStack "
           "instances by removing all the datasets from the "
           "Sherpa session")
    DATASTACK.clear_models()
    DATASTACK.clear_stack()
    ui.clean()
    logger.warning(msg)
def test_create_zkerrbb():
    """Can we create a zkerrbb instance?"""
    ui.clean()

    # equivalent to: mdl = ui.xszkerrbb.zb
    mdl = ui.create_model_component('xszkerrbb', 'zb')

    assert isinstance(mdl, XSAdditiveModel)
    assert mdl.type == 'xszkerrbb'
    assert mdl.name == 'xszkerrbb.zb'

    pars = mdl.pars
    assert len(pars) == 10
    assert pars[0].name == 'eta'
    assert pars[2].units == 'degree'
    assert pars[3].frozen
    assert pars[5].val == pytest.approx(0.01)
    assert pars[8].alwaysfrozen
    assert pars[9].name == 'norm'
def clean_ui():
    """Call sherpa.ui.clean before AND after the test runs.

    See Also
    --------
    clean_astro_ui

    Notes
    -----
    It does NOT change the logging level; perhaps it should, but the
    screen output is useful for debugging at this time.
    """
    from sherpa import ui

    ui.clean()
    yield
    ui.clean()
def image_model_sherpa(exposure, psf, sources, model_image, overwrite):
    """Compute source model image with Sherpa.

    Inputs:

    * Source list (JSON file)
    * PSF (JSON file)
    * Exposure image (FITS file)

    Outputs:

    * Source model flux image (FITS file)
    * Source model excess image (FITS file)
    """
    import sherpa.astro.ui as sau
    from ..image.models.psf import Sherpa
    from ..image.models.utils import read_json

    log.info('Reading exposure: {0}'.format(exposure))
    # Note: We don't really need the exposure as data,
    # but this is a simple way to init the dataspace to the correct shape
    sau.load_data(exposure)
    sau.load_table_model('exposure', exposure)

    log.info('Reading PSF: {0}'.format(psf))
    Sherpa(psf).set()

    log.info('Reading sources: {0}'.format(sources))
    read_json(sources, sau.set_source)

    # Wrap the source model in the PSF and scale by the exposure map.
    name = sau.get_source().name
    full_model = 'exposure * psf({})'.format(name)
    sau.set_full_model(full_model)

    log.info('Computing and writing model_image: {0}'.format(model_image))
    sau.save_model(model_image, clobber=overwrite)
    # Reset the Sherpa session so later calls start from a clean state.
    sau.clean()
    sau.delete_psf()
def test_load_xstable_model_fails_with_text_column(make_data_path):
    """Check that load_xstable_model fails with invalid input: text column

    The first column is text (and an ASCII file) so it is expected
    to fail.
    """
    # Guard against the shared data file having been changed, since
    # it is re-used by this test.
    path = make_data_path('table.txt')
    assert os.path.isfile(path)

    ui.clean()
    assert ui.list_model_components() == []

    # The error depends on the load function.
    with pytest.raises(Exception):
        ui.load_xstable_model('stringcol', path)

    assert ui.list_model_components() == []
def test_wstat_comparison_xspec(make_data_path, l, h, ndp, ndof, statval):
    """Compare WSTAT values for a data set to XSPEC.

    See test_cstat_comparison_xspec.

    The XSPEC version used was 12.9.0o.
    """
    data = create_xspec_comparison_dataset(make_data_path,
                                           keep_background=True)

    ui.clean()
    ui.set_data(data)

    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.ampl', 1e-4)

    ui.set_stat('wstat')
    ui.set_analysis('channel')

    validate_xspec_result(l, h, ndp, ndof, statval)
    ui.clean()
def print_values_sherpa():
    """Print some Sherpa model values that can be used for unit tests.

    Evaluates each normed model along the theta axis and prints
    ``name theta value`` triples.
    """
    from sherpa.astro import ui
    from sherpa.astro.ui import normgauss2d
    from models import normdisk2d, normshell2d

    ui.clean()

    g = normgauss2d('g2')
    g.ampl, g.fwhm = INTEGRAL, GAUSS_FWHM
    d = normdisk2d('d')
    d.ampl, d.r0 = INTEGRAL, DISK_R0
    s = normshell2d('s')
    s.ampl, s.r0, s.width = INTEGRAL, SHELL_R0, SHELL_WIDTH

    models = [g, d, s]

    for model in models:
        for theta in THETAS:
            value = model(0, theta)
            # %-formatting keeps the output identical under Python 2 and 3
            # (the original used a Python-2-only print statement).
            print("%s %s %s" % (model.name, theta, value))
def setUp(self):
    """Load the 3c273 PHA data and attach an approximate model."""
    # defensive programming (one of the tests has been seen to fail
    # when the whole test suite is run without this)
    ui.clean()

    self._old_logger_level = logger.getEffectiveLevel()
    logger.setLevel(logging.ERROR)

    ui.set_stat('wstat')
    ui.load_pha(1, self.make_path('3c273.pi'))
    ui.set_source(1, ui.powlaw1d.pl)

    # The powerlaw slope and normalization are intended to be
    # "a reasonable approximation" to the data, just to make sure
    # that any statistic calculation doesn't blow-up too much.
    #
    ui.set_par("pl.gamma", 1.782)
    ui.set_par("pl.ampl", 1.622e-4)
def setUp(self):
    """Quieten the logger and reset the Sherpa session.

    The previous logging level is saved (matching the other setUp
    methods in this file) so that a tearDown can restore it rather
    than leaving ERROR set for later tests.
    """
    self.img = self.datadir + "/img.fits"
    self.loggingLevel = logger.getEffectiveLevel()
    logger.setLevel(logging.ERROR)
    ui.clean()
def fin():
    # Finalizer: reset the Sherpa session once the test has run.
    ui.clean()
def clean():
    """Clear the datastack models/datasets and the Sherpa session."""
    DATASTACK.clear_models()
    DATASTACK.clear_stack()
    ui.clean()
    logger.warning("clean() will invalidate any existing DataStack "
                   "instances by removing all the datasets from the "
                   "Sherpa session")
def setUp(self):
    """Reset the Sherpa session and record the chi2 thread data file."""
    ui.clean()
    self.data = self.datadir + "/threads/chi2/3c273.pi"
def setUp(self):
    """Save and silence the logging level, then reset Sherpa."""
    self.loggingLevel = logger.getEffectiveLevel()
    logger.setLevel(logging.ERROR)

    ui.clean()
    self.img = self.make_path('img.fits')
def setUp(self):
    """Save and silence the logging level, then reset Sherpa."""
    self._old_logger_level = logger.getEffectiveLevel()
    logger.setLevel(logging.ERROR)

    ui.clean()
    self.data = self.make_path('3c273.pi')
def setUp(self):
    # Start each test from a fresh Sherpa session.
    ui.clean()