Ejemplo n.º 1
0
 def setUp(self):
     """Reset global datastack/ui state and silence logging before each test."""
     # Clear any datasets left over from a previous test.
     datastack.clear_stack()
     datastack.set_template_id("__ID")
     ui.clean()
     self.ds = datastack.DataStack()
     # Remember the current level so it can be restored after the test.
     self.loggingLevel = logger.getEffectiveLevel()
     logger.setLevel(logging.ERROR)
Ejemplo n.º 2
0
def ds_setup_object():
    """Per-test fixture: build a DataStack, quiet the logging, yield it,
    then restore global state.

    Deliberately duplicates ds_setup rather than re-using it, to keep
    each fixture self-contained and simple.
    """
    # --- setup -------------------------------------------------------
    ds = datastack.DataStack()
    datastack.clear_stack()
    ui.clean()
    saved_level = logger.getEffectiveLevel()
    logger.setLevel(logging.ERROR)
    datastack.set_stack_verbosity(logging.ERROR)
    datastack.set_template_id("__ID")

    # --- run the test, handing it the stack object -------------------
    yield ds

    # --- teardown ----------------------------------------------------
    ds.clear_stack()
    datastack.clear_stack()
    ui.clean()
    datastack.set_template_id("__ID")
    logger.setLevel(saved_level)
Ejemplo n.º 3
0
def test_query_case_7(ds_setup, ds_datadir):
    """Exercise query_by_header_keyword / query_by_obsid on both the
    implicit stack and an explicit DataStack object."""

    stkname = '@' + '/'.join((ds_datadir, 'pha.lis'))

    # Implicit (module-level) stack.
    datastack.load_pha(stkname)
    assert datastack.query_by_header_keyword('INSTRUME', 'ACIS') == [1, 2]
    assert datastack.query_by_obsid(7867) == [2]

    # Explicit stack object; ids continue from the implicit stack.
    ds = datastack.DataStack()
    ds.load_pha(stkname)
    assert ds.query_by_obsid('4938') == [3]
    assert datastack.query_by_obsid(ds, '7867') == [4]
Ejemplo n.º 4
0
    def get_sherpa_datastack(self):
        """Return a sherpa DataStack with every PHA file loaded and the
        fit range applied.

        See http://cxc.harvard.edu/sherpa/ahelp/datastack.html
        The glob pattern depends on whether the "joint" instrument
        selection is active.
        """
        import sherpa.astro.datastack as sh

        stack = sh.DataStack()

        # Choose where to look for the PHA files.
        if self.name == "joint":
            pattern = f"{self.main_repo_path}/results/spectra/*/pha_*.fits"
        else:
            pattern = f"{self.spectra_path}/pha_*.fits"

        # Load every matching OGIP file into the stack.
        for pha in glob.glob(pattern):
            sh.load_data(stack, pha)

        # Restrict the fit to the configured energy range (keV, unitless).
        fit_range = self.energy_range.to("keV").value
        stack.notice(*fit_range)

        return stack
Ejemplo n.º 5
0
    def __init__(self, name, phalist, stat):
        """Load each PHA file into a fresh DataStack and set the fit statistic."""
        self.name = name
        self.ds = datastack.DataStack()
        for pha in phalist:
            self.ds.load_pha(pha)
        # TODO add manual specs of bkg, arf, and rmf

        datastack.ui.set_stat(stat)

        # Effective-area correction is disabled by default: the nuisance
        # parameter stays fixed at 1 until explicitly enabled.
        self.nuisanceParameters = {}
Ejemplo n.º 6
0
def load_stack_data(obsid, detid, spectra_path):
    """Load all spectra for (obsid, detid) into a DataStack.

    Writes a temporary stack file listing the spectra, loads it with
    use_errors=True, then marks bad channels to be ignored on every
    loaded dataset.

    Returns
    -------
    ds, ids
        The populated stack and the sherpa data ids it created.
    """
    ds = dsmod.DataStack()

    # The stack file only needs to exist while load_pha reads it.
    with NamedTemporaryFile("w", dir=".") as temp:
        stack_tempfile = _write_spec_files(temp, obsid, detid, spectra_path)
        ds.load_pha(f"@{stack_tempfile}", use_errors=True)

    ids = shp.list_data_ids()
    # Renamed loop variable: the original shadowed the builtin ``id``.
    for data_id in ids:
        dsmod.ignore_bad(id=data_id)

    return ds, ids
Ejemplo n.º 7
0
def test_show_empty_datastack(capsys):
    '''Check show_stack and _repr_html_ output for a stack with no data.

    Uses the OO interface for variety.
    '''
    # Drop anything already sitting in the capture buffer.
    capsys.readouterr()

    mystack = datastack.DataStack()
    mystack.show_stack()

    out_lines = capsys.readouterr().out.split('\n')
    assert len(out_lines) == 2
    assert 'empty datastack' in out_lines[0]
    assert out_lines[1].strip() == ''

    # And the HTML representation.
    assert 'datastack with 0 datasets' in mystack._repr_html_()
Ejemplo n.º 8
0
    def _run_sherpa_fit(self):
        """Fit the loaded PHA spectra with sherpa's datastack interface.

        Loads ``self.pha_list`` into a fresh stack, fits
        ``self.model * FLUX_FACTOR`` over per-dataset energy thresholds,
        runs the covariance analysis, and stores the outcome in
        ``self.result``.  The stack is cleared at the end.
        """
        from sherpa.astro import datastack
        log.info("Starting SHERPA")
        log.info(self.info())
        ds = datastack.DataStack()
        ds.load_pha(self.pha_list)

        # Make model amplitude O(1e0)
        model = self.model * self.FLUX_FACTOR
        ds.set_source(model)
        # Per-dataset fit thresholds in keV (indexed below per dataset).
        thres_lo = self.energy_threshold_low.to('keV').value
        thres_hi = self.energy_threshold_high.to('keV').value

        # Apply each dataset's energy filter; sherpa ids are 1-based.
        namedataset = []
        for i in range(len(ds.datasets)):
            datastack.notice_id(i + 1, thres_lo[i], thres_hi[i])
            namedataset.append(i + 1)
        datastack.set_stat(self.statistic)
        ds.fit(*namedataset)
        datastack.covar(*namedataset)
        covar = datastack.get_covar_results()
        efilter = datastack.get_filter()

        # First go on calculation flux points following
        # http://cxc.harvard.edu/sherpa/faq/phot_plot.html
        # This should be split out and improved
        xx = datastack.get_fit_plot().dataplot.x
        dd = datastack.get_fit_plot().dataplot.y
        ee = datastack.get_fit_plot().dataplot.yerr
        mm = datastack.get_fit_plot().modelplot.y
        src = datastack.get_source()(xx)
        # Scale the data/model ratio by the source model to get flux points.
        points = dd / mm * src
        errors = ee / mm * src
        # NOTE(review): flux_graph is built but never used or returned by
        # this method — presumably meant to be stored on self; confirm.
        flux_graph = dict(energy=xx,
                          flux=points,
                          flux_err_hi=errors,
                          flux_err_lo=errors)

        from gammapy.spectrum.results import SpectrumFitResult
        self.result = SpectrumFitResult.from_sherpa(covar, efilter, self.model)
        ds.clear_stack()
        ds.clear_models()
Ejemplo n.º 9
0
    def test_case_7(self):
        """query_by_header_keyword / query_by_obsid on the implicit stack
        and on an explicit DataStack object."""
        stkname = '@' + '/'.join((self._this_dir, 'data', 'pha.lis'))

        # Implicit (module-level) stack.
        datastack.load_pha(stkname)
        assert datastack.query_by_header_keyword('INSTRUME', 'ACIS') == [1, 2]
        assert datastack.query_by_obsid(7867) == [2]

        # Explicit stack; dataset ids continue from the implicit stack.
        ds = datastack.DataStack()
        ds.load_pha(stkname)
        assert ds.query_by_obsid('4938') == [3]
        assert datastack.query_by_obsid(ds, '7867') == [4]
Ejemplo n.º 10
0
def test_query_missing_keyword(ds_setup, ds_datadir):
    """What happens when the keyword does not exist?

    This only checks a case where the keyword is missing in
    both files.
    """

    stkname = '@' + '/'.join((ds_datadir, 'pha.lis'))

    # Note: since there is a conversion between float and string,
    # there's a possibility this check may fail on some systems.
    key1, val1 = 'EXPOSUR2', '50441.752296469'
    key2, val2 = 'EXPOSUR7', '21860.439777374'

    # Implicit (module-level) stack.
    datastack.load_pha(stkname)
    assert datastack.query_by_header_keyword('MISSKEY', 'ACIS') == []
    assert datastack.query_by_header_keyword(key1, val1) == [1]
    assert datastack.query_by_header_keyword(key2, val2) == [2]

    # Explicit stack; dataset ids continue from the implicit stack.
    ds = datastack.DataStack()
    ds.load_pha(stkname)
    assert ds.query_by_header_keyword('MISSKEY', 'ACIS') == []
    assert ds.query_by_header_keyword(key1, val1) == [3]
    assert ds.query_by_header_keyword(key2, val2) == [4]
Ejemplo n.º 11
0
def test_default_instantiation():
    """Only the first DataStack created is flagged as the default instance."""
    datastack.DataStack._default_instantiated = False

    first = datastack.DataStack()
    assert first._default_instance

    second = datastack.DataStack()
    assert not second._default_instance
Ejemplo n.º 12
0
    def test_case_1(self):
        """Track how load_pha / set_source interact with the global stack,
        the sherpa session, and an explicit DataStack.

        The exact dataset counts asserted throughout depend on the order
        of the calls below, so the sequence must not be rearranged.
        """
        datadir = '/'.join((self._this_dir, 'data'))
        ls = '@' + '/'.join((datadir, '3c273.lis'))
        # Stack file with two entries: both go to the implicit stack.
        datastack.load_pha(ls, use_errors=True)

        assert 2 == len(datastack.DATASTACK.datasets)
        assert 2 == len(ui._session._data)

        # Explicit id: added to the session but NOT to the stack.
        datastack.load_pha("myid", '/'.join((datadir, "3c273.pi")))

        assert 2 == len(datastack.DATASTACK.datasets)
        assert 3 == len(ui._session._data)

        # No id: added to both the session and the implicit stack.
        datastack.load_pha('/'.join((datadir, "3c273.pi")))

        assert 3 == len(datastack.DATASTACK.datasets)
        assert 4 == len(ui._session._data)

        # Empty-list id behaves like "no id" here.
        datastack.load_pha([], '/'.join((datadir, "3c273.pi")))

        assert 4 == len(datastack.DATASTACK.datasets)
        assert 5 == len(ui._session._data)

        # Loading into an explicit stack leaves the implicit one alone
        # but still registers the datasets with the shared session.
        ds = datastack.DataStack()

        datastack.load_pha(ds, ls)

        assert 4 == len(datastack.DATASTACK.datasets)
        assert 7 == len(ui._session._data)
        assert 2 == len(ds.datasets)

        datastack.load_pha(ds, '/'.join((datadir, "3c273.pi")))

        assert 4 == len(datastack.DATASTACK.datasets)
        assert 8 == len(ui._session._data)
        assert 3 == len(ds.datasets)

        dids = datastack.DATASTACK.get_stack_ids()
        assert dids == [1, 2, 3, 4]

        sids = set(ui._session._data.keys())
        assert sids == {1, 2, 3, 4, 5, 6, 7, "myid"}

        # "ID" in the model name is templated per dataset id (p1, p2, ...).
        datastack.set_source([1, 2], "powlaw1d.pID")
        datastack.set_source([3, 4], "brokenpowerlaw.bpID")

        dsids = ds.get_stack_ids()
        assert dsids == [5, 6, 7]

        p1 = ui._session._model_components['p1']
        p2 = ui._session._model_components['p2']
        bp3 = ui._session._model_components['bp3']
        bp4 = ui._session._model_components['bp4']

        assert p1 is not None
        assert p2 is not None
        assert bp3 is not None
        assert bp4 is not None

        # A plain name (no "ID") is used as-is for a single dataset.
        datastack.set_source(1, "polynom1d.poly1")
        datastack.set_source([2, 3, 4], "atten.attID")

        poly1 = ui._session._model_components['poly1']
        a2 = ui._session._model_components['att2']
        a3 = ui._session._model_components['att3']
        a4 = ui._session._model_components['att4']

        assert poly1 is not None
        assert a2 is not None
        assert a3 is not None
        assert a4 is not None

        # clean() wipes the implicit stack and the session, but the
        # explicit DataStack keeps its dataset list.
        datastack.clean()

        assert 0 == len(datastack.DATASTACK.datasets)
        assert 0 == len(ui._session._data)
        assert 3 == len(ds.datasets)
Ejemplo n.º 13
0
 def setUp(self):
     datastack.clear_stack()
     datastack.set_template_id("__ID")
     ui.clean()
     self.ds = datastack.DataStack()