Example 1
    def __init__(self, name, filename):  # name is used as the Sherpa dataset id
        self.name = name
        sau.load_pha(name, filename)
        self.data = sau.get_data(name)
        self.arf = self.data.get_arf()
        self.rmf = self.data.get_rmf()

        try:  # Read keywords from pha header
            self.threshold = self.data.header['ETH']
        except KeyError:
            print(" ! WARNING: no threshold found, using 200 GeV")
            self.threshold = 2e8  # default value 200 GeV
        self.emax = 1e11  # default value 100 TeV

        try:
            self.zenith = self.data.header['ZENITH']
        except KeyError:
            print("WARNING: no mean zenith angle found, using 45 deg")
            self.zenith = 45.0  # default value 45 deg

        try:
            self.offset = self.data.header['OFFSET']
        except KeyError:
            print("WARNING: no offset angle found, using 1.0 deg")
            self.offset = 1.0  # default value 1.0 deg

        try:
            self.telcode = self.data.header['TELCODE']
        except KeyError:
            print("WARNING: no telcode found, using 0")
            self.telcode = 0  # default value 0
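A possible refactoring (not part of the original code): the repeated try/except blocks above could be wrapped in a small helper. A minimal sketch, with hypothetical names, assuming the same header keywords and defaults:

def header_value(header, key, default, warning):
    # Return header[key] if present, otherwise print a warning and use the default.
    try:
        return header[key]
    except KeyError:
        print("WARNING: {}".format(warning))
        return default

# e.g. self.zenith = header_value(self.data.header, 'ZENITH', 45.0,
#                                 "no mean zenith angle found, using 45 deg")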
Example 2
    def test_ARFModelPHA(self):
        from sherpa.astro import ui
        ui.load_pha(self.make_path("3c120_meg_1.pha"))

        # remove the RMF to ensure this is an ARF-only analysis
        # (which is what is needed to trigger the bug that led to #699)
        ui.get_data().set_rmf(None)

        ui.group_counts(20)
        ui.notice(0.5, 6)
        ui.subtract()
        ui.set_model(ui.xsphabs.abs1 * (ui.xsapec.bubble + ui.powlaw1d.p1))
        ui.set_xsabund('angr')
        ui.set_xsxsect('vern')
        abs1.nh = 0.163
        abs1.nh.freeze()
        p1.ampl = 0.017
        p1.gamma = 1.9
        bubble.kt = 0.5
        bubble.norm = 4.2e-5
        tol = 1.0e-2
        ui.set_method_opt('ftol', tol)
        ui.fit()
        result = ui.get_fit_results()
        assert result.numpoints == self._fit_using_ARFModelPHA['numpoints']
        assert result.dof == self._fit_using_ARFModelPHA['dof']
Example 3
def test_dataspace1d_datapha(clean_astro_ui):
    """Explicitly test dataspace1d for DataPHA"""

    assert ui.list_data_ids() == []

    # Note the grid is ignored, other than the number of bins
    ui.dataspace1d(20, 30, step=2.5, id='x', dstype=ui.DataPHA)

    assert ui.list_data_ids() == ['x']
    assert ui.get_data('x').name == 'dataspace1d'

    grid = ui.get_indep('x')
    assert len(grid) == 1

    expected = numpy.asarray([1, 2, 3, 4, 5])
    assert grid[0] == pytest.approx(expected)

    y = ui.get_dep('x')
    assert y == pytest.approx(numpy.zeros(5))

    assert ui.get_exposure('x') is None
    assert ui.get_grouping('x') is None
    assert ui.get_quality('x') is None

    assert ui.get_data('x').subtracted is False

    with pytest.raises(IdentifierErr):
        ui.get_bkg('x')
Example 4
    def __init__(self, name, filename):  # name is used as the Sherpa dataset id
        self.name = name
        sau.load_pha(name, filename)
        self.data = sau.get_data(name)
        self.arf = self.data.get_arf()
        self.rmf = self.data.get_rmf()

        try:  # Read keywords from pha header
            self.threshold = self.data.header['ETH']
        except KeyError:
            print(" ! WARNING: no threshold found, using 200 GeV")
            self.threshold = 2e8  # default value 200 GeV
        self.emax = 1e11  # default value 100 TeV

        try:
            self.zenith = self.data.header['ZENITH']
        except KeyError:
            print("WARNING: no mean zenith angle found, using 45 deg")
            self.zenith = 45.0  # default value 45 deg

        try:
            self.offset = self.data.header['OFFSET']
        except KeyError:
            print("WARNING: no offset angle found, using 1.0 deg")
            self.offset = 1.0  # default value 1.0 deg

        try:
            self.telcode = self.data.header['TELCODE']
        except KeyError:
            print("WARNING: no telcode found, using 0")
            self.telcode = 0  # default value 0
Example 5
    def test_fits_io(self):
        """
        Test that basic FITS I/O functions work.

        This test ensures that the FITS backend can be used to perform basic
        I/O functions.
        """

        from sherpa.astro import datastack
        folder = os.path.dirname(datastack.__file__)
        infile = os.path.join(folder, "tests", "data",
                              "acisf07867_000N001_r0002_pha3.fits")

        ui.load_pha(infile)
        with NamedTemporaryFile() as f:
            ui.save_pha(f.name, ascii=False, clobber=True)

            # And can we read it back in?
            ui.load_pha(2, f.name)

        # Check the data is the same (note: although counts/channels are
        # integers, approximate equality checks are easier to use here).
        d1 = ui.get_data(1)
        d2 = ui.get_data(2)
        assert_almost_equal(d2.channel, d1.channel)
        assert_almost_equal(d2.counts, d1.counts)
        assert_almost_equal(d2.exposure, d1.exposure)
        assert_almost_equal(np.log10(d2.backscal), np.log10(d1.backscal))
Example 6
    def __init__(self, name, filename=None):
        self.name = name
        if filename is not None:
            sau.load_pha(name, filename)
            self.data = sau.get_data(name)
            self.arf = self.data.get_arf()
            self.rmf = self.data.get_rmf()

            # Read keywords from pha header
            try:
                self.threshold = self.data.header['ETH']
            except KeyError:
                print("WARNING: no threshold found, using 200 GeV")
                self.threshold = 2e8  # default value 200 GeV
            self.emax = 1e11  # default value 100 TeV

            try:
                self.zenith = self.data.header['ZENITH']
            except KeyError:
                print("WARNING: no mean zenith angle found using 45 deg")
                self.zenith = 45.0  # default value 45 deg

            try:
                self.offset = self.data.header['OFFSET']
            except KeyError:
                print("WARNING: no offset angle found using 1.0 deg")
                self.offset = 1.0  # default value 1 deg

            try:
                self.n_tels = self.data.header['N_TELS']
            except KeyError:
                print("WARNING: no number of telescopes found using 0")
                self.n_tels = 0  # default value

            try:
                self.eff = self.data.header['EFFICIEN']
            except KeyError:
                print("WARNING: no efficiency found using 1.0")
                self.eff = 1.00  # default value

            try:
                self.tstart = self.data.header['TSTART']
            except KeyError:
                print("WARNING: no tstart found using 0")
                self.tstart = 0.  # default value

            try:
                self.tstop = self.data.header['TSTOP']
            except KeyError:
                print("WARNING: no tstop found using tsart+1800")
                self.tstop = self.tstart + 1800  # default value

        else:
            self.data = sau.get_data(name)
            self.arf = self.data.get_arf()
            self.rmf = self.data.get_rmf()
Example 7
    def setUp(self):

        self._old_logger_level = logger.getEffectiveLevel()
        logger.setLevel(logging.ERROR)

        ui.set_stat('wstat')

        infile = self.make_path('3c273.pi')
        ui.load_pha(1, infile)

        # Change the backscale value slightly so that the
        # results are different to other runs with this file.
        #
        nbins = ui.get_data(1).get_dep(False).size
        bscal = 0.9 * np.ones(nbins) * ui.get_backscal(1)
        ui.set_backscal(1, backscale=bscal)

        ui.set_source(1, ui.powlaw1d.pl)

        # The powerlaw slope and normalization are
        # intended to be "a reasonable approximation"
        # to the data, just to make sure that any statistic
        # calculation doesn't blow-up too much.
        #
        ui.set_par("pl.gamma", 1.7)
        ui.set_par("pl.ampl", 1.7e-4)
Example 8
 def test_gro_delta_rms(self):
     ui.load_ascii_with_errors(1,
                               self.gro_delta_fname,
                               func=self.rms,
                               delta=True)
     data = ui.get_data(1)
     self.fit_asymmetric_err(self._results_bench_rms, data)
Example 9
 def query(self, func):
     output = []
     for dataset in self.filter_datasets():
         id = dataset['id']
         if func(ui.get_data(id)):
             output.append(id)
     return output
Example 10
    def validate_pha(self, idval):
        """Check that the PHA dataset in id=idval is
        as expected.
        """

        self.assertEqual(ui.list_data_ids(), [idval])

        pha = ui.get_data(idval)
        self.assertIsInstance(pha, DataPHA)

        arf = ui.get_arf(idval)
        self.assertIsInstance(arf, ARF1D)

        rmf = ui.get_rmf(idval)
        self.assertIsInstance(rmf, RMF1D)

        bpha = ui.get_bkg(idval, bkg_id=1)
        self.assertIsInstance(bpha, DataPHA)

        barf = ui.get_arf(idval, bkg_id=1)
        self.assertIsInstance(barf, ARF1D)

        brmf = ui.get_rmf(idval, bkg_id=1)
        self.assertIsInstance(brmf, RMF1D)

        # normally the background data set would have a different name,
        # but this is a  PHA Type 3 file.
        # self.assertEqual(pha.name, bpha.name)
        self.assertEqual(arf.name, barf.name)
        self.assertEqual(rmf.name, brmf.name)
Example 11
 def __init__(self, name='delta2dint'):
     # Delta-function source parameters
     self.xpos = Parameter(name, 'xpos', 0)  # p[0]
     self.ypos = Parameter(name, 'ypos', 0)  # p[1]
     self.ampl = Parameter(name, 'ampl', 1)  # p[2]
     self.shape = sau.get_data().shape
     ArithmeticModel.__init__(self, name, (self.xpos, self.ypos, self.ampl))
Example 12
def test_resample_rmd(make_data_path):
    infile = make_data_path('gro_delta.txt')
    ui.load_ascii_with_errors(1, infile, delta=True, func=rms)
    base = ui.get_data(1)
    data = Data1DAsymmetricErrs(2, base.x, base.y, base.elo, base.ehi,
                                base.staterror, base.syserror)
    resample_data(data, RESAMPLE_BENCH, RESULTS_BENCH_RMS)
Example 13
def get_spectrum(filename, data_id="1"):
    ui.load_data(id=data_id, filename=filename)
    d = ui.get_data(data_id)
    arf = d.get_arf()
    rmf = d.get_rmf()

    return d, arf, rmf
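A hedged usage sketch for the helper above (the file name and dataset id are placeholders):

# Illustrative only: load a PHA spectrum and unpack its responses.
d, arf, rmf = get_spectrum("source.pha", data_id="src")
print(d.name, arf is not None, rmf is not None)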
Example 14
 def query(self, func):
     output = []
     for dataset in self.filter_datasets():
         id = dataset['id']
         if func(ui.get_data(id)):
             output.append(id)
     return output
Example 15
 def test_AsymmetricErrs_rms(self):
     ui.load_ascii_with_errors(1, self.gro_delta_fname, func=self.rms,
                                  delta=True)
     tmp = ui.get_data(1)
     data = Data1DAsymmetricErrs(2, tmp.x, tmp.y, tmp.elo,
                                 tmp.ehi, tmp.staterror, tmp.syserror)
     self.fit_asymmetric_err(self._results_bench_rms, data)        
Example 16
 def test_AsymmetricErrors_resample_avg(self):
     ui.load_ascii_with_errors(1, self.gro_delta_fname, delta=True)
     tmp = ui.get_data(1)
     data = Data1DAsymmetricErrs(1, tmp.x, tmp.y, tmp.elo,
                                 tmp.ehi, tmp.staterror, tmp.syserror)
     self.resample_data(data, self._resample_bench,
                        self._results_bench_avg)
Example 17
def validate_pha(idval):
    """Check that the PHA dataset in id=idval is
    as expected.
    """

    assert ui.list_data_ids() == [idval]

    pha = ui.get_data(idval)
    assert isinstance(pha, DataPHA)

    arf = ui.get_arf(idval)
    assert isinstance(arf, ARF1D)

    rmf = ui.get_rmf(idval)
    assert isinstance(rmf, RMF1D)

    bpha = ui.get_bkg(idval, bkg_id=1)
    assert isinstance(bpha, DataPHA)

    barf = ui.get_arf(idval, bkg_id=1)
    assert isinstance(barf, ARF1D)

    brmf = ui.get_rmf(idval, bkg_id=1)
    assert isinstance(brmf, RMF1D)

    # normally the background data set would have a different name,
    # but this is a  PHA Type 3 file.
    # assert pha.name == bpha.name
    assert arf.name == barf.name
    assert rmf.name == brmf.name
Example 18
def testWrite(make_data_path):

    fname = make_data_path('3c273.pi')
    ui.load_pha(1, fname)
    pha_orig = ui.get_data(1)

    ofh = tempfile.NamedTemporaryFile(suffix='sherpa_test')
    ui.save_pha(1, ofh.name, ascii=False, clobber=True)

    # limited checks
    pha = ui.unpack_pha(ofh.name)
    assert isinstance(pha, DataPHA)

    for key in ["channel", "counts"]:
        newval = getattr(pha, key)
        oldval = getattr(pha_orig, key)
        assert_allclose(oldval, newval, err_msg=key)

    # at present grouping and quality are not written out

    for key in ["exposure", "backscal", "areascal"]:
        newval = getattr(pha, key)
        oldval = getattr(pha_orig, key)
        assert newval == pytest.approx(oldval), key
    """
Example 19
def test_fake_pha_multi_file(make_data_path, clean_astro_ui, reset_seed):
    '''Test fake_pha using multiple real input files.

    Note that HEG orders -1 and +1 should really be treated separately,
    but for this test we just need two files to load.
    '''

    np.random.seed(22349)

    ui.set_source("gauss1d.g1")
    g1 = ui.get_source()
    g1.pos = 3
    g1.FWHM = .5

    ui.fake_pha(None, [
        make_data_path('3c120_heg_-1.arf.gz'),
        make_data_path('3c120_heg_1.arf.gz')
    ], [
        make_data_path('3c120_heg_-1.rmf.gz'),
        make_data_path('3c120_heg_1.rmf.gz')
    ], 500.)
    data = ui.get_data()
    # Even with noise, maximum should be close to 3 keV
    assert np.isclose(data.get_x()[np.argmax(data.counts)], 3., atol=.2)

    # This is not a test from first principles, but at least a check of
    # the current behaviour
    assert data.counts.sum() > 5000
    assert data.counts.sum() < 10000
Example 20
def test_load_table_fits(clean_astro_ui):
    # QUS: why is this not in the sherpa-test-data repository?
    this_dir = os.path.dirname(os.path.abspath(__file__))
    ui.load_table(1, os.path.join(this_dir, 'data', 'two_column_x_y.fits.gz'))
    data = ui.get_data(1)
    assert data.x == pytest.approx([1, 2, 3])
    assert data.y == pytest.approx([4, 5, 6])
Example 21
 def test_load_table_fits(self):
     # QUS: why is this not in the sherpa-test-data repository?
     this_dir = os.path.dirname(os.path.abspath(__file__))
     ui.load_table(1, os.path.join(this_dir, 'data', 'two_column_x_y.fits.gz'))
     data = ui.get_data(1)
     self.assertEqualWithinTol(data.x, [1, 2, 3])
     self.assertEqualWithinTol(data.y, [4, 5, 6])
Example 22
    def setUp(self):

        self._old_logger_level = logger.getEffectiveLevel()
        logger.setLevel(logging.ERROR)

        ui.set_stat('wstat')

        infile = self.make_path('3c273.pi')
        ui.load_pha(1, infile)

        # Change the backscale value slightly so that the
        # results are different to other runs with this file.
        #
        nbins = ui.get_data(1).get_dep(False).size
        bscal = 0.9 * np.ones(nbins) * ui.get_backscal(1)
        ui.set_backscal(1, backscale=bscal)

        ui.set_source(1, ui.powlaw1d.pl)

        # The powerlaw slope and normalization are
        # intended to be "a reasonable approximation"
        # to the data, just to make sure that any statistic
        # calculation doesn't blow-up too much.
        #
        ui.set_par("pl.gamma", 1.7)
        ui.set_par("pl.ampl", 1.7e-4)
Example 23
def test_constructor_rms(make_data_path):
    infile = make_data_path('gro_delta.txt')
    ui.load_ascii_with_errors(1, infile, delta=True, func=rms)
    base = ui.get_data(1)
    data = Data1DAsymmetricErrs(2, base.x, base.y, base.elo, base.ehi,
                                base.staterror, base.syserror)
    fit_asymmetric_err(RESULTS_BENCH_RMS, data)
Example 24
 def _add_dataset(self, dataid):
     dataset = dict(id=dataid,
                    args=[],
                    model_comps={},
                    data=ui.get_data(dataid))
     _all_dataset_ids[dataid] = dataset
     self.dataset_ids[dataid] = dataset
     self.datasets.append(dataset)
Example 25
 def test_load_table_fits(self):
     # QUS: why is this not in the sherpa-test-data repository?
     this_dir = os.path.dirname(os.path.abspath(__file__))
     ui.load_table(1,
                   os.path.join(this_dir, 'data', 'two_column_x_y.fits.gz'))
     data = ui.get_data(1)
     self.assertEqualWithinTol(data.x, [1, 2, 3])
     self.assertEqualWithinTol(data.y, [4, 5, 6])
Example 26
def test_fake_pha_background_model(clean_astro_ui, reset_seed):
    """Check we can add a background component.

    See also test_fake_pha_basic.

    For simplicity we use perfect responses.
    """

    np.random.seed(27347)

    id = 'qwerty'
    channels = np.arange(1, 4, dtype=np.int16)
    counts = np.ones(3, dtype=np.int16)
    bcounts = 100 * counts

    ui.load_arrays(id, channels, counts, ui.DataPHA)
    ui.set_exposure(id, 100)
    ui.set_backscal(id, 0.1)

    bkg = ui.DataPHA('bkg', channels, bcounts, exposure=200, backscal=0.4)

    ebins = np.asarray([1.1, 1.2, 1.4, 1.6])
    elo = ebins[:-1]
    ehi = ebins[1:]
    arf = ui.create_arf(elo, ehi)
    rmf = ui.create_rmf(elo, ehi, e_min=elo, e_max=ehi)

    mdl = ui.create_model_component('const1d', 'mdl')
    mdl.c0 = 0
    bkgmdl = ui.create_model_component('const1d', 'mdl')
    bkgmdl.c0 = 2
    ui.set_source(id, mdl)
    ui.set_bkg(id, bkg)
    ui.set_bkg_source(id, bkgmdl)
    ui.set_arf(id, arf, bkg_id=1)
    ui.set_rmf(id, rmf, bkg_id=1)

    ui.fake_pha(id, arf, rmf, 1000.0, bkg='model')

    faked = ui.get_data(id)
    assert faked.exposure == pytest.approx(1000.0)
    assert (faked.channel == channels).all()

    # check we've faked counts (the scaling is such that it is
    # very improbable that this condition will fail)
    assert (faked.counts > counts).all()

    # For reference the predicted source signal is
    #    [200, 400, 400]
    # and the background signal is
    #    [125, 125, 125]
    # so, even with randomly drawn values, the following
    # checks should be robust.
    #
    predicted_by_source = 1000 * mdl(elo, ehi)
    predicted_by_bkg = (1000 / 200) * (0.1 / 0.4) * bcounts
    assert (faked.counts > predicted_by_source).all()
    assert (faked.counts > predicted_by_bkg).all()
Example 27
def test_xmm2(run_thread, fix_xspec):

    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        run_thread('xmm2')

    # NOTE: if this test is run on its own it can generate three warnings,
    # with the first being a RuntimeWarning about numpy.ndarray size
    # changed. We filter this out as it's not at all clear what is going
    # on and we have filters in conftest to remove similar warnings
    #
    ws = [
        w for w in ws
        if not (w.category == RuntimeWarning and str(w.message).startswith(
            'numpy.ndarray size changed, may indicate binary incompatibility.')
                )
    ]
    assert len(ws) == 2
    cats = set([w.category for w in ws])
    assert cats == set([UserWarning])

    # The order of reading the ARF and RMF is not guaranteed,
    # so do not force it here when testing the two warning
    # messages.
    #
    arffile = 'MNLup_2138_0670580101_EMOS1_S001_spec.arf'
    rmffile = 'MNLup_2138_0670580101_EMOS1_S001_spec.rmf'
    emsg_arf = "The minimum ENERG_LO in the ARF " + \
               "'{}' ".format(arffile) + \
               "was 0 and has been replaced by {}".format(EMIN)
    emsg_rmf = "The minimum ENERG_LO in the RMF " + \
               "'{}' ".format(rmffile) + \
               "was 0 and has been replaced by {}".format(EMIN)

    emsgs = set([emsg_arf, emsg_rmf])
    wmsgs = set([str(w.message) for w in ws])
    assert wmsgs == emsgs

    assert ui.get_data().channel[0] == approx(1.0, rel=1e-4)
    rmf = ui.get_rmf()
    arf = ui.get_arf()
    assert rmf.detchans == 800
    assert len(rmf.energ_lo) == 2400
    assert len(rmf.energ_hi) == 2400
    assert len(rmf.n_grp) == 2400
    assert len(rmf.f_chan) == 2394
    assert len(rmf.n_chan) == 2394
    assert len(rmf.matrix) == 1281216
    assert rmf.offset == 0
    assert len(rmf.e_min) == 800
    assert len(rmf.e_max) == 800
    assert len(arf.energ_lo) == 2400
    assert len(arf.energ_hi) == 2400
    assert len(arf.specresp) == 2400

    etol = EMIN / 100.0
    assert rmf.energ_lo[0] == approx(EMIN, rel=etol)
    assert arf.energ_lo[0] == approx(EMIN, rel=etol)
Example 28
    def __init__(self, id=None, fluxtype="photon"):
        "If id is None the default id will be used."

        if id is None:
            self.id = ui.get_default_id()
        else:
            self.id = id

        if fluxtype in self._valid_fluxtypes:
            self.fluxtype = fluxtype
        else:
            emsg = "fluxtype set to {} but must be one of: {}".format(
                fluxtype, " ".join(self._valid_fluxtypes))
            raise ValueError(emsg)

        # Set up the xlo/xhi/xmid arrays
        d = ui.get_data(self.id)
        self._calc_bins(d)
        self._apply_mask(d)

        # Important to use get_source and not get_model as we do not
        # want to apply any instrument model to the evaluation.
        #
        # Note that we do not hold onto the model expression object,
        # which is probably not an issue here.
        #
        mdl = ui.get_source(id)
        self.modelexpr = mdl.name

        # We do not use xlo/xhi but the _xlo/_xhi attributes which
        # contain an extra bin, in case of X-Spec models
        #
        src = mdl(self._xlo, self._xhi)[:-1]
        if np.any(src < 0.0):
            emsg = "There are negative values in your source " + \
                "model (id={0})!".format(self.id)
            raise RuntimeError(emsg)
        if np.all(src <= 0.0):
            emsg = "The source model for id={0} ".format(self.id) + \
                "evaluates to 0!"
            raise RuntimeError(emsg)

        # Conversion to a single datatype is a bit excessive here.
        #
        dtype = src.dtype

        if self.fluxtype == "erg":
            norm = _charge_e * np.sum(src * self.xmid)
        else:
            norm = np.sum(src)

        self.weight = src / norm

        self.weight = self.weight.astype(dtype)
        self.xlo = self.xlo.astype(dtype)
        self.xhi = self.xhi.astype(dtype)
        self.xmid = self.xmid.astype(dtype)
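A standalone sketch of the weight normalisation used above (toy numbers; the value of _charge_e, the keV-to-erg conversion used by the original module, is an assumption):

import numpy as np

_charge_e = 1.602176634e-9        # erg per keV (assumed value of the module constant)

src = np.array([2.0, 4.0, 4.0])   # model flux per bin (toy values)
xmid = np.array([1.0, 1.5, 2.0])  # bin mid-points in keV (toy values)

w_photon = src / np.sum(src)                      # fluxtype="photon": weights sum to 1
w_erg = src / (_charge_e * np.sum(src * xmid))    # fluxtype="erg"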
Example 29
    def setUp(self):
        # hide warning messages from file I/O
        self._old_logger_level = logger.level
        logger.setLevel(logging.ERROR)

        self._id = 1
        fname = self.make_path('3c273.pi')
        ui.load_pha(self._id, fname)
        self._pha = ui.get_data(self._id)
Example 30
    def setUp(self):
        # hide warning messages from file I/O
        self._old_logger_level = logger.level
        logger.setLevel(logging.ERROR)

        self._id = 1
        fname = self.make_path('3c273.pi')
        ui.load_pha(self._id, fname)
        self._pha = ui.get_data(self._id)
Example 31
def test_load_pha2(make_data_path, caplog):
    """Basic test that a pha2 file can be read in."""

    basename = '3c120_pha2'

    orig_ids = ui.list_data_ids()
    assert orig_ids == []

    # The file is stored gzip-encoded
    infile = make_data_path(basename)
    ui.load_pha(infile)

    pha_ids = ui.list_data_ids()
    assert len(pha_ids) == 12

    # list_data_ids doesn't guarantee an order
    # Do an explicit check, rather than via a set (testing
    # all at once) to make it easier to see what is missing
    # (if any)
    #
    for i in range(1, 13):
        assert i in pha_ids

    for i in range(1, 13):
        d = ui.get_data(i)
        validate_pha(d, bkg=True)

        # There is no indication of what "part" this data set
        # represents in the file name
        #
        assert d.name == infile

        b = ui.get_bkg(i, bkg_id=1)
        validate_pha(b, bkg=False)
        assert b.name == infile

        b = ui.get_bkg(i, bkg_id=2)
        validate_pha(b, bkg=False)
        assert b.name == infile

    # Test Log messages
    msg_one = "systematic errors were not found in file '{}'".format(infile)
    msg_two = """statistical errors were found in file '{}' 
but not used; to use them, re-read with use_errors=True""".format(infile)
    msg_three = "read background_up into a dataset from file {}".format(infile)
    msg_four = "read background_down into a dataset from file {}".format(
        infile)
    msg_five = "Multiple data sets have been input: 1-12"

    assert caplog.record_tuples == [
        ('sherpa.astro.io', logging.WARNING, msg_one),
        ('sherpa.astro.io', logging.INFO, msg_two),
        ('sherpa.astro.io', logging.INFO, msg_three),
        ('sherpa.astro.io', logging.INFO, msg_four),
        ('sherpa.astro.ui.utils', logging.INFO, msg_five),
    ]
Example 32
    def _check_stat(self, nbins, expected):

        # check the filter sizes (mainly so that these tests
        # get flagged as in need of a look if anything changes
        # in other parts of the code, such as filtering and binning
        #
        self.assertEqual(nbins, ui.get_data().get_dep(True).size)

        stat = ui.calc_stat()
        self.assertAlmostEqual(expected, stat, places=7)
Example 33
    def _check_stat(self, nbins, expected):

        # check the filter sizes (mainly so that these tests
        # get flagged as in need of a look if anything changes
        # in other parts of the code, such as filtering and binning
        #
        self.assertEqual(nbins, ui.get_data().get_dep(True).size)

        stat = ui.calc_stat()
        self.assertAlmostEqual(expected, stat, places=7)
Example 34
def get_instmap_weights(id=None, fluxtype="photon"):
    """Returns the weights information for use by mkinstmap.

    Parameters
    ----------
    id : int or string
        If id is None then the default Sherpa id is used. This
        dataset must have a grid and source model defined.
    fluxtype : 'photon' or 'erg'
        The units of the instrument map are
        cm^2 count / ``fluxtype``.

    Returns
    -------
    weights
        A weights object. When ``fluxtype="photon"`` the
        weights will sum to 1.

    See Also
    --------
    estimate_weighted_expmap
    plot_instmap_weights
    save_instmap_weights

    Notes
    -----
    An error will be thrown if the model evaluates to
    a negative value, or there is no flux.

    This is intended for use with a dataset "faked" using::

        dataspace1d(elow, ehigh, estep)
        set_source(...)

    although there is an attempt to support DataPHA objects
    (either for spectra that have been loaded in or "faked"
    using ``dataspace1d``, specifying the data type explicitly).
    """

    if id is None:
        id = ui.get_default_id()

    # Since sherpa.astro.data.DataPHA is a subclass of
    # sherpa.data.Data1DInt we need to check for it first.
    #
    d = ui.get_data(id)
    if isinstance(d, DataPHA):
        return InstMapWeightsPHA(id, fluxtype=fluxtype)
    elif isinstance(d, Data1DInt):
        return InstMapWeights1DInt(id, fluxtype=fluxtype)
    else:
        emsg = "Unable to calculate weights from a dataset " + \
            "of type {0}.{1}".format(d.__class__.__module__,
                                     d.__class__.__name__)
        raise RuntimeError(emsg)
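A minimal usage sketch, assuming a grid faked with dataspace1d as the docstring suggests (grid values and model choice are illustrative):

from sherpa.astro import ui

# Illustrative only: fake a 0.5-7 keV grid, set a source, then build the weights.
ui.dataspace1d(0.5, 7.0, 0.1)
ui.set_source(ui.powlaw1d.pl)
wgts = get_instmap_weights(fluxtype="photon")
# For fluxtype="photon" the weights should sum to 1 (see the docstring above).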
Example 35
 def test_warning(self):
     ui.load_ascii_with_errors(1, self.gro_fname)
     data = ui.get_data(1)
     powlaw1d = PowLaw1D('p1')
     ui.set_model(powlaw1d)
     fit = Fit(data, powlaw1d)
     results = fit.fit()
     with warnings.catch_warnings(record=True) as w:
         warnings.simplefilter("always")
         ui.resample_data(1, 3)
         assert len(w) == 0
Example 36
 def center_psf(self):
     """Set xpos and ypos of the PSF to the dataspace center"""
     import sherpa.astro.ui as sau
     try:
         ny, nx = sau.get_data().shape
         for par in sau.get_psf().kernel.pars:
             if par.name == 'xpos':
                 par.val = (nx + 1) / 2.
             elif par.name == 'ypos':
                 par.val = (ny + 1) / 2.
     except Exception:
         logging.warning('PSF is not centered.')
Example 37
    def testReadImplicit(self):
        """Exclude .gz from the file name"""

        idval = "13"
        fname = self.head + '_pha3.fits'
        ui.load_pha(idval, fname)

        self.validate_pha(idval)

        pha = ui.get_data(idval)
        bpha = ui.get_bkg(idval, bkg_id=1)
        self.assertEqual(pha.name, bpha.name)
Example 38
def test_image_with_id(make_data_path, clean_astro_ui):
    """Call load_image with an identifier"""

    img = make_data_path('img.fits')

    assert ui.list_data_ids() == []
    ui.load_image('ix', img)
    assert ui.list_data_ids() == ['ix']

    d = ui.get_data('ix')
    assert isinstance(d, ui.DataIMG)
    assert d.name.endswith('img.fits')
Example 39
 def center_psf(self):
     """Set ``xpos`` and ``ypos`` of the PSF to the dataspace center."""
     import sherpa.astro.ui as sau
     try:
         ny, nx = sau.get_data().shape
         for par in sau.get_psf().kernel.pars:
             if par.name == 'xpos':
                 par.val = (nx + 1) / 2.
             elif par.name == 'ypos':
                 par.val = (ny + 1) / 2.
     except Exception:
         raise Exception('PSF is not centered.')
Example 40
    def center_psf(self):
        """Set ``xpos`` and ``ypos`` of the PSF to the dataspace center."""
        import sherpa.astro.ui as sau
        try:
            ny, nx = sau.get_data().shape
            for _ in ['psf1', 'psf2', 'psf3']:
                par = sau.get_par(_ + '.xpos')
                par.val = nx / 2.

                par = sau.get_par(_ + '.ypos')
                par.val = ny / 2.
        except Exception:
            raise Exception('PSF is not centered.')
Example 41
 def containment_fraction(self, theta, npix=1000):
     """Compute fraction of PSF contained inside theta."""
     import sherpa.astro.ui as sau
     sau.dataspace2d((npix, npix))
     self.set()
     # x_center = get_psf().kernel.pars.xpos
     # y_center = get_psf().kernel.pars.ypos
     x_center, y_center = sau.get_psf().model.center
     x_center, y_center = x_center + 0.5, y_center + 0.5  # shift seen on image.
     x, y = sau.get_data().x0, sau.get_data().x1
     # Note: use the source image here; get_model_image() returns the
     # PSF-convolved PSF image, which is a factor of sqrt(2) ~ 1.4 too wide.
     p = sau.get_source_image().y.flatten()
     p /= np.nansum(p)
     mask = (x - x_center) ** 2 + (y - y_center) ** 2 < theta ** 2
     fraction = np.nansum(p[mask])
     if 0:  # debug
         sau.get_data().y = p
         sau.save_data('psf_sherpa.fits', clobber=True)
         sau.get_data().y = mask.astype('int')
         sau.save_data('mask_sherpa.fits', clobber=True)
     return fraction
Example 42
    def testReadExplicit(self):
        """Include .gz in the file name"""

        idval = 12
        fname = self.head + '_pha3.fits.gz'
        ui.load_pha(idval, fname)

        self.validate_pha(idval)

        # TODO: does this indicate that the file name, as read in,
        #       should have the .gz added to it to match the data
        #       read in, or left as is?
        pha = ui.get_data(idval)
        bpha = ui.get_bkg(idval, bkg_id=1)
        self.assertEqual(pha.name, bpha.name + '.gz')
Example 43
 def test_xmm2(self):
     self.run_thread('xmm2')
     self.assertEqualWithinTol(ui.get_data().channel[0], 1.0, 1e-4)
     self.assertEqual(ui.get_rmf().detchans, 800)
     self.assertEqual(len(ui.get_rmf().energ_lo), 2400)
     self.assertEqual(len(ui.get_rmf().energ_hi), 2400)
     self.assertEqual(len(ui.get_rmf().n_grp), 2400)
     self.assertEqual(len(ui.get_rmf().f_chan), 2394)
     self.assertEqual(len(ui.get_rmf().n_chan), 2394)
     self.assertEqual(len(ui.get_rmf().matrix), 1281216)
     self.assertEqual(ui.get_rmf().offset, 0)
     self.assertEqual(len(ui.get_rmf().e_min), 800)
     self.assertEqual(len(ui.get_rmf().e_max), 800)
     self.assertEqual(len(ui.get_arf().energ_lo), 2400)
     self.assertEqual(len(ui.get_arf().energ_hi), 2400)
     self.assertEqual(len(ui.get_arf().specresp), 2400)
Example 44
    def __init__(self, dataids):
        self.datasets = []
        self.tot_excess = None
        self.tot_expo = None

        for dataid in dataids:
            spec = spectral_data(sau.get_data(dataid), sau.get_bkg(dataid))
            self.datasets.append(spec)
        self.bkg = np.concatenate([a.fit_bkg for a in self.datasets])
        self.alpha = np.concatenate([a.fit_alpha for a in self.datasets])
        self.Ttot = np.concatenate([a.fit_Ttot for a in self.datasets])
        self.ONexpo = np.concatenate([a.fit_ONexpo for a in self.datasets])

        for a in self.datasets:
            # Careful: we are assuming that all the spectra have the same binning
            if self.tot_excess is None:
                self.tot_excess = np.zeros_like(a.excess)
            if self.tot_expo is None:
                self.tot_expo = np.zeros_like(a.excess)

            self.tot_excess += a.excess
            self.tot_expo += a.full_expo
Example 45
    def query(self, func):
        """Return the data sets identified by a function.

        Parameters
        ----------
        func
           A function which accepts a Sherpa Data object and
           returns ``True`` if the data set matches.

        Returns
        -------
        matches : list of int or str
           A list of the data set identifiers that match. This
           list may be empty.

        See Also
        --------
        query_by_header_keyword, query_by_obsid

        Examples
        --------

        This query function selects those data sets which have
        been grouped, or had their background subtracted, or both.

        >>> myquery = lambda d: d.subtracted or d.grouped
        >>> query(myquery)
        []

        """

        output = []
        for dataset in self.filter_datasets():
            id = dataset['id']
            if func(ui.get_data(id)):
                output.append(id)
        return output
Example 46
        def _load(self, *args, **kwargs):
            """Load a dataset and add to the datasets for stacked analysis.
            """
            if self.getitem_ids:
                dataid = self.getitem_ids[0]
                self.getitem_ids = None
            else:
                dataid = 1
                while dataid in _all_dataset_ids:
                    dataid += 1
                
            if dataid in self.dataset_ids:
                raise ValueError('Data ID = {0} is already in the DataStack'.format(dataid))

            logger.info('Loading dataset id %s' % dataid)
            out = load_func(dataid, *args, **kwargs)

            dataset = dict(id=dataid, args=args, model_comps={}, data=ui.get_data(dataid))
            dataset.update(kwargs)  # no sherpa load func 'args' keyword so no conflict
            _all_dataset_ids[dataid] = dataset
            self.dataset_ids[dataid] = dataset
            self.datasets.append(dataset)

            return out
Example 47
def mainloop(mymodel, fwhm, id=None, maxiter=5, mindist=0., do_plots=0,
             smoothwindow='hanning', sigma_threshold=3.):
    # smoothwindow and sigma_threshold were free (undefined) names in the original
    # snippet; they are exposed here as keyword arguments with assumed defaults.

    if id is None:
        id = ui.get_default_id()
    data = ui.get_data(id)
    wave = data.get_indep()[0]
    error = data.get_error()[0]
    
    # the model could have been initialized with arbitrary values
    ui.fit(id)

    for i in range(maxiter):
        oldmodel = smh.get_model_parts(id)
        res_flux = ui.get_resid_plot(id).y
        if smoothwindow is not None:
            fwhminpix = int(fwhm / np.diff(wave).mean())
            y = smooth(res_flux/error, window_len = 3*fwhminpix, window = smoothwindow)
        else:
            y = res_flux/error
        peaks = findlines(wave, y, fwhm, smoothwindow = None, sigma_threshold = sigma_threshold)
        if has_mpl and (do_plots > 2):
            plt.figure()
            plt.plot(wave, res_flux/error, 's')
            for pos in mymodel.line_value_list('pos'):
                plt.plot([pos, pos], plt.ylim(),'k:')
            for peak in peaks:
                plt.plot([wave[peak], wave[peak]], plt.ylim())
            plt.plot(wave, y)
            plt.draw()
            
        for peak in peaks:
            if (len(mymodel.line_value_list('pos')) == 0) or (min(np.abs(mymodel.line_value_list('pos') - wave[peak])) >= mindist):
                mymodel.add_line(**mymodel.guess(wave, smooth(res_flux, window_len = 3*fwhminpix, window = smoothwindow), peak, fwhm = fwhm))
        newmodel = smh.get_model_parts(id)
        print('Iteration {0:3n}: {1:3n} lines added'.format(i, len(newmodel) - len(oldmodel)))
        
        if set(newmodel) == set(oldmodel):
            print('No new lines added this step - fitting finished')
            break
        # Now do the fitting in Sherpa
        #ui.set_method('simplex')
        ui.fit(id)
        #ui.set_method('moncar')
        #ui.fit(id)
        
        if has_mpl and (do_plots > 0):
            if do_plots > 1:
                plt.figure()
            else:
                plt.clf()
            ui.plot_fit(id)
            for pos in mymodel.line_value_list('pos'):
                plt.plot([pos, pos], plt.ylim(),'k:')
            for peak in peaks:
                plt.plot([wave[peak], wave[peak]], plt.ylim())
            plt.plot(wave, res_flux)
            plt.draw()
        

    else:
        print('Max number of iterations reached')
    #model.cleanup() #remove lines running to 0 etc.
    return mymodel
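A hedged example of how mainloop might be driven (the data file and the model container are placeholders; mymodel is assumed to provide the line_value_list/add_line/guess interface used above):

# Illustrative only.
ui.load_data(1, 'spectrum.dat')    # placeholder file name
mymodel = LineListModel()          # hypothetical container with add_line()/guess()
mymodel = mainloop(mymodel, fwhm=0.05, id=1, maxiter=5, do_plots=0)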
Example 48
from sherpa.astro import ui
from sherpa.astro import datastack
# acis_bkg_model is assumed to be provided by a separate helper module (not shown).

ds = datastack.DataStack()

ds[1].load_pha("acisf04938_000N002_r0043_pha3.fits")
ds[2].load_pha("acisf07867_000N001_r0002_pha3.fits")

detnam = "acis2i"
for dataset in ds.datasets:
    id_ = dataset["id"]
    rmf = ui.get_rmf(id_)
    arf = ui.get_arf(id_)
    ui.load_bkg_arf(id_, arf.name)
    bkg_arf = ui.get_bkg_arf(id_)
    bkg_rmf = ui.get_bkg_rmf(id_)
    bkg_arf.specresp = bkg_arf.specresp * 0 + 100.0

    bkg_scale = ui.get_data(id_).sum_background_data(lambda x, y: 1)
    bkg_model = bkg_rmf(bkg_arf(ui.const1d.bkg_constID * acis_bkg_model(detnam)))
    src_model = rmf(arf(ui.const1d.src_constID * ui.powlaw1d.powlaw))
    ds[id_].set_full_model(src_model + bkg_scale * bkg_model)
    ds[id_].set_bkg_full_model(bkg_model)

ds[1].set_par("src_const.c0", 1.0)
ds[1].freeze("src_const")

ds.ignore(None, 0.5)
ds.ignore(7, None)
ds.fit()
ds.plot_fit()
ds.group_counts(16)
ds.ignore(None, 0.5)
ds.ignore(7, None)
Example 49
 def test_pha_read(self):
     self.run_thread('pha_read')
     self.assertEqual(type(ui.get_data()), DataPHA)
Example 50
    def load_pha(self, specfile, annulus):
        """Load a pha file and add to the datasets for stacked analysis.

        It is required that datasets for all annuli are loaded before
        the source model is created (to ensure that components are
        created for each annulus).

        Parameters
        ----------
        specfile : str or sherpa.astro.data.DataPHA object
            If a string, the name of the file containing the source spectrum,
            which must be in PHA format (the data is expected to be extracted
            on the PI column). If a DataPHA object, then this is used (and
            is assumed to contain any needed background data).
        annulus : int
            The annulus number for the data.

        Returns
        -------
        dataid : int
            The Sherpa dataset identifier used for this spectrum.

        Examples
        --------

        Load the data for four annuli from the files 'ann1.pi' to 'ann4.pi'.

        >>> dep.load_pha('ann1.pi', 0)
        >>> dep.load_pha('ann2.pi', 1)
        >>> dep.load_pha('ann3.pi', 2)
        >>> dep.load_pha('ann4.pi', 3)

        Load in the PHA files into Sherpa DataPHA objects, and then use
        these objects:

        >>> s1 = ui.unpack_pha('src1.pi')
        >>> s2 = ui.unpack_pha('src2.pi')
        >>> s3 = ui.unpack_pha('src3.pi')
        >>> dep.load_pha(s1, 0)
        >>> dep.load_pha(s2, 1)
        >>> dep.load_pha(s3, 2)

        """

        dataid = len(self.datasets)

        # If the input has a counts attribute then assume a DataPHA
        # style object.
        #
        if hasattr(specfile, 'counts'):
            print('Using spectrum {} '.format(specfile.name) +
                  ' as dataset id {}'.format(dataid))
            ui.set_data(dataid, specfile)

        else:
            print('Loading spectrum file {} '.format(specfile) +
                  ' as dataset id {}'.format(dataid))
            ui.load_pha(dataid, specfile)

        data = ui.get_data(dataid)
        try:
            obsid = int(data.header['OBS_ID'])
        except (KeyError, TypeError, ValueError):
            obsid = 0

        dataset = dict(file=specfile,
                       obsid=obsid,
                       id=dataid,
                       annulus=annulus
                       )
        self.datasets.append(dataset)
        self.obsids.add(obsid)
        return dataid
Example 51
 def _add_dataset(self, dataid):
     dataset = dict(id=dataid, args=[], model_comps={}, data=ui.get_data(dataid))
     _all_dataset_ids[dataid] = dataset
     self.dataset_ids[dataid] = dataset
     self.datasets.append(dataset)
Example 52
 def test_load_table_fits(self):
     this_dir = os.path.dirname(os.path.abspath(__file__))
     ui.load_table(1, os.path.join(this_dir, "data", "two_column_x_y.fits.gz"))
     data = ui.get_data(1)
     self.assertEqualWithinTol(data.x, [1, 2, 3])
     self.assertEqualWithinTol(data.y, [4, 5, 6])
Example 53
    def group(self, new_ext='_group', valid=None):
        """Group spectra.
        """
        totON = None
        totOFF = None
        tot_time = 0.
        tot_alpha = 0.
        tot_arf = None
        tot_rmf = None
        ntrue = 0
        nrec = 0

        group_dat = None
        group_bkg = None
        group_arf = None
        group_rmf = None

        newname = self.name + new_ext

        if valid is None:
            group_ids = np.ones((len(self.listids)), dtype=bool)
        elif valid.sum() > 0:  # need a better type check obviously
            group_ids = valid
        else:
            print("Empty group. Do nothing.")
            return

        # loop over all datasets
        for datid in self.listids[group_ids]:
            mydat = datid.data
            if totON is None:
                totON = np.zeros_like(mydat.counts)
                totOFF = np.zeros_like(mydat.get_background().counts)
                sau.copy_data(datid.name, newname)
                group_dat = sau.get_data(newname)
                group_dat.name = newname
                group_bkg = group_dat.get_background()

            # sum total ON and OFF
            totON += mydat.counts
            totOFF += mydat.get_background().counts

            # here we assume the background rate is the same within each run, so we average alpha weighted by time
            tot_alpha += mydat.exposure / mydat.get_background_scale()
            tot_time += mydat.exposure

            # compute average arf
            c_arf = mydat.get_arf().get_y()
            if tot_arf is None:
                tot_arf = np.zeros_like(c_arf)
                group_arf = group_dat.get_arf()

            tot_arf += c_arf * mydat.exposure

            # Compute average RMF
            c_rmf = mydat.get_rmf()

            # for now, we assume that n_grp is always equal to 1, which is the case for HESS RMFs generated with START
            # the channels to be used in the matrix are given by the cumulative sum of n_chan
            chans = c_rmf.n_chan.cumsum()

            # if not created, instantiate tmp_rmf
            if tot_rmf is None:
                group_rmf = group_dat.get_rmf()
                ntrue = int(c_rmf.get_dims()[0])
                nrec = int(c_rmf.detchans)
                tot_rmf = np.zeros((ntrue, nrec))

            c_rmf.matrix[np.where(np.isnan(c_rmf.matrix))] = 0.
            for i in np.arange(ntrue):
                irec_lo = c_rmf.f_chan[i]
                irec_hi = c_rmf.f_chan[i] + c_rmf.n_chan[i]
                indmin = chans[i]
                indmax = chans[i] + c_rmf.n_chan[i]
                if indmax < c_rmf.matrix.shape[0]:
                    tot_rmf[i, irec_lo:irec_hi] += c_rmf.matrix[indmin:indmax] * c_arf[i] * mydat.exposure

        tot_arf /= tot_time
        tot_arf = np.abs(tot_arf)
        for i in np.arange(nrec):
            tot_rmf[:, i] /= tot_arf * tot_time

        tot_rmf[np.isnan(tot_rmf)] = 0.
        tot_alpha = tot_time / tot_alpha

        group_dat.counts = totON
        group_dat.exposure = tot_time

        group_bkg.name = newname + '_bkg'
        group_bkg.counts = totOFF
        group_bkg.backscal = 1. / tot_alpha
        group_bkg.exposure = tot_time

        group_rmf.name = newname + '_rmf'
        (ntrue, nrec) = tot_rmf.shape
        tot_rmf = np.abs(tot_rmf)  # this is a hack; it is correct as long as the modulus of negative elements is << 1
        # reproject total rmf into new rmf with correct f_chan, n_chan and matrix
        ix, iy = np.where(tot_rmf > 0.)
        tmp = np.insert(np.diff(ix), 0, 1)
        new_index = np.where(tmp)[0]

        # Get first channel for a given true energy
        group_rmf.f_chan *= 0
        group_rmf.f_chan[ix[new_index]] = np.uint32(iy[new_index])

        # Find the number of channels
        group_rmf.n_chan *= 0
        group_rmf.n_chan[ix[new_index]] = np.uint32(np.append(iy[new_index - 1][1:], iy[-1]) - iy[new_index] + 1)
        group_rmf.matrix = tot_rmf[ix, iy]

        group_arf.name = newname + '_arf'
        group_arf.specresp = tot_arf

        group_dat.set_background(group_bkg)
        group_dat.set_arf(group_arf)
        group_dat.set_rmf(group_rmf)

        res = HESS_spec(newname)
        res.threshold = np.min(np.array([run.threshold for run in self.listids[group_ids]]))
        res.emax = 1e11
        return res
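The ARF handling in the loop above is an exposure-weighted average; a standalone sketch of the same arithmetic with toy values:

import numpy as np

# Two toy effective-area curves and their exposures.
arf_runs = [np.array([100., 120., 130.]), np.array([90., 110., 125.])]
expo_runs = [1000., 1500.]

tot_arf = np.zeros_like(arf_runs[0])
tot_time = 0.
for c_arf, expo in zip(arf_runs, expo_runs):
    tot_arf += c_arf * expo   # accumulate exposure-weighted effective area
    tot_time += expo
tot_arf /= tot_time           # exposure-weighted mean ARF, as in group() above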