def test_dataspace1d_datapha(clean_astro_ui):
    """Explicitly test dataspace1d for DataPHA"""

    assert ui.list_data_ids() == []

    # Note the grid is ignored, other than the number of bins
    ui.dataspace1d(20, 30, step=2.5, id='x', dstype=ui.DataPHA)

    assert ui.list_data_ids() == ['x']
    assert ui.get_data('x').name == 'dataspace1d'

    grid = ui.get_indep('x')
    assert len(grid) == 1

    expected = numpy.asarray([1, 2, 3, 4, 5])
    assert grid[0] == pytest.approx(expected)

    y = ui.get_dep('x')
    assert y == pytest.approx(numpy.zeros(5))

    assert ui.get_exposure('x') is None
    assert ui.get_grouping('x') is None
    assert ui.get_quality('x') is None

    assert ui.get_data('x').subtracted is False

    with pytest.raises(IdentifierErr):
        ui.get_bkg('x')
Example #2
def test_get_bkg_fit_plot_energy(idval):
    """Basic testing of get_bkg_fit_plot: energy
    """

    setup_example_bkg_model(idval)
    if idval is None:
        ui.set_analysis('energy')
        ui.get_bkg().units = 'energy'
        fp = ui.get_bkg_fit_plot()
    else:
        ui.set_analysis(idval, 'energy')
        ui.get_bkg(idval).units = 'energy'
        fp = ui.get_bkg_fit_plot(idval)

    dp = fp.dataplot
    mp = fp.modelplot

    assert dp.title == 'example-bkg'
    assert mp.title == 'Background Model Contribution'

    for plot in [dp, mp]:
        assert plot.xlabel == 'Energy (keV)'
        assert plot.ylabel == 'Counts/sec/keV'
        assert plot.x == pytest.approx(_data_chan)

    yexp = _data_bkg / 1201.0 / _bexpscale
    assert dp.y == pytest.approx(yexp)

    yexp = _arf / 100.0 / _bexpscale
    assert mp.y == pytest.approx(yexp)
Example #3
def test_load_pha2(make_data_path, caplog):
    """Basic test that a pha2 file can be read in."""

    basename = '3c120_pha2'

    orig_ids = ui.list_data_ids()
    assert orig_ids == []

    # The file is stored gzip-encoded
    infile = make_data_path(basename)
    ui.load_pha(infile)

    pha_ids = ui.list_data_ids()
    assert len(pha_ids) == 12

    # list_data_ids doesn't guarantee an order
    # Do an explicit check, rather than via a set (testing
    # all at once) to make it easier to see what is missing
    # (if any)
    #
    for i in range(1, 13):
        assert i in pha_ids

    for i in range(1, 13):
        d = ui.get_data(i)
        validate_pha(d, bkg=True)

        # There is no indication of what "part" this data set
        # represents in the file name
        #
        assert d.name == infile

        b = ui.get_bkg(i, bkg_id=1)
        validate_pha(b, bkg=False)
        assert b.name == infile

        b = ui.get_bkg(i, bkg_id=2)
        validate_pha(b, bkg=False)
        assert b.name == infile

    # Test Log messages
    msg_one = "systematic errors were not found in file '{}'".format(infile)
    # Note the trailing space after the file name; keep it off the end of
    # the source line so editors do not strip it.
    msg_two = "statistical errors were found in file '{}' \n".format(infile) + \
              "but not used; to use them, re-read with use_errors=True"
    msg_three = "read background_up into a dataset from file {}".format(infile)
    msg_four = "read background_down into a dataset from file {}".format(
        infile)
    msg_five = "Multiple data sets have been input: 1-12"

    assert caplog.record_tuples == [
        ('sherpa.astro.io', logging.WARNING, msg_one),
        ('sherpa.astro.io', logging.INFO, msg_two),
        ('sherpa.astro.io', logging.INFO, msg_three),
        ('sherpa.astro.io', logging.INFO, msg_four),
        ('sherpa.astro.ui.utils', logging.INFO, msg_five),
    ]
def test_dataspace1d_datapha_bkg(clean_astro_ui):
    """Explicitly test dataspace1d for DataPHA (background)"""

    # list_bkg_ids will error out until the dataset exists
    assert ui.list_data_ids() == []

    # We don't use the grid range or step size since numbins has been
    # given.
    ui.dataspace1d(20, 30, step=2.5, numbins=10, id='x', dstype=ui.DataPHA)

    assert ui.list_data_ids() == ['x']
    assert ui.list_bkg_ids('x') == []

    ui.dataspace1d(20,
                   30,
                   step=2.5,
                   numbins=10,
                   id='x',
                   bkg_id=2,
                   dstype=ui.DataPHA)

    assert ui.list_data_ids() == ['x']
    assert ui.list_bkg_ids('x') == [2]

    assert ui.get_data('x').name == 'dataspace1d'

    # I've explicitly not chosen the default background identifier
    with pytest.raises(IdentifierErr):
        ui.get_bkg('x')

    assert ui.get_bkg('x', 2).name == 'bkg_dataspace1d'

    grid = ui.get_indep('x', bkg_id=2)
    assert len(grid) == 1

    expected = numpy.asarray([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
    assert grid[0] == pytest.approx(expected)

    y = ui.get_dep('x', bkg_id=2)
    assert y == pytest.approx(numpy.zeros(10))

    assert ui.get_exposure('x', bkg_id=2) is None
    assert ui.get_grouping('x', bkg_id=2) is None
    assert ui.get_quality('x', bkg_id=2) is None

    assert ui.get_bkg('x', bkg_id=2).subtracted is False

    # check we can subtract the dataset; as the data is all zeros
    # we don't bother checking the result.
    #
    ui.subtract('x')
def validate_pha(idval):
    """Check that the PHA dataset in id=idval is
    as expected.
    """

    assert ui.list_data_ids() == [idval]

    pha = ui.get_data(idval)
    assert isinstance(pha, DataPHA)

    arf = ui.get_arf(idval)
    assert isinstance(arf, ARF1D)

    rmf = ui.get_rmf(idval)
    assert isinstance(rmf, RMF1D)

    bpha = ui.get_bkg(idval, bkg_id=1)
    assert isinstance(bpha, DataPHA)

    barf = ui.get_arf(idval, bkg_id=1)
    assert isinstance(barf, ARF1D)

    brmf = ui.get_rmf(idval, bkg_id=1)
    assert isinstance(brmf, RMF1D)

    # normally the background data set would have a different name,
    # but this is a PHA Type 3 file.
    # assert pha.name == bpha.name
    assert arf.name == barf.name
    assert rmf.name == brmf.name
Example #6
    def validate_pha(self, idval):
        """Check that the PHA dataset in id=idval is
        as expected.
        """

        self.assertEqual(ui.list_data_ids(), [idval])

        pha = ui.get_data(idval)
        self.assertIsInstance(pha, DataPHA)

        arf = ui.get_arf(idval)
        self.assertIsInstance(arf, ARF1D)

        rmf = ui.get_rmf(idval)
        self.assertIsInstance(rmf, RMF1D)

        bpha = ui.get_bkg(idval, bkg_id=1)
        self.assertIsInstance(bpha, DataPHA)

        barf = ui.get_arf(idval, bkg_id=1)
        self.assertIsInstance(barf, ARF1D)

        brmf = ui.get_rmf(idval, bkg_id=1)
        self.assertIsInstance(brmf, RMF1D)

        # normally the background data set would have a different name,
        # but this is a PHA Type 3 file.
        # self.assertEqual(pha.name, bpha.name)
        self.assertEqual(arf.name, barf.name)
        self.assertEqual(rmf.name, brmf.name)
Example #7
def get_identity_response(i):
    n = ui.get_bkg(i).counts.size
    rmf = ui.get_rmf(i)
    try:
        arf = ui.get_arf(i)
        return lambda model: IdentityResponse(n, model, arf=arf, rmf=rmf)
    except Exception:
        # no ARF is associated with this dataset; fall back to an
        # RMF-only identity response
        return lambda model: IdentityRMF(n, model, rmf=rmf)
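
A quick usage sketch (hypothetical, not part of the original snippet): the returned factory wraps a model so that it bypasses the ARF/RMF folding. IdentityResponse and IdentityRMF are assumed to be defined in the surrounding module, and 'pl' is a hypothetical model component name.

make_response = get_identity_response(1)
wrapped = make_response(ui.get_model_component('pl'))
ui.set_full_model(1, wrapped)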
Example #8
    def testReadImplicit(self):
        """Exclude .gz from the file name"""

        idval = "13"
        fname = self.head + '_pha3.fits'
        ui.load_pha(idval, fname)

        self.validate_pha(idval)

        pha = ui.get_data(idval)
        bpha = ui.get_bkg(idval, bkg_id=1)
        self.assertEqual(pha.name, bpha.name)
def test_pha3_read_implicit(make_data_path, clean_astro_ui):
    """Exclude .gz from the file name"""

    idval = "13"
    fname = make_data_path(FILE_NAME)
    ui.load_pha(idval, fname)

    validate_pha(idval)

    pha = ui.get_data(idval)
    bpha = ui.get_bkg(idval, bkg_id=1)
    assert pha.name == bpha.name
    assert pha.name == fname
Example #10
def replace_bkg_identity_response(i=1):
    """
    The PileupRMFModel(), by default, only calculates the convolved model
    at the noticed channels.
    See https://github.com/sherpa/sherpa/blob/master/sherpa/astro/instrument.py

    Here we simply replace the background response, which evaluates at all
    channels, with one that evaluates the background component only at the
    noticed channels.
    """
    pha = ui.get_bkg(i)
    n = pha.counts.size
    src = ui.get_data(i)  # mask background according to src.
    bkgModel = ui.get_bkg_model(i).model
    rmf = ui.get_bkg_rmf(i)
    arf = ui.get_bkg_arf(i)
    return IdentityPileupResponse(n, bkgModel, rmf=rmf, arf=arf, pha=src)
Example #11
    def testReadExplicit(self):
        """Include .gz in the file name"""

        idval = 12
        fname = self.head + '_pha3.fits.gz'
        ui.load_pha(idval, fname)

        self.validate_pha(idval)

        # TODO: does this indicate that the file name, as read in,
        #       should have the .gz added to it to match the data
        #       read in, or left as is?
        pha = ui.get_data(idval)
        bpha = ui.get_bkg(idval, bkg_id=1)
        self.assertEqual(pha.name, bpha.name + '.gz')
Example #12
def test_get_bkg_resid_plot_energy(idval):
    """Basic testing of get_bkg_resid_plot: energy
    """

    setup_example_bkg_model(idval)
    if idval is None:
        ui.set_analysis('energy')
        ui.get_bkg().units = 'energy'
        bp = ui.get_bkg_resid_plot()
    else:
        ui.set_analysis(idval, 'energy')
        ui.get_bkg(idval).units = 'energy'
        bp = ui.get_bkg_resid_plot(idval)

    assert bp.x == pytest.approx(_data_chan)

    # correct the counts by the bin width and exposure time
    #
    yexp = (_data_bkg * 100.0 / 1201.0 - _arf) / (_bexpscale * 100)
    assert bp.y == pytest.approx(yexp)

    assert bp.title == 'Residuals of example-bkg - Bkg Model'
    assert bp.xlabel == 'Energy (keV)'
    assert bp.ylabel == 'Counts/sec/keV'
Example #13
def test_get_bkg_model_plot_energy(idval):
    """Basic testing of get_bkg_model_plot: energy
    """

    # The way I have set up the data means that set_analysis
    # doesn't seem to change the setting for the background,
    # which should be tracked down (Sep 2019) but not just now.
    #
    setup_example_bkg_model(idval)
    if idval is None:
        ui.set_analysis('energy')
        ui.get_bkg().units = 'energy'
        bp = ui.get_bkg_model_plot()
    else:
        ui.set_analysis(idval, 'energy')
        ui.get_bkg(idval).units = 'energy'
        bp = ui.get_bkg_model_plot(idval)

    # TODO: is this a bug in the plotting code, or does it just
    # indicate that the test hasn't set up the correct invariants
    # (which may be true as the code above has to change the units
    # setting of the background object)?
    #
    # I was expecting bp.x to return energy and not channel values
    #
    assert bp.xlo == pytest.approx(_data_chan - 0.5)
    assert bp.xhi == pytest.approx(_data_chan + 0.5)

    # TODO: The factor of 100 comes from the bin width (0.1 keV), but
    # why is there a scaling by _bexpscale?
    yexp = _arf / (_bexpscale * 100)
    assert bp.y == pytest.approx(yexp)

    assert bp.title == 'Model'
    assert bp.xlabel == 'Energy (keV)'
    assert bp.ylabel == 'Counts/sec/keV'
Example #14
def test_get_bkg_plot_energy(idval):
    """Basic testing of get_bkg_plot: energy
    """

    # The way I have set up the data means that set_analysis
    # doesn't seem to change the setting for the background,
    # which should be tracked down (Sep 2019) but not just now.
    #
    setup_example_bkg(idval)
    if idval is None:
        ui.set_analysis('energy')
        ui.get_bkg().units = 'energy'
        bp = ui.get_bkg_plot()
    else:
        ui.set_analysis(idval, 'energy')
        ui.get_bkg(idval).units = 'energy'
        bp = ui.get_bkg_plot(idval)

    # TODO: is this a bug in the plotting code, or does it just
    # indicate that the test hasn't set up the correct invariants
    # (which may be true as the code above has to change the units
    # setting of the background object)?
    #
    # I was expecting bp.x to return energy and not channel values
    #
    assert bp.x == pytest.approx(_data_chan)

    # normalise by exposure time and bin width, but bin width here
    # is 1 (because it is being measured in channels).
    #
    yexp = _data_bkg / 1201.0 / _bexpscale
    assert bp.y == pytest.approx(yexp)

    assert bp.title == 'example-bkg'
    assert bp.xlabel == 'Energy (keV)'
    assert bp.ylabel == 'Counts/sec/keV'
def test_pha3_read_explicit(make_data_path, clean_astro_ui):
    """Include .gz in the file name"""

    fname = make_data_path(FILE_NAME + '.gz')
    idval = 12
    ui.load_pha(idval, fname)

    validate_pha(idval)

    # TODO: does this indicate that the file name, as read in,
    #       should have the .gz added to it to match the data
    #       read in, or left as is?
    pha = ui.get_data(idval)
    bpha = ui.get_bkg(idval, bkg_id=1)
    assert pha.name == bpha.name + '.gz'
    assert pha.name == fname
Example #16
    def __init__(self, id=None, bkgModelDir=None):
        """
        Find the background model.

        id: which data id to fit
        bkgModelDir: read background model files from a different directory.

        I analysed the background for many instruments, and stored the mean
        and principal components. The data file tells us which instrument we
        are dealing with, so we load the correct file.

        First guess:
        1) PCA decomposition.
        2) Mean scaled, other components zero.
        The one with the better cstat is kept.
        Then start with 0 components and add 1, 2, ... components until there
        is no improvement in AIC/cstat.
        """
        self.id = id
        bkg = ui.get_bkg(self.id)
        hdr = bkg.header
        telescope = hdr.get('TELESCOP', '')
        instrument = hdr.get('INSTRUME', '')
        if telescope == '' and instrument == '':
            raise Exception(
                'ERROR: The TELESCOP/INSTRUME headers are not set in the data file.'
            )
        self.data = bkg.counts
        self.ndata = len(self.data)
        self.ngaussians = 0
        if bkgModelDir is None:
            bkgModelDir = os.path.dirname(__file__)
        style1 = os.path.join(bkgModelDir,
                              ('%s_%s_%d.json' %
                               (telescope, instrument, self.ndata)).lower())
        style2 = os.path.join(bkgModelDir,
                              ('%s_%d.json' % (telescope, self.ndata)).lower())
        if os.path.exists(style1):
            self.load(style1)
        elif os.path.exists(style2):
            self.load(style2)
        else:
            raise Exception(
                'ERROR: Could not load PCA components for this detector (%s %s, %d channels). Try the SingleFitter instead.'
                % (telescope, instrument, self.ndata))
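
The component-selection loop described in the docstring is not part of this excerpt. A minimal sketch of an AIC-style stopping rule, assuming a hypothetical helper fit_with_n(n) that returns the cstat value of a fit using the first n principal components:

def pick_ncomponents(fit_with_n, max_components=10):
    # AIC = stat + 2 * (number of free parameters); stop adding PCA
    # components once the AIC no longer improves.
    best_n = 0
    best_aic = fit_with_n(0)
    for n in range(1, max_components + 1):
        aic = fit_with_n(n) + 2 * n
        if aic >= best_aic:
            break  # no improvement; keep the previous model
        best_n, best_aic = n, aic
    return best_n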
Example #17
    def __init__(self, dataids):
        self.datasets = []
        self.tot_excess = None
        self.tot_expo = None

        for dataid in dataids:
            spec = spectral_data(sau.get_data(dataid), sau.get_bkg(dataid))
            self.datasets.append(spec)
        self.bkg = np.concatenate([a.fit_bkg for a in self.datasets])
        self.alpha = np.concatenate([a.fit_alpha for a in self.datasets])
        self.Ttot = np.concatenate([a.fit_Ttot for a in self.datasets])
        self.ONexpo = np.concatenate([a.fit_ONexpo for a in self.datasets])

        for a in self.datasets:
            # Careful: we are assuming that all the spectra have the same binning
            if self.tot_excess is None:
                self.tot_excess = np.zeros_like(a.excess)
            if self.tot_expo is None:
                self.tot_expo = np.zeros_like(a.excess)

            self.tot_excess += a.excess
            self.tot_expo += a.full_expo
Example #18
def get_bkg_qq_data(id=None, bkg_id=None):
    """Get data for a quantile-quantile plot of the background data and model.

    *id*
      The dataset id for which to get the data; defaults if unspecified.
    *bkg_id*
      The identifier of the background; defaults if unspecified.
    Returns:
      An ndarray of shape ``(3, npts)``. The first slice is the energy axis in
      keV; the second is the observed values in each bin (counts, or rate, or
      rate per keV, etc.); the third is the corresponding model value in each
      bin.

    The inputs are implicit; the data are obtained from the current state of
    the Sherpa ``ui`` module.

    """
    bdata = ui.get_bkg(id=id, bkg_id=bkg_id)
    kev = bdata.get_x()
    obs_data = bdata.counts
    model_data = ui.get_bkg_model(id=id, bkg_id=bkg_id)(kev)
    return np.vstack((kev, obs_data, model_data))
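
For reference, the returned (3, npts) array maps directly onto a quantile-quantile plot. A minimal matplotlib sketch (matplotlib is an assumption here, not part of the snippet):

import numpy as np
import matplotlib.pyplot as plt

kev, obs, mdl = get_bkg_qq_data()
qq_obs = np.cumsum(obs)
qq_mdl = np.cumsum(mdl)
plt.plot(qq_mdl, qq_obs)
# diagonal reference line: perfect agreement between data and model
lim = max(qq_obs[-1], qq_mdl[-1])
plt.plot([0, lim], [0, lim], linestyle='--')
plt.xlabel('Cumulative model counts')
plt.ylabel('Cumulative observed counts')
plt.show()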
Example #20
# The snippet begins mid-call to parser.add_argument; the imports and
# argument setup below are an assumed reconstruction.
import sys
import traceback
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--energyrange", type=str, required=True,
                    help="Energy range to fit, as LO:HI in keV")
parser.add_argument("filenames", type=str, nargs="+",
                    help="Spectra files (*.pi, *.pha)")

args = parser.parse_args()

from sherpa.astro.ui import load_pha, get_rmf, get_arf, get_fit_plot, load_table_model, set_xsabund, set_xsxsect, ignore, notice, set_xlog, set_ylog
from sherpa.astro.ui import xsapec, set_full_model, get_bkg_model, get_bkg_scale, set_stat, get_bkg, group_adapt, set_analysis, calc_stat, get_data, set_model, get_response, get_model, calc_energy_flux

id = 1
filename = args.filenames[0]
elo, ehi = args.energyrange.split(':')
elo, ehi = float(elo), float(ehi)
load_pha(id, filename)
try:
    assert get_rmf(id).energ_lo[0] > 0
    assert get_arf(id).energ_lo[0] > 0
    assert (get_bkg(id).counts > 0).sum() > 0
except Exception:
    traceback.print_exc()
    sys.exit(0)

set_xlog()
set_ylog()
set_stat('cstat')
set_xsabund('wilm')
set_xsxsect('vern')
set_analysis(id, 'ener', 'counts')
ignore(None, elo)
ignore(ehi, None)
notice(elo, ehi)

prefix = filename + '_xagnfitter_out_'
Example #21
def renorm(id=None, cpt=None, bkg_id=None, names=None, limscale=1000.0):
    """Change the normalization of a model to match the data.

    The idea is to change the normalization to be a better match to
    the data, so that the search can be quicker. It can be considered
    to be like the `guess` command, but for the normalization. It
    is *only* intended to change the normalization to a value near
    the correct one; it *should not* be used for any sort of
    calculation without first doing a fit. It is also only going to
    give reasonable results for models where the predicted data of a
    model is linearly related to the normalization.

    Parameters
    ----------
    id : None, int, or str
       The data set identifier to use. A value of ``None`` uses the
       default identifier.
    cpt
       If not ``None``, the model component to use. When ``None``, the
       full source expression for the data set is used. There is no
       check that the ``id`` argument matches the component (i.e. that
       the component is included in the source model for the data set)
    bkg_id : None, int
       If not None then change the normalization of the model to the
       given background dataset.
    names : None or array of str
       The parameter names that should be changed (a case-insensitive
       comparison is made, and the name does not include the model
       name). If ``None`` then the default set of
       ``['ampl', 'norm']`` is used.
    limscale : float
       The min and max range of the normalization is set to the
       calculated value divided and multiplied by ``limscale``.
       These limits will be modified to match the hard limits of the
       parameter if they exceed them.

    See Also
    --------
    guess, ignore, notice, set_par

    Notes
    -----
    The normalization is computed so that the predicted model counts
    matches the observed counts for the currently-noticed data range,
    as long as parameter names match the ``names`` argument (or
    ['ampl', 'norm'] if that is ``None``) and the parameter is not
    frozen.

    If no matches are found, then no changes are made. Otherwise, a
    scale factor is created by summing up the data counts and dividing
    this by the model sum over the currently-noticed range. This scale
    factor is divided by the number of matching parameters, and then
    the parameter values are multiplied by this value. If a model
    contains multiple parameters matching the contents of the
    ``names`` argument then each one will be changed by this routine.

    It is not intended for use with source expressions
    created with `set_full_model`, and may not work well with
    image models that use a PSF (one set with `set_psf`).

    Examples
    --------

    Adjust the normalization of the gal component before fitting.

    >>> load_pha('src.pi')
    >>> subtract()
    >>> notice(0.5, 7)
    >>> set_source(xsphabs.galabs * xsapec.gal)
    >>> renorm()

    Change the normalization of a 2D model using the 'src' dataset.
    Only the ``src`` component is changed since the default value for
    the ``names`` parameter - that is ['ampl', 'norm'] - does not
    match the normalization parameter of the `const2d` model.

    >>> load_image('src', 'img.fits')
    >>> set_source('src', gauss2d.src + const2d.bgnd)
    >>> renorm('src')

    The names parameter is set so that both components are adjusted,
    and each component is assumed to contribute half the signal.

    >>> load_image(12, 'img.fits')
    >>> notice2d_id(12, 'srcfit.reg')
    >>> set_source(12, gauss2d.src12 + const2d.bgnd12)
    >>> renorm(12, names=['ampl', 'c0'])

    Change the minimum and maximum values of the normalization
    parameter to be the calculated value divided by and multiplied by
    1e4 respectively (these changes are made to the soft limits).

    >>> renorm(limscale=1e4)

    """

    if names is None:
        matches = ['ampl', 'norm']
    elif names == []:
        raise ArgumentErr('bad', 'names argument', '[]')
    else:
        matches = [n.lower() for n in names]

    if bkg_id is None:
        d = ui.get_data(id=id)
        m = ui.get_model(id=id)
    else:
        d = ui.get_bkg(id=id, bkg_id=bkg_id)
        m = ui.get_bkg_model(id=id, bkg_id=bkg_id)

    if cpt is not None:
        # In this case the get_[bkg_]model call is not needed above,
        # but leave in as it at least ensures there's a model defined
        # for the data set.
        m = cpt

    pars = [p for p in m.pars if p.name.lower() in matches and not p.frozen]
    npars = len(pars)
    if npars == 0:
        wmsg = "no thawed parameters found matching: {}".format(
            ", ".join(matches))
        warn(wmsg)
        return

    yd = d.get_dep(filter=True).sum()
    ym = d.eval_model_to_fit(m).sum()

    # argh; these are numpy floats, and they do not throw a
    # ZeroDivisionError, rather you get a RuntimeWarning message.
    # So explicitly convert to Python float.
    #
    try:
        scale = float(yd) / float(ym) / npars
    except ZeroDivisionError:
        error("model sum evaluated to 0; no re-scaling attempted")
        return

    for p in pars:
        newval = p.val * scale
        newmin = newval / limscale
        newmax = newval * limscale

        # Could do the limit/range checks and then call set_par,
        # but only do so if there's a problem.
        #
        try:
            ui.set_par(p, val=newval, min=newmin, max=newmax)

        except ParameterErr:
            # The following is not guaranteed to catch all cases;
            # e.g if the new value is outside the hard limits.
            #
            minflag = newmin < p.hard_min
            maxflag = newmax > p.hard_max
            if minflag:
                newmin = p.hard_min
            if maxflag:
                newmax = p.hard_max

            ui.set_par(p, val=newval, min=newmin, max=newmax)

            # provide informational message after changing the
            # parameter
            if minflag and maxflag:
                reason = "to hard min and max limits"
            elif minflag:
                reason = "to the hard minimum limit"
            elif maxflag:
                reason = "to the hard maximum limit"
            else:
                # this should be impossible
                reason = "for an unknown reason"

            info("Parameter {} is restricted ".format(p.fullname) + reason)
Example #22
def test_fake_pha_basic(id, has_bkg, clean_astro_ui):
    """No background.

    See also test_fake_pha_add_background

    For simplicity we use perfect responses.

    A background dataset can be added, but it should
    not be used in the simulation.
    """

    channels = np.arange(1, 4, dtype=np.int16)
    counts = np.ones(3, dtype=np.int16)
    bcounts = 100 * counts

    ui.load_arrays(id, channels, counts, ui.DataPHA)
    ui.set_exposure(id, 100)

    if has_bkg:
        bkg = ui.DataPHA('bkg', channels, bcounts, exposure=200, backscal=0.4)
        ui.set_bkg(id, bkg, bkg_id='faked-bkg')

    ebins = np.asarray([1.1, 1.2, 1.4, 1.6])
    elo = ebins[:-1]
    ehi = ebins[1:]
    arf = ui.create_arf(elo, ehi)
    rmf = ui.create_rmf(elo, ehi, e_min=elo, e_max=ehi)

    mdl = ui.create_model_component('const1d', 'mdl')
    mdl.c0 = 2
    ui.set_source(id, mdl)

    ui.fake_pha(id, arf, rmf, 1000.0)

    faked = ui.get_data(id)
    assert faked.exposure == pytest.approx(1000.0)
    assert (faked.channel == channels).all()

    assert faked.name == 'faked'
    assert faked.get_arf().name == 'test-arf'
    assert faked.get_rmf().name == 'delta-rmf'

    if has_bkg and id is not None:
        assert faked.background_ids == ['faked-bkg']
        bkg = ui.get_bkg(id, 'faked-bkg')
        assert bkg.name == 'bkg'
        assert bkg.counts == pytest.approx(bcounts)
        assert bkg.exposure == pytest.approx(200)

    else:
        assert faked.background_ids == []

    # check we've faked counts (the scaling is such that it is
    # very improbable that this condition will fail)
    assert (faked.counts > counts).all()

    # For reference the predicted source signal is
    #    [200, 400, 400]
    #
    # What we'd like to say is that the predicted counts are
    # similar, but this is not easy to do. What we can try
    # is summing the counts (to average over the randomness)
    # and then a simple check
    #
    assert faked.counts.sum() > 200
Example #23
def test_load_pha2(loader, id0, ids, make_data_path, caplog, clean_astro_ui):
    """Basic test that a pha2 file can be read in."""

    basename = '3c120_pha2'

    orig_ids = ui.list_data_ids()
    assert orig_ids == []

    # The file is stored gzip-encoded
    infile = make_data_path(basename)
    if id0 is None:
        loader(infile)
    else:
        loader(id0, infile)

    pha_ids = ui.list_data_ids()
    assert len(pha_ids) == 12

    # list_data_ids doesn't guarantee an order
    # Do an explicit check, rather than via a set (testing
    # all at once) to make it easier to see what is missing
    # (if any)
    #
    for i in ids:
        assert i in pha_ids

        d = ui.get_data(i)
        validate_pha(d, bkg=True)

        # There is no indication of what "part" this data set
        # represents in the file name
        #
        assert d.name == infile

        b = ui.get_bkg(i, bkg_id=1)
        validate_pha(b, bkg=False)
        assert b.name == infile

        b = ui.get_bkg(i, bkg_id=2)
        validate_pha(b, bkg=False)
        assert b.name == infile

    # Test Log messages
    msg_one = "systematic errors were not found in file '{}'".format(infile)

    # Editors can remove trailing spaces from lines, so split into
    # separate lines so the space after the file name is included.
    # Perhaps this space should be removed from the warning message?
    #
    msg_two = "statistical errors were found in file '{}' \n".format(infile) + \
              "but not used; to use them, re-read with use_errors=True"

    msg_three = "read background_up into a dataset from file {}".format(infile)
    msg_four = "read background_down into a dataset from file {}".format(infile)

    msg_five = "Multiple data sets have been input: " + \
               "{}-{}".format(ids[0], ids[11])

    assert caplog.record_tuples == [
        ('sherpa.astro.io', logging.WARNING, msg_one),
        ('sherpa.astro.io', logging.INFO, msg_two),
        ('sherpa.astro.io', logging.INFO, msg_three),
        ('sherpa.astro.io', logging.INFO, msg_four),
        ('sherpa.astro.ui.utils', logging.INFO, msg_five),
    ]
Example #25
    def get_plot_arrays(self, data_list):
        """Construct arrays of model count rates."""

        sample_model = sau.get_model(data_list[0].name)
        # do this only once, assuming that the true-energy binning does
        # not change from run to run
        self.get_binning(sample_model)
        obs_exc = np.zeros_like(self.bcenter)
        obs_err = np.zeros_like(self.bcenter)
        tot_on = np.zeros_like(self.bcenter)
        tot_off = np.zeros_like(self.bcenter)
        mod_cnts = np.zeros_like(self.bcenter)
        exp_tot = np.zeros_like(self.etrue_center)
        mod_tot = np.zeros_like(self.etrue_center)

        for dat in data_list:
            datid = dat.name
            exposure = dat.data.exposure
            on_cnt_rate = dat.data.get_y()

            c_bkg = sau.get_bkg(datid)
            bg_cnt_rate = c_bkg.get_y()
            backscal = c_bkg.get_backscal()

            c_mod = sau.get_model(datid)
            arf = c_mod.arf
            arf_vals = arf.get_y()

            # Excess
            bw_expo = self.b_width * exposure
            on_cnts = on_cnt_rate * bw_expo
            off_cnts = bg_cnt_rate * bw_expo / backscal
            c_exc = on_cnts - off_cnts  # excess counts
            c_exc_err2 = on_cnts + off_cnts / backscal  # errors

            # model counts
            c_modcnts = c_mod.calc(self.para, 2.)  # second parameter is dummy...

            # Consider only noticed bins
            valid = dat.data.get_noticed_channels().astype(int)
            valid -= np.ones_like(valid)  # Channel id's start at 1!

            obs_exc[valid] = obs_exc[valid] + c_exc[valid]  # Total excess in noticed bins
            obs_err[valid] = obs_err[valid] + c_exc_err2[valid]  # Total error square
            tot_on[valid] = tot_on[valid] + on_cnts[valid]
            tot_off[valid] = tot_off[valid] + off_cnts[valid]
            mod_cnts[valid] = mod_cnts[valid] + c_modcnts[valid]  # Total noticed model counts
            valid_arf = self.ener_map[valid].sum(0) > 0  # valid pixels in true energy

            self.get_mod_val(self.totmodel, self.etrue_center)

            # Add run exposure*area*model for valid true energy bins only
            exp_tot[valid_arf] = exp_tot[valid_arf] + \
                                 arf_vals[valid_arf] * self.mod_val[valid_arf] * exposure

            ''' Not used, may be useful to produce upper limits
            # significance per bin:
            signis = significance(n_observed=tot_on, mu_background=tot_off, method='lima')
            some_significant = False
            # makeUL = []
            for i, signi in enumerate(signis):
                if signi < 2:
                    print('WARNING: Energy bin from', round(binmin[i]/1e9, 1), 'to',
                          round(binmax[i]/1e9, 1), 'TeV has', round(signi, 2), 'sigma only.')
                    print('...may want to convert to upper limit')  # NOT YET IMPLEMENTED
                    continue
                # makeUL.append(True)
                if np.isinf(signi) or np.isnan(signi):  # isinf when Non = Noff = 0?
                    if some_significant:  # otherwise we are probably below threshold
                        print('WARNING: Energy bin from', round(binmin[i]/1e9, 1), 'to',
                              round(binmax[i]/1e9, 1), 'TeV contains no events.')
                    continue
                else:
                    some_significant = True
            '''

        # compute average exposure (time*area) in each measured energy bin
        mean_expo = np.zeros(obs_exc.shape)
        for i in range(obs_exc.shape[0]):
            mean_expo[i] = exp_tot[self.ener_map[i, :]].sum() / \
                           self.mod_val[self.ener_map[i, :]].sum()
        bw_meanexpo = self.b_width * mean_expo

        # get flux and error per cm^2/s/TeV
        self.mean_flux = 1e9 * obs_exc / bw_meanexpo
        self.mean_flux[np.isnan(self.mean_flux)] = 0

        self.mean_err = 1e9 * np.sqrt(obs_err) / bw_meanexpo  # mean_flux/signis

        # Compute residuals where model counts >0
        self.resid = (-mod_cnts + obs_exc) / np.sqrt(obs_err)

        # Model spectral points
        self.bcenter /= 1e9  # convert bin centers from keV to TeV
Example #26
def test_fake_pha_issue_1209(make_data_path, clean_astro_ui, tmp_path):
    """Check issue #1209.

    See also sherpa/astro/tests/test_fake_pha.py for

        test_fake_pha_has_valid_ogip_keywords_all_fake
        test_fake_pha_has_valid_ogip_keywords_from_real

    The session fake_pha includes quite a lot of logic which
    makes that the test case for #1209 should be done at this
    level, to complement the tests mentioned above.

    """

    infile = make_data_path("acisf01575_001N001_r0085_pha3.fits.gz")
    ui.load_pha(infile)
    ui.set_source(ui.powlaw1d.pl)
    pl.gamma = 1.8
    pl.ampl = 1e-4

    arf = ui.get_arf()
    rmf = ui.get_rmf()

    # check the TOTCTS setting in the input file
    d1 = ui.get_data()
    assert d1.header["TOTCTS"] == 855
    assert d1.counts.sum() == 855

    ui.set_source("newid", pl)
    ui.fake_pha("newid",
                exposure=ui.get_exposure(),
                bkg=ui.get_bkg(),
                rmf=rmf,
                arf=arf,
                backscal=ui.get_backscal())
    stat = ui.calc_stat("newid")

    outfile = tmp_path / "sim.pha"
    ui.save_pha("newid", str(outfile))

    ui.load_pha(3, str(outfile))
    d3 = ui.get_data(3)
    assert isinstance(d3, ui.DataPHA)

    assert d3.exposure == pytest.approx(37664.157219191)
    assert d3.areascal == pytest.approx(1.0)
    assert d3.backscal == pytest.approx(2.2426552620567e-06)

    assert d3.background_ids == []
    assert d3.response_ids == []

    # check the header
    hdr = d3.header
    assert hdr["TELESCOP"] == "CHANDRA"
    assert hdr["INSTRUME"] == "ACIS"
    assert hdr["FILTER"] == "none"

    # check some other values related to #1209 and #488 (OGIP)
    #
    assert "TOTCTS" not in hdr
    assert hdr["GROUPING"] == 0
    assert hdr["QUALITY"] == 0
    assert hdr["SYS_ERR"] == 0

    # We should get the same value - the responses are not written
    # to the temporary directory and so we need to load them
    # directly.
    #
    ui.set_rmf(3, rmf)
    ui.set_arf(3, arf)
    ui.set_source(3, pl)
    assert ui.calc_stat(3) == pytest.approx(stat)