Example #1
def example_pha_data():
    """Create an example data set."""

    etime = 1201.0
    d = ui.DataPHA('example',
                   _data_chan.copy(),
                   _data_counts.copy(),
                   exposure=etime,
                   backscal=0.1)

    a = ui.create_arf(_energies[:-1].copy(),
                      _energies[1:].copy(),
                      specresp=_arf.copy(),
                      exposure=etime)

    r = ui.create_rmf(_energies[:-1].copy(),
                      _energies[1:].copy(),
                      e_min=_energies[:-1].copy(),
                      e_max=_energies[1:].copy(),
                      startchan=1,
                      fname=None)

    d.set_arf(a)
    d.set_rmf(r)
    return d
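
The helper above relies on module-level arrays (_data_chan, _data_counts, _energies, _arf) defined elsewhere in the test module. A minimal sketch of hypothetical stand-in values that would make the snippet self-contained (the real arrays in the test suite differ):

import numpy as np
from sherpa.astro import ui

# Hypothetical stand-ins for the module-level test data; the values
# are illustrative only.
_data_chan = np.arange(1, 11, dtype=np.int16)   # 10 channels
_data_counts = np.ones(10, dtype=np.int16)      # flat counts
_energies = np.linspace(0.1, 1.1, 11)           # 11 edges -> 10 energy bins
_arf = 0.8 * np.ones(10)                        # flat effective area (cm^2)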
Example #2
def test_fake_pha_background_model(clean_astro_ui, reset_seed):
    """Check we can add a background component.

    See also test_fake_pha_basic.

    For simplicity we use perfect responses.
    """

    np.random.seed(27347)

    id = 'qwerty'
    channels = np.arange(1, 4, dtype=np.int16)
    counts = np.ones(3, dtype=np.int16)
    bcounts = 100 * counts

    ui.load_arrays(id, channels, counts, ui.DataPHA)
    ui.set_exposure(id, 100)
    ui.set_backscal(id, 0.1)

    bkg = ui.DataPHA('bkg', channels, bcounts, exposure=200, backscal=0.4)

    ebins = np.asarray([1.1, 1.2, 1.4, 1.6])
    elo = ebins[:-1]
    ehi = ebins[1:]
    arf = ui.create_arf(elo, ehi)
    rmf = ui.create_rmf(elo, ehi, e_min=elo, e_max=ehi)

    mdl = ui.create_model_component('const1d', 'mdl')
    mdl.c0 = 0
    bkgmdl = ui.create_model_component('const1d', 'bkgmdl')
    bkgmdl.c0 = 2
    ui.set_source(id, mdl)
    ui.set_bkg(id, bkg)
    ui.set_bkg_source(id, bkgmdl)
    ui.set_arf(id, arf, bkg_id=1)
    ui.set_rmf(id, rmf, bkg_id=1)

    ui.fake_pha(id, arf, rmf, 1000.0, bkg='model')

    faked = ui.get_data(id)
    assert faked.exposure == pytest.approx(1000.0)
    assert (faked.channel == channels).all()

    # check we've faked counts (the scaling is such that it is
    # very improbable that this condition will fail)
    assert (faked.counts > counts).all()

    # For reference the predicted signal in the faked spectrum is
    #    [200, 400, 400]
    # (all of it comes from the background model, since the source
    # model amplitude is zero) and the scaled background level is
    #    [125, 125, 125]
    # so, even with randomly drawn values, the following checks
    # should be robust. The arithmetic is sketched out after this
    # example.
    #
    predicted_by_source = 1000 * mdl(elo, ehi)
    predicted_by_bkg = (1000 / 200) * (0.1 / 0.4) * bcounts
    assert (faked.counts > predicted_by_source).all()
    assert (faked.counts > predicted_by_bkg).all()
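
The numbers quoted in the comments follow directly from the setup: [200, 400, 400] is c0 = 2 times the bin widths [0.1, 0.2, 0.2] keV times the requested 1000 s exposure, while [125, 125, 125] rescales the observed background counts by the exposure ratio (1000 / 200) and the BACKSCAL ratio (0.1 / 0.4), exactly as predicted_by_bkg does. A quick sketch of the arithmetic:

import numpy as np

widths = np.diff([1.1, 1.2, 1.4, 1.6])                  # [0.1, 0.2, 0.2] keV
predicted_signal = 2.0 * widths * 1000.0                # -> [200., 400., 400.]

bcounts = np.full(3, 100.0)                             # observed background counts
scaled_bkg = (1000.0 / 200.0) * (0.1 / 0.4) * bcounts   # -> [125., 125., 125.]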
Example #3
def test_calc_flux_pha_density_bin_edges(clean_astro_ui):
    """What happens when filter edges partially overlap bins? flux density

    Later tests may also cover this condition, but here we use
    faked data that is made to make the behavior "obvious".
    """

    chans = np.arange(1, 11, 1, dtype=int)
    counts = np.zeros(chans.size, dtype=int)

    # "perfect" response
    energies = np.arange(1, 12, 1)
    elo, ehi = energies[:-1], energies[1:]
    flat = np.ones(chans.size, dtype=int)

    d = ui.DataPHA('example', chans, counts)
    arf = ui.create_arf(elo, ehi, flat)
    rmf = ui.create_rmf(elo,
                        ehi,
                        e_min=elo,
                        e_max=ehi,
                        startchan=1,
                        fname=None)

    d.set_arf(arf)
    d.set_rmf(rmf)
    ui.set_data(1, d)

    ui.set_source(ui.powlaw1d.pl)
    pl.ampl = 1e-4
    pl.gamma = 1.7

    # choose an energy that is not equal to the center of the bin
    # just to check how this is handled
    #
    pdens = ui.calc_photon_flux(2.6)
    edens = ui.calc_energy_flux(2.6)

    enscale = sherpa.astro.utils._charge_e

    # Evaluate the model over the bin 2-3 keV; since the grid
    # has a width of 1 keV we do not need to divide by the bin
    # width when calculating the density.
    #
    ymdl = pl([2], [3])
    expected_pdens = ymdl.sum()
    expected_edens = enscale * 2.5 * expected_pdens

    # Prior to fixing #619, Sherpa returns 0 for both densities
    #
    assert pdens == pytest.approx(expected_pdens)

    # check against log as values ~ 5e-13
    edens = np.log10(edens)
    expected_edens = np.log10(expected_edens)
    assert edens == pytest.approx(expected_edens)
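
The expected densities can also be written analytically: powlaw1d integrates ampl * E**-gamma over each bin, so the 2-3 keV bin (which is 1 keV wide) directly gives the photon flux density reported at 2.6 keV, and the energy flux density multiplies that by the mid-bin energy converted to erg, which is what enscale (sherpa.astro.utils._charge_e) provides. A sketch of the arithmetic, using an approximate keV-to-erg factor:

ampl, gamma = 1e-4, 1.7
g1 = 1.0 - gamma

# integral of ampl * E**-gamma over the 2-3 keV bin; the bin is 1 keV
# wide, so this is also the density in photon/cm^2/s/keV
pdens = ampl * (3.0**g1 - 2.0**g1) / g1

# energy flux density at the bin mid-point (2.5 keV); 1.602e-9 is an
# approximate keV-to-erg conversion standing in for _charge_e
edens = 1.602e-9 * 2.5 * pdens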
Example #4
def test_get_axes_datapha_rmf():
    """RMF only"""

    ui.load_arrays(1, [1, 2, 3], [1, 2, 3], ui.DataPHA)

    ebins = np.asarray([0.1, 0.2, 0.4, 0.8])
    elo = ebins[:-1]
    ehi = ebins[1:]
    ui.set_rmf(ui.create_rmf(elo, ehi, e_min=elo, e_max=ehi))

    ax = ui.get_axes()
    assert len(ax) == 2
    assert ax[0] == pytest.approx([0.1, 0.2, 0.4])
    assert ax[1] == pytest.approx([0.2, 0.4, 0.8])
Example #5
def test_fake_pha_basic_arfrmf_set_in_advance(clean_astro_ui, reset_seed):
    """Similar to test_fake_pha_basic but instead of passing in
    the RMF, we set it before. The result should be the same, so we
    don't have ot go through all the parameterization of that test.
    """

    np.random.seed(20348)

    channels = np.arange(1, 4, dtype=np.int16)
    counts = np.ones(3, dtype=np.int16)

    ui.load_arrays('id123', channels, counts, ui.DataPHA)
    ui.set_exposure('id123', 100)

    ebins = np.asarray([1.1, 1.2, 1.4, 1.6])
    elo = ebins[:-1]
    ehi = ebins[1:]
    arf = ui.create_arf(elo, ehi)
    rmf = ui.create_rmf(elo, ehi, e_min=elo, e_max=ehi)
    ui.set_rmf('id123', rmf)
    ui.set_arf('id123', arf)

    mdl = ui.create_model_component('const1d', 'mdl')
    mdl.c0 = 2
    ui.set_source('id123', mdl)

    ui.fake_pha('id123', None, None, 1000.0)

    faked = ui.get_data('id123')
    assert faked.exposure == pytest.approx(1000.0)
    assert (faked.channel == channels).all()

    assert faked.name == 'faked'
    assert faked.background_ids == []

    # check we've faked counts (the scaling is such that it is
    # very improbable that this condition will fail)
    assert (faked.counts > counts).all()

    # For reference the predicted source signal is
    #    [200, 400, 400]
    #
    # What we'd like to say is that the predicted counts are
    # similar, but this is not easy to do. What we can try
    # is summing the counts (to average over the randomness)
    # and then a simple check
    #
    assert (faked.counts.sum() > 200) and (faked.counts.sum() < 3000)
    # This is more likely to fail by chance, but still very unlikely
    assert faked.counts[1] > faked.counts[0]
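
The final two assertions are deliberately loose. The expected total is 200 + 400 + 400 = 1000 counts, and a Poisson total with that mean has a standard deviation of roughly sqrt(1000), so the 200-3000 window sits tens of sigma away from the expectation; likewise channel 2 has twice the expected counts of channel 1, which is why counts[1] > counts[0] only rarely fails. Roughly:

import numpy as np

expected_total = 200 + 400 + 400                 # 1000 counts
sigma = np.sqrt(expected_total)                  # ~31.6 for a Poisson sum
lower_margin = (expected_total - 200) / sigma    # ~25 sigma
upper_margin = (3000 - expected_total) / sigma   # ~63 sigma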
Example #6
def test_fake_pha_no_data(id, clean_astro_ui, reset_seed):
    """What happens if there is no data loaded at the id?
    """

    np.random.seed(21347)

    ebins = np.asarray([1.1, 1.2, 1.4, 1.6])
    elo = ebins[:-1]
    ehi = ebins[1:]
    arf = ui.create_arf(elo, ehi)
    rmf = ui.create_rmf(elo, ehi, e_min=elo, e_max=ehi)

    mdl = ui.create_model_component('const1d', 'mdl')
    mdl.c0 = 2
    ui.set_source(id, mdl)

    ui.fake_pha(id, arf, rmf, 1000.0)

    # We don't really check anything sensible with the counts.
    # It is unlikely the simulated counts will be <= 1.
    #
    # For reference the predicted source signal is
    #    [200, 400, 400]
    #
    channels = np.arange(1, 4)
    counts = [1, 1, 1]

    faked = ui.get_data(id)
    assert faked.exposure == pytest.approx(1000.0)
    assert (faked.channel == channels).all()

    assert faked.name == 'faked'
    assert faked.get_arf().name == 'test-arf'
    assert faked.get_rmf().name == 'delta-rmf'

    assert faked.background_ids == []

    # check we've faked counts (the scaling is such that it is
    # very improbable that this condition will fail)
    assert (faked.counts > counts).all()

    # What we'd like to say is that the predicted counts are
    # similar, but this is not easy to do. What we can try
    # is summing the counts (to average over the randomness)
    # and then a simple check
    #
    assert (faked.counts.sum() > 200) and (faked.counts.sum() < 3000)
    # This is more likely to fail by chance, but still very unlikely
    assert faked.counts[1] > faked.counts[0]
Example #7
def test_grouped_pha_all_bad_response(arf, rmf, chantype, exp_counts, exp_xlo,
                                      exp_xhi, lo1, hi1, lo2, hi2,
                                      clean_astro_ui):
    """Helpdesk ticket: low-count data had no valid bins after grouping #790

    A simple PHA dataset is created, which has no "good" grouped data
    (1 group, but with a quality of 2). Several checks are made to
    ensure we can filter/notice/plot the data even when it is empty.

    Checks are done for
      - arf-only
      - rmf-only
      - arf+rmf
    analysis in case there's a difference in the code paths

    """

    chans = numpy.arange(1, 6, dtype=numpy.int16)
    counts = numpy.asarray([0, 1, 2, 0, 1], dtype=numpy.int16)
    grouping = numpy.asarray([1, -1, -1, -1, -1], dtype=numpy.int16)
    quality = numpy.asarray([2, 2, 2, 2, 2], dtype=numpy.int16)

    dset = ui.DataPHA('low', chans, counts, grouping=grouping, quality=quality)
    ui.set_data(dset)

    egrid = numpy.asarray([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
    elo = egrid[:-1]
    ehi = egrid[1:]

    # it is required that at least one of arf or rmf is set but this
    # is not enforced
    #
    if arf:
        ui.set_arf(ui.create_arf(elo, ehi))

    if rmf:
        # NOTE: need to set e_min/max otherwise get a 'noenergybins'
        #       error from sherpa.astro.data.DataPHA._get_ebins
        #
        ui.set_rmf(ui.create_rmf(elo, ehi, e_min=elo, e_max=ehi))

    # The plot units depend on the analysis setting.
    #
    ui.set_analysis(chantype)

    # Run tests
    check_bad_grouping(exp_xlo, exp_xhi, exp_counts, lo1, hi1, lo2, hi2)
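
For reference, the grouping and quality columns follow the OGIP convention: a grouping flag of 1 starts a new group and -1 continues it, while a non-zero quality value flags a channel as bad. Here the single group contains only bad channels, which is exactly the "no valid bins after grouping" situation of issue #790. A small illustration (not part of the test itself):

import numpy as np

grouping = np.asarray([1, -1, -1, -1, -1], dtype=np.int16)
quality = np.asarray([2, 2, 2, 2, 2], dtype=np.int16)

n_groups = np.count_nonzero(grouping == 1)   # 1: all five channels form one group
all_bad = bool(np.all(quality != 0))         # True: every channel is flagged bad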
Example #8
def test_fake_pha_incompatible_rmf(id, clean_astro_ui):
    """Check we error out if RMF is wrong size."""

    channels = np.arange(1, 4, dtype=np.int16)
    counts = np.ones(3, dtype=np.int16)

    ui.load_arrays(id, channels, counts, ui.DataPHA)

    ebins = np.asarray([1.1, 1.2, 1.4, 1.6, 1.8, 2.0])
    elo = ebins[:-1]
    ehi = ebins[1:]
    rmf = ui.create_rmf(elo, ehi, e_min=elo, e_max=ehi)

    with pytest.raises(DataErr) as exc:
        ui.fake_pha(id, None, rmf, 1000.0)

    id = 1 if id is None else id
    emsg = f"RMF 'delta-rmf' is incompatible with PHA dataset '{id}'"
    assert str(exc.value) == emsg
Example #9
def test_fake_pha_missing_arf(id, clean_astro_ui, tmp_path):
    """Check we error out if ARF is not valid."""

    channels = np.arange(1, 4, dtype=np.int16)
    counts = np.ones(3, dtype=np.int16)

    ui.load_arrays(id, channels, counts, ui.DataPHA)

    ebins = np.asarray([1.1, 1.2, 1.4, 1.6])
    elo = ebins[:-1]
    ehi = ebins[1:]
    rmf = ui.create_rmf(elo, ehi, e_min=elo, e_max=ehi)

    arf = tmp_path / 'arf'

    with pytest.raises(IOErr) as exc:
        ui.fake_pha(id, str(arf), rmf, 1000.0)

    assert str(exc.value) == f"file '{arf}' not found"
Example #10
    def test_cache_copy(self):
        # fake up a PHA data set
        chans = numpy.arange(1, 11, dtype=numpy.int8)
        counts = numpy.ones(chans.size)

        # bin width is not 0.1 but something slightly different
        ebins = numpy.linspace(0.1, 1.2, num=chans.size + 1)
        elo = ebins[:-1]
        ehi = ebins[1:]

        dset = ui.DataPHA('test', chans, counts)

        # make sure ARF isn't 1 to make sure it's being applied
        arf = ui.create_arf(elo, ehi, specresp=0.7 * numpy.ones(chans.size))

        rmf = ui.create_rmf(elo, ehi, e_min=elo, e_max=ehi)

        ui.set_data(1, dset)
        ui.set_arf(1, arf)
        ui.set_rmf(1, rmf)

        ui.set_source(ui.const1d.mdl)

        # again not 1
        mdl.c0 = 8


        # Copy the values from the plot structures, since get_xxx_plot
        # returns the same (cached) object, so comparing m1.y with m2.y
        # directly would never show a difference.
        #

        d1y = ui.get_data_plot().y.copy()
        m1y = ui.get_model_plot().y.copy()
        s1y = ui.get_source_plot().y.copy()

        d2y = ui.get_data_plot().y.copy()
        m2y = ui.get_model_plot().y.copy()
        s2y = ui.get_source_plot().y.copy()
        rtol = 1.0e-4
        atol = 1.0e-4
        numpy.testing.assert_allclose(d1y, d2y, rtol, atol)
        numpy.testing.assert_allclose(m1y, m2y, rtol, atol)
        numpy.testing.assert_allclose(s1y, s2y, rtol, atol)
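
The .copy() calls matter because, as the comment notes, the get_*_plot accessors return the session's cached plot object rather than a new instance, so without copying, the "before" and "after" variables would alias the same arrays. A minimal illustration of the aliasing, assuming the same session setup as above:

p1 = ui.get_model_plot()
p2 = ui.get_model_plot()
assert p1 is p2            # the same cached object is returned each time

y_alias = p1.y             # tracks any later change to the cached plot
y_snapshot = p1.y.copy()   # an independent copy, safe to compare later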
Example #11
def test_fake_pha_basic(id, has_bkg, clean_astro_ui):
    """No background.

    See also test_fake_pha_add_background

    For simplicity we use perfect responses.

    A background dataset can be added, but it should
    not be used in the simulation.
    """

    channels = np.arange(1, 4, dtype=np.int16)
    counts = np.ones(3, dtype=np.int16)
    bcounts = 100 * counts

    ui.load_arrays(id, channels, counts, ui.DataPHA)
    ui.set_exposure(id, 100)

    if has_bkg:
        bkg = ui.DataPHA('bkg', channels, bcounts, exposure=200, backscal=0.4)
        ui.set_bkg(id, bkg, bkg_id='faked-bkg')

    ebins = np.asarray([1.1, 1.2, 1.4, 1.6])
    elo = ebins[:-1]
    ehi = ebins[1:]
    arf = ui.create_arf(elo, ehi)
    rmf = ui.create_rmf(elo, ehi, e_min=elo, e_max=ehi)

    mdl = ui.create_model_component('const1d', 'mdl')
    mdl.c0 = 2
    ui.set_source(id, mdl)

    ui.fake_pha(id, arf, rmf, 1000.0)

    faked = ui.get_data(id)
    assert faked.exposure == pytest.approx(1000.0)
    assert (faked.channel == channels).all()

    assert faked.name == 'faked'
    assert faked.get_arf().name == 'test-arf'
    assert faked.get_rmf().name == 'delta-rmf'

    if has_bkg and id is not None:
        assert faked.background_ids == ['faked-bkg']
        bkg = ui.get_bkg(id, 'faked-bkg')
        assert bkg.name == 'bkg'
        assert bkg.counts == pytest.approx(bcounts)
        assert bkg.exposure == pytest.approx(200)

    else:
        assert faked.background_ids == []

    # check we've faked counts (the scaling is such that it is
    # very improbable that this condition will fail)
    assert (faked.counts > counts).all()

    # For reference the predicted source signal is
    #    [200, 400, 400]
    #
    # What we'd like to say is that the predicted counts are
    # similar, but this is not easy to do. What we can try
    # is summing the counts (to average over the randomness)
    # and then a simple check
    #
    assert faked.counts.sum() > 200
Example #12
def test_calc_flux_pha_bin_edges(clean_astro_ui):
    """What happens when filter edges partially overlap bins?

    Later tests may also cover this condition, but here we use
    faked data constructed to make the behavior "obvious".
    """

    chans = np.arange(1, 11, 1, dtype=int)
    counts = np.zeros(chans.size, dtype=int)

    # "perfect" response
    energies = np.arange(1, 12, 1)
    elo, ehi = energies[:-1], energies[1:]
    flat = np.ones(chans.size, dtype=int)

    d = ui.DataPHA('example', chans, counts)
    arf = ui.create_arf(elo, ehi, flat)
    rmf = ui.create_rmf(elo,
                        ehi,
                        e_min=elo,
                        e_max=ehi,
                        startchan=1,
                        fname=None)

    d.set_arf(arf)
    d.set_rmf(rmf)
    ui.set_data(1, d)

    ui.set_source(ui.powlaw1d.pl)
    pl.ampl = 1e-4
    pl.gamma = 1.7

    # Evaluate the model on the energy grid
    ymdl = pl(elo, ehi)

    pflux = ui.calc_photon_flux(2.6, 7.8)
    eflux = ui.calc_energy_flux(2.6, 7.8)

    enscale = sherpa.astro.utils._charge_e

    # Left in as notes:
    #
    # These are the true values. Given that the edge bins should be
    # handled by linear interpolation, how close do we get to them?
    #
    # gterm1 = 1.0 - 1.7
    # gterm2 = 2.0 - 1.7
    # true_pflux = 1e-4 * (7.8**gterm1 - 2.6**gterm1) / gterm1
    # true_eflux = enscale * 1e-4 * (7.8**gterm2 - 2.6**gterm2) / gterm2
    #
    # Comparing the linear interpolation scheme to that used by
    # Sherpa prior to fixing #619, I find
    #
    # Photon fluxes:
    #     linear_interp / true_pflux = 1.042251
    #     sherpa        / true_pflux = 0.837522
    #
    # Energy fluxes:
    #     linear_interp / true_eflux = 1.017872
    #     sherpa        / true_eflux = 0.920759
    #
    scale = np.asarray([0.0, 0.4, 1.0, 1.0, 1.0, 1.0, 0.8, 0, 0.0, 0.0])
    expected_pflux = (ymdl * scale).sum()

    emid = enscale * (elo + ehi) / 2
    expected_eflux = (emid * ymdl * scale).sum()

    assert pflux == pytest.approx(expected_pflux)

    # check against log as values ~ 3e-13
    eflux = np.log10(eflux)
    expected_eflux = np.log10(expected_eflux)
    assert eflux == pytest.approx(expected_eflux)
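
The commented-out "true" values can be reproduced from the analytic power-law integral, which makes the size of the edge-bin approximation explicit. A sketch, using an approximate keV-to-erg factor in place of _charge_e:

import numpy as np

ampl, gamma = 1e-4, 1.7
g1, g2 = 1.0 - gamma, 2.0 - gamma

# analytic integrals of ampl * E**-gamma (photon flux) and
# ampl * E**(1 - gamma) (energy flux) over 2.6-7.8 keV
true_pflux = ampl * (7.8**g1 - 2.6**g1) / g1
true_eflux = 1.602e-9 * ampl * (7.8**g2 - 2.6**g2) / g2   # ~3e-13, as noted

# the bin-weighted approximation used in the test: bins 3-7 keV count
# in full, the 2-3 and 7-8 keV edge bins contribute 40% and 80%
elo = np.arange(1.0, 11.0)
ehi = elo + 1.0
ymdl = ampl * (ehi**g1 - elo**g1) / g1        # integrated model in each bin
scale = np.asarray([0.0, 0.4, 1.0, 1.0, 1.0, 1.0, 0.8, 0.0, 0.0, 0.0])
approx_pflux = (ymdl * scale).sum()
ratio = approx_pflux / true_pflux             # ~1.04, matching the notes above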