Example #1
def test_source_component_arbitrary_grid():
    ui = Session()
    model = Const1D('c')
    model.c0 = 10

    def tst(x, y, re_x, yy):
        ui.load_arrays(1, x, y)
        regrid_model = model.regrid(re_x)
        ui.plot_source_component(regrid_model)
        numpy.testing.assert_array_equal(ui._compsrcplot.x, x)
        numpy.testing.assert_array_almost_equal(ui._compsrcplot.y, yy)

    x = [1, 2, 3]
    y = [1, 2, 3]
    re_x = [10, 20, 30]
    tst(x, y, re_x, [0, 0, 0])

    x = numpy.linspace(1, 10, 10)
    y = x
    re_x = numpy.linspace(5, 15, 15)
    tst(x, y, re_x, [0, 0, 0, 0, 10, 10, 10, 10, 10, 10])

    re_x = numpy.linspace(1, 5, 15)
    tst(x, y, re_x, [10, 10, 10, 10, 10, 0, 0, 0, 0, 0])

    re_x = numpy.linspace(3, 5, 15)
    tst(x, y, re_x, [0, 0, 10, 10, 10, 0, 0, 0, 0, 0])
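For context, a minimal standalone sketch of the regrid behaviour this test relies on (assuming the regridded model can be evaluated directly on a grid; the values are chosen to match the second case above):

import numpy
from sherpa.models import Const1D

c = Const1D('c')
c.c0 = 10
rc = c.regrid(numpy.linspace(5, 15, 15))
# Points outside the regrid range evaluate to 0, points inside to c0:
print(rc(numpy.linspace(1, 10, 10)))  # approx [0, 0, 0, 0, 10, 10, 10, 10, 10, 10]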
Example #2
def test_source_component_arbitrary_grid_int():
    import numpy
    import pytest

    from sherpa.astro.ui.utils import Session
    from sherpa.models import Const1D
    from sherpa.data import Data1DInt

    ui = Session()

    x = numpy.array([1, 2, 3]), numpy.array([2, 3, 4])
    y = [1.5, 2.5, 3.5]
    re_x = numpy.array([10, 20, 30]), numpy.array([20, 30, 40])

    ui.load_arrays(1, x[0], x[1], y, Data1DInt)
    model = Const1D('c')
    model.c0 = 10

    regrid_model = model.regrid(*re_x)

    with pytest.warns(UserWarning):
        ui.plot_source_component(regrid_model)

    x_points = (x[0] + x[1]) / 2
    re_x_points = (re_x[0] + re_x[1]) / 2
    points = numpy.concatenate((x_points, re_x_points))

    numpy.testing.assert_array_equal(ui._compsrcplot.x, points)
    numpy.testing.assert_array_equal(ui._compsrcplot.y,
                                     [10, 10, 10, 100, 100, 100])
Example #3
def calc_wstat_sherpa(mu_sig, n_on, n_off, alpha):
    import numpy as np

    import sherpa.stats as ss
    from sherpa.astro.data import DataPHA
    from sherpa.models import Const1D
    wstat = ss.WStat()

    model = Const1D()
    model.c0 = mu_sig
    data = DataPHA(counts=np.atleast_1d(n_on),
                   name='dummy',
                   channel=np.atleast_1d(1),
                   backscal=1,
                   exposure=1)
    background = DataPHA(counts=np.atleast_1d(n_off),
                         name='dummy background',
                         channel=np.atleast_1d(1),
                         backscal=np.atleast_1d(1. / alpha),
                         exposure=1)

    data.set_background(background, 1)

    # Docstring for ``calc_stat``
    # https://github.com/sherpa/sherpa/blob/fe8508818662346cb6d9050ba676e23318e747dd/sherpa/stats/__init__.py#L219

    stat = wstat.calc_stat(model=model, data=data)
    print("Sherpa stat: {}".format(stat[0]))
    print("Sherpa fvec: {}".format(stat[1]))
Example #4
def setup():
    const = Const1D("const")
    const.c0 = 0
    const.c0.freeze()

    my_model = MyModel("my_model")
    my_model.integrate = False

    return Session(), my_model, const
def test_zero_division_calc_stat():
    ui = Session()
    x = numpy.arange(100)
    y = numpy.zeros(100)
    ui.load_arrays(1, x, y, DataPHA)
    ui.group_counts(1, 100)
    ui.set_full_model(1, Const1D("const"))

    # In principle we wouldn't need to call calc_stat_info(); we could just use
    # _get_stat_info to reproduce the issue. However, _get_stat_info is not a
    # public method, so we also want to check that calc_stat_info does not
    # throw an exception. So, first we run calc_stat_info and make sure there
    # are no exceptions. Then, since calc_stat_info only logs something and
    # doesn't return anything, we use a white-box approach to get the result
    # from _get_stat_info.
    ui.calc_stat_info()
    assert ui._get_stat_info()[0].rstat is numpy.nan
Example #6
def test_source_component_arbitrary_grid_int():
    ui = Session()

    x = numpy.array([1, 2, 3]), numpy.array([2, 3, 4])
    y = [1.5, 2.5, 3.5]
    re_x = numpy.array([10, 20, 30]), numpy.array([20, 30, 40])

    ui.load_arrays(1, x[0], x[1], y, Data1DInt)
    model = Const1D('c')
    model.c0 = 10

    regrid_model = model.regrid(*re_x)
    ui.plot_source_component(regrid_model)

    x_points = (x[0] + x[1]) / 2.0
    numpy.testing.assert_array_equal(ui._compsrcplot.x, x_points)
    numpy.testing.assert_array_equal(ui._compsrcplot.y, [0., 0., 0.])
Example #7
def test_plot_model_arbitrary_grid_integrated():
    ui = Session()

    x = [1, 2, 3], [2, 3, 4]
    y = [1, 2, 3]
    re_x = [10, 20, 30], [20, 30, 40]

    ui.load_arrays(1, x[0], x[1], y, Data1DInt)
    model = Const1D('c')
    model.c0 = 10

    regrid_model = model.regrid(*re_x)
    ui.set_model(regrid_model)

    with pytest.warns(UserWarning):
        ui.plot_model()

    numpy.testing.assert_array_equal(ui._modelplot.x, [1.5, 2.5, 3.5])
    numpy.testing.assert_array_equal(ui._modelplot.y, [10, 10, 10])
Example #8
def test_source_component_arbitrary_grid():
    ui = Session()

    x = [1, 2, 3]
    y = [1, 2, 3]
    re_x = [10, 20, 30]

    ui.load_arrays(1, x, y)
    model = Const1D('c')
    model.c0 = 10

    regrid_model = model.regrid(re_x)

    with pytest.warns(UserWarning):
        ui.plot_source_component(regrid_model)

    numpy.testing.assert_array_equal(ui._compsrcplot.x, x + re_x)
    numpy.testing.assert_array_equal(ui._compsrcplot.y, [
        10,
    ] * 6)
Example #9
def setup(make_data_path):

    from sherpa.astro.io import read_pha
    from sherpa.astro import xspec

    infile = make_data_path("9774.pi")
    data = read_pha(infile)
    data.notice(0.3, 7.0)

    # Change the exposure time to make the fitted amplitude > 1.
    #
    data.exposure = 1

    # Use the wabs model because it is unlikely to change
    # (as scientifically it is no longer useful). The problem
    # with using something like the phabs model is that
    # changes to that model in XSPEC could change the results
    # here.
    #
    # We fit the log of the nH since this makes the numbers
    # a bit closer to O(1), and so checking should be easier.
    #
    abs1 = xspec.XSwabs('abs1')
    p1 = PowLaw1D('p1')
    factor = Const1D('factor')
    factor.integrate = False
    model = abs1 * p1 + 0 * factor

    factor.c0 = 0
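    # Note (assumed reading of the line below): assigning the parameter
    # expression 10**factor.c0 to abs1.nh links the two parameters, so the fit
    # varies factor.c0 (log10 of nH) rather than nh directly.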
    abs1.nh = 10**factor.c0

    # Ensure the nh limits are honoured by factor (upper limit only).
    # If you don't do this then the fit can fail because a value
    # outside the abs1.nh but within factor.c0 can be picked.
    #
    factor.c0.max = numpy.log10(abs1.nh.max)

    rsp = Response1D(data)
    return {'data': data, 'model': rsp(model)}
Example #10
def test_zero_case():
    """Check what happens when values can be near -1. See #740"""

    xs = np.arange(1, 6)
    ys = np.asarray([0.5, -0.5, 0.3, 0.2, -0.1])
    dyl = np.asarray([2, 2, 2, 2, 2])
    dyh = np.asarray([2, 2, 2, 2, 2])

    data = Data1DAsymmetricErrs('zero', xs, ys, dyl, dyh)
    mdl = Const1D('flat')

    bestfit = Fit(data, mdl).fit()

    rd = ReSampleData(data, mdl)

    # Both approaches (calling the object directly or using rd.call) should
    # give the same results when niter is set explicitly.
    #
    # res = rd.call(niter=10, seed=47)
    res = rd(niter=10, seed=47)

    # Technically this depends on random chance, but it is rather
    # unlikely we'd get ten values of -1 here. Relax the -1 check and
    # just ensure we have more than one unique value (we could probably
    # assert nvs == 10, but technically we allow repeated values).
    #
    vs = np.unique(res['flat.c0'])
    nvs = len(vs)
    assert nvs > 1

    # Minimal testing of the other return values. We do assume that
    # the resampling has not found a better fit than the best-fit result.
    #
    samples = res['samples']
    stats = res['statistic']
    assert samples.shape == (10, 5)
    assert stats.shape == (10, )
    assert (stats >= bestfit.statval).all()
def test_fake_pha_background_pha(reset_seed):
    """Sample from background pha"""
    np.random.seed(1234)

    data = DataPHA("any", channels, counts, exposure=1000.)
    bkg = DataPHA("bkg", channels, bcounts, exposure=2000, backscal=2.5)
    data.set_background(bkg, id="used-bkg")

    data.set_arf(arf)
    data.set_rmf(rmf)

    mdl = Const1D("mdl")
    mdl.c0 = 0
    # Just make sure that the model does not contribute
    fake_pha(data, mdl, is_source=True, add_bkgs=False)
    assert data.counts.sum() == 0

    fake_pha(data, mdl, is_source=True, add_bkgs=True)
    # expected is [200, 200, 200]
    assert data.counts.sum() > 400
    assert data.counts.sum() < 1000

    # Add several more backgrounds. The actual background should be the average.
    # We add 5 backgrounds, each with half the exposure time of the first one
    # and essentially 0 counts, so we should find about 1/11 of the counts
    # we found in the last run.
    for i in range(5):
        bkg = DataPHA("bkg",
                      channels,
                      np.ones(3, dtype=np.int16),
                      exposure=1000,
                      backscal=2.5)
        data.set_background(bkg, id=i)

    fake_pha(data, mdl, is_source=True, add_bkgs=True)
    # expected is about [18, 18, 18]
    assert data.counts.sum() > 10
    assert data.counts.sum() < 200
def get_data(mu_sig, n_on, n_off, alpha):
    import numpy as np

    from sherpa.astro.data import DataPHA
    from sherpa.models import Const1D

    model = Const1D()
    model.c0 = mu_sig
    data = DataPHA(
        counts=np.atleast_1d(n_on),
        name="dummy",
        channel=np.atleast_1d(1),
        backscal=1,
        exposure=1,
    )
    background = DataPHA(
        counts=np.atleast_1d(n_off),
        name="dummy background",
        channel=np.atleast_1d(1),
        backscal=np.atleast_1d(1. / alpha),
        exposure=1,
    )

    data.set_background(background, 1)

    return model, data
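A minimal sketch (illustrative values) of feeding the returned pair into WStat, mirroring calc_wstat_sherpa above:

import sherpa.stats as ss

model, data = get_data(mu_sig=5.0, n_on=10, n_off=20, alpha=0.5)
stat, fvec = ss.WStat().calc_stat(model=model, data=data)
print(stat, fvec)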
Example #13
def test_plot_model_arbitrary_grid_integrated():
    ui = Session()
    model = Const1D('c')
    model.c0 = 10

    def tst(x, y, re_x, yy):
        ui.load_arrays(1, x[0], x[1], y, Data1DInt)
        regrid_model = model.regrid(*re_x)
        ui.set_model(regrid_model)
        ui.plot_model()
        avg_x = 0.5 * (x[0] + x[1])
        numpy.testing.assert_array_equal(ui._modelplot.x, avg_x)
        numpy.testing.assert_array_almost_equal(ui._modelplot.y, yy)

    tmp = numpy.arange(1, 5, 1)
    x = tmp[:-1], tmp[1:]
    y = x[0]
    tmp = numpy.arange(10, 50, 10)
    re_x = tmp[:-1], tmp[1:]
    tst(x, y, re_x, [0, 0, 0])

    tmp = numpy.arange(1, 20, 1)
    x = tmp[:-1], tmp[1:]
    y = x[0]
    tmp = numpy.arange(1, 20, 0.5)
    re_x = tmp[:-1], tmp[1:]
    tst(x, y, re_x, len(y) * [10])

    tmp = numpy.arange(1, 20, 1)
    x = tmp[:-1], tmp[1:]
    y = x[0]
    tmp = numpy.arange(10, 20, 0.5)
    re_x = tmp[:-1], tmp[1:]
    n = int(len(y) / 2)
    yy = numpy.append(n * [0.], n * [10.])
    tst(x, y, re_x, yy)
def test_fake_pha_basic(has_bkg, is_source, reset_seed):
    """No background.

    See also test_fake_pha_add_background

    For simplicity we use perfect responses.

    A background dataset can be added, but it should
    not be used in the simulation with the default settings.
    """
    np.random.seed(4276)
    data = DataPHA("any", channels, counts, exposure=1000.)

    if has_bkg:
        bkg = DataPHA("bkg", channels, bcounts, exposure=2000, backscal=0.4)
        data.set_background(bkg, id="unused-bkg")

    data.set_arf(arf)
    data.set_rmf(rmf)

    mdl = Const1D("mdl")
    mdl.c0 = 2

    fake_pha(data, mdl, is_source=is_source, add_bkgs=False)

    assert data.exposure == pytest.approx(1000.0)
    assert (data.channel == channels).all()

    assert data.name == "any"
    assert data.get_arf().name == "user-arf"
    assert data.get_rmf().name == "delta-rmf"

    if has_bkg:
        assert data.background_ids == ["unused-bkg"]
        bkg = data.get_background("unused-bkg")
        assert bkg.name == "bkg"
        assert bkg.counts == pytest.approx(bcounts)
        assert bkg.exposure == pytest.approx(2000)

    else:
        assert data.background_ids == []

    if is_source:
        # check we've faked counts (the scaling is such that it is
        # very improbable that this condition will fail)
        assert (data.counts > counts).all()

        # For reference the predicted source signal is
        #    [200, 400, 400]
        #
        # What we'd like to say is that the predicted counts are
        # similar, but this is not easy to do. What we can try
        # is summing the counts (to average over the randomness)
        # and then a simple check
        #
        assert data.counts.sum() > 500
        assert data.counts.sum() < 1500
        # This is more likely to fail by chance, but still very unlikely
        assert data.counts[1] > data.counts[0]
    else:
        # No multiplication with exposure time, arf binning, etc.
        # so we just expect very few counts
        assert data.counts.sum() < 10
        assert data.counts.sum() >= 2

    # Essentially double the exposure by having two identical arfs
    data.set_arf(arf, 2)
    data.set_rmf(rmf, 2)
    fake_pha(data, mdl, is_source=is_source, add_bkgs=False)
    if is_source:
        assert data.counts.sum() > 1200
        assert data.counts.sum() < 3000
        assert data.counts[1] > data.counts[0]
    else:
        assert data.counts.sum() < 20
        assert data.counts.sum() >= 4
def test_fake_pha_bkg_model(reset_seed):
    """Test background model
    """

    np.random.seed(5329853)

    data = DataPHA("any", channels, counts, exposure=1000.)

    bkg = DataPHA("bkg", channels, bcounts, exposure=2000, backscal=1.)
    data.set_background(bkg, id="used-bkg")

    data.set_arf(arf)
    data.set_rmf(rmf)

    bkg.set_arf(arf)
    bkg.set_rmf(rmf)

    mdl = Const1D("mdl")
    mdl.c0 = 0

    bmdl = Const1D("bmdl")
    bmdl.c0 = 2

    # With no background model the simulated source counts
    # are 0.
    #
    fake_pha(data,
             mdl,
             is_source=True,
             add_bkgs=False,
             bkg_models={"used-bkg": bmdl})

    assert data.counts == pytest.approx([0, 0, 0])

    # Check we have created source counts this time.
    #
    fake_pha(data,
             mdl,
             is_source=True,
             add_bkgs=True,
             bkg_models={"used-bkg": bmdl})

    assert data.exposure == pytest.approx(1000.0)
    assert (data.channel == channels).all()

    assert data.name == "any"
    assert data.get_arf().name == "user-arf"
    assert data.get_rmf().name == "delta-rmf"

    # The background itself is unchanged
    assert data.background_ids == ["used-bkg"]
    bkg = data.get_background("used-bkg")
    assert bkg.name == "bkg"
    assert bkg.counts == pytest.approx(bcounts)
    assert bkg.exposure == pytest.approx(2000)

    # Apply a number of regression checks to test the output. These
    # can be expected to change if the randomization changes (either
    # explicitly or implicitly). There used to be a number of checks
    # that compared the simulated data to the input values, but these
    # could occasionally fail, and so the seed was fixed for these
    # tests.
    #
    # For reference the predicted signal is
    #    [200, 400, 400]
    # but, unlike in the test above, this time it's all coming
    # from the background.
    #
    assert data.counts == pytest.approx([186, 411, 405])

    # Now add a second set of arf/rmf for the data.
    # However, all the signal is background, so this does not change
    # any of the results.
    data.set_arf(arf, 2)
    data.set_rmf(rmf, 2)
    fake_pha(data,
             mdl,
             is_source=True,
             add_bkgs=True,
             bkg_models={"used-bkg": bmdl})

    assert data.counts == pytest.approx([197, 396, 389])
def test_fake_pha_has_valid_ogip_keywords_all_fake(tmp_path, reset_seed):
    """See #1209

    When everything is faked, what happens?
    """

    np.random.seed(5)

    data = DataPHA("any", channels, counts, exposure=1000.)

    bkg = DataPHA("bkg", channels, bcounts, exposure=2000, backscal=1.)
    data.set_background(bkg, id="used-bkg")

    data.set_arf(arf)
    data.set_rmf(rmf)

    bkg.set_arf(arf)
    bkg.set_rmf(rmf)

    mdl = Const1D("mdl")
    mdl.c0 = 0

    bmdl = Const1D("bmdl")
    bmdl.c0 = 2

    fake_pha(data,
             mdl,
             is_source=True,
             add_bkgs=True,
             bkg_models={"used-bkg": bmdl})

    outfile = tmp_path / "sim.pha"
    io.write_pha(str(outfile), data, ascii=False)

    inpha = io.read_pha(str(outfile))
    assert inpha.channel == pytest.approx(channels)

    # it is not required that we check counts (that is, we can drop this
    # if it turns out not to be repeatable across platforms), but for
    # now keep the check.
    #
    assert inpha.counts == pytest.approx([188, 399, 416])

    for field in [
            "staterror", "syserror", "bin_lo", "bin_hi", "grouping", "quality"
    ]:
        assert getattr(inpha, field) is None

    assert inpha.exposure == pytest.approx(1000.0)
    assert inpha.backscal == pytest.approx(1.0)
    assert inpha.areascal == pytest.approx(1.0)
    assert not inpha.grouped
    assert not inpha.subtracted
    assert inpha.response_ids == []
    assert inpha.background_ids == []

    hdr = inpha.header
    assert hdr["TELESCOP"] == "none"
    assert hdr["INSTRUME"] == "none"
    assert hdr["FILTER"] == "none"

    for key in [
            "EXPOSURE", "AREASCAL", "BACKSCAL", "ANCRFILE", "BACKFILE",
            "RESPFILE"
    ]:
        assert key not in hdr
def test_fake_pha_has_valid_ogip_keywords_from_real(make_data_path, tmp_path,
                                                    reset_seed):
    """See #1209

    In this version we use a "real" PHA file as the base.

    See sherpa/astro/ui/tests/test_astro_ui_utils_simulation.py

        test_fake_pha_issue_1209

    which is closer to the reported case in #1209
    """

    np.random.seed(5)

    infile = make_data_path("acisf01575_001N001_r0085_pha3.fits.gz")
    data = io.read_pha(infile)

    mdl = Const1D("mdl")
    mdl.c0 = 0

    bmdl = Const1D("bmdl")
    bmdl.c0 = 2

    fake_pha(data,
             mdl,
             is_source=True,
             add_bkgs=True,
             bkg_models={"used-bkg": bmdl})

    outfile = tmp_path / "sim.pha"
    io.write_pha(str(outfile), data, ascii=False)

    inpha = io.read_pha(str(outfile))
    assert inpha.channel == pytest.approx(np.arange(1, 1025))

    # it is not required that we check counts (that is, we can drop this
    # if it turns out not to be repeatable across platforms), but for
    # now keep the check.
    #
    assert inpha.counts.sum() == 19

    for field in [
            "staterror", "syserror", "bin_lo", "bin_hi", "grouping", "quality"
    ]:
        assert getattr(inpha, field) is None

    assert inpha.exposure == pytest.approx(37664.157219191)
    assert inpha.backscal == pytest.approx(2.2426552620567e-06)
    assert inpha.areascal == pytest.approx(1.0)
    assert not inpha.grouped
    assert not inpha.subtracted
    assert inpha.response_ids == []
    assert inpha.background_ids == []

    hdr = inpha.header
    assert hdr["TELESCOP"] == "CHANDRA"
    assert hdr["INSTRUME"] == "ACIS"
    assert hdr["FILTER"] == "none"

    for key in [
            "EXPOSURE", "AREASCAL", "BACKSCAL", "ANCRFILE", "BACKFILE",
            "RESPFILE"
    ]:
        assert key not in hdr
Example #18
def test_fake_pha_bkg_model():
    """Test background model
    """
    data = DataPHA('any', channels, counts, exposure=1000.)

    bkg = DataPHA('bkg', channels, bcounts, exposure=2000, backscal=1.)
    data.set_background(bkg, id='used-bkg')

    data.set_arf(arf)
    data.set_rmf(rmf)

    bkg.set_arf(arf)
    bkg.set_rmf(rmf)

    mdl = Const1D('mdl')
    mdl.c0 = 0

    bmdl = Const1D('bmdl')
    bmdl.c0 = 2

    fake_pha(data,
             mdl,
             is_source=True,
             add_bkgs=True,
             bkg_models={'used-bkg': bmdl})

    assert data.exposure == pytest.approx(1000.0)
    assert (data.channel == channels).all()

    assert data.name == 'any'
    assert data.get_arf().name == 'user-arf'
    assert data.get_rmf().name == 'delta-rmf'

    # The background itself is unchanged
    assert data.background_ids == ['used-bkg']
    bkg = data.get_background('used-bkg')
    assert bkg.name == 'bkg'
    assert bkg.counts == pytest.approx(bcounts)
    assert bkg.exposure == pytest.approx(2000)

    # check we've faked counts (the scaling is such that it is
    # very improbable that this condition will fail)
    assert (data.counts > counts).all()

    # For reference the predicted signal is
    #    [200, 400, 400]
    # but, unlike in the test above, this time it's all coming
    # from the background.
    #
    # What we'd like to say is that the predicted counts are
    # similar, but this is not easy to do. What we can try
    # is summing the counts (to average over the randomness)
    # and then a simple check
    #
    assert data.counts.sum() > 500
    assert data.counts.sum() < 1500
    # This is more likely to fail by chance, but still very unlikely
    assert data.counts[1] > 1.5 * data.counts[0]

    # Now add a second set of arf/rmf for the data.
    # However, all the signal is background, so this does not change
    # any of the results.
    data.set_arf(arf, 2)
    data.set_rmf(rmf, 2)
    fake_pha(data,
             mdl,
             is_source=True,
             add_bkgs=True,
             bkg_models={'used-bkg': bmdl})
    assert data.counts.sum() > 500
    assert data.counts.sum() < 1500
    assert data.counts[1] > 1.5 * data.counts[0]