Example #1

# Imports shared by all three examples; clean_astro_ui and reset_seed
# are pytest fixtures supplied by the Sherpa test suite.
import numpy as np
import pytest

from sherpa.astro import ui

def test_fake_pha_background_model(clean_astro_ui, reset_seed):
    """Check we can add a background component.

    See also test_fake_pha_basic.

    For simplicity we use perfect responses.
    """

    np.random.seed(27347)

    id = 'qwerty'
    channels = np.arange(1, 4, dtype=np.int16)
    counts = np.ones(3, dtype=np.int16)
    bcounts = 100 * counts

    ui.load_arrays(id, channels, counts, ui.DataPHA)
    ui.set_exposure(id, 100)
    ui.set_backscal(id, 0.1)

    bkg = ui.DataPHA('bkg', channels, bcounts, exposure=200, backscal=0.4)

    ebins = np.asarray([1.1, 1.2, 1.4, 1.6])
    elo = ebins[:-1]
    ehi = ebins[1:]
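    # With no effective-area array given, create_arf returns a flat
    # (unit-response) ARF, and create_rmf with e_min/e_max matching the
    # energy grid returns a diagonal "delta" RMF: these are the
    # "perfect responses" the docstring mentions.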
    arf = ui.create_arf(elo, ehi)
    rmf = ui.create_rmf(elo, ehi, e_min=elo, e_max=ehi)

    mdl = ui.create_model_component('const1d', 'mdl')
    mdl.c0 = 0
    # The source amplitude is zero, so all of the simulated signal
    # comes from the background model.
    bkgmdl = ui.create_model_component('const1d', 'bkgmdl')
    bkgmdl.c0 = 2
    ui.set_source(id, mdl)
    ui.set_bkg(id, bkg)
    ui.set_bkg_source(id, bkgmdl)
    ui.set_arf(id, arf, bkg_id=1)
    ui.set_rmf(id, rmf, bkg_id=1)

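    # bkg='model' tells fake_pha to include the background model,
    # folded through the background responses set above, when
    # simulating the counts.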
    ui.fake_pha(id, arf, rmf, 1000.0, bkg='model')

    faked = ui.get_data(id)
    assert faked.exposure == pytest.approx(1000.0)
    assert (faked.channel == channels).all()

    # check we've faked counts (the scaling is such that it is
    # very improbable that this condition will fail)
    assert (faked.counts > counts).all()

    # For reference: the source model is zero, so predicted_by_source
    # evaluates to
    #    [0, 0, 0]
    # while the background counts scaled to the source exposure and
    # BACKSCAL give
    #    [125, 125, 125]
    # so, even with randomly drawn values, the following
    # checks should be robust.
    #
    predicted_by_source = 1000 * mdl(elo, ehi)
    predicted_by_bkg = (1000 / 200) * (0.1 / 0.4) * bcounts
    assert (faked.counts > predicted_by_source).all()
    assert (faked.counts > predicted_by_bkg).all()
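
The predicted_by_bkg estimate above folds the exposure ratio and the
BACKSCAL ratio into the observed background counts. A minimal sketch of
that scaling arithmetic, standalone and outside any Sherpa session (the
variable names are illustrative, not part of the test):

import numpy as np

bcounts = np.array([100, 100, 100])         # observed background counts
src_exposure, bkg_exposure = 1000.0, 200.0  # faked source vs. background
src_backscal, bkg_backscal = 0.1, 0.4

# Each ratio converts the background measurement into the equivalent
# contribution expected in the source region over the faked exposure.
scale = (src_exposure / bkg_exposure) * (src_backscal / bkg_backscal)
print(scale * bcounts)                      # -> [125. 125. 125.]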
Example #2
def test_fake_pha_basic_arfrmf_set_in_advance(clean_astro_ui, reset_seed):
    """Similar to test_fake_pha_basic but instead of passing in
    the RMF, we set it before. The result should be the same, so we
    don't have ot go through all the parameterization of that test.
    """

    np.random.seed(20348)

    channels = np.arange(1, 4, dtype=np.int16)
    counts = np.ones(3, dtype=np.int16)

    ui.load_arrays('id123', channels, counts, ui.DataPHA)
    ui.set_exposure('id123', 100)

    ebins = np.asarray([1.1, 1.2, 1.4, 1.6])
    elo = ebins[:-1]
    ehi = ebins[1:]
    arf = ui.create_arf(elo, ehi)
    rmf = ui.create_rmf(elo, ehi, e_min=elo, e_max=ehi)
    ui.set_rmf('id123', rmf)
    ui.set_arf('id123', arf)

    mdl = ui.create_model_component('const1d', 'mdl')
    mdl.c0 = 2
    ui.set_source('id123', mdl)

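    # Passing None for the ARF and RMF means fake_pha uses the
    # responses already associated with the dataset.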
    ui.fake_pha('id123', None, None, 1000.0)

    faked = ui.get_data('id123')
    assert faked.exposure == pytest.approx(1000.0)
    assert (faked.channel == channels).all()

    assert faked.name == 'faked'
    assert faked.background_ids == []

    # check we've faked counts (the scaling is such that it is
    # very improbable that this condition will fail)
    assert (faked.counts > counts).all()

    # For reference the predicted source signal is
    #    [200, 400, 400]
    #
    # What we'd like to say is that the predicted counts are
    # similar, but this is not easy to do. What we can try
    # is summing the counts (to average over the randomness)
    # and then a simple check
    #
    assert 200 < faked.counts.sum() < 3000
    # This is more likely to fail by chance, but still very unlikely
    assert faked.counts[1] > faked.counts[0]
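
The [200, 400, 400] reference values follow from integrating the flat
const1d model over each energy bin and multiplying by the exposure;
with perfect (unit) responses nothing else enters. A quick standalone
check of that arithmetic:

import numpy as np

c0 = 2.0                                  # const1d amplitude
ebins = np.asarray([1.1, 1.2, 1.4, 1.6])  # bin edges from the test
widths = np.diff(ebins)                   # [0.1, 0.2, 0.2]
exposure = 1000.0

print(c0 * widths * exposure)             # -> [200. 400. 400.]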
Example #3

# id and has_bkg are injected via pytest parametrization; the value
# lists below are assumed here to make the snippet self-contained.
@pytest.mark.parametrize("id", [None, 1, "faked"])
@pytest.mark.parametrize("has_bkg", [True, False])
def test_fake_pha_basic(id, has_bkg, clean_astro_ui):
    """No background.

    See also test_fake_pha_add_background

    For simplicity we use perfect responses.

    A background dataset can be added, but it should
    not be used in the simulation.
    """

    channels = np.arange(1, 4, dtype=np.int16)
    counts = np.ones(3, dtype=np.int16)
    bcounts = 100 * counts

    ui.load_arrays(id, channels, counts, ui.DataPHA)
    ui.set_exposure(id, 100)

    if has_bkg:
        bkg = ui.DataPHA('bkg', channels, bcounts, exposure=200, backscal=0.4)
        ui.set_bkg(id, bkg, bkg_id='faked-bkg')

    ebins = np.asarray([1.1, 1.2, 1.4, 1.6])
    elo = ebins[:-1]
    ehi = ebins[1:]
    arf = ui.create_arf(elo, ehi)
    rmf = ui.create_rmf(elo, ehi, e_min=elo, e_max=ehi)

    mdl = ui.create_model_component('const1d', 'mdl')
    mdl.c0 = 2
    ui.set_source(id, mdl)

    ui.fake_pha(id, arf, rmf, 1000.0)

    faked = ui.get_data(id)
    assert faked.exposure == pytest.approx(1000.0)
    assert (faked.channel == channels).all()

    assert faked.name == 'faked'
    assert faked.get_arf().name == 'test-arf'
    assert faked.get_rmf().name == 'delta-rmf'

    if has_bkg:
        assert faked.background_ids == ['faked-bkg']
        bkg = ui.get_bkg(id, 'faked-bkg')
        assert bkg.name == 'bkg'
        assert bkg.counts == pytest.approx(bcounts)
        assert bkg.exposure == pytest.approx(200)

    else:
        assert faked.background_ids == []

    # check we've faked counts (the scaling is such that it is
    # very improbable that this condition will fail)
    assert (faked.counts > counts).all()

    # For reference the predicted source signal is
    #    [200, 400, 400]
    #
    # What we'd like to say is that the predicted counts are
    # similar, but this is not easy to do. What we can try
    # is summing the counts (to average over the randomness)
    # and then a simple check
    #
    assert faked.counts.sum() > 200
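
The closing sum check is deliberately loose: the expected total is about
1000 counts, so a Poisson draw below 200 would be a roughly 25-sigma
fluctuation. A standalone sketch of that intuition (the seed and sample
size are arbitrary choices):

import numpy as np

rng = np.random.default_rng(1234)
expected = np.array([200.0, 400.0, 400.0])  # predicted per-channel signal
totals = rng.poisson(expected, size=(100_000, 3)).sum(axis=1)

print(totals.mean(), totals.std())  # ~1000 and ~sqrt(1000) ~ 31.6
print((totals <= 200).any())        # False: 200 is far below the mean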