Example #1
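The snippets below reference module-level fixtures (channels, counts, bcounts, arf, rmf, reset_seed) that sit outside the excerpt. A minimal sketch of what that setup presumably looks like, based on the names "user-arf" and "delta-rmf" asserted in the tests; the exact bin edges, count values, and fixture body are assumptions:

import numpy as np
import pytest

from sherpa.astro.data import DataPHA
from sherpa.astro.fake import fake_pha
from sherpa.astro.instrument import create_arf, create_delta_rmf
from sherpa.models.basic import Const1D

# Three channels with a token count in each; the background has many
# more counts so its contribution is easy to spot.
channels = np.arange(1, 4, dtype=np.int16)
counts = np.ones(3, dtype=np.int16)
bcounts = 100 * counts

# A perfect (unit-response) ARF and a diagonal RMF over three energy
# bins. create_arf and create_delta_rmf default to the names
# "user-arf" and "delta-rmf" checked below.
ebins = np.asarray([1.1, 1.2, 1.4, 1.6])
elo = ebins[:-1]
ehi = ebins[1:]
arf = create_arf(elo, ehi)
rmf = create_delta_rmf(elo, ehi, e_min=elo, e_max=ehi)

@pytest.fixture
def reset_seed():
    """Save and restore the NumPy random state around a test
    (assumed implementation of the fixture named in the tests)."""
    state = np.random.get_state()
    yield
    np.random.set_state(state)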
def test_fake_pha_bkg_model(reset_seed):
    """Test background model
    """

    np.random.seed(5329853)

    data = DataPHA("any", channels, counts, exposure=1000.)

    bkg = DataPHA("bkg", channels, bcounts, exposure=2000, backscal=1.)
    data.set_background(bkg, id="used-bkg")

    data.set_arf(arf)
    data.set_rmf(rmf)

    bkg.set_arf(arf)
    bkg.set_rmf(rmf)

    mdl = Const1D("mdl")
    mdl.c0 = 0

    bmdl = Const1D("bmdl")
    bmdl.c0 = 2

    # The source model amplitude is zero and add_bkgs=False, so the
    # supplied background model is ignored and the simulated counts
    # are 0.
    #
    fake_pha(data,
             mdl,
             is_source=True,
             add_bkgs=False,
             bkg_models={"used-bkg": bmdl})

    assert data.counts == pytest.approx([0, 0, 0])

    # Check we have created source counts this time.
    #
    fake_pha(data,
             mdl,
             is_source=True,
             add_bkgs=True,
             bkg_models={"used-bkg": bmdl})

    assert data.exposure == pytest.approx(1000.0)
    assert (data.channel == channels).all()

    assert data.name == "any"
    assert data.get_arf().name == "user-arf"
    assert data.get_rmf().name == "delta-rmf"

    # The background itself is unchanged
    assert data.background_ids == ["used-bkg"]
    bkg = data.get_background("used-bkg")
    assert bkg.name == "bkg"
    assert bkg.counts == pytest.approx(bcounts)
    assert bkg.exposure == pytest.approx(2000)

    # Apply a number of regression checks to test the output. These
    # can be expected to change if the randomization changes (either
    # explicitly or implicitly). There used to be a number of checks
    # that compared the simulated data to the input values, but these
    # could occasionally fail, and so the seed was fixed for these
    # tests.
    #
    # For reference the predicted signal is
    #    [200, 400, 400]
    # but, unlike in the test above, this time it's all coming
    # from the background.
    #
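    # (Assuming the module-level setup sketched at the top: bmdl.c0 = 2
    # counts/s/keV, a 1000 s exposure, a unit ARF, and bin widths of
    # 0.1, 0.2 and 0.2 keV give 2 * 1000 * [0.1, 0.2, 0.2]
    # = [200, 400, 400].)
    #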
    assert data.counts == pytest.approx([186, 411, 405])

    # Now add a second set of arf/rmf for the data.
    # However, all the signal is background, so this does not change
    # any of the results.
    data.set_arf(arf, 2)
    data.set_rmf(rmf, 2)
    fake_pha(data,
             mdl,
             is_source=True,
             add_bkgs=True,
             bkg_models={"used-bkg": bmdl})

    assert data.counts == pytest.approx([197, 396, 389])
Example #2
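(The has_bkg and is_source arguments come from pytest parametrization. The decorators sit outside the excerpt, presumably something like
@pytest.mark.parametrize("is_source", [True, False]) and
@pytest.mark.parametrize("has_bkg", [True, False]),
which runs the test once for each of the four combinations.)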
def test_fake_pha_basic(has_bkg, is_source, reset_seed):
    """No background.

    See also test_fake_pha_add_background

    For simplicity we use perfect responses.

    A background dataset can be added, but it should
    not be used in the simulation with default settings
    """
    np.random.seed(4276)
    data = DataPHA("any", channels, counts, exposure=1000.)

    if has_bkg:
        bkg = DataPHA("bkg", channels, bcounts, exposure=2000, backscal=0.4)
        data.set_background(bkg, id="unused-bkg")

    data.set_arf(arf)
    data.set_rmf(rmf)

    mdl = Const1D("mdl")
    mdl.c0 = 2

    fake_pha(data, mdl, is_source=is_source, add_bkgs=False)

    assert data.exposure == pytest.approx(1000.0)
    assert (data.channel == channels).all()

    assert data.name == "any"
    assert data.get_arf().name == "user-arf"
    assert data.get_rmf().name == "delta-rmf"

    if has_bkg:
        assert data.background_ids == ["unused-bkg"]
        bkg = data.get_background("unused-bkg")
        assert bkg.name == "bkg"
        assert bkg.counts == pytest.approx(bcounts)
        assert bkg.exposure == pytest.approx(2000)

    else:
        assert data.background_ids == []

    if is_source:
        # check we've faked counts (the scaling is such that it is
        # very improbable that this condition will fail)
        assert (data.counts > counts).all()

        # For reference the predicted source signal is
        #    [200, 400, 400]
        #
        # What we'd like to say is that the predicted counts are
        # similar, but this is not easy to do. What we can try
        # is summing the counts (to average over the randomness)
        # and then a simple check
        #
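        # (The expected total is 1000 counts and the Poisson scatter on
        # the sum is about sqrt(1000) ~= 32, so the 500-1500 window is
        # a very safe bound.)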
        assert data.counts.sum() > 500
        assert data.counts.sum() < 1500
        # This is more likely to fail by chance, but still very unlikely
        assert data.counts[1] > data.counts[0]
    else:
        # No multiplication with exposure time, arf binning, etc.
        # so we just expect very few counts
        assert data.counts.sum() < 10
        assert data.counts.sum() >= 2

    # Essentially double the exposure by having two identical arfs
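    # (If the predicted signal doubles to [400, 800, 800] the expected
    # total is 2000 counts, which motivates the 1200-3000 bounds below.)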
    data.set_arf(arf, 2)
    data.set_rmf(rmf, 2)
    fake_pha(data, mdl, is_source=is_source, add_bkgs=False)
    if is_source:
        assert data.counts.sum() > 1200
        assert data.counts.sum() < 3000
        assert data.counts[1] > data.counts[0]
    else:
        assert data.counts.sum() < 20
        assert data.counts.sum() >= 4
Example #3
def test_fake_pha_bkg_model():
    """Test background model
    """
    data = DataPHA('any', channels, counts, exposure=1000.)

    bkg = DataPHA('bkg', channels, bcounts, exposure=2000, backscal=1.)
    data.set_background(bkg, id='used-bkg')

    data.set_arf(arf)
    data.set_rmf(rmf)

    bkg.set_arf(arf)
    bkg.set_rmf(rmf)

    mdl = Const1D('mdl')
    mdl.c0 = 0

    bmdl = Const1D('bmdl')
    bmdl.c0 = 2

    fake_pha(data,
             mdl,
             is_source=True,
             add_bkgs=True,
             bkg_models={'used-bkg': bmdl})

    assert data.exposure == pytest.approx(1000.0)
    assert (data.channel == channels).all()

    assert data.name == 'any'
    assert data.get_arf().name == 'user-arf'
    assert data.get_rmf().name == 'delta-rmf'

    # The background itself is unchanged
    assert data.background_ids == ['used-bkg']
    bkg = data.get_background('used-bkg')
    assert bkg.name == 'bkg'
    assert bkg.counts == pytest.approx(bcounts)
    assert bkg.exposure == pytest.approx(2000)

    # check we've faked counts (the scaling is such that it is
    # very improbable that this condition will fail)
    assert (data.counts > counts).all()

    # For reference the predicted signal is
    #    [200, 400, 400]
    # but, unlike in the test above, this time it's all coming
    # from the background.
    #
    # What we'd like to say is that the predicted counts are
    # similar, but this is not easy to do. What we can try
    # is summing the counts (to average over the randomness)
    # and then a simple check
    #
    assert data.counts.sum() > 500
    assert data.counts.sum() < 1500
    # This is more likely to fail by chance, but still very unlikely
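    # (The expected values are 400 vs 200, a ratio of 2; testing against
    # 1.5 leaves room for Poisson fluctuations in both bins.)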
    assert data.counts[1] > 1.5 * data.counts[0]

    # Now add a second set of arf/rmf for the data.
    # However, all the signal is background, so this does not change
    # any of the results.
    data.set_arf(arf, 2)
    data.set_rmf(rmf, 2)
    fake_pha(data,
             mdl,
             is_source=True,
             add_bkgs=True,
             bkg_models={'used-bkg': bmdl})
    assert data.counts.sum() > 500
    assert data.counts.sum() < 1500
    assert data.counts[1] > 1.5 * data.counts[0]