Example #1
from sherpa.astro.data import DataPHA


def test_get_mask_is_none():

    pha = DataPHA('name', [1, 2, 3], [1, 1, 1])
    assert pha.mask is True
    assert pha.get_mask() is None
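For context: mask is the stored filter attribute (the scalar True means "no filter applied"), while get_mask() only returns a per-channel boolean array once a filter exists. A minimal sketch of the filtered case, assuming the default channel analysis setting:

import numpy as np

from sherpa.astro.data import DataPHA

pha = DataPHA('name', np.asarray([1, 2, 3]), np.asarray([1, 1, 1]))
pha.notice(2, 3)        # keep channels 2-3
print(pha.mask)         # [False  True  True] - no longer the scalar True
print(pha.get_mask())   # the same per-channel boolean array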
Example #2
import numpy as np

from sherpa.astro.data import DataPHA


def make_basic_datapha():

    chans = np.arange(10)
    los = 0.1 + 0.1 * chans
    his = 0.1 + los
    return DataPHA('', chans, np.ones(10), bin_lo=los, bin_hi=his)
Example #3
def test_fake_pha_basic(has_bkg, is_source, reset_seed):
    """No background.

    See also test_fake_pha_add_background

    For simplicity we use perfect responses.

    A background dataset can be added, but with the default
    settings it should not be used in the simulation.
    """
    np.random.seed(4276)
    data = DataPHA('any', channels, counts, exposure=1000.)

    if has_bkg:
        bkg = DataPHA('bkg', channels, bcounts, exposure=2000, backscal=0.4)
        data.set_background(bkg, id='unused-bkg')

    data.set_arf(arf)
    data.set_rmf(rmf)

    mdl = Const1D('mdl')
    mdl.c0 = 2

    fake_pha(data, mdl, is_source=is_source, add_bkgs=False)

    assert data.exposure == pytest.approx(1000.0)
    assert (data.channel == channels).all()

    assert data.name == 'any'
    assert data.get_arf().name == 'user-arf'
    assert data.get_rmf().name == 'delta-rmf'

    if has_bkg:
        assert data.background_ids == ['unused-bkg']
        bkg = data.get_background('unused-bkg')
        assert bkg.name == 'bkg'
        assert bkg.counts == pytest.approx(bcounts)
        assert bkg.exposure == pytest.approx(2000)

    else:
        assert data.background_ids == []

    if is_source:
        # check we've faked counts (the scaling is such that it is
        # very improbable that this condition will fail)
        assert (data.counts > counts).all()

        # For reference the predicted source signal is
        #    [200, 400, 400]
        #
        # What we'd like to say is that the predicted counts are
        # similar, but this is not easy to do. What we can try
        # is summing the counts (to average over the randomness)
        # and then a simple check
        #
        assert data.counts.sum() > 500
        assert data.counts.sum() < 1500
        # This is more likely to fail by chance, but still very unlikely
        assert data.counts[1] > data.counts[0]
    else:
        # No multiplication with exposure time, arf binning, etc.
        # so we just expect very few counts
        assert data.counts.sum() < 10
        assert data.counts.sum() >= 2

    # Essentially double the exposure by having two identical arfs
    data.set_arf(arf, 2)
    data.set_rmf(rmf, 2)
    fake_pha(data, mdl, is_source=is_source, add_bkgs=False)
    if is_source:
        assert data.counts.sum() > 1200
        assert data.counts.sum() < 3000
        assert data.counts[1] > data.counts[0]
    else:
        assert data.counts.sum() < 20
        assert data.counts.sum() >= 4
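The tests in examples #3 and #4 reference module-level fixtures (channels, counts, bcounts, arf, rmf and the fake_pha function) that are not shown in these excerpts; has_bkg and is_source are pytest parametrizations and reset_seed is a fixture. A minimal sketch of compatible definitions, assuming fake_pha lives in sherpa.astro.fake and a three-bin grid whose widths reproduce the predicted [200, 400, 400] signal quoted in the comments (the bcounts value is hypothetical; the default names of create_arf and create_delta_rmf are 'user-arf' and 'delta-rmf', matching the name checks above):

import numpy as np

from sherpa.astro.fake import fake_pha
from sherpa.astro.instrument import create_arf, create_delta_rmf

channels = np.arange(1, 4, dtype=np.int16)
counts = np.ones(3, dtype=np.int16)
bcounts = 100 * np.ones(3, dtype=np.int16)

# Bin widths of [0.1, 0.2, 0.2] keV: a flat model with c0 = 2 over a
# 1000 s exposure then predicts 2 * width * 1000 = [200, 400, 400].
ebins = np.asarray([1.1, 1.2, 1.4, 1.6])
elo, ehi = ebins[:-1], ebins[1:]
arf = create_arf(elo, ehi)
rmf = create_delta_rmf(elo, ehi, e_min=elo, e_max=ehi)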
Example #4
def test_fake_pha_bkg_model():
    """Test background model
    """
    data = DataPHA('any', channels, counts, exposure=1000.)

    bkg = DataPHA('bkg', channels, bcounts, exposure=2000, backscal=1.)
    data.set_background(bkg, id='used-bkg')

    data.set_arf(arf)
    data.set_rmf(rmf)

    bkg.set_arf(arf)
    bkg.set_rmf(rmf)

    mdl = Const1D('mdl')
    mdl.c0 = 0

    bmdl = Const1D('bmdl')
    bmdl.c0 = 2

    fake_pha(data,
             mdl,
             is_source=True,
             add_bkgs=True,
             bkg_models={'used-bkg': bmdl})

    assert data.exposure == pytest.approx(1000.0)
    assert (data.channel == channels).all()

    assert data.name == 'any'
    assert data.get_arf().name == 'user-arf'
    assert data.get_rmf().name == 'delta-rmf'

    # The background itself is unchanged
    assert data.background_ids == ['used-bkg']
    bkg = data.get_background('used-bkg')
    assert bkg.name == 'bkg'
    assert bkg.counts == pytest.approx(bcounts)
    assert bkg.exposure == pytest.approx(2000)

    # check we've faked counts (the scaling is such that it is
    # very improbable that this condition will fail)
    assert (data.counts > counts).all()

    # For reference the predicted signal is
    #    [200, 400, 400]
    # but, unlike in the test above, this time it's all coming
    # from the background.
    #
    # What we'd like to say is that the predicted counts are
    # similar, but this is not easy to do. What we can try
    # is summing the counts (to average over the randomness)
    # and then a simple check
    #
    assert data.counts.sum() > 500
    assert data.counts.sum() < 1500
    # This is more likely to fail by chance, but still very unlikely
    assert data.counts[1] > 1.5 * data.counts[0]

    # Now add a second set of arf/rmf for the data.
    # However, all the signal is background, so this does not change
    # any of the results.
    data.set_arf(arf, 2)
    data.set_rmf(rmf, 2)
    fake_pha(data,
             mdl,
             is_source=True,
             add_bkgs=True,
             bkg_models={'used-bkg': bmdl})
    assert data.counts.sum() > 500
    assert data.counts.sum() < 1500
    assert data.counts[1] > 1.5 * data.counts[0]
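The [200, 400, 400] prediction quoted in both fake_pha tests follows directly from the fixture sketch above: a flat model with c0 = 2 integrated over the bin widths and scaled by the 1000 s data exposure. A quick check, assuming that hypothetical ebins grid:

import numpy as np

ebins = np.asarray([1.1, 1.2, 1.4, 1.6])
widths = ebins[1:] - ebins[:-1]      # [0.1, 0.2, 0.2]
predicted = 2.0 * widths * 1000.0    # c0 * bin width * exposure
print(predicted)                     # [200. 400. 400.]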
Example #5
def test_rsp_normf_call(arfexp, phaexp):
    """Check out Response1D with no RMF.

    analysis is the analysis setting
    arfexp determines whether the arf has an exposure time
    phaexp determines whether the PHA has an exposure time

    This only uses the channel setting
    """

    # Choose different exposure times for the ARF and the PHA to see
    # which one gets picked up.
    #
    if arfexp:
        arf_exposure = 200.1
    else:
        arf_exposure = None

    if phaexp:
        pha_exposure = 220.9
    else:
        pha_exposure = None

    if phaexp:
        exposure = pha_exposure
        mdl_label = '({} * flat)'.format(exposure)
    elif arfexp:
        exposure = arf_exposure
        mdl_label = '({} * flat)'.format(exposure)
    else:
        exposure = 1.0
        mdl_label = 'flat'

    # rdata is only used to define the grids
    rdata = create_non_delta_rmf()
    specresp = create_non_delta_specresp()
    adata = create_arf(rdata.energ_lo,
                       rdata.energ_hi,
                       specresp,
                       exposure=arf_exposure)

    constant = 2.3
    mdl = Const1D('flat')
    mdl.c0 = constant

    # Turn off integration on this model, so that it is not integrated
    # across the bin width.
    #
    mdl.integrate = False

    nchans = rdata.e_min.size
    channels = np.arange(1, nchans + 1, dtype=np.int16)
    counts = np.ones(nchans, dtype=np.int16)
    pha = DataPHA('test-pha',
                  channel=channels,
                  counts=counts,
                  exposure=pha_exposure)

    pha.set_arf(adata)

    rsp = Response1D(pha)
    wrapped = rsp(mdl)

    assert isinstance(wrapped, ArithmeticModel)

    expname = 'apply_arf({})'.format(mdl_label)
    assert wrapped.name == expname

    expected = exposure * constant * specresp

    pha.set_analysis('channel')
    out = wrapped([4, 5])
    assert_allclose(out, expected)
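Response1D (from sherpa.astro.instrument) wraps a model with whatever responses the PHA carries; with no RMF it applies just the ARF and the relevant exposure time. A minimal sketch with a flat ARF, where the folded values are easy to predict (the grid and exposure values are hypothetical):

import numpy as np

from sherpa.astro.data import DataPHA
from sherpa.astro.instrument import Response1D, create_arf
from sherpa.models.basic import Const1D

# A flat ARF (specresp defaults to 1) over ten hypothetical bins
egrid = np.linspace(0.1, 1.1, 11)
arf = create_arf(egrid[:-1], egrid[1:], exposure=100.0)

pha = DataPHA('demo', np.arange(1, 11, dtype=np.int16),
              np.zeros(10), exposure=100.0)
pha.set_arf(arf)
pha.set_analysis('channel')

mdl = Const1D('flat')
mdl.c0 = 2.0
mdl.integrate = False   # do not integrate over the bin width

wrapped = Response1D(pha)(mdl)
print(wrapped.name)     # apply_arf((100.0 * flat))
# Each bin should be exposure * c0 * specresp = 100 * 2 * 1 = 200
print(wrapped([1, 2]))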
Example #6
def setUp(self):
    self.old_level = logger.getEffectiveLevel()
    logger.setLevel(logging.ERROR)
    self.pha = DataPHA('', numpy.arange(16384, dtype=float) + 1,
                       numpy.zeros(16384),
                       bin_lo=self._emin, bin_hi=self._emax)
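This setUp is a fragment of a unittest.TestCase: logger, self._emin and self._emax come from surrounding code that is not shown. A sketch of the assumed scaffolding (the logger name, the class name and the 16384-element grids are all hypothetical, sized only to match the DataPHA call):

import logging
import unittest

import numpy

from sherpa.astro.data import DataPHA

logger = logging.getLogger('sherpa')   # assumed logger name


class GratingTestCase(unittest.TestCase):   # hypothetical name

    # Hypothetical contiguous bin edges, one pair per channel
    _edges = numpy.linspace(1.0, 20.0, 16385)
    _emin = _edges[:-1]
    _emax = _edges[1:]

    def setUp(self):
        self.old_level = logger.getEffectiveLevel()
        logger.setLevel(logging.ERROR)
        self.pha = DataPHA('', numpy.arange(16384, dtype=float) + 1,
                           numpy.zeros(16384),
                           bin_lo=self._emin, bin_hi=self._emax)

    def tearDown(self):
        # restore the logging level changed in setUp
        logger.setLevel(self.old_level)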
Example #7
def read_pha(arg, use_errors=False, use_background=False):
    """Create a DataPHA object.

    Parameters
    ----------
    arg
        The name of the file or a representation of the file
        (the type depends on the I/O backend) containing the
        PHA data.
    use_errors : bool, optional
        If the PHA file contains statistical error values for the
        count (or count rate) column, should they be read in?
        Defaults to ``False``.
    use_background : bool, optional
        Should the background PHA data (and optional responses) also
        be read in and associated with the data set? Defaults to
        ``False``.

    Returns
    -------
    data : sherpa.astro.data.DataPHA

    """
    datasets, filename = backend.get_pha_data(arg,
                                              use_background=use_background)
    phasets = []
    output_once = True
    for data in datasets:
        if not use_errors:
            if data['staterror'] is not None or data['syserror'] is not None:
                if data['staterror'] is None:
                    msg = 'systematic'
                elif data['syserror'] is None:
                    msg = 'statistical'
                    if output_once:
                        wmsg = "systematic errors were not found in " + \
                               f"file '{filename}'"
                        warning(wmsg)
                else:
                    msg = 'statistical and systematic'
                if output_once:
                    imsg = msg + " errors were found in file " + \
                           f"'{filename}'\nbut not used; " + \
                           "to use them, re-read with use_errors=True"
                    info(imsg)
                data['staterror'] = None
                data['syserror'] = None

        dname = os.path.dirname(filename)
        albl = 'ARF'
        rlbl = 'RMF'
        if use_background:
            albl = albl + ' (background)'
            rlbl = rlbl + ' (background)'

        arf = _read_ancillary(data, 'arffile', albl, dname, read_arf,
                              output_once)
        rmf = _read_ancillary(data, 'rmffile', rlbl, dname, read_rmf,
                              output_once)

        backgrounds = []

        if data['backfile'] and data['backfile'].lower() != 'none':
            try:
                if os.path.dirname(data['backfile']) == '':
                    data['backfile'] = os.path.join(os.path.dirname(filename),
                                                    data['backfile'])

                bkg_datasets = []
                # Do not read backgrounds of backgrounds
                if not use_background:
                    bkg_datasets = read_pha(data['backfile'], use_errors, True)

                    if output_once:
                        info(f"read background file {data['backfile']}")

                if numpy.iterable(bkg_datasets):
                    for bkg_dataset in bkg_datasets:
                        if bkg_dataset.get_response() == (None, None) and \
                           rmf is not None:
                            bkg_dataset.set_response(arf, rmf)
                        backgrounds.append(bkg_dataset)
                else:
                    if bkg_datasets.get_response() == (None, None) and \
                       rmf is not None:
                        bkg_datasets.set_response(arf, rmf)
                    backgrounds.append(bkg_datasets)

            except Exception:
                if output_once:
                    warning(str(sys.exc_info()[1]))

        for bkg_type, bscal_type in zip(('background_up', 'background_down'),
                                        ('backscup', 'backscdn')):
            if data[bkg_type] is not None:
                bkg = DataPHA(filename,
                              channel=data['channel'],
                              counts=data[bkg_type],
                              bin_lo=data['bin_lo'],
                              bin_hi=data['bin_hi'],
                              grouping=data['grouping'],
                              quality=data['quality'],
                              exposure=data['exposure'],
                              backscal=data[bscal_type],
                              header=data['header'])
                bkg.set_response(arf, rmf)
                if output_once:
                    info(
                        f"read {bkg_type} into a dataset from file {filename}")
                backgrounds.append(bkg)

        for k in [
                'backfile', 'arffile', 'rmffile', 'backscup', 'backscdn',
                'background_up', 'background_down'
        ]:
            data.pop(k, None)

        pha = DataPHA(filename, **data)
        pha.set_response(arf, rmf)
        for i, bkg in enumerate(backgrounds):
            if bkg.grouping is None:
                bkg.grouping = pha.grouping
                bkg.grouped = bkg.grouping is not None
            if bkg.quality is None:
                bkg.quality = pha.quality
            pha.set_background(bkg, i + 1)

        # set units *after* bkgs have been set
        pha._set_initial_quantity()
        phasets.append(pha)
        output_once = False

    if len(phasets) == 1:
        phasets = phasets[0]

    return phasets
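A minimal usage sketch for read_pha; the file name is hypothetical, and the ARF, RMF and background are only picked up when the PHA header names them (the standard ANCRFILE, RESPFILE and BACKFILE keywords):

from sherpa.astro.io import read_pha

# With use_errors=False (the default) any statistical/systematic
# error columns are dropped with an informational message, as
# coded above.
pha = read_pha('source.pha')
print(pha.name)
print(pha.get_response())     # (arf, rmf), either may be None
print(pha.background_ids)     # e.g. [1] when a BACKFILE was found

# Re-read keeping the statistical/systematic error columns
pha = read_pha('source.pha', use_errors=True)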