Example #1
 def setUp(self):
     self.old_level = logger.getEffectiveLevel()
     logger.setLevel(logging.ERROR)
     self.pha = DataPHA('', numpy.arange(46, dtype=float) + 1.,
                        numpy.zeros(46),
                        bin_lo=self._emin, bin_hi=self._emax)
     self.pha.units = "energy"
Example #2
def test_sourceplot(caplog):

    bins = np.arange(0.1, 10.1, 0.1)
    data = DataPHA('',
                   np.arange(10),
                   np.ones(10),
                   bin_lo=bins[:-1].copy(),
                   bin_hi=bins[1:].copy())
    data.units = "energy"
    assert data.rate
    assert data.plot_fac == 0

    # use a model that is "okay" to use with keV bins
    #
    m1 = Const1D('bgnd')
    m2 = Gauss1D('abs1')
    src = 100 * m1 * (1 - m2) * 10000

    m1.c0 = 0.01
    m2.pos = 5.0
    m2.fwhm = 4.0
    m2.ampl = 0.1

    sp = SourcePlot()
    with caplog.at_level(logging.INFO, logger='sherpa'):
        sp.prepare(data, src)

    assert len(caplog.records) == 0
    check_sourceplot_energy(sp)
Example #3
class test_filter_energy_grid(SherpaTestCase):

    _notice = numpy.ones(46, dtype=bool)
    _notice[44:46] = False

    _ignore = numpy.zeros(46, dtype=bool)
    _ignore[14:33] = True

    _emin = numpy.array([
        1.46000006e-03,   2.48199999e-01,   3.06600004e-01,   4.67200011e-01,
        5.69400012e-01,   6.42400026e-01,   7.00800002e-01,   7.44599998e-01,
        7.88399994e-01,   8.17600012e-01,   8.61400008e-01,   8.90600026e-01,
        9.49000001e-01,   9.92799997e-01,   1.03659999e+00,   1.09500003e+00,
        1.13880002e+00,   1.19719994e+00,   1.28480005e+00,   1.40160000e+00,
        1.47459996e+00,   1.60599995e+00,   1.69360006e+00,   1.81040001e+00,
        1.89800000e+00,   1.94180000e+00,   2.02940011e+00,   2.08780003e+00,
        2.19000006e+00,   2.27760005e+00,   2.39439988e+00,   2.58419991e+00,
        2.71560001e+00,   2.86159992e+00,   3.08060002e+00,   3.38720012e+00,
        3.56240010e+00,   3.79600000e+00,   4.02960014e+00,   4.24860001e+00,
        4.71579981e+00,   5.02239990e+00,   5.37279987e+00,   5.89839983e+00,
        6.57000017e+00,   9.86960030e+00], float)

    _emax = numpy.array([
        0.2482    ,   0.3066    ,   0.46720001,   0.56940001,   0.64240003,
        0.7008    ,   0.7446    ,   0.78839999,   0.81760001,   0.86140001,
        0.89060003,   0.949     ,   0.9928    ,   1.03659999,   1.09500003,
        1.13880002,   1.19719994,   1.28480005,   1.4016    ,   1.47459996,
        1.60599995,   1.69360006,   1.81040001,   1.898     ,   1.9418    ,
        2.02940011,   2.08780003,   2.19000006,   2.27760005,   2.39439988,
        2.58419991,   2.71560001,   2.86159992,   3.08060002,   3.38720012,
        3.5624001 ,   3.796     ,   4.02960014,   4.24860001,   4.71579981,
        5.0223999 ,   5.37279987,   5.89839983,   6.57000017,   9.8696003 ,
        14.95040035], float)

    def setUp(self):
        self.old_level = logger.getEffectiveLevel()
        logger.setLevel(logging.ERROR)
        self.pha = DataPHA('', numpy.arange(46, dtype=float) + 1.,
                           numpy.zeros(46),
                           bin_lo=self._emin, bin_hi=self._emax)
        self.pha.units = "energy"

    def tearDown(self):
        logger.setLevel(self.old_level)

    def test_notice(self):
        # clear mask
        self.pha.notice()
        self.pha.notice(0.0, 6.0)
        # self.assertEqual(self._notice, self.pha.mask)
        assert (self._notice == numpy.asarray(self.pha.mask)).all()

    def test_ignore(self):
        # clear mask
        self.pha.notice()
        self.pha.ignore(0.0, 1.0)
        self.pha.ignore(3.0, 15.0)
        # self.assertEqual(self._ignore, self.pha.mask)
        assert (self._ignore == numpy.asarray(self.pha.mask)).all()
Example #4
 def setUp(self):
     self.pha = DataPHA('',
                        np.arange(204, dtype=float) + 1.,
                        np.zeros(204),
                        bin_lo=self._emin,
                        bin_hi=self._emax)
     self.pha.units = "energy"
Example #5
def test_rsp_normf_error(analysis):
    """Check that an error is raised on set_analysis

    """

    exposure = 200.1

    # rdata is only used to define the grids
    rdata = create_non_delta_rmf()
    specresp = create_non_delta_specresp()
    adata = create_arf(rdata.energ_lo,
                       rdata.energ_hi,
                       specresp,
                       exposure=exposure)

    nchans = rdata.e_min.size
    channels = np.arange(1, nchans + 1, dtype=np.int16)
    counts = np.ones(nchans, dtype=np.int16)
    pha = DataPHA('test-pha', channel=channels, counts=counts,
                  exposure=exposure)
    pha.set_arf(adata)

    with pytest.raises(DataErr) as exc:
        pha.set_analysis(analysis)

    emsg = "response incomplete for dataset test-pha, " + \
           "check the instrument model"
    assert str(exc.value) == emsg
Example #6
def test_pha_get_ylabel_yfac1(override_plot_backend):
    """Basic check

    The label depends on the backend, so we just want the dummy
    backend used here. **UNFORTUNATELY** - either because the
    override_plot_backend fixture is not well written, the current
    approach to setting up the plot backend does not handle it being
    swapped out (e.g. see #1191), or a combination of the two - the
    test doesn't work well if there is a non-dummy backend loaded.

    """

    chans = np.arange(1, 4)
    counts = np.ones_like(chans)
    pha = DataPHA("dummy", chans, counts)

    pha.plot_fac = 1

    # This is ugly - hopefully #1191 will fix this
    #
    ylabel = pha.get_ylabel()
    if backend.__name__.endswith('.dummy_backend'):
        assert ylabel == 'Counts/channel X Channel^1'
    else:
        assert ylabel.startswith('Counts/channel X Channel')
        assert "1" in ylabel
Example #7
def test_pha_get_filter_checks_ungrouped(chtype, expected, args):
    """Check we get the filter we expect

    chtype is channel, energy, or wavelength
    expected is the expected response
    args is a list of 3-tuples of (flag, loval, hival) where
    flag is True for notice and False for ignore; they define
    the filter to apply
    """

    chans = np.arange(1, 11, dtype=int)
    counts = np.ones(10, dtype=int)
    pha = DataPHA('data', chans, counts)

    # Use an ARF to create a channel to energy mapping
    # The 0.2-2.2 keV range maps to 5.636-61.992 Angstrom
    #
    egrid = 0.2 * np.arange(1, 12)
    arf = DataARF('arf', egrid[:-1], egrid[1:], np.ones(10))
    pha.set_arf(arf)

    pha.units = chtype
    for (flag, lo, hi) in args:
        if flag:
            pha.notice(lo, hi)
        else:
            pha.ignore(lo, hi)

    assert pha.get_filter(format='%.1f') == expected
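
As an aside, the (flag, loval, hival) triples described in the docstring could look like the following hypothetical parametrization value (illustrative only, not taken from the original test):

# Hypothetical args value: notice 0.5-1.5 keV, then ignore 0.8-1.0 keV.
example_args = [(True, 0.5, 1.5), (False, 0.8, 1.0)]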
Example #8
class test_filter_wave_grid(SherpaTestCase):

    _notice = np.ones(16384, dtype=bool)
    _notice[8465:16384] = False

    _ignore = np.zeros(16384, dtype=bool)
    _ignore[14064:15984] = True

    _emin = np.arange(205.7875, 0.9875, -0.0125)

    _emax = _emin + 0.0125

    def setUp(self):
        self.old_level = logger.getEffectiveLevel()
        logger.setLevel(logging.ERROR)
        self.pha = DataPHA('', np.arange(16384, dtype=float) + 1,
                           np.zeros(16384),
                           bin_lo=self._emin,
                           bin_hi=self._emax)

    def tearDown(self):
        logger.setLevel(self.old_level)

    def test_notice(self):
        self.pha.units = 'wavelength'
        self.pha.notice()
        self.pha.notice(100.0, 225.0)
        assert (self._notice == np.asarray(self.pha.mask)).all()

    def test_ignore(self):
        self.pha.units = 'wavelength'
        self.pha.notice()
        self.pha.ignore(30.01, 225.0)
        self.pha.ignore(0.1, 6.0)
        assert (self._ignore == np.asarray(self.pha.mask)).all()
Example #9
def test_sourceplot_wavelength_counts(caplog):
    """test_sourceplot_wavelength but when rate=False is chosen"""

    bins = np.arange(0.1, 10.1, 0.1)
    data = DataPHA('',
                   np.arange(10),
                   np.ones(10),
                   bin_lo=bins[:-1].copy(),
                   bin_hi=bins[1:].copy())
    data.units = "wave"
    data.rate = False

    # use a model that is "okay" to use with keV bins
    #
    m1 = Const1D('bgnd')
    m2 = Gauss1D('abs1')
    src = 100 * m1 * (1 - m2) * 10000

    m1.c0 = 0.01
    m2.pos = 5.0
    m2.fwhm = 4.0
    m2.ampl = 0.1

    sp = SourcePlot()
    with caplog.at_level(logging.INFO, logger='sherpa'):
        sp.prepare(data, src)

    assert len(caplog.records) == 0
    check_sourceplot_wavelength(sp)
Example #10
def test_pha_get_xerr_all_bad_energy_group():
    """get_xerr handles all bad values [energy]

    The behavior with grouping is different, presumably because
    we assume we have grouping when we have a quality array.
    """

    pha = DataPHA('name', [1, 2, 3], [1, 1, 1],
                  grouping=[1, 1, 1],
                  quality=[2, 2, 2])

    ebins = np.asarray([3.0, 5., 8.0, 12.0])
    rlo = ebins[:-1]
    rhi = ebins[1:]
    rmf = create_delta_rmf(rlo, rhi, e_min=rlo, e_max=rhi)
    pha.set_rmf(rmf)
    pha.units = 'energy'

    assert pha.get_xerr() == pytest.approx([2.0, 3.0, 4.0])

    assert pha.grouped
    pha.ignore_bad()

    # Should this error out or not?
    assert pha.get_filter() == ''
    # with pytest.raises(DataErr) as de:
    #     pha.get_filter()

    # assert str(de.value) == 'mask excludes all data'

    assert pha.get_xerr() == pytest.approx([])
Example #11
def test_sourceplot_wavelength_facn(factor, caplog):
    """Change plot factor for test_sourceplot_wavelength"""

    bins = np.arange(0.1, 10.1, 0.1)
    data = DataPHA('',
                   np.arange(10),
                   np.ones(10),
                   bin_lo=bins[:-1].copy(),
                   bin_hi=bins[1:].copy())
    data.units = "wavelength"
    data.plot_fac = factor
    assert data.rate

    m1 = Const1D('bgnd')
    m2 = Gauss1D('abs1')
    src = 100 * m1 * (1 - m2) * 10000

    m1.c0 = 0.01
    m2.pos = 5.0
    m2.fwhm = 4.0
    m2.ampl = 0.1

    sp = SourcePlot()
    with caplog.at_level(logging.INFO, logger='sherpa'):
        sp.prepare(data, src)

    assert len(caplog.records) == 0
    check_sourceplot_wavelength(sp, factor=factor)
Example #12
def test_sourceplot_wavelength(caplog):
    """Check we get wavelength units"""

    bins = np.arange(0.1, 10.1, 0.1)
    data = DataPHA('',
                   np.arange(10),
                   np.ones(10),
                   bin_lo=bins[:-1].copy(),
                   bin_hi=bins[1:].copy())
    data.units = "wave"

    # Note that the model evaluation in done in Angstroms
    #
    m1 = Const1D('bgnd')
    m2 = Gauss1D('abs1')
    src = 100 * m1 * (1 - m2) * 10000

    m1.c0 = 0.01
    m2.pos = 5.0
    m2.fwhm = 4.0
    m2.ampl = 0.1

    sp = SourcePlot()
    with caplog.at_level(logging.INFO, logger='sherpa'):
        sp.prepare(data, src)

    assert len(caplog.records) == 0
    check_sourceplot_wavelength(sp)
Example #13
def test_sourceplot_channels(caplog):
    """Although we ask for channels we get energy units"""

    bins = np.arange(0.1, 10.1, 0.1)
    data = DataPHA('',
                   np.arange(10),
                   np.ones(10),
                   bin_lo=bins[:-1].copy(),
                   bin_hi=bins[1:].copy())
    data.units = "channel"

    # use a model that is "okay" to use with keV bins
    #
    m1 = Const1D('bgnd')
    m2 = Gauss1D('abs1')
    src = 100 * m1 * (1 - m2) * 10000

    m1.c0 = 0.01
    m2.pos = 5.0
    m2.fwhm = 4.0
    m2.ampl = 0.1

    sp = SourcePlot()
    with caplog.at_level(logging.INFO, logger='sherpa'):
        sp.prepare(data, src)

    assert len(caplog.records) == 1
    lname, lvl, msg = caplog.record_tuples[0]
    assert lname == 'sherpa.astro.plot'
    assert lvl == logging.WARN
    assert msg == 'Channel space is unappropriate for the PHA unfolded source model,\nusing energy.'

    check_sourceplot_energy(sp)
Example #15
def calc_wstat_sherpa(mu_sig, n_on, n_off, alpha):
    import sherpa.stats as ss
    from sherpa.astro.data import DataPHA
    from sherpa.models import Const1D
    wstat = ss.WStat()

    model = Const1D()
    model.c0 = mu_sig
    data = DataPHA(counts=np.atleast_1d(n_on),
                   name='dummy',
                   channel=np.atleast_1d(1),
                   backscal=1,
                   exposure=1)
    background = DataPHA(counts=np.atleast_1d(n_off),
                         name='dummy background',
                         channel=np.atleast_1d(1),
                         backscal=np.atleast_1d(1. / alpha),
                         exposure=1)

    data.set_background(background, 1)

    # Docstring for ``calc_stat``
    # https://github.com/sherpa/sherpa/blob/fe8508818662346cb6d9050ba676e23318e747dd/sherpa/stats/__init__.py#L219

    stat = wstat.calc_stat(model=model, data=data)
    print("Sherpa stat: {}".format(stat[0]))
    print("Sherpa fvec: {}".format(stat[1]))
Example #16
 def setUp(self):
     self.old_level = logger.getEffectiveLevel()
     logger.setLevel(logging.ERROR)
     self.pha = DataPHA('',
                        np.arange(16384, dtype=float) + 1,
                        np.zeros(16384),
                        bin_lo=self._emin,
                        bin_hi=self._emax)
Example #17
def test_can_not_group_ungrouped():
    """Does setting the grouping setting fail with no data?"""

    pha = DataPHA('name', [1, 2, 3], [1, 1, 1])
    assert not pha.grouped
    with pytest.raises(DataErr) as exc:
        pha.grouped = True

    assert str(exc.value) == "data set 'name' does not specify grouping flags"
Example #18
def test_error_on_invalid_channel_ungrouped(chan):
    """Does channel access fail when outside the bounds?

    For ungrouped data it currently does not, but just
    acts as an identity function.
    """

    pha = DataPHA('name', [1, 2, 3], [1, 1, 1])
    assert pha._from_channel(chan) == chan
Example #19
def test_pha_get_ylabel_yfac0():
    """This does not depend on the backend"""

    chans = np.arange(1, 4)
    counts = np.ones_like(chans)
    pha = DataPHA("dummy", chans, counts)

    assert pha.plot_fac == 0
    assert pha.get_ylabel() == 'Counts/channel'
Example #20
def test_sourceplot():

    bins = np.arange(0.1, 10.1, 0.1)
    data = DataPHA('', np.arange(10), np.ones(10),
                   bin_lo=bins[:-1].copy(),
                   bin_hi=bins[1:].copy())
    data.units = "energy"

    # use a model that is "okay" to use with keV bins
    #
    m1 = Const1D('bgnd')
    m2 = Gauss1D('abs1')
    src = 100 * m1 * (1 - m2) * 10000

    m1.c0 = 0.01
    m2.pos = 5.0
    m2.fwhm = 4.0
    m2.ampl = 0.1

    sp = SourcePlot()
    sp.prepare(data, src)

    # add in several asserts to check that something has been
    # added to the object
    #
    assert sp.xlabel == 'Energy (keV)'

    # the following depends on the backend
    # assert sp.ylabel == 'f(E)  Photons/sec/cm$^2$/keV'

    assert sp.title == 'Source Model of '

    assert sp.xlo == pytest.approx(bins[:-1])
    assert sp.xhi == pytest.approx(bins[1:])

    # The check of the values is just to check that things are going
    # as expected, so the model values have been adjusted so that
    # an "integer" check can be used with enough precision to make
    # sure that the model is being evaluated correctly, but without
    # a very-high-precision check
    #
    yexp = np.asarray([9998, 9997, 9997, 9997, 9996, 9996, 9995, 9994,
                       9994, 9993, 9992, 9991, 9990, 9988, 9987, 9985,
                       9983, 9982, 9980, 9977, 9975, 9973, 9970, 9967,
                       9964, 9961, 9958, 9955, 9951, 9948, 9944, 9941,
                       9937, 9934, 9930, 9927, 9923, 9920, 9917, 9914,
                       9911, 9909, 9907, 9905, 9903, 9902, 9901, 9900,
                       9900, 9900, 9900, 9901, 9902, 9903, 9905, 9907,
                       9909, 9911, 9914, 9917, 9920, 9923, 9927, 9930,
                       9934, 9937, 9941, 9944, 9948, 9951, 9955, 9958,
                       9961, 9964, 9967, 9970, 9973, 9975, 9977, 9980,
                       9982, 9983, 9985, 9987, 9988, 9990, 9991, 9992,
                       9993, 9994, 9994, 9995, 9996, 9996, 9997, 9997,
                       9997, 9998, 9998])

    assert (sp.y.astype(int) == yexp).all()
Example #21
def test_pha_set_analysis_rate_invalid():
    """Just check we error out"""

    chans = np.arange(1, 4)
    counts = np.ones_like(chans)
    pha = DataPHA("dummy", chans, counts)

    with pytest.raises(DataErr) as de:
        pha.set_analysis("channel", type=None)

    assert str(de.value) == "unknown plot type 'None', choose 'rate' or 'counts'"
Example #22
def test_rsp_no_arf_matrix_call(analysis, phaexp):
    """Check out Response1D with matrix but no ARF

    analysis is the analysis setting
    arfexp determines whether the arf has an exposure time
    phaexp determines whether the PHA has an exposure time
    """

    if phaexp:
        pha_exposure = 220.9
    else:
        pha_exposure = None

    if phaexp:
        exposure = pha_exposure
        mdl_label = '({} * flat)'.format(exposure)
    else:
        exposure = 1.0
        mdl_label = 'flat'

    rdata = create_non_delta_rmf()

    constant = 2.3
    mdl = Const1D('flat')
    mdl.c0 = constant

    # Turn off integration on this model, so that it is not integrated
    # across the bin width.
    #
    mdl.integrate = False

    nchans = rdata.e_min.size
    channels = np.arange(1, nchans + 1, dtype=np.int16)
    counts = np.ones(nchans, dtype=np.int16)
    pha = DataPHA('test-pha', channel=channels, counts=counts,
                  exposure=pha_exposure)

    pha.set_rmf(rdata)

    rsp = Response1D(pha)
    wrapped = rsp(mdl)

    assert isinstance(wrapped, ArithmeticModel)

    expname = 'apply_rmf({})'.format(mdl_label)
    assert wrapped.name == expname

    modvals = exposure * constant * np.ones(rdata.energ_lo.size)
    matrix = get_non_delta_matrix()
    expected = np.matmul(modvals, matrix)

    pha.set_analysis(analysis)
    out = wrapped([4, 5])
    assert_allclose(out, expected)
Example #23
def test_error_on_invalid_channel_grouped(chan, exp1, exp2):
    """Does channel access fail when outside the bounds?

    It is not clear what _from_channel is doing here, so
    just check the responses.
    """

    pha = DataPHA('name', [1, 2, 3], [1, 1, 1],
                  grouping=[1, -1, 1])
    assert pha.grouped
    assert pha._from_channel(chan) == exp2
Example #24
def make_data(data_class):
    """Create a test data object of the given class.

    Using a string means it is easier to support the various PHA
    "types" - eg basic, grouping, grouping+quality.

    """

    x0 = np.asarray([1, 3, 7, 12])
    y = np.asarray([2, 3, 4, 5])
    if data_class == "1d":
        return Data1D('x1', x0, y)

    if data_class == "1dint":
        return Data1DInt('xint1', x0, np.asarray([3, 5, 8, 15]), y)

    chans = np.arange(1, 5)
    if data_class == "pha":
        return DataPHA('pha', chans, y)

    # We want to provide PHA tests that check out the grouping and
    # quality handling (but it is not worth trying all different
    # variants), so we have "grp" for grouping and no quality [*], and
    # "qual" for grouping and quality.
    #
    # [*] by which I mean we have not called ignore_bad, not that
    # there is no quality array.
    #
    grp = np.asarray([1, -1, 1, 1])
    qual = np.asarray([0, 0, 2, 0])
    pha = DataPHA('pha', chans, y, grouping=grp, quality=qual)
    if data_class == "grp":
        return pha

    if data_class == "qual":
        pha.ignore_bad()
        return pha

    x0 = np.asarray([1, 2, 3] * 2)
    x1 = np.asarray([1, 1, 1, 2, 2, 2])
    y = np.asarray([2, 3, 4, 5, 6, 7])
    if data_class == "2d":
        return Data2D('x2', x0, x1, y, shape=(2, 3))

    if data_class == "2dint":
        return Data2DInt('xint2', x0, x1, x0 + 1, x1 + 1, y, shape=(2, 3))

    if data_class == "img":
        return DataIMG('img', x0, x1, y, shape=(2, 3))

    if data_class == "imgint":
        return DataIMGInt('imgi', x0, x1, x0 + 1, x1 + 1, y, shape=(2, 3))

    assert False
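
A short usage sketch that loops over the class labels handled above (assumes the imports used by the original test module are available):

for label in ("1d", "1dint", "pha", "grp", "qual",
              "2d", "2dint", "img", "imgint"):
    d = make_data(label)
    # e.g. "pha DataPHA", "img DataIMG", ...
    print(label, type(d).__name__)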
Example #25
def test_pha_ignore_bad_no_quality():
    """Just check we error out"""

    chans = np.arange(1, 4)
    counts = np.ones_like(chans)
    pha = DataPHA("dummy", chans, counts)

    with pytest.raises(DataErr) as de:
        pha.ignore_bad()

    assert str(de.value) == "data set 'dummy' does not specify quality flags"
Example #26
def test_288_b():
    """The issue from #288 which was failing"""

    channels = np.arange(1, 6)
    counts = np.asarray([5, 5, 10, 10, 2])
    grouping = np.asarray([1, -1, 1, -1, 1], dtype=np.int16)
    pha = DataPHA('x', channels, counts, grouping=grouping)

    assert pha.mask
    pha.ignore(3.1, 4)

    assert pha.mask == pytest.approx([True, False, True])
Example #27
def test_stats_calc_stat_wstat_diffbins():
    """wstat statistic fails when src/bg bin sizes do not match"""

    statobj = WStat()

    data, model = setup_single_pha(True, False, background=True)

    # Tweak data to have one-less bin than the background. This
    # used to be easy but with data validation we need to
    # create a new object.
    #
    data2 = DataPHA("faked",
                    channel=data.channel[:-1],
                    counts=data.counts[:-1],
                    staterror=data.staterror[:-1],
                    grouping=data.grouping[:-1],
                    exposure=data.exposure,
                    backscal=data.backscal,
                    areascal=data.areascal)

    # We might expect the ARF/RMF calls to fail if we add validation
    # (to check the ARF/RMF is valid for the PHA dataset).
    #
    data2.set_arf(data.get_arf())
    data2.set_rmf(data.get_rmf())
    data2.set_background(data.get_background())

    # There is no Sherpa error for this, which seems surprising
    with pytest.raises(TypeError) as err:
        statobj.calc_stat(data2, model)

    emsg = "input array sizes do not match, data: 5 vs group: 4"
    assert str(err.value) == emsg
Example #28
def test_288_a_energy():
    """The issue from #288 which was working

    test_288_a but with a response so we test energy filters
    """

    channels = np.arange(1, 6)
    counts = np.asarray([5, 5, 10, 10, 2])
    grouping = np.asarray([1, -1, 1, -1, 1], dtype=np.int16)
    pha = DataPHA('x', channels, counts, grouping=grouping)

    rlo = channels
    rhi = channels + 1
    rmf = create_delta_rmf(rlo, rhi, e_min=rlo, e_max=rhi)
    pha.set_arf(rmf)
    pha.set_analysis('energy')

    assert pha.mask
    pha.ignore(3, 4)

    # I use approx because it gives a nice answer, even though
    # I want equality not approximation in this test. Fortunately
    # with bools the use of approx is okay (it can tell the
    # difference between 0 and 1, aka False and True).
    #
    assert pha.mask == pytest.approx([True, False, True])
Example #29
def test_416_a():
    """The first test case from issue #416"""

    # if y is not a numpy array then group_counts errors out
    # with a strange error. Another reason why DataPHA needs
    # to validate input
    #
    x = np.asarray([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
    y = np.asarray([0, 0, 0, 2, 1, 1, 0, 0, 0, 0])

    pha = DataPHA('416', x, y)
    pha.notice(3.5, 6.5)

    mask = [False, False, False, True, True, True, False, False, False, False]
    assert pha.mask == pytest.approx(mask)

    pha.group_counts(3)

    # We have a simplified mask
    mask = [True, True]
    assert pha.mask == pytest.approx(mask)

    # the "full" mask can be retrieved with get_mask
    mask = [True] * 10
    assert pha.get_mask() == pytest.approx(mask)

    grouping = [1, -1, -1, -1, -1,  1, -1, -1, -1, -1.]
    assert pha.grouping == pytest.approx(grouping)

    quality = [0, 0, 0, 0, 0, 2, 2, 2, 2, 2]
    assert pha.quality == pytest.approx(quality)

    dep = pha.get_dep(filter=True)
    assert dep == pytest.approx([3, 1])
Example #32
def test_xspecvar_zero_handling(bexp, yexp, dyexp):
    """How does XSPEC variance handle 0 in source and/or background?

    The values were calculated using XSPEC 12.10.1m (HEASOFT 6.26.1)
    using the following commands to create the file foo.dat which
    contains (after three 'header' lines) the data 'x 0.5 y dy'

        data foo.fits
	iplot data
	wplot foo.dat
	quit

    where foo.fits is a fake PHA file set up to have the channel/count
    values used below (a CSC-style PHA file was used so that source
    and background were in the same file but a separate bgnd PHA
    file could also have been used).
    """

    stat = Chi2XspecVar()
    chans = numpy.arange(1, 10, dtype=numpy.int16)
    scnts = numpy.asarray([0, 0, 0, 1, 3, 1, 1, 3, 3], dtype=numpy.int16)
    bcnts = numpy.asarray([0, 1, 3, 0, 0, 1, 3, 1, 3], dtype=numpy.int16)

    s = DataPHA('src', chans, scnts, exposure=1)
    b = DataPHA('bkg', chans, bcnts, exposure=bexp)
    s.set_background(b)
    s.subtract()

    y, dy, other = s.to_fit(staterrfunc=stat.calc_staterror)
    assert other is None
    assert y == pytest.approx(yexp)
    assert dy == pytest.approx(dyexp)
Example #33
def test_rmfmodelpha_delta_no_ebounds(analysis, caplog):
    """What happens calling an rmf with a pha and no EBOUNDS is set

    Ensure we can't filter on energy or wavelength since there's no
    EBOUNDS information. This behavior was seen when writing
    test_rmfmodelpha_call, so a test was written for it.

    The code used to raise a DataErr but now just displays a
    logged warning.
    """

    estep = 0.01
    egrid = np.arange(0.01, 0.06, estep)
    rdata = create_delta_rmf(egrid[:-1], egrid[1:])

    channels = np.arange(1, 5, dtype=np.int16)
    counts = np.asarray([10, 5, 12, 7], dtype=np.int16)
    pha = DataPHA('test-pha', channel=channels, counts=counts)
    pha.set_rmf(rdata)

    pha.set_analysis(analysis)
    with caplog.at_level(logging.INFO, logger='sherpa'):
        pha.notice(0.025, 0.045, ignore=False)

    assert len(caplog.records) == 1
    log_name, log_level, message = caplog.record_tuples[0]
    assert log_name == 'sherpa.astro.data'
    assert log_level == logging.INFO
    assert message == 'Skipping dataset test-pha: RMF does not specify energy bins'
Example #34
def test_pha_no_bkg(subtract):
    """Just check we error out

    Given the way the code works, it errors out both ways.
    """

    chans = np.arange(1, 4)
    counts = np.ones_like(chans)
    pha = DataPHA("dummy", chans, counts)

    with pytest.raises(DataErr) as de:
        pha.subtracted = subtract

    assert str(de.value) == "data set 'dummy' does not have any associated backgrounds"
Example #37
def test_arfmodelpha_call(ignore):
    """What happens calling an arf with a pha?

    The ignore value indicates what channel to ignore (0 means
    nothing is ignored). The aim is to check edge effects,
    and as there are only a few channels, it was decided to
    test all channels.
    """

    # Note: the exposure is set in the PHA and ARF, but should not be
    #       used when evaluating the model; its value has been
    #       set to a value that makes the test fail if it is used.
    #
    exposure = 200.1
    estep = 0.01
    egrid = np.arange(0.01, 0.06, estep)
    svals = [1.1, 1.2, 1.3, 1.4]
    specresp = np.asarray(svals)
    adata = create_arf(egrid[:-1], egrid[1:], specresp,
                       exposure=exposure)

    constant = 2.3
    mdl = Const1D('flat')
    mdl.c0 = constant

    channels = np.arange(1, 5, dtype=np.int16)
    counts = np.asarray([10, 5, 12, 7], dtype=np.int16)
    pha = DataPHA('test-pha', channel=channels, counts=counts,
                  exposure=exposure)
    pha.set_arf(adata)

    # force energy units (only needed if ignore is set)
    pha.set_analysis('energy')

    if ignore is not None:
        de = estep * 0.9
        e0 = egrid[ignore]
        pha.notice(lo=e0, hi=e0 + de, ignore=True)

        # The asserts are intended to help people reading this
        # code rather than being a useful check that the code
        # is working.
        mask = [True, True, True, True]
        mask[ignore] = False
        assert (pha.mask == mask).all()

    wrapped = ARFModelPHA(adata, pha, mdl)

    # The model is evaluated on the ARF grid, not whatever
    # is sent in. It is also integrated across the bins,
    # which is why there is a multiplication by the
    # grid width (for this constant model).
    #
    # Note that the filter doesn't change the grid.
    #
    de = egrid[1:] - egrid[:-1]
    expected = constant * np.asarray(svals) * de
    out = wrapped([4, 5])
    assert_allclose(out, expected)
Example #38
def test_rspmodelpha_delta_call_wave():
    """What happens calling a rsp with a pha (RMF is a delta fn)? Wavelength.

    Unlike the energy case no bins are ignored, as this code path
    has already been tested.
    """

    exposure = 200.1
    estep = 0.025
    egrid = np.arange(0.1, 0.8, estep)
    elo = egrid[:-1]
    ehi = egrid[1:]
    specresp = 2.4 * np.ones(elo.size, dtype=np.float32)
    specresp[2:5] = 0.0
    specresp[16:19] = 3.2
    adata = create_arf(elo, ehi, specresp, exposure=exposure)
    rdata = create_delta_rmf(elo, ehi, e_min=elo, e_max=ehi)
    nchans = elo.size

    constant = 2.3
    mdl = Const1D('flat')
    mdl.c0 = constant

    channels = np.arange(1, nchans + 1, dtype=np.int16)
    counts = np.ones(nchans, dtype=np.int16)
    pha = DataPHA('test-pha', channel=channels, counts=counts,
                  exposure=exposure)
    pha.set_rmf(rdata)

    pha.set_analysis('wave')

    wrapped = RSPModelPHA(adata, rdata, pha, mdl)

    # Note that this is a Sherpa model, so its normalization is
    # per unit x axis; when integrated here the bins are in
    # Angstroms, so the bin width to multiply by is in
    # Angstroms, not keV.
    #
    dl = (DataPHA._hc / elo) - (DataPHA._hc / ehi)
    expected = constant * specresp * dl

    out = wrapped([4, 5])
    assert_allclose(out, expected)
Example #39
def test_rspmodelpha_matrix_call_xspec():
    """Check XSPEC constant is invariant to wavelength/energy setting.

    As XSPEC models internally convert from Angstrom to keV,
    do a simple check here.
    """

    exposure = 200.1
    rdata = create_non_delta_rmf()
    specresp = create_non_delta_specresp()
    adata = create_arf(rdata.energ_lo,
                       rdata.energ_hi,
                       specresp,
                       exposure=exposure)

    constant = 2.3
    mdl = XSconstant('flat')
    mdl.factor = constant

    nchans = rdata.e_min.size
    channels = np.arange(1, nchans + 1, dtype=np.int16)
    counts = np.ones(nchans, dtype=np.int16)
    pha = DataPHA('test-pha', channel=channels, counts=counts,
                  exposure=exposure)

    # The set_arf call isn't necessary, but leave in
    pha.set_arf(adata)
    pha.set_rmf(rdata)

    # The XSPEC models are evaluated on an energy grid, even when
    # the analysis setting is wavelength. Also, unlike the Sherpa
    # Constant model, the XSPEC XSconstant model is defined
    # over the integrated bin, so no correction is needed for the
    # bin width.
    #
    modvals = constant * specresp
    matrix = get_non_delta_matrix()
    expected = np.matmul(modvals, matrix)

    wrapped = RSPModelPHA(adata, rdata, pha, mdl)

    pha.set_analysis('wave')
    out_wl = wrapped([4, 5])
    assert_allclose(out_wl, expected)

    pha.set_analysis('energy')
    out_en = wrapped([4, 5])
    assert_allclose(out_en, expected)
Example #40
def test_arf1d_pha_zero_energy_bin():
    "What happens when the first bin starts at 0, with replacement"

    ethresh = 1.0e-10

    # Note: the two exposures are different to check which is
    #       used (the answer is neither, which seems surprising)
    #
    exposure1 = 0.1
    egrid = np.asarray([0.0, 0.1, 0.2, 0.4, 0.5, 0.7, 0.8])
    elo = egrid[:-1]
    ehi = egrid[1:]
    specresp = np.asarray([10.2, 9.8, 10.0, 12.0, 8.0, 10.0])

    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        adata = create_arf(elo, ehi, specresp, exposure=exposure1,
                           ethresh=ethresh)

    validate_zero_replacement(ws, 'ARF', 'user-arf', ethresh)

    arf = ARF1D(adata)

    exposure2 = 2.4
    channels = np.arange(1, 7, dtype=np.int16)
    counts = np.ones(6, dtype=np.int16)
    pha = DataPHA('test-pha', channel=channels, counts=counts,
                  exposure=exposure2)
    pha.set_arf(adata)

    pha.set_analysis('energy')

    mdl = MyPowLaw1D()
    tmdl = PowLaw1D()

    wrapped = ARFModelPHA(arf, pha, mdl)

    out = wrapped([0.1, 0.2])
    elo[0] = ethresh
    expected = specresp * tmdl(elo, ehi)

    assert_allclose(out, expected)
    assert not np.isnan(out[0])
Example #41
def test_rspmodelpha_delta_call_channel():
    """What happens calling a rsp with a pha (RMF is a delta fn)? Channels.

    I am not convinced I understand the bin width calculation here,
    as it doesn't seem to match the wavelength case.
    """

    exposure = 200.1
    estep = 0.025
    egrid = np.arange(0.1, 0.8, estep)
    elo = egrid[:-1]
    ehi = egrid[1:]
    specresp = 2.4 * np.ones(elo.size, dtype=np.float32)
    specresp[2:5] = 0.0
    specresp[16:19] = 3.2
    adata = create_arf(elo, ehi, specresp, exposure=exposure)
    rdata = create_delta_rmf(elo, ehi, e_min=elo, e_max=ehi)
    nchans = elo.size

    constant = 2.3
    mdl = Const1D('flat')
    mdl.c0 = constant

    channels = np.arange(1, nchans + 1, dtype=np.int16)
    counts = np.ones(nchans, dtype=np.int16)
    pha = DataPHA('test-pha', channel=channels, counts=counts,
                  exposure=exposure)
    pha.set_rmf(rdata)

    pha.set_analysis('channel')

    wrapped = RSPModelPHA(adata, rdata, pha, mdl)

    # Since this is channels you might expect the bin width to be 1,
    # but it is actually still dE.
    #
    de = ehi - elo
    expected = constant * specresp * de

    out = wrapped([4, 5])
    assert_allclose(out, expected)
Example #42
def test_rspmodelpha_delta_call(ignore):
    """What happens calling a rsp with a pha (RMF is a delta fn)?

    The ignore value gives the channel to ignore (counting from 0).
    """

    exposure = 200.1
    estep = 0.025
    egrid = np.arange(0.1, 0.8, estep)
    elo = egrid[:-1]
    ehi = egrid[1:]
    specresp = 2.4 * np.ones(elo.size, dtype=np.float32)
    specresp[2:5] = 0.0
    specresp[16:19] = 3.2
    adata = create_arf(elo, ehi, specresp, exposure=exposure)
    rdata = create_delta_rmf(elo, ehi, e_min=elo, e_max=ehi)
    nchans = elo.size

    constant = 2.3
    mdl = Const1D('flat')
    mdl.c0 = constant

    channels = np.arange(1, nchans + 1, dtype=np.int16)
    counts = np.ones(nchans, dtype=np.int16)
    pha = DataPHA('test-pha', channel=channels, counts=counts,
                  exposure=exposure)
    pha.set_rmf(rdata)

    # force energy units (only needed if ignore is set)
    pha.set_analysis('energy')

    if ignore is not None:
        de = estep * 0.9
        e0 = egrid[ignore]
        pha.notice(lo=e0, hi=e0 + de, ignore=True)

        # The asserts are intended to help people reading this
        # code rather than being a useful check that the code
        # is working.
        mask = [True] * nchans
        mask[ignore] = False
        assert (pha.mask == mask).all()

    wrapped = RSPModelPHA(adata, rdata, pha, mdl)

    # The model is evaluated on the RMF grid, not whatever
    # is sent in. It is also integrated across the bins,
    # which is why there is a multiplication by the
    # grid width (for this constant model).
    #
    # Note that the filter doesn't change the grid.
    #
    de = egrid[1:] - egrid[:-1]
    expected = constant * specresp * de
    out = wrapped([4, 5])
    assert_allclose(out, expected)
Example #43
def test_rmfmodelpha_matrix_mismatch(analysis):
    """Check that an error is raised if there's a mismatch.

    """

    exposure = 200.1
    rdata = create_non_delta_rmf()

    # nchans should be rdata.e_min.size for the sizes to match
    nchans = rdata.energ_lo.size
    channels = np.arange(1, nchans + 1, dtype=np.int16)
    counts = np.ones(nchans, dtype=np.int16)
    pha = DataPHA('test-pha', channel=channels, counts=counts,
                  exposure=exposure)
    pha.set_rmf(rdata)

    with pytest.raises(DataErr) as exc:
        pha.set_analysis(analysis)

    emsg = "RMF 'non-delta-rmf' is incompatible with PHA dataset 'test-pha'"
    assert str(exc.value) == emsg
Example #44
def test_rspmodelpha_matrix_call(ignore):
    """What happens calling a rsp with a pha (RMF is a matrix)?

    The ignore value gives the channel to ignore (counting from 0).
    """

    exposure = 200.1
    rdata = create_non_delta_rmf()
    specresp = create_non_delta_specresp()
    elo = rdata.energ_lo
    ehi = rdata.energ_hi

    adata = create_arf(elo, ehi, specresp, exposure=exposure)
    nchans = rdata.e_min.size

    constant = 22.3
    slope = -1.2
    mdl = Polynom1D('sloped')
    mdl.c0 = constant
    mdl.c1 = slope

    channels = np.arange(1, nchans + 1, dtype=np.int16)
    counts = np.ones(nchans, dtype=np.int16)
    pha = DataPHA('test-pha', channel=channels, counts=counts,
                  exposure=exposure)
    pha.set_rmf(rdata)

    # force energy units (only needed if ignore is set)
    pha.set_analysis('energy')

    if ignore is not None:
        e0 = rdata.e_min[ignore]
        e1 = rdata.e_max[ignore]
        de = 0.9 * (e1 - e0)
        pha.notice(lo=e0, hi=e0 + de, ignore=True)

        # The asserts are intended to help people reading this
        # code rather than being a useful check that the code
        # is working.
        mask = [True] * nchans
        mask[ignore] = False
        assert (pha.mask == mask).all()

    wrapped = RSPModelPHA(adata, rdata, pha, mdl)

    # The filter does not change the grid
    modvals = specresp * mdl(rdata.energ_lo, rdata.energ_hi)
    matrix = get_non_delta_matrix()
    expected = np.matmul(modvals, matrix)

    out = wrapped([4, 5])
    assert_allclose(out, expected)
Example #45
def test_rsp1d_matrix_pha_zero_energy_bin():
    """What happens when the first bin starts at 0, with replacement.

    Unlike test_rsp1d_delta_pha_zero_energy_bin this directly
    calls Response1D to create the model.
    """

    ethresh = 1.0e-5

    rdata = create_non_delta_rmf()

    # hack the first bin to have 0 energy
    rdata.energ_lo[0] = 0.0

    # PHA and ARF have different exposure times
    exposure_arf = 0.1
    exposure_pha = 2.4

    specresp = create_non_delta_specresp()

    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        adata = create_arf(rdata.energ_lo,
                           rdata.energ_hi,
                           specresp,
                           exposure=exposure_arf,
                           ethresh=ethresh)

    validate_zero_replacement(ws, 'ARF', 'user-arf', ethresh)

    nchans = rdata.e_min.size
    channels = np.arange(1, nchans + 1, dtype=np.int16)
    counts = np.ones(nchans, dtype=np.int16)
    pha = DataPHA('test-pha', channel=channels, counts=counts,
                  exposure=exposure_pha)
    pha.set_rmf(rdata)
    pha.set_arf(adata)

    pha.set_analysis('energy')

    mdl = MyPowLaw1D()

    rsp = Response1D(pha)
    wrapped = rsp(mdl)

    # Evaluate the statistic / model. The value was calculated using
    # commit a65fb94004664eab219cc09652172ffe1dad80a6 on a linux
    # system (Ubuntu 17.04).
    #
    f = Fit(pha, wrapped)
    ans = f.calc_stat()
    assert ans == pytest.approx(37971.8716151947)
Example #46
def test_rmfmodelpha_matrix_call(ignore):
    """What happens calling an rmf (matrix) with a pha?

    The ignore value gives the channel to ignore (counting from 0).
    """

    exposure = 200.1
    rdata = create_non_delta_rmf()
    elo = rdata.e_min
    ehi = rdata.e_max
    nchans = elo.size

    constant = 12.2
    slope = 0.01
    mdl = Polynom1D('not-flat')
    mdl.c0 = constant
    mdl.c1 = slope

    channels = np.arange(1, nchans + 1, dtype=np.int16)
    counts = np.ones(nchans, dtype=np.int16)
    pha = DataPHA('test-pha', channel=channels, counts=counts,
                  exposure=exposure)
    pha.set_rmf(rdata)

    # force energy units (only needed if ignore is set)
    pha.set_analysis('energy')

    if ignore is not None:
        e0 = elo[ignore]
        e1 = ehi[ignore]
        de = 0.9 * (e1 - e0)
        pha.notice(lo=e0, hi=e0 + de, ignore=True)

        # The asserts are intended to help people reading this
        # code rather than being a useful check that the code
        # is working.
        mask = [True] * nchans
        mask[ignore] = False
        assert (pha.mask == mask).all()

    wrapped = RMFModelPHA(rdata, pha, mdl)

    # Note that the evaluation ignores any filter we've applied,
    # and that the exposure time is not used.
    #
    modvals = mdl(rdata.energ_lo, rdata.energ_hi)
    matrix = get_non_delta_matrix()
    expected = np.matmul(modvals, matrix)

    out = wrapped([4, 5])
    assert_allclose(out, expected)
Example #47
def get_data(mu_sig, n_on, n_off, alpha):
    from sherpa.astro.data import DataPHA
    from sherpa.models import Const1D

    model = Const1D()
    model.c0 = mu_sig
    data = DataPHA(
        counts=np.atleast_1d(n_on),
        name="dummy",
        channel=np.atleast_1d(1),
        backscal=1,
        exposure=1,
    )
    background = DataPHA(
        counts=np.atleast_1d(n_off),
        name="dummy background",
        channel=np.atleast_1d(1),
        backscal=np.atleast_1d(1. / alpha),
        exposure=1,
    )

    data.set_background(background, 1)

    return model, data
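
A sketch of how the returned pair can be fed to Sherpa's WStat, following the same pattern as Example #15; the numeric values are illustrative:

import sherpa.stats as ss

model, data = get_data(mu_sig=0.5, n_on=10, n_off=20, alpha=0.1)
stat_value, fvec = ss.WStat().calc_stat(model=model, data=data)
print(stat_value, fvec)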
Example #48
def test_rmf1d_delta_pha_zero_energy_bin():
    "What happens when the first bin starts at 0, with replacement"

    ethresh = 2e-7

    egrid = np.asarray([0.0, 0.1, 0.2, 0.4, 0.5, 0.7, 0.8])
    elo = egrid[:-1]
    ehi = egrid[1:]

    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        rdata = create_delta_rmf(elo, ehi, ethresh=ethresh)

    validate_zero_replacement(ws, 'RMF', 'delta-rmf', ethresh)

    exposure = 2.4
    channels = np.arange(1, 7, dtype=np.int16)
    counts = np.ones(6, dtype=np.int16)
    pha = DataPHA('test-pha', channel=channels, counts=counts,
                  exposure=exposure)
    pha.set_rmf(rdata)

    pha.set_analysis('energy')

    mdl = MyPowLaw1D()
    tmdl = PowLaw1D()

    wrapped = RMFModelPHA(rdata, pha, mdl)

    out = wrapped([0.1, 0.2])

    elo[0] = ethresh
    expected = tmdl(elo, ehi)

    assert_allclose(out, expected)
    assert not np.isnan(out[0])
Example #49
def test_rsp1d_delta_pha_zero_energy_bin():
    "What happens when the first bin starts at 0, with replacement"

    ethresh = 2.0e-7

    # PHA and ARF have different exposure times
    exposure1 = 0.1
    exposure2 = 2.4
    egrid = np.asarray([0.0, 0.1, 0.2, 0.4, 0.5, 0.7, 0.8])
    elo = egrid[:-1]
    ehi = egrid[1:]
    specresp = np.asarray([10.2, 9.8, 10.0, 12.0, 8.0, 10.0])

    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        adata = create_arf(elo, ehi, specresp, exposure=exposure1,
                           ethresh=ethresh)

    validate_zero_replacement(ws, 'ARF', 'user-arf', ethresh)

    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        rdata = create_delta_rmf(elo, ehi, ethresh=ethresh)

    validate_zero_replacement(ws, 'RMF', 'delta-rmf', ethresh)

    channels = np.arange(1, 7, dtype=np.int16)
    counts = np.ones(6, dtype=np.int16)
    pha = DataPHA('test-pha', channel=channels, counts=counts,
                  exposure=exposure2)
    pha.set_rmf(rdata)
    pha.set_arf(adata)

    pha.set_analysis('energy')

    mdl = MyPowLaw1D()
    tmdl = PowLaw1D()

    wrapped = RSPModelPHA(adata, rdata, pha, mdl)

    out = wrapped([0.1, 0.2])

    elo[0] = ethresh
    expected = specresp * tmdl(elo, ehi)

    assert_allclose(out, expected)
    assert not np.isnan(out[0])
Example #50
def test_rmfmodelpha_delta_no_ebounds(analysis):
    """What happens calling an rmf with a pha and no EBOUNDS is set

    Ensure we can't filter on energy or wavelength since there's no
    EBOUNDS information. This behavior was seen when writing
    test_rmfmodelpha_call, so a test was written for it.
    """

    estep = 0.01
    egrid = np.arange(0.01, 0.06, estep)
    rdata = create_delta_rmf(egrid[:-1], egrid[1:])

    channels = np.arange(1, 5, dtype=np.int16)
    counts = np.asarray([10, 5, 12, 7], dtype=np.int16)
    pha = DataPHA('test-pha', channel=channels, counts=counts)
    pha.set_rmf(rdata)

    pha.set_analysis(analysis)
    with pytest.raises(DataErr) as exc:
        pha.notice(0.025, 0.045, ignore=False)

    assert str(exc.value) == 'RMF does not specify energy bins'
Example #51
def read_pha(arg, use_errors=False, use_background=False):
    """Create a DataPHA object.

    Parameters
    ----------
    arg
        The name of the file or a representation of the file
        (the type depends on the I/O backend) containing the
        PHA data.
    use_errors : bool, optional
        If the PHA file contains statistical error values for the
        count (or count rate) column, should it be read in. This
        defaults to ``False``.
    use_background : bool, optional
        Should the background PHA data (and optional responses) also
        be read in and associated with the data set?

    Returns
    -------
    data : sherpa.astro.data.DataPHA

    """
    datasets, filename = backend.get_pha_data(arg,
                                              use_background=use_background)
    phasets = []
    output_once = True
    for data in datasets:
        if not use_errors:
            if data['staterror'] is not None or data['syserror'] is not None:
                if data['staterror'] is None:
                    msg = 'systematic'
                elif data['syserror'] is None:
                    msg = 'statistical'
                    if output_once:
                        wmsg = "systematic errors were not found in " + \
                               "file '{}'".format(filename)
                        warning(wmsg)
                else:
                    msg = 'statistical and systematic'
                if output_once:
                    imsg = msg + " errors were found in file " + \
                           "'{}' \nbut not used; ".format(filename) + \
                           "to use them, re-read with use_errors=True"
                    info(imsg)
                data['staterror'] = None
                data['syserror'] = None

        dname = os.path.dirname(filename)
        albl = 'ARF'
        rlbl = 'RMF'
        if use_background:
            albl = albl + ' (background)'
            rlbl = rlbl + ' (background)'

        arf = _read_ancillary(data, 'arffile', albl, dname, read_arf,
                              output_once)
        rmf = _read_ancillary(data, 'rmffile', rlbl, dname, read_rmf,
                              output_once)

        backgrounds = []

        if data['backfile'] and data['backfile'].lower() != 'none':
            try:
                if os.path.dirname(data['backfile']) == '':
                    data['backfile'] = os.path.join(os.path.dirname(filename),
                                                    data['backfile'])

                bkg_datasets = []
                # Do not read backgrounds of backgrounds
                if not use_background:
                    bkg_datasets = read_pha(data['backfile'], use_errors, True)

                    if output_once:
                        info('read background file {}'.format(
                            data['backfile']))

                if numpy.iterable(bkg_datasets):
                    for bkg_dataset in bkg_datasets:
                        if bkg_dataset.get_response() == (None, None) and \
                           rmf is not None:
                            bkg_dataset.set_response(arf, rmf)
                        backgrounds.append(bkg_dataset)
                else:
                    if bkg_datasets.get_response() == (None, None) and \
                       rmf is not None:
                        bkg_datasets.set_response(arf, rmf)
                    backgrounds.append(bkg_datasets)

            except:
                if output_once:
                    warning(str(sys.exc_info()[1]))

        for bkg_type, bscal_type in zip(('background_up', 'background_down'),
                                        ('backscup', 'backscdn')):
            if data[bkg_type] is not None:
                b = DataPHA(filename,
                            channel=data['channel'],
                            counts=data[bkg_type],
                            bin_lo=data['bin_lo'],
                            bin_hi=data['bin_hi'],
                            grouping=data['grouping'],
                            quality=data['quality'],
                            exposure=data['exposure'],
                            backscal=data[bscal_type],
                            header=data['header'])
                b.set_response(arf, rmf)
                if output_once:
                    info("read {} into a dataset from file {}".format(
                        bkg_type, filename))
                backgrounds.append(b)

        for k in ['backfile', 'arffile', 'rmffile', 'backscup', 'backscdn',
                  'background_up', 'background_down']:
            data.pop(k, None)

        pha = DataPHA(filename, **data)
        pha.set_response(arf, rmf)
        for id, b in enumerate(backgrounds):
            if b.grouping is None:
                b.grouping = pha.grouping
                b.grouped = (b.grouping is not None)
            if b.quality is None:
                b.quality = pha.quality
            pha.set_background(b, id + 1)

        # set units *after* bkgs have been set
        pha._set_initial_quantity()
        phasets.append(pha)
        output_once = False

    if len(phasets) == 1:
        phasets = phasets[0]

    return phasets
Example #52
0
class test_filter_energy_grid_reversed(SherpaTestCase):

    _notice = numpy.zeros(204, dtype=bool)
    _notice[0:42]=True

    _ignore = numpy.ones(204, dtype=bool)
    _ignore[66:70]=False
    _ignore[0:17]=False
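
    # The bin_lo/bin_hi grid below is stored in descending order (first bin
    # roughly 2.39-3.07, last roughly 0.31-0.33), which is the "reversed"
    # layout this class exercises; the masks above list the channels expected
    # to survive the notice() and ignore() calls in the tests further down.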

    _emin = numpy.array([
        2.39196181,  2.35973215,  2.34076023,  2.30973101,  2.2884388 ,
        2.25861454,  2.22371697,  2.20662117,  2.18140674,  2.14317489,
        2.12185216,  2.09055495,  2.06256914,  2.04509854,  2.02788448,
        2.00133967,  1.97772908,  1.96379483,  1.93868744,  1.91855776,
        1.89444292,  1.87936974,  1.85819471,  1.84568763,  1.82923627,
        1.78920078,  1.77360916,  1.76206875,  1.74499893,  1.73006463,
        1.70084822,  1.6883322 ,  1.67772949,  1.65171933,  1.63476169,
        1.59687376,  1.5745424 ,  1.55736887,  1.54051399,  1.52546024,
        1.50043869,  1.48890531,  1.47329199,  1.46072423,  1.44289041,
        1.43344045,  1.41616774,  1.40441585,  1.3979584 ,  1.38773119,
        1.37138033,  1.35170007,  1.33725214,  1.33249414,  1.31839108,
        1.30797839,  1.29657102,  1.28310275,  1.26550889,  1.25471842,
        1.24513853,  1.23672664,  1.22944438,  1.21509433,  1.21003771,
        1.20401597,  1.19705439,  1.18722582,  0.90194935,  0.89519638,
        0.88912934,  0.88492262,  0.87837797,  0.87366825,  0.8689999 ,
        0.86437255,  0.85693878,  0.84793305,  0.84404182,  0.83580172,
        0.82876647,  0.82395256,  0.81865752,  0.81185687,  0.80004948,
        0.79450154,  0.78852075,  0.77920061,  0.77340651,  0.76626247,
        0.76202762,  0.75783074,  0.75413191,  0.74727529,  0.74321008,
        0.73474538,  0.73166627,  0.72687   ,  0.71785438,  0.71488959,
        0.71068853,  0.70199603,  0.69832331,  0.69387686,  0.68788701,
        0.68354762,  0.67847627,  0.67117327,  0.66512167,  0.66175646,
        0.65620857,  0.6518243 ,  0.64605182,  0.64142239,  0.63754696,
        0.63128632,  0.62478495,  0.62006336,  0.61440694,  0.60915887,
        0.60591549,  0.60078359,  0.5938406 ,  0.59103745,  0.58488411,
        0.58124125,  0.57883304,  0.57406437,  0.57023615,  0.56442606,
        0.56041539,  0.55701393,  0.55392498,  0.55030966,  0.54346251,
        0.53728294,  0.53515989,  0.5291304 ,  0.52448714,  0.51990861,
        0.51589233,  0.50996011,  0.50509953,  0.49889025,  0.49512967,
        0.49003205,  0.48888513,  0.48524383,  0.48164544,  0.47720695,
        0.47283325,  0.46916556,  0.46660379,  0.46280268,  0.45925769,
        0.45514211,  0.45290345,  0.44987884,  0.44589564,  0.44333643,
        0.44099477,  0.43790293,  0.43446559,  0.43088335,  0.42605683,
        0.42131537,  0.41826019,  0.41506338,  0.41155648,  0.40895697,
        0.40502119,  0.40400422,  0.40164718,  0.39864835,  0.39584854,
        0.39389083,  0.39130434,  0.38890362,  0.38526753,  0.38292497,
        0.38075879,  0.37891743,  0.37648395,  0.37557775,  0.37347662,
        0.37154216,  0.36742872,  0.3641032 ,  0.36167556,  0.35983625,
        0.35634032,  0.35248783,  0.35085678,  0.34843227,  0.34669766,
        0.34418666,  0.33912122,  0.33720407,  0.33505177,  0.33279634,
        0.33081138,  0.32847831,  0.32592943,  0.3111549 ], float)

    _emax = numpy.array([
        3.06803656,  2.39196181,  2.35973215,  2.34076023,  2.30973101,
        2.2884388 ,  2.25861454,  2.22371697,  2.20662117,  2.18140674,
        2.14317489,  2.12185216,  2.09055495,  2.06256914,  2.04509854,
        2.02788448,  2.00133967,  1.97772908,  1.96379483,  1.93868744,
        1.91855776,  1.89444292,  1.87936974,  1.85819471,  1.84568763,
        1.82923627,  1.78920078,  1.77360916,  1.76206875,  1.74499893,
        1.73006463,  1.70084822,  1.6883322 ,  1.67772949,  1.65171933,
        1.63476169,  1.59687376,  1.5745424 ,  1.55736887,  1.54051399,
        1.52546024,  1.50043869,  1.48890531,  1.47329199,  1.46072423,
        1.44289041,  1.43344045,  1.41616774,  1.40441585,  1.3979584 ,
        1.38773119,  1.37138033,  1.35170007,  1.33725214,  1.33249414,
        1.31839108,  1.30797839,  1.29657102,  1.28310275,  1.26550889,
        1.25471842,  1.24513853,  1.23672664,  1.22944438,  1.21509433,
        1.21003771,  1.20401597,  1.19705439,  1.18722582,  0.90194935,
        0.89519638,  0.88912934,  0.88492262,  0.87837797,  0.87366825,
        0.8689999 ,  0.86437255,  0.85693878,  0.84793305,  0.84404182,
        0.83580172,  0.82876647,  0.82395256,  0.81865752,  0.81185687,
        0.80004948,  0.79450154,  0.78852075,  0.77920061,  0.77340651,
        0.76626247,  0.76202762,  0.75783074,  0.75413191,  0.74727529,
        0.74321008,  0.73474538,  0.73166627,  0.72687   ,  0.71785438,
        0.71488959,  0.71068853,  0.70199603,  0.69832331,  0.69387686,
        0.68788701,  0.68354762,  0.67847627,  0.67117327,  0.66512167,
        0.66175646,  0.65620857,  0.6518243 ,  0.64605182,  0.64142239,
        0.63754696,  0.63128632,  0.62478495,  0.62006336,  0.61440694,
        0.60915887,  0.60591549,  0.60078359,  0.5938406 ,  0.59103745,
        0.58488411,  0.58124125,  0.57883304,  0.57406437,  0.57023615,
        0.56442606,  0.56041539,  0.55701393,  0.55392498,  0.55030966,
        0.54346251,  0.53728294,  0.53515989,  0.5291304 ,  0.52448714,
        0.51990861,  0.51589233,  0.50996011,  0.50509953,  0.49889025,
        0.49512967,  0.49003205,  0.48888513,  0.48524383,  0.48164544,
        0.47720695,  0.47283325,  0.46916556,  0.46660379,  0.46280268,
        0.45925769,  0.45514211,  0.45290345,  0.44987884,  0.44589564,
        0.44333643,  0.44099477,  0.43790293,  0.43446559,  0.43088335,
        0.42605683,  0.42131537,  0.41826019,  0.41506338,  0.41155648,
        0.40895697,  0.40502119,  0.40400422,  0.40164718,  0.39864835,
        0.39584854,  0.39389083,  0.39130434,  0.38890362,  0.38526753,
        0.38292497,  0.38075879,  0.37891743,  0.37648395,  0.37557775,
        0.37347662,  0.37154216,  0.36742872,  0.3641032 ,  0.36167556,
        0.35983625,  0.35634032,  0.35248783,  0.35085678,  0.34843227,
        0.34669766,  0.34418666,  0.33912122,  0.33720407,  0.33505177,
        0.33279634,  0.33081138,  0.32847831,  0.32592943], float)


    def setUp(self):
        #self.old_level = logger.getEffectiveLevel()
        #logger.setLevel(logging.ERROR)
        self.pha = DataPHA('', numpy.arange(204, dtype=float)+1.,
                           numpy.zeros(204),
                           bin_lo = self._emin, bin_hi = self._emax )
        self.pha.units="energy"

    def tearDown(self):
        #logger.setLevel(self.old_level)
        pass

    def test_notice(self):
        #clear mask
        self.pha.notice()
        self.pha.notice(4., 8.3)
        assert (self._notice==numpy.asarray(self.pha.mask)).all()


    def test_ignore(self):
        #clear mask
        self.pha.notice()
        self.pha.ignore(10.3, 13.8)
        self.pha.ignore(4.6, 6.2)
        assert (self._ignore==numpy.asarray(self.pha.mask)).all()
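
A minimal sketch of how notice/ignore build the mask that the assertions above
compare against, using a simple ascending grid rather than the reversed one
(the bin edges and filter limits below are illustrative, not taken from the
test data):

import numpy as np
from sherpa.astro.data import DataPHA

edges = np.linspace(0.1, 1.1, 11)               # 10 contiguous 0.1 keV bins
pha = DataPHA('sketch', np.arange(1, 11, dtype=float), np.zeros(10),
              bin_lo=edges[:-1], bin_hi=edges[1:])
pha.units = "energy"

pha.notice()                 # clear any existing filter
pha.notice(0.32, 0.67)       # keep the bins overlapping 0.32-0.67 keV
print(np.asarray(pha.mask))

pha.ignore(0.43, 0.48)       # carve a gap back out of the noticed range
print(np.asarray(pha.mask))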
Example #54
0
def test_rsp_normf_call(arfexp, phaexp):
    """Check out Response1D with no RMF.

    analysis is the analysis setting
    arfexp determines whether the arf has an exposure time
    phaexp determines whether the PHA has an exposure time

    This only uses the channel setting
    """

    # Choose different exposure times for the ARF and PHA to see which
    # one gets picked up.
    #
    if arfexp:
        arf_exposure = 200.1
    else:
        arf_exposure = None

    if phaexp:
        pha_exposure = 220.9
    else:
        pha_exposure = None

    if phaexp:
        exposure = pha_exposure
        mdl_label = '({} * flat)'.format(exposure)
    elif arfexp:
        exposure = arf_exposure
        mdl_label = '({} * flat)'.format(exposure)
    else:
        exposure = 1.0
        mdl_label = 'flat'

    # rdata is only used to define the grids
    rdata = create_non_delta_rmf()
    specresp = create_non_delta_specresp()
    adata = create_arf(rdata.energ_lo,
                       rdata.energ_hi,
                       specresp,
                       exposure=arf_exposure)

    constant = 2.3
    mdl = Const1D('flat')
    mdl.c0 = constant

    # Turn off integration on this model, so that it is not integrated
    # across the bin width.
    #
    mdl.integrate = False
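    # With integrate=False the constant evaluates to c0 in every bin, so the
    # prediction checked below is just exposure * c0 * specresp per channel.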

    nchans = rdata.e_min.size
    channels = np.arange(1, nchans + 1, dtype=np.int16)
    counts = np.ones(nchans, dtype=np.int16)
    pha = DataPHA('test-pha', channel=channels, counts=counts,
                  exposure=pha_exposure)

    pha.set_arf(adata)

    rsp = Response1D(pha)
    wrapped = rsp(mdl)

    assert isinstance(wrapped, ArithmeticModel)

    expname = 'apply_arf({})'.format(mdl_label)
    assert wrapped.name == expname

    expected = exposure * constant * specresp

    pha.set_analysis('channel')
    out = wrapped([4, 5])
    assert_allclose(out, expected)
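
The same ARF-only wrapping can be sketched without the test helpers
(create_non_delta_rmf and friends); the grid, names, and exposure values below
are illustrative:

import numpy as np
from sherpa.astro.data import DataARF, DataPHA
from sherpa.astro.instrument import Response1D
from sherpa.models.basic import Const1D

# Three channels with a flat 100 cm^2 effective area per bin.
energ_lo = np.array([0.1, 0.2, 0.3])
energ_hi = np.array([0.2, 0.3, 0.4])
arf = DataARF('sketch-arf', energ_lo, energ_hi,
              np.full(3, 100.0), exposure=200.1)

pha = DataPHA('sketch-pha', np.arange(1, 4, dtype=np.int16),
              np.ones(3, dtype=np.int16), exposure=220.9)
pha.set_arf(arf)
pha.set_analysis('channel')

mdl = Const1D('flat')
mdl.c0 = 2.3
mdl.integrate = False

wrapped = Response1D(pha)(mdl)
# With no RMF the wrapper is apply_arf applied to (exposure * flat); the PHA
# exposure takes precedence over the ARF one, so each channel predicts
# 220.9 * 2.3 * 100 counts.
print(wrapped.name)
print(wrapped([1, 2, 3]))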
Example #55
0
def read_pha(arg, use_errors=False, use_background=False):
    """
    read_pha( filename [, use_errors=False [, use_background=False]] )

    read_pha( PHACrate [, use_errors=False [, use_background=False]] )
    """
    datasets, filename = backend.get_pha_data(arg, use_background=use_background)
    phasets = []
    output_once = True
    for data in datasets:
        if not use_errors:
            if data["staterror"] is not None or data["syserror"] is not None:
                if data["staterror"] is None:
                    msg = "systematic"
                elif data["syserror"] is None:
                    msg = "statistical"
                    if output_once:
                        wmsg = "systematic errors were not found in " + "file '{}'".format(filename)
                        warning(wmsg)
                else:
                    msg = "statistical and systematic"
                if output_once:
                    imsg = (
                        msg
                        + " errors were found in file "
                        + "'{}' \nbut not used; ".format(filename)
                        + "to use them, re-read with use_errors=True"
                    )
                    info(imsg)
                data["staterror"] = None
                data["syserror"] = None

        dname = os.path.dirname(filename)
        albl = "ARF"
        rlbl = "RMF"
        if use_background:
            albl = albl + " (background)"
            rlbl = rlbl + " (background)"

        arf = _read_ancillary(data, "arffile", albl, dname, read_arf, output_once)
        rmf = _read_ancillary(data, "rmffile", rlbl, dname, read_rmf, output_once)

        backgrounds = []

        if data["backfile"] and data["backfile"].lower() != "none":
            try:
                if os.path.dirname(data["backfile"]) == "":
                    data["backfile"] = os.path.join(os.path.dirname(filename), data["backfile"])

                bkg_datasets = []
                # Do not read backgrounds of backgrounds
                if not use_background:
                    bkg_datasets = read_pha(data["backfile"], use_errors, True)

                    if output_once:
                        info("read background file {}".format(data["backfile"]))

                if numpy.iterable(bkg_datasets):
                    for bkg_dataset in bkg_datasets:
                        if bkg_dataset.get_response() == (None, None) and rmf is not None:
                            bkg_dataset.set_response(arf, rmf)
                        backgrounds.append(bkg_dataset)
                else:
                    if bkg_datasets.get_response() == (None, None) and rmf is not None:
                        bkg_datasets.set_response(arf, rmf)
                    backgrounds.append(bkg_datasets)

            except Exception as exc:
                if output_once:
                    warning(str(exc))

        for bkg_type, bscal_type in zip(("background_up", "background_down"), ("backscup", "backscdn")):
            if data[bkg_type] is not None:
                b = DataPHA(
                    filename,
                    channel=data["channel"],
                    counts=data[bkg_type],
                    bin_lo=data["bin_lo"],
                    bin_hi=data["bin_hi"],
                    grouping=data["grouping"],
                    quality=data["quality"],
                    exposure=data["exposure"],
                    backscal=data[bscal_type],
                    header=data["header"],
                )
                b.set_response(arf, rmf)
                if output_once:
                    info("read {} into a dataset from file {}".format(bkg_type, filename))
                backgrounds.append(b)

        for k in ["backfile", "arffile", "rmffile", "backscup", "backscdn", "background_up", "background_down"]:
            data.pop(k, None)

        pha = DataPHA(filename, **data)
        pha.set_response(arf, rmf)
        for id, b in enumerate(backgrounds):
            if b.grouping is None:
                b.grouping = pha.grouping
                b.grouped = b.grouping is not None
            if b.quality is None:
                b.quality = pha.quality
            pha.set_background(b, id + 1)

        # set units *after* bkgs have been set
        pha._set_initial_quantity()
        phasets.append(pha)
        output_once = False

    if len(phasets) == 1:
        phasets = phasets[0]

    return phasets
Example #56
0
def setUp(self):
    self.pha = DataPHA('', np.arange(204, dtype=float) + 1.,
                       np.zeros(204),
                       bin_lo=self._emin,
                       bin_hi=self._emax)
    self.pha.units = "energy"