Example #1
def test_dataspace1d_datapha(clean_astro_ui):
    """Explicitly test dataspace1d for DataPHA"""

    assert ui.list_data_ids() == []

    # Note the grid is ignored, other than the number of bins
    ui.dataspace1d(20, 30, step=2.5, id='x', dstype=ui.DataPHA)

    assert ui.list_data_ids() == ['x']
    assert ui.get_data('x').name == 'dataspace1d'

    grid = ui.get_indep('x')
    assert len(grid) == 1

    expected = numpy.asarray([1, 2, 3, 4, 5])
    assert grid[0] == pytest.approx(expected)

    y = ui.get_dep('x')
    assert y == pytest.approx(numpy.zeros(5))

    assert ui.get_exposure('x') is None
    assert ui.get_grouping('x') is None
    assert ui.get_quality('x') is None

    assert ui.get_data('x').subtracted is False

    with pytest.raises(IdentifierErr):
        ui.get_bkg('x')
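
A minimal interactive sketch (not part of the test above) of the call being exercised, assuming only that sherpa.astro.ui is importable; the id 'empty' is arbitrary:

from sherpa.astro import ui

# For DataPHA the grid values only size the dataset: as asserted above,
# the result is channels 1..5 with zero counts.
ui.dataspace1d(20, 30, step=2.5, id='empty', dstype=ui.DataPHA)
(channels,) = ui.get_indep('empty')   # channel numbers 1 .. 5
counts = ui.get_dep('empty')          # all zeros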
Example #2
def test_dataspace1d_datapha_bkg(clean_astro_ui):
    """Explicitly test dataspace1d for DataPHA (background)"""

    # list_bkg_ids will error out until the dataset exists
    assert ui.list_data_ids() == []

    # We don't use the grid range or step size since numbins has been
    # given.
    ui.dataspace1d(20, 30, step=2.5, numbins=10, id='x', dstype=ui.DataPHA)

    assert ui.list_data_ids() == ['x']
    assert ui.list_bkg_ids('x') == []

    ui.dataspace1d(20,
                   30,
                   step=2.5,
                   numbins=10,
                   id='x',
                   bkg_id=2,
                   dstype=ui.DataPHA)

    assert ui.list_data_ids() == ['x']
    assert ui.list_bkg_ids('x') == [2]

    assert ui.get_data('x').name == 'dataspace1d'

    # I've explicitly not chosen the default background identifier
    with pytest.raises(IdentifierErr):
        ui.get_bkg('x')

    assert ui.get_bkg('x', 2).name == 'bkg_dataspace1d'

    grid = ui.get_indep('x', bkg_id=2)
    assert len(grid) == 1

    expected = numpy.asarray([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
    assert grid[0] == pytest.approx(expected)

    y = ui.get_dep('x', bkg_id=2)
    assert y == pytest.approx(numpy.zeros(10))

    assert ui.get_exposure('x', bkg_id=2) is None
    assert ui.get_grouping('x', bkg_id=2) is None
    assert ui.get_quality('x', bkg_id=2) is None

    assert ui.get_bkg('x', bkg_id=2).subtracted is False

    # check we can subtract the dataset; as the data is all zeros
    # we don't bother checking the result.
    #
    ui.subtract('x')
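
A similar illustrative sketch (not part of the test) of the bkg_id argument used above to attach an all-zero background to a dataset; the id 'src' is arbitrary:

from sherpa.astro import ui

ui.dataspace1d(20, 30, numbins=10, id='src', dstype=ui.DataPHA)
ui.dataspace1d(20, 30, numbins=10, id='src', bkg_id=2, dstype=ui.DataPHA)
print(ui.list_bkg_ids('src'))   # [2]
ui.subtract('src')              # subtracting the (all-zero) background works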
Example #3
def test_load_grouping(idval, clean_astro_ui, tmp_path):
    """Simple grouping check"""

    x = [1, 2, 3]
    y = [0, 4, 3]
    if idval is None:
        ui.load_arrays(1, x, y, ui.DataPHA)
    else:
        ui.load_arrays(idval, x, y, ui.DataPHA)

    path = tmp_path / 'group.dat'
    path.write_text('1\n-1\n1')

    data = ui.get_data(idval)
    assert data.grouping is None

    if idval is None:
        ui.load_grouping(str(path))
    else:
        ui.load_grouping(idval, str(path))

    assert not data.grouped
    assert data.grouping is not None

    ui.group(idval)

    assert data.grouped

    grps = ui.get_grouping(idval)
    assert grps.shape == (3, )

    # It's not clear what requirements load_grouping makes of the
    # data, so do not enforce a data type. At a minimum there
    # would be potential backend differences.
    #
    # assert grps.dtype == np.int16

    assert grps == pytest.approx([1, -1, 1])

    # Note that get_dep is returning the sum per group / channel width
    # (since we have no instrument response).
    #
    y = ui.get_dep(idval)
    assert y.shape == (2, )
    assert y == pytest.approx([2, 3])
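
For reference, a sketch of the same grouping applied directly rather than via a file (the id 'g' is arbitrary; the arrays match the test above). In an OGIP grouping array a value of 1 starts a new group and -1 continues the previous one, so [1, -1, 1] merges channels 1 and 2 and leaves channel 3 on its own, giving (0 + 4) / 2 = 2 and 3 / 1 = 3:

from sherpa.astro import ui

ui.load_arrays('g', [1, 2, 3], [0, 4, 3], ui.DataPHA)
ui.set_grouping('g', [1, -1, 1])
ui.group('g')
print(ui.get_dep('g'))   # [2., 3.]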
Example #4
    def setUp(self):

        self._old_logger_level = logger.getEffectiveLevel()
        logger.setLevel(logging.ERROR)

        ui.set_stat('wstat')

        infile1 = self.make_path('3c273.pi')
        infile2 = self.make_path('9774.pi')
        ui.load_pha(1, infile1)
        ui.load_pha(2, infile2)

        # Since 9774.pi isn't grouped, group it. Note that this
        # call groups the background to 20 counts per bin. In this
        # case we do not want that; instead we want to use the same
        # grouping scheme as the source file.
        #
        # Note: this is related to issue 227
        #
        ui.group_counts(2, 20)
        ui.set_grouping(2, bkg_id=1, val=ui.get_grouping(2))

        # There's no need to have the same model in both datasets,
        # but assume the same source model can be used, with a
        # normalization difference.
        #
        ui.set_source(1, ui.powlaw1d.pl1)
        ui.set_source(2, ui.const1d.c2 * ui.get_source(1))

        # The powerlaw slope and normalization are
        # intended to be "a reasonable approximation"
        # to the data, just to make sure that any statistic
        # calculation doesn't blow up too much.
        #
        # Note: the model values for 3c273 are slightly different
        #       to the single-PHA-file case, so stat results are
        #       slightly different
        #
        ui.set_par("pl1.gamma", 1.7)
        ui.set_par("pl1.ampl", 1.6e-4)
        ui.set_par("c2.c0", 45)
Example #5
    def load(self, filename):
        self.modelFile = filename
        with open(filename, 'r') as f:
            self.pca = json.load(f)
        for k, v in self.pca.items():
            self.pca[k] = np.array(v)
        nactivedata = self.pca['ihi'] - self.pca['ilo']
        assert self.pca['hi'].shape == (nactivedata, ), \
            'spectrum has different number of channels: %d vs %s' % (
                len(self.pca['hi']), self.ndata)
        assert self.pca['lo'].shape == self.pca['hi'].shape
        assert self.pca['mean'].shape == self.pca['hi'].shape
        assert len(self.pca['components']) == nactivedata
        assert nactivedata <= self.ndata
        ilo = int(self.pca['ilo'])
        ihi = int(self.pca['ihi'])
        self.cts = self.data[ilo:ihi]
        self.ncts = self.cts.sum()  # total background counts available for the deconvolution
        self.x = np.arange(ihi - ilo)
        self.ilo = ilo
        self.ihi = ihi

        # Only notice the channels from ilo + 1 to ihi (channels start at 1, while array indices start at 0).
        # This affects the statistic value used to assess the goodness of fit of the background.
        self.grouping0 = ui.get_grouping()
        ui.set_analysis('channel')
        # The channel filters filter0 and filter_chan are both expressed in native channels;
        # ui.get_filter() would instead return the binned channels if the spectrum were grouped.
        if self.grouping0 is not None:
            ui.ungroup()
        self.filter0 = ui.get_filter()
        ui.ignore()
        # ui.notice(a, b) notices channels a to b, inclusive.
        ui.notice(self.ilo + 1, self.ihi)
        self.filter_chan = ui.get_filter()
        ui.set_analysis('energy')
        if self.grouping0 is not None:
            ui.group()
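
A stripped-down sketch of the channel-filtering step above, assuming a PHA dataset is already loaded and using a hypothetical index range; ilo and ihi are 0-based array indices while ui.notice works with 1-based channel numbers, hence the + 1:

from sherpa.astro import ui

ilo, ihi = 10, 500           # hypothetical index range into the channel array
ui.set_analysis('channel')
ui.ignore()                  # clear any existing filter
ui.notice(ilo + 1, ihi)      # notice channels ilo+1 .. ihi, inclusive
ui.set_analysis('energy')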