Example #1
def test_grouped_pha_all_bad_response_bg_warning(elo, ehi, nbins, bkg_id,
                                                 caplog, make_data_path,
                                                 clean_astro_ui):
    """Check we get the warning messages with background filtering"""

    ui.load_pha('check', make_data_path('3c273.pi'))

    ui.set_quality('check', 2 * numpy.ones(1024, dtype=numpy.int16), bkg_id=1)
    ui.ignore_bad('check', bkg_id=1)

    with caplog.at_level(logging.INFO, logger='sherpa'):
        ui.notice_id('check', elo, ehi, bkg_id=bkg_id)

    # Check whether the filtering has been applied or not.
    nsrc = ui.get_dep('check', filter=True).size
    nback = ui.get_dep('check', filter=True, bkg_id=1).size

    if bkg_id is None:
        assert nsrc == nbins
        assert nback == 0
    else:
        assert nsrc == 46  # ie no filter
        assert nback == 0

    # did we get a warning message from the background?
    assert len(caplog.records) == 1
    name, lvl, msg = caplog.record_tuples[0]
    assert name == 'sherpa.astro.data'
    assert lvl == logging.INFO
    assert msg.startswith('Skipping dataset ')
    assert msg.endswith('/3c273_bg.pi: mask excludes all data')
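
The test above drives Sherpa's quality handling through the UI layer. As a minimal sketch of the same mechanism at the DataPHA level (the toy channel and count values below are assumptions, not the test data):

import numpy as np
from sherpa.astro.data import DataPHA

# A tiny ungrouped PHA dataset (values are illustrative only).
chans = np.arange(1, 5, dtype=np.int16)
counts = np.asarray([10, 12, 9, 11], dtype=np.int16)
pha = DataPHA('toy', chans, counts)

# Flag every channel as bad (quality=2) and exclude it, mirroring the
# set_quality/ignore_bad calls made on the background component above.
pha.quality = 2 * np.ones(4, dtype=np.int16)
pha.ignore_bad()

# Every channel is now masked out, which is why the test expects the
# filtered background to contain no bins (nback == 0).
assert pha.mask.sum() == 0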
Example #2
def test_filter_notice_bad_361(make_data_path, caplog):
    """Test out issue 361: notice then ignore bad.

    Since the data is grouped, the ignore_bad call is expected to
    drop the filter expression, with a warning message.
    """

    stats = setup_model(make_data_path)

    # We don't care about these warnings, so I could just
    # store the number and check that we get an extra one
    # below, but use this as a canary to check when the
    # system changes.
    #
    assert len(caplog.records) == 5

    ui.notice(0.5, 8.0)
    with caplog.at_level(logging.INFO, logger='sherpa'):
        ui.ignore_bad()

    assert len(caplog.records) == 6

    lname, lvl, msg = caplog.record_tuples[5]
    assert lname == 'sherpa.astro.data'
    assert lvl == logging.WARNING
    assert msg == 'filtering grouped data with quality ' + \
        'flags, previous filters deleted'

    s1 = ui.calc_stat()
    assert s1 == pytest.approx(stats['bad'])
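
This test and several of the later examples call a setup_model helper that is not shown. Judging from how its return value is used, a sketch along the following lines would fit; the data file, model, and statistic named here are assumptions, not the actual helper:

from sherpa.astro import ui

def setup_model(make_data_path):
    """Load a grouped PHA dataset, attach a simple model, and record
    reference statistic values for several filters (sketch only).
    """

    ui.clean()
    ui.load_pha(make_data_path('3c273.pi'))   # assumed data file
    ui.set_source(ui.powlaw1d.pl)             # assumed model
    ui.set_stat('chi2datavar')                # assumed statistic

    stats = {'all': ui.calc_stat()}

    ui.ignore_bad()
    stats['bad'] = ui.calc_stat()

    ui.notice(None, None)
    ui.notice(0.5, 8.0)
    stats['0.5-8.0'] = ui.calc_stat()

    # Leave the data unfiltered so each test starts from a clean state.
    ui.notice(None, None)
    return stats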
Example #3
def test_filter_notice_bad_361(make_data_path):
    """Test out issue 361: notice then ignore bad.

    Since the data is grouped, the ignore_bad call is expected to
    drop the filter expression, with a warning message.
    """

    logger = logging.getLogger('sherpa')
    hdlr = MockLoggingHandler(level='WARNING')
    logger.addHandler(hdlr)

    stats = setup_model(make_data_path)

    ui.notice(0.5, 8.0)
    ui.ignore_bad()
    s1 = ui.calc_stat()
    assert s1 == pytest.approx(stats['bad'])

    msgs = hdlr.messages
    assert msgs['warning'] == [
        'filtering grouped data with quality ' +
        'flags, previous filters deleted'
    ]
    for k in ['debug', 'info', 'error', 'critical']:
        assert msgs[k] == []
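
This variant depends on a MockLoggingHandler helper that is not shown here. Based on how the test indexes hdlr.messages, a minimal sketch of such a handler (the implementation in the actual test suite may differ) could be:

import logging

class MockLoggingHandler(logging.Handler):
    """Store emitted log messages in per-level lists (sketch only)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.messages = {'debug': [], 'info': [], 'warning': [],
                         'error': [], 'critical': []}

    def emit(self, record):
        # Record the formatted message under its lower-cased level name,
        # matching the msgs['warning'] lookup used in the test above.
        self.messages[record.levelname.lower()].append(record.getMessage())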
Example #4
def test_filter_bad_notice_361(make_data_path):
    """Test out issue 361: ignore bad then notice.

    This is expected to fail with NumPy version 1.13 and should cause
    a DeprecationWarning with earlier versions (I do not know when
    the warning was added).
    """

    stats = setup_model(make_data_path)

    ui.ignore_bad()
    ui.notice(0.5, 8.0)
    s1 = ui.calc_stat()
    assert s1 == pytest.approx(stats['0.5-8.0'])
Example #5
def test_filter_bad_grouped(make_data_path, clean_astro_ui):
    """Check behavior when the data is grouped.

    This is a test of the current behavior, to check that
    values still hold. It may be necessary to change this
    test if we change the quality handling.
    """

    infile = make_data_path('q1127_src1_grp30.pi')
    ui.load_pha(infile)
    pha = ui.get_data()
    assert pha.quality_filter is None
    assert pha.mask is True

    assert ui.get_dep().shape == (439, )
    assert pha.quality_filter is None
    assert pha.mask is True

    # The last group is marked as quality=2 and so calling
    # ignore_bad means we lose that group.
    #
    ui.ignore_bad()
    assert ui.get_dep().shape == (438, )
    assert pha.mask is True

    expected = np.ones(1024, dtype=bool)
    expected[996:1025] = False
    assert pha.quality_filter == pytest.approx(expected)

    # What happens when we filter the data? Unlike #1169
    # we do change the noticed range.
    #
    ui.notice(0.5, 7)
    assert pha.quality_filter == pytest.approx(expected)

    # The mask has been filtered to remove the bad channels
    # (this is grouped data)
    expected = np.ones(438, dtype=bool)
    expected[0:15] = False
    expected[410:438] = False
    assert pha.mask == pytest.approx(expected)

    expected = np.ones(996, dtype=bool)
    expected[0:34] = False
    expected[481:996] = False
    assert pha.get_mask() == pytest.approx(expected)
Example #6
def test_filter_bad_ungrouped(make_data_path, clean_astro_ui):
    """Check behavior when the data is ungrouped.

    This is a test of the current behavior, to check that
    values still hold. It may be necessary to change this
    test if we change the quality handling.
    """

    infile = make_data_path('q1127_src1_grp30.pi')
    ui.load_pha(infile)
    pha = ui.get_data()
    assert pha.quality_filter is None
    assert pha.mask is True

    assert ui.get_dep().shape == (439, )
    ui.ungroup()
    assert ui.get_dep().shape == (1024, )
    assert pha.quality_filter is None
    assert pha.mask is True

    ui.ignore_bad()
    assert ui.get_dep().shape == (1024, )
    assert pha.quality_filter is None

    expected = np.ones(1024, dtype=bool)
    expected[996:1025] = False
    assert pha.mask == pytest.approx(expected)

    # At this point we've changed the mask array so Sherpa thinks
    # we've applied a filter, so a notice is not going to change
    # anything. See issue #1169
    #
    ui.notice(0.5, 7)
    assert pha.mask == pytest.approx(expected)

    # We need to ignore to change the mask.
    #
    ui.ignore(None, 0.5)
    ui.ignore(7, None)
    expected[0:35] = False
    expected[479:1025] = False
    assert pha.mask == pytest.approx(expected)
Example #7
def validate_xspec_result(l, h, npts, ndof, statval):
    """Check that the stat results match those from XSPEC.

    This assumes the first data set returned by get_stat_info
    should be used.
    """

    ui.notice(None, None)
    ui.ignore(None, l)
    ui.ignore(h, None)
    ui.ignore_bad()
    sinfo = ui.get_stat_info()[0]
    assert sinfo.numpoints == npts
    assert sinfo.dof == ndof

    # XSPEC displays results to ~ 2dp, so the tolerance
    # is quite forgiving here. Or I could use pyxspec to
    # calculate and display this.
    #
    assert_allclose(sinfo.statval, statval, rtol=0, atol=0.005)
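
A caller is expected to have already loaded the data, source model, and statistic; a hypothetical invocation (the band and reference numbers below are placeholders, not values taken from an actual XSPEC run) would be:

# Placeholder values only: the real tests take these from an XSPEC session.
validate_xspec_result(0.5, 7.0, npts=40, ndof=38, statval=32.73)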
Example #8
def test_filter_basic(make_data_path):
    """Test out issue 361 without ignore_bad calls.

    This should not trigger the bug behind issue 361.
    """

    stats = setup_model(make_data_path)

    s1 = ui.calc_stat()
    assert s1 == pytest.approx(stats['all'])

    ui.ignore_bad()
    s2 = ui.calc_stat()
    assert s2 == pytest.approx(stats['bad'])

    ui.notice(None, None)
    s3 = ui.calc_stat()
    assert s3 == pytest.approx(stats['all'])

    ui.notice(0.5, 8.0)
    s4 = ui.calc_stat()
    assert s4 == pytest.approx(stats['0.5-8.0'])
Example #9
def check_bad_grouping(exp_xlo, exp_xhi, exp_counts, lo1, hi1, lo2, hi2):
    """Common tests from test_grouped_pha_all_badXXX

    Sending in two ranges is a bit excessive, but it is the
    easiest thing to implement.
    """

    cts = ui.get_counts()
    assert cts == pytest.approx([exp_counts])

    dplot = ui.get_data_plot()
    assert dplot.xlo == pytest.approx([exp_xlo])
    assert dplot.xhi == pytest.approx([exp_xhi])
    assert dplot.y == pytest.approx([exp_counts])

    # ignore all the data
    ui.ignore(lo1, hi1)

    # can still plot
    cts = ui.get_counts()
    assert cts == pytest.approx([exp_counts])

    cts = ui.get_counts(filter=True)
    assert len(cts) == 0

    dplot = ui.get_data_plot()
    assert len(dplot.xlo) == 0
    assert len(dplot.xhi) == 0
    assert len(dplot.y) == 0

    # ignore does not fail
    #
    ui.ignore(lo2, hi2)

    # we can restore the data
    ui.notice(None, None)

    cts = ui.get_counts()
    assert cts == pytest.approx([exp_counts])

    dplot = ui.get_data_plot()
    assert dplot.xlo == pytest.approx([exp_xlo])
    assert dplot.xhi == pytest.approx([exp_xhi])
    assert dplot.y == pytest.approx([exp_counts])

    # now ignore the bad channels (ie everything)
    #
    ui.ignore_bad()

    cts = ui.get_counts()
    assert len(cts) == 0

    dplot = ui.get_data_plot()
    assert len(dplot.xlo) == 0
    assert len(dplot.xhi) == 0
    assert len(dplot.y) == 0

    # there's nothing to notice (this line is an example of #790)
    ui.notice(lo1, hi1)

    cts = ui.get_counts()
    assert len(cts) == 0

    dplot = ui.get_data_plot()
    assert len(dplot.xlo) == 0
    assert len(dplot.xhi) == 0
    assert len(dplot.y) == 0