Example #1
def check_imgdata_convolved():
    """What is the behavior when we add the PSF to plot_pvalue?"""

    r1 = ui.get_psf()
    ui.plot_pvalue(c1, c1 + g1, conv_model=r1, num=40, bins=5)

    tmp = ui.get_pvalue_results()

    # these values are different to test_plot_pvalue_imgpsf_unconvolved
    #
    assert tmp.null == pytest.approx(2391.26963100235)
    assert tmp.alt == pytest.approx(563.3992697080881)
    assert tmp.lr == pytest.approx(1827.8703612942618)

    assert tmp.samples.shape == (40, 1)
    assert tmp.stats.shape == (40, 2)
    assert tmp.ratios.shape == (40, )

    tmp = ui.get_pvalue_plot()

    assert tmp.lr == pytest.approx(1827.8703612942618)

    assert tmp.xlabel == 'Likelihood Ratio'
    assert tmp.ylabel == 'Frequency'
    assert tmp.title == 'Likelihood Ratio Distribution'

    assert tmp.ratios.shape == (40, )
    assert tmp.xlo.shape == (6, )
    assert tmp.xhi.shape == (6, )
    assert tmp.y.shape == (6, )
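Note that `c1`, `g1`, the PSF, and the module imports are not part of this snippet; they come from fixtures in the surrounding test module. Below is a rough sketch of the kind of image-data setup these checks appear to rely on, with illustrative component names and grid size (not the actual fixture):

import pytest

from sherpa.astro import ui

# Illustrative only: a small 2D grid, a likelihood statistic, the two
# source components named in the check, and a PSF built from a model.
ui.dataspace2d([20, 20])
ui.set_stat("cash")

c1 = ui.create_model_component("gauss2d", "c1")
g1 = ui.create_model_component("gauss2d", "g1")
ui.set_source(c1 + g1)

pmod = ui.create_model_component("gauss2d", "pmod")  # hypothetical PSF shape
ui.load_psf("psf0", pmod)
ui.set_psf("psf0")   # after this, ui.get_psf() returns the PSF that the
                     # convolved check above passes as conv_model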
Example #2
def test_plot_pvalue(make_data_path, clean_astro_ui, hide_log_output):

    fname = make_data_path("qso.pi")
    ui.load_pha(fname)

    ui.set_stat('cstat')
    ui.set_method("neldermead")

    ui.group_counts(10)
    ui.notice(0.3, 8)

    ui.set_model("xsphabs.abs1*xspowerlaw.p1")
    ui.set_model("abs1*(p1+gauss1d.g1)")

    # start the fit close to the best-fit values to save a small
    # amount of time.
    abs1.nh = 0.05
    p1.phoindex = 1.28
    p1.norm = 2e-4
    g1.ampl = 1.8e-5

    g1.pos = 3.
    ui.freeze(g1.pos)
    g1.fwhm = 0.1
    ui.freeze(g1.fwhm)

    ui.fit()
    ui.plot_pvalue(p1, p1 + g1, num=100)

    tmp = ui.get_pvalue_results()

    assert tmp.null == pytest.approx(210.34566845619273)
    assert tmp.alt == pytest.approx(207.66618095925094)
    assert tmp.lr == pytest.approx(2.679487496941789)
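These assertions only look at the null, alternative, and ratio statistics; the object returned by `ui.get_pvalue_results()` also stores the simulated distribution and the p-value itself. A short, hedged follow-on (the `ppp` attribute name is how recent Sherpa versions expose the simulated p-value on `LikelihoodRatioResults`; verify with `print(res)` on your version):

res = ui.get_pvalue_results()

# Best-fit statistics of the null and alternative models, and their
# difference (the observed likelihood ratio).
print(res.null, res.alt, res.lr)

# Simulated parameter draws, fit statistics, and likelihood ratios.
print(res.samples.shape, res.stats.shape, res.ratios.shape)

# Simulated p-value (attribute name assumed; check with print(res)).
print(res.ppp)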
Example #3
def check_imgdata_unconvolved(caplog):
    """What is the behavior when we do not add the PSF to plot_pvalue?

    Note we add a check of the screen output here.
    """

    # include a check of the screen output
    #
    with caplog.at_level('INFO', logger='sherpa'):
        with SherpaVerbosity('INFO'):
            ui.plot_pvalue(c1, c1 + g1, num=40, bins=5)

    assert len(caplog.records) == 1

    lname, lvl, msg = caplog.record_tuples[0]
    assert lname == 'sherpa.ui.utils'
    assert lvl == logging.INFO

    # Do not use equality tests for the numeric values in case
    # there are numpy-version differences in the number of
    # significant figures.
    #
    toks = msg.split('\n')
    assert len(toks) == 5
    assert toks[0] == 'Likelihood Ratio Test'
    assert toks[1].startswith('null statistic   =  2391.2696')
    assert toks[2].startswith('alt statistic    =  353.82')
    assert toks[3].startswith('likelihood ratio =  2037.446')
    assert toks[4] == 'p-value          <  0.025'

    tmp = ui.get_pvalue_results()

    assert tmp.null == pytest.approx(2391.2696310023503)
    assert tmp.alt == pytest.approx(353.8235336370698)
    assert tmp.lr == pytest.approx(2037.4460973652804)

    assert tmp.samples.shape == (40, 1)
    assert tmp.stats.shape == (40, 2)
    assert tmp.ratios.shape == (40, )

    tmp = ui.get_pvalue_plot()

    assert tmp.lr == pytest.approx(2037.4460973652804)

    assert tmp.xlabel == 'Likelihood Ratio'
    assert tmp.ylabel == 'Frequency'
    assert tmp.title == 'Likelihood Ratio Distribution'

    assert tmp.ratios.shape == (40, )
    assert tmp.xlo.shape == (6, )
    assert tmp.xhi.shape == (6, )
    assert tmp.y.shape == (6, )
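The convolved and unconvolved image checks share the same fixture; in addition, this one needs the logging machinery and the `SherpaVerbosity` context manager. The module-level imports it assumes look roughly like this (the `sherpa.utils.logging` path is standard in recent Sherpa releases, but treat it as an assumption for older ones):

import logging

import pytest

from sherpa.astro import ui
from sherpa.utils.logging import SherpaVerbosity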
Example #4
def test_rsp(self):
    fname = self.make_path("qso.pi")
    ui.load_pha(fname)
    ui.set_stat("chi2xspecvar")
    ui.set_method("neldermead")
    ui.group_counts(10)
    ui.notice(0.3, 8)
    ui.set_model("xsphabs.abs1*xspowerlaw.p1")
    ui.set_model("abs1*(p1+gauss1d.g1)")
    g1.pos = 3.
    ui.freeze(g1.pos)
    g1.fwhm = 0.1
    ui.freeze(g1.fwhm)
    ui.set_stat('cstat')
    ui.fit()
    ui.plot_pvalue(p1, p1 + g1, num=100)
    tmp = ui.get_pvalue_results()
    expected = [210.34566845619273, 207.66618095925094, 2.679487496941789]
    self.compare_results(expected, [tmp.null, tmp.alt, tmp.lr])
Example #5
def test_plot_pvalue(make_data_path, clean_astro_ui, hide_logging):
    """Check plot_pvalue with PHA data."""

    fname = make_data_path("qso.pi")
    ui.load_pha(fname)

    ui.set_stat('cstat')
    ui.set_method("neldermead")

    ui.group_counts(10)
    ui.notice(0.3, 8)

    ui.set_model("xsphabs.abs1*(xspowerlaw.p1 +gauss1d.g1)")

    # start the fit close to the best-fit values to save a small
    # amount of time.
    abs1.nh = 0.05
    p1.phoindex = 1.28
    p1.norm = 2e-4
    g1.ampl = 1.8e-5

    g1.pos = 3.
    ui.freeze(g1.pos)
    g1.fwhm = 0.1
    ui.freeze(g1.fwhm)

    # Could we reduce the number of bins to save evaluation time?
    # We do want a non-default num value when checking the shapes
    # of the output attributes.
    #
    ui.fit()
    ui.plot_pvalue(p1, p1 + g1, num=100, bins=20)

    tmp = ui.get_pvalue_results()

    assert tmp.null == pytest.approx(210.34566845619273)
    assert tmp.alt == pytest.approx(207.66618095925094)
    assert tmp.lr == pytest.approx(2.679487496941789)

    # Have we returned the correct info?
    #
    # Is it worth checking the stored data (aka how randomised is this
    # output)?
    #
    assert tmp.samples.shape == (100, 2)
    assert tmp.stats.shape == (100, 2)
    assert tmp.ratios.shape == (100, )

    # Check the plot
    #
    tmp = ui.get_pvalue_plot()

    assert tmp.lr == pytest.approx(2.679487496941789)

    assert tmp.xlabel == 'Likelihood Ratio'
    assert tmp.ylabel == 'Frequency'
    assert tmp.title == 'Likelihood Ratio Distribution'

    # It would be nice to check the values here
    #
    assert tmp.ratios.shape == (100, )
    assert tmp.xlo.shape == (21, )
    assert tmp.xhi.shape == (21, )
    assert tmp.y.shape == (21, )
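Because the plot object carries the binned distribution directly, the cached result can be redrawn without rerunning the simulations. A small matplotlib sketch that uses only the attributes checked above (it assumes, as these tests imply, that `get_pvalue_plot()` with no arguments returns the values cached by the last `plot_pvalue` call):

import matplotlib.pyplot as plt

pplot = ui.get_pvalue_plot()   # cached results from the last plot_pvalue call

# Histogram of the simulated likelihood ratios, with the observed ratio
# marked by a vertical line.
plt.bar(pplot.xlo, pplot.y, width=pplot.xhi - pplot.xlo, align="edge")
plt.axvline(pplot.lr, linestyle="--", color="k")
plt.xlabel(pplot.xlabel)
plt.ylabel(pplot.ylabel)
plt.title(pplot.title)
plt.show()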