Example #1
def test_wstat_calc_stat_info(hide_logging, make_data_path, clean_astro_ui):
    "bug #147"
    ui.load_pha("stat", make_data_path("3c273.pi"))
    ui.set_source("stat", ui.powlaw1d.p1)
    ui.set_stat("wstat")
    ui.fit("stat")
    ui.get_stat_info()
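Example #1 only checks that the call succeeds. A minimal sketch of the fields the returned objects expose (names taken from the examples below; the 3c273.pi file is assumed to be on disk):

import sherpa.astro.ui as ui

ui.load_pha('stat', '3c273.pi')
ui.set_source('stat', ui.powlaw1d.p1)
ui.set_stat('wstat')
ui.fit('stat')

info = ui.get_stat_info()[0]
print(info.statname)                        # e.g. 'wstat'
print(info.numpoints, info.dof)             # points used and degrees of freedom
print(info.statval, info.rstat, info.qval)  # statistic value; rstat/qval when defined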
Example #2
def calc_bkg_stat(self, dof=False):
    # Select the get_stat_info entry that covers this dataset and
    # includes background contributions.
    ss = [
        s for s in ui.get_stat_info() if self.id in s.ids
        and s.bkg_ids is not None and len(s.bkg_ids) > 0
    ]
    if len(ss) != 1:
        for s in ui.get_stat_info():
            if self.id in s.ids and s.bkg_ids is not None \
                    and len(s.bkg_ids) > 0:
                print('get_stat_info returned: ids=%s bkg_ids=%s' %
                      (s.ids, s.bkg_ids))
    assert len(ss) == 1
    return (ss[0].statval, ss[0].dof) if dof else ss[0].statval
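The helper above relies on get_stat_info() reporting, for each entry, which datasets (ids) and which backgrounds (bkg_ids) it covers. A minimal standalone sketch of inspecting those fields (the setup mirrors the other examples; the file is assumed to exist):

import sherpa.astro.ui as ui

ui.load_pha(1, '3c273.pi')
ui.set_source(1, ui.powlaw1d.pl)
ui.set_stat('wstat')

for s in ui.get_stat_info():
    # ids lists the datasets the entry covers; bkg_ids is None unless
    # background data contribute to the statistic.
    print(s.ids, s.bkg_ids, s.statname, s.statval)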
Example #3
def test_can_use_pspc_data(make_data_path):
    """A basic check that we can read in and use the ROSAT PSPC data.

    Unlike the previous tests, which directly access the io module,
    this one uses the ui interface.
    """

    if not six.PY2 and (backend == "crates"):
        pytest.skip('Python3 and Crates: known to fail')

    # The PSPC PHA file does not have the ANCRFILE/RESPFILE keywords
    # set up, so the responses have to be added manually.
    #
    ui.load_pha(make_data_path(PHAFILE), use_errors=True)
    assert ui.get_analysis() == 'channel'

    ui.load_rmf(make_data_path(RMFFILE))
    assert ui.get_analysis() == 'energy'

    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.gamma', 1.7)
    ui.set_par('pl.ampl', 2e-6)

    s = ui.get_stat_info()[0]
    assert s.numpoints == 63
    assert s.dof == 61

    # Value obtained from XSPEC 12.9.1p; Sherpa returns
    # sexpected = 973.2270845920297
    sexpected = 973.23
    assert_allclose(s.statval, sexpected, rtol=0, atol=0.005)

    # apply an energy filter to remove the "bogus" points
    ui.ignore(None, 0.05)

    s = ui.get_stat_info()[0]
    assert s.numpoints == 62
    assert s.dof == 60
    assert_allclose(s.statval, sexpected, rtol=0, atol=0.005)

    ui.ignore(2.01, None)

    s = ui.get_stat_info()[0]
    assert s.numpoints == 7
    assert s.dof == 5

    assert_allclose(s.statval, sexpected, rtol=0, atol=0.005)
Example #4
def test_can_use_pspc_data(make_data_path):
    """A basic check that we can read in and use the ROSAT PSPC data.

    Unlike the previous tests, which directly access the io module,
    this one uses the ui interface.
    """

    # The PSPC PHA file does not have the ANCRFILE/RESPFILE keywords
    # set up, so the responses have to be added manually.
    #
    ui.load_pha(make_data_path(PHAFILE), use_errors=True)
    assert ui.get_analysis() == 'channel'

    ui.load_rmf(make_data_path(RMFFILE))
    assert ui.get_analysis() == 'energy'

    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.gamma', 1.7)
    ui.set_par('pl.ampl', 2e-6)

    s = ui.get_stat_info()[0]
    assert s.numpoints == 63
    assert s.dof == 61

    # Value obtained from XSPEC 12.9.1p; Sherpa returns
    # sexpected = 973.2270845920297
    sexpected = 973.23
    assert_allclose(s.statval, sexpected, rtol=0, atol=0.005)

    # apply an energy filter to remove the "bogus" points
    ui.ignore(None, 0.05)

    s = ui.get_stat_info()[0]
    assert s.numpoints == 62
    assert s.dof == 60
    assert_allclose(s.statval, sexpected, rtol=0, atol=0.005)

    ui.ignore(2.01, None)

    s = ui.get_stat_info()[0]
    assert s.numpoints == 7
    assert s.dof == 5

    assert_allclose(s.statval, sexpected, rtol=0, atol=0.005)
Example #5
def test_get_stat_info(self):
    fname_3c273 = self.make_path("3c273.pi")
    ui.load_pha(fname_3c273)
    src = ui.xspowerlaw.pl
    ui.set_source(src)
    ui.guess('pl')
    ui.set_stat('wstat')
    stat_info = ui.get_stat_info()[0]
    assert stat_info.dof == 44
    assert stat_info.numpoints == 46
Example #6
def saveSrcModel(id=1, writeTo='srcPowerLaw.json', stat=True, info={}):
    """
    """
    srcModel = ui.get_model(id=id)
    parDict = {p.fullname: p.val for p in srcModel.pars}
    if stat:
        fsrc, *_ = ui.get_stat_info()
        for i in ['statname', 'numpoints', 'dof', 'qval', 'rstat', 'statval']:
            parDict[i] = getattr(fsrc, i)
    for key, val in info.items():
        parDict[key] = val
    with open(writeTo, 'w') as f:
        json.dump(parDict, f)
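A hypothetical call, assuming a model has already been assigned to dataset 1 (the output filename and the extra metadata key are placeholders):

saveSrcModel(id=1, writeTo='srcPowerLaw.json', stat=True,
             info={'note': 'placeholder metadata'})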
Example #7
def test_wstat_errors_data1d(clean_astro_ui, make_data_path):
    """Check we error out with a mixture of data.

    This was hit during some test that needed a clean_astro_ui
    fixture, so I just wanted to make sure we ran a similar
    test.
    """

    infile = make_data_path('3c273.pi')
    ui.load_pha(infile)

    x = np.asarray([1, 2, 3])
    y = np.asarray([2, 0, 4])
    ui.load_arrays(2, x, y)

    ui.set_source(ui.powlaw1d.m1)
    ui.set_source(2, ui.polynom1d.m2)

    ui.set_stat('wstat')

    with pytest.raises(StatErr) as exc:
        ui.get_stat_info()

    assert str(exc.value) == 'No background data has been supplied. Use cstat'
Example #8
def saveModel(amodel, writeTo, stat=True, info={}):
    """
    Save the model parameters to the writeTo file.

    Parameters
    stat = True
        If stat is True, save the stat info as well.
    """
    # Each value in parDict is a (value, frozen) pair; the parameter
    # is frozen when frozen is True.
    parDict = {p.name: (p.val, p.frozen) for p in amodel.pars}
    if stat:
        # Only the first entry, i.e. the source data set, is used.
        fsrc, *_ = ui.get_stat_info()
        for i in ['statname', 'numpoints', 'dof', 'qval', 'rstat', 'statval']:
            parDict[i] = getattr(fsrc, i)
    for key, val in info.items():
        parDict[key] = val
    with open(writeTo, 'w') as f:
        json.dump(parDict, f)
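A hypothetical call, assuming a source model has been set up (the output filename and metadata are placeholders):

saveModel(ui.get_source(), 'model_pars.json',
          info={'note': 'placeholder metadata'})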
Example #9
def validate_xspec_result(l, h, npts, ndof, statval):
    """Check that the stat results match those from XSPEC.

    This assumes the first data set returned by get_stat_info
    should be used.
    """

    ui.notice(None, None)
    ui.ignore(None, l)
    ui.ignore(h, None)
    ui.ignore_bad()
    sinfo = ui.get_stat_info()[0]
    assert sinfo.numpoints == npts
    assert sinfo.dof == ndof

    # XSPEC displays results to ~ 2dp, so the tolerance
    # is quite forgiving here. Or I could use pyxspec to
    # calculate and display this.
    #
    assert_allclose(sinfo.statval, statval, rtol=0, atol=0.005)
Example #10
def test_can_use_swift_data(make_data_path, clean_astro_ui):
    """A basic check that we can read in and use the Swift data.

    Unlike the previous tests, which directly access the io module,
    this one uses the ui interface.
    """

    # The Swift PHA file does not have the ANCRFILE/RESPFILE keywords
    # set up, so the responses have to be manually added.
    #
    ui.load_pha(make_data_path(PHAFILE))

    rmffile = make_data_path(RMFFILE)
    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        ui.load_rmf(rmffile)

    validate_replacement_warning(ws, 'RMF', rmffile)

    arffile = make_data_path(ARFFILE)
    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        ui.load_arf(arffile)

    validate_replacement_warning(ws, 'ARF', arffile)

    assert ui.get_analysis() == 'energy'

    arf = ui.get_arf()
    rmf = ui.get_rmf()
    assert arf.energ_lo[0] == EMIN
    assert rmf.energ_lo[0] == EMIN
    assert rmf.e_min[0] == 0.0

    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.ampl', 0.0003)

    stat = ui.calc_stat()

    # This check is purely a regression test, so the value has
    # not been externally validated.
    #
    assert_allclose(stat, 58.2813692358182)

    # Pick an energy range which isn't affected by the first
    # bin.
    #
    # Unfortunately, using a range of 0.3-8.0 gives 771 bins
    # in XSPEC - channels 30 to 800 - but 770 bins in Sherpa,
    # channels 31 to 800.
    #
    # Note that the channel numbering starts at 0:
    # % dmlist target_sr.pha header,clean,raw | grep TLMIN
    # TLMIN1       = 0                    / Lowest legal channel number
    #
    # and so it's not clear when XSPEC says 30-800 what it
    # means. From https://github.com/sherpa/sherpa/issues/1211#issuecomment-881647128
    # we have that the first bin it is using is
    #     0.29-0.30
    # and the last bin is
    #     7.99-8.00
    # and I've checked with iplot that it has renumbered the
    # channels to 1-1024 from 0-1023
    #
    # % dmlist swxpc0to12s6_20130101v014.rmf.gz"[ebounds][channel=28:31]" data,clean
    #  CHANNEL    E_MIN                E_MAX
    #         28     0.28000000119209     0.28999999165535
    #         29     0.28999999165535     0.30000001192093
    #         30     0.30000001192093     0.31000000238419
    #         31     0.31000000238419     0.31999999284744
    # % dmlist swxpc0to12s6_20130101v014.rmf.gz"[ebounds][channel=798:801]" data,clean
    #  CHANNEL    E_MIN                E_MAX
    #        798         7.9800000191         7.9899997711
    #        799         7.9899997711                  8.0
    #        800                  8.0         8.0100002289
    #        801         8.0100002289         8.0200004578
    #
    # If I use ignore(None, 0.3); ignore(8.0, None) instead then the
    # result is 771 bins (channels 31 to 800). This is because the
    # e_min/max of the RMF has channel widths of 0.01 keV, starting at
    # 0, so both 0.3 and 8.0 fall on a bin boundary. So, it's either a
    # difference in < or <= (or > vs >=), or a rounding issue due to
    # floating-point conversion leading to one bin boundary being
    # slightly different in Sherpa vs XSPEC.
    #
    # When using ui.notice(0.3, 8.0); ui.get_indep(filter=True)
    # returns 770 channels, 31 to 800.
    #
    # Using ui.notice(0.3, 7.995) selects channels 31 to 800.
    # Using ui.notice(0.299, 8.0) selects channels 30 to 800.
    # Using ui.notice(0.299, 7.995) selects channels 30 to 800.
    #
    ui.notice(0.299, 8.0)

    # Check the selected range
    pha = ui.get_data()
    expected = np.zeros(1024, dtype=bool)
    expected[29:800] = True
    assert pha.mask == pytest.approx(expected)
    assert pha.get_mask() == pytest.approx(expected)

    # XSPEC 12.9.1b calculation of the statistic:
    #   chi sq = 203.88 from 771 bins with 769 dof
    #   cstat  = 568.52
    #
    # There are known differences between XSPEC and Sherpa
    # with chi2xspecvar. This only affects data sets where
    # there is background subtraction, which is not the case
    # here. See https://github.com/sherpa/sherpa/issues/356
    #
    ui.set_stat('chi2xspecvar')
    stat_xvar = ui.get_stat_info()

    assert len(stat_xvar) == 1
    stat_xvar = stat_xvar[0]
    assert stat_xvar.numpoints == 771
    assert stat_xvar.dof == 769
    assert_allclose(stat_xvar.statval, 203.88, rtol=0, atol=0.005)

    ui.set_stat('cstat')
    stat_cstat = ui.get_stat_info()

    assert len(stat_cstat) == 1
    stat_cstat = stat_cstat[0]
    assert stat_cstat.numpoints == 771
    assert stat_cstat.dof == 769
    assert_allclose(stat_cstat.statval, 568.52, rtol=0, atol=0.005)
Example #11
def test_chi2(make_data_path, clean_astro_ui):
    "bugs #11400, #13297, #12365"

    data = make_data_path('3c273.pi')

    # Case 1: the first ds has no errors, the second does; chi2-derived
    # (chi2gehrels) statistic. I expect stat.name to be chi2gehrels for
    # ds1, chi2 for ds2, chi2gehrels for ds1,2
    ui.load_data(1, data)
    ui.load_data(2, data, use_errors=True)

    ui.set_source(1, "gauss1d.g1")
    ui.set_source(2, "gauss1d.g1")

    ui.set_stat("chi2gehrels")

    si = ui.get_stat_info()

    stat1 = si[0].statname
    stat2 = si[1].statname
    stat12 = si[2].statname

    assert stat1 == 'chi2gehrels'
    assert stat2 == 'chi2'
    assert stat12 == 'chi2gehrels'

    # Case 2: the first ds has errors, the second does not; chi2-derived
    # (chi2gehrels) statistic. I expect stat.name to be chi2 for ds1,
    # chi2gehrels for ds2, chi2gehrels for ds1,2
    ui.load_data(2, data)
    ui.load_data(1, data, use_errors=True)

    si = ui.get_stat_info()

    stat1 = si[0].statname
    stat2 = si[1].statname
    stat12 = si[2].statname

    assert stat1 == 'chi2'
    assert stat2 == 'chi2gehrels'
    assert stat12 == 'chi2gehrels'

    # Case 3: both datasets have errors, chi2-derived (chi2gehrels)
    # statistic. I expect stat.name to be chi2 for all of them.
    ui.load_data(2, data, use_errors=True)
    ui.load_data(1, data, use_errors=True)

    si = ui.get_stat_info()

    stat1 = si[0].statname
    stat2 = si[1].statname
    stat12 = si[2].statname

    assert stat1 == 'chi2'
    assert stat2 == 'chi2'
    assert stat12 == 'chi2'

    # Case 4: the first ds has errors, the second does not; LeastSq statistic.
    # I expect stat.name to be leastsq for all of them.
    ui.load_data(2, data)
    ui.load_data(1, data, use_errors=True)

    ui.set_stat("leastsq")

    si = ui.get_stat_info()

    stat1 = si[0].statname
    stat2 = si[1].statname
    stat12 = si[2].statname

    assert stat1 == 'leastsq'
    assert stat2 == 'leastsq'
    assert stat12 == 'leastsq'

    # Case 5: both ds have errors; LeastSq statistic.
    # I expect stat.name to be leastsq for all of them.
    ui.load_data(2, data, use_errors=True)
    ui.load_data(1, data, use_errors=True)

    ui.set_stat("leastsq")

    si = ui.get_stat_info()

    stat1 = si[0].statname
    stat2 = si[1].statname
    stat12 = si[2].statname

    assert stat1 == 'leastsq'
    assert stat2 == 'leastsq'
    assert stat12 == 'leastsq'

    # Case 6: the first ds has errors, the second does not; CStat statistic.
    # I expect stat.name to be cstat for all of them.
    ui.load_data(2, data)
    ui.load_data(1, data, use_errors=True)

    ui.set_stat("cstat")

    si = ui.get_stat_info()

    stat1 = si[0].statname
    stat2 = si[1].statname
    stat12 = si[2].statname

    assert stat1 == 'cstat'
    assert stat2 == 'cstat'
    assert stat12 == 'cstat'

    # Case 7: select chi2 as the statistic. One of the ds does not provide
    # errors. I expect Sherpa to raise a StatErr exception.
    ui.set_stat('chi2')

    with pytest.raises(StatErr):
        ui.get_stat_info()

    # Case 8: select chi2 as the statistic. Both datasets provide errors.
    # I expect stat.name to be 'chi2'.
    ui.load_data(2, data, use_errors=True)
    si = ui.get_stat_info()

    stat1 = si[0].statname
    stat2 = si[1].statname
    stat12 = si[2].statname

    assert stat1 == 'chi2'
    assert stat2 == 'chi2'
    assert stat12 == 'chi2'
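The eight cases reduce to a simple rule: a chi2-derived statistic reports plain 'chi2' for any dataset that supplies its own errors and its flavour name otherwise, while the least-squares and likelihood statistics always report their own name. A minimal sketch of the rule, mirroring Case 1 above (the 3c273.pi file is assumed to be available):

import sherpa.astro.ui as ui

ui.load_data(1, '3c273.pi')                   # estimated errors -> 'chi2gehrels'
ui.load_data(2, '3c273.pi', use_errors=True)  # measured errors  -> 'chi2'
ui.set_source(1, 'gauss1d.g1')
ui.set_source(2, 'gauss1d.g1')
ui.set_stat('chi2gehrels')

for s in ui.get_stat_info():
    print(s.ids, s.statname)   # ds1: chi2gehrels, ds2: chi2, ds1+2: chi2gehrels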
Example #12
def test_wstat_calc_stat_info(self):
    ui.load_pha("stat", self.make_path("3c273.pi"))
    ui.set_source("stat", ui.powlaw1d.p1)
    ui.set_stat("wstat")
    ui.fit("stat")
    ui.get_stat_info()
Example #13
def test_can_use_swift_data(make_data_path):
    """A basic check that we can read in and use the Swift data.

    Unlike the previous tests, which directly access the io module,
    this one uses the ui interface.
    """

    # QUS are there pytest fixtures that ensure the state is
    # clean on entry and exit?
    ui.clean()

    # The Swift PHA file does not have the ANCRFILE/RESPFILE keywords
    # set up, so the responses have to be manually added.
    #
    ui.load_pha(make_data_path(PHAFILE))
    ui.load_rmf(make_data_path(RMFFILE))
    ui.load_arf(make_data_path(ARFFILE))

    assert ui.get_analysis() == 'energy'

    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.ampl', 0.0003)

    # The responses have the first bin start at an energy of 0,
    # which causes issues for Sherpa. There should be a
    # RuntimeWarning due to a divide by zero.
    #
    with pytest.warns(RuntimeWarning) as record:
        stat = ui.calc_stat()

    # The exact form of the message depends on the Python version;
    # this could be checked, but it feels excessive for this
    # particular test, which is just a regression check, so use a
    # more lax approach.
    #
    assert len(record) == 1
    assert record[0].message.args[0] in \
        ['divide by zero encountered in divide',
         'divide by zero encountered in true_divide']

    # The stat value depends on what power-law model is used. With
    # xspowerlaw it is NaN, but with powlaw1d it is finite.
    #
    # This check is purely a regression test, so the value has
    # not been externally validated.
    #
    # assert np.isnan(stat)
    assert_allclose(stat, 58.2813692358182)

    # Manually adjust the first bin to avoid this problem.
    # Add in asserts just in case this gets "fixed" in the
    # I/O layer (as XSPEC does).
    #
    arf = ui.get_arf()
    rmf = ui.get_rmf()
    assert arf.energ_lo[0] == 0.0
    assert rmf.energ_lo[0] == 0.0
    assert rmf.e_min[0] == 0.0

    # The bin widths are ~ 0.005 or ~ 0.01 keV, so pick a value
    # smaller than this.
    #
    ethresh = 1e-6
    arf.energ_lo[0] = ethresh
    rmf.energ_lo[0] = ethresh
    rmf.e_min[0] = ethresh

    # Pick an energy range which isn't affected by the first
    # bin.
    #
    # Unfortunately, using a range of 0.3-8.0 gives 771 bins
    # in XSPEC - channels 30 to 800 - but 772 bins in Sherpa.
    # If I use ignore(None, 0.3); ignore(8.0, None) instead
    # then the result is 771 bins. This is because the e_min/max
    # of the RMF has channel widths of 0.01 keV, starting at 0,
    # so both 0.3 and 8.0 fall on a bin boundary. So, it's either
    # a difference in < or <= (or > vs >=), or a rounding issue
    # due to floating-point conversion leading to one bin boundary
    # being slightly different in Sherpa vs XSPEC.
    #
    # When using ui.notice(0.3, 8.0); ui.get_indep(filter=True)
    # returns 772 channels, 30 to 801.
    #
    # Using ui.notice(0.3, 7.995) selects channels 30 to 800. So
    # this range is used. Alternatively, channel 801 could have been
    # excluded explicitly.
    #
    # ui.notice(0.3, 8.0)
    ui.notice(0.3, 7.995)

    # XSPEC 12.9.1b calculation of the statistic:
    #   chi sq = 203.88 from 771 bins with 769 dof
    #   cstat  = 568.52
    #
    # There are known differences between XSPEC and Sherpa
    # with chi2xspecvar. This only affects data sets where
    # there is background subtraction, which is not the case
    # here. See https://github.com/sherpa/sherpa/issues/356
    #
    ui.set_stat('chi2xspecvar')
    stat_xvar = ui.get_stat_info()

    assert len(stat_xvar) == 1
    stat_xvar = stat_xvar[0]
    assert stat_xvar.numpoints == 771
    assert stat_xvar.dof == 769
    assert_allclose(stat_xvar.statval, 203.88,
                    rtol=0, atol=0.005)

    ui.set_stat('cstat')
    stat_cstat = ui.get_stat_info()

    assert len(stat_cstat) == 1
    stat_cstat = stat_cstat[0]
    assert stat_cstat.numpoints == 771
    assert stat_cstat.dof == 769
    assert_allclose(stat_cstat.statval, 568.52,
                    rtol=0, atol=0.005)

    ui.clean()
Example #14
def test_can_use_swift_data(make_data_path):
    """A basic check that we can read in and use the Swift data.

    Unlike the previous tests, which directly access the io module,
    this one uses the ui interface.
    """

    # QUS are there pytest fixtures that ensure the state is
    # clean on entry and exit?
    ui.clean()

    # The Swift PHA file does not have the ANCRFILE/RESPFILE keywords
    # set up, so the responses have to be manually added.
    #
    ui.load_pha(make_data_path(PHAFILE))

    rmffile = make_data_path(RMFFILE)
    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        ui.load_rmf(rmffile)

    validate_replacement_warning(ws, 'RMF', rmffile)

    arffile = make_data_path(ARFFILE)
    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        ui.load_arf(arffile)

    validate_replacement_warning(ws, 'ARF', arffile)

    assert ui.get_analysis() == 'energy'

    arf = ui.get_arf()
    rmf = ui.get_rmf()
    assert arf.energ_lo[0] == EMIN
    assert rmf.energ_lo[0] == EMIN
    assert rmf.e_min[0] == 0.0

    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.ampl', 0.0003)

    stat = ui.calc_stat()

    # This check is purely a regression test, so the value has
    # not been externally validated.
    #
    assert_allclose(stat, 58.2813692358182)

    # Pick an energy range which isn't affected by the first
    # bin.
    #
    # Unfortunately, using a range of 0.3-8.0 gives 771 bins
    # in XSPEC - channels 30 to 800 - but 772 bins in Sherpa.
    # If I use ignore(None, 0.3); ignore(8.0, None) instead
    # then the result is 771 bins. This is because the e_min/max
    # of the RMF has channel widths of 0.01 keV, starting at 0,
    # so both 0.3 and 8.0 fall on a bin boundary. So, it's either
    # a difference in < or <= (or > vs >=), or a rounding issue
    # due to floating-point conversion leading to one bin boundary
    # being slightly different in Sherpa vs XSPEC.
    #
    # When using ui.notice(0.3, 8.0); ui.get_indep(filter=True)
    # returns 772 channels, 30 to 801.
    #
    # Using ui.notice(0.3, 7.995) selects channels 30 to 800. So
    # this range is used. Alternatively, channel 801 could have been
    # excluded explicitly.
    #
    # ui.notice(0.3, 8.0)
    ui.notice(0.3, 7.995)

    # XSPEC 12.9.1b calculation of the statistic:
    #   chi sq = 203.88 from 771 bins with 769 dof
    #   cstat  = 568.52
    #
    # There are known differences between XSPEC and Sherpa
    # with chi2xspecvar. This only affects data sets where
    # there is background subtraction, which is not the case
    # here. See https://github.com/sherpa/sherpa/issues/356
    #
    ui.set_stat('chi2xspecvar')
    stat_xvar = ui.get_stat_info()

    assert len(stat_xvar) == 1
    stat_xvar = stat_xvar[0]
    assert stat_xvar.numpoints == 771
    assert stat_xvar.dof == 769
    assert_allclose(stat_xvar.statval, 203.88,
                    rtol=0, atol=0.005)

    ui.set_stat('cstat')
    stat_cstat = ui.get_stat_info()

    assert len(stat_cstat) == 1
    stat_cstat = stat_cstat[0]
    assert stat_cstat.numpoints == 771
    assert stat_cstat.dof == 769
    assert_allclose(stat_cstat.statval, 568.52,
                    rtol=0, atol=0.005)

    ui.clean()
Example #15
def test_xspec_con_ui_cflux(make_data_path, clean_astro_ui, restore_xspec_settings):
    """Check cflux from the UI layer with a response."""

    from sherpa.astro import xspec

    infile = make_data_path('3c273.pi')
    ui.load_pha('random', infile)
    ui.subtract('random')
    ui.ignore(None, 0.5)
    ui.ignore(7, None)

    ui.set_source('random', 'xsphabs.gal * xscflux.sflux(powlaw1d.pl)')
    mdl = ui.get_source('random')

    assert mdl.name == '(xsphabs.gal * xscflux.sflux(powlaw1d.pl))'
    assert len(mdl.pars) == 7
    assert mdl.pars[0].fullname == 'gal.nH'
    assert mdl.pars[1].fullname == 'sflux.Emin'
    assert mdl.pars[2].fullname == 'sflux.Emax'
    assert mdl.pars[3].fullname == 'sflux.lg10Flux'
    assert mdl.pars[4].fullname == 'pl.gamma'
    assert mdl.pars[5].fullname == 'pl.ref'
    assert mdl.pars[6].fullname == 'pl.ampl'

    assert isinstance(mdl.lhs, xspec.XSphabs)
    assert isinstance(mdl.rhs, xspec.XSConvolutionModel)

    gal = ui.get_model_component('gal')
    sflux = ui.get_model_component('sflux')
    pl = ui.get_model_component('pl')
    assert isinstance(gal, xspec.XSphabs)
    assert isinstance(sflux, xspec.XScflux)
    assert isinstance(pl, PowLaw1D)

    # the convolution model needs the normalization to be fixed
    # (not for this example, as we are not fitting, but do this
    # anyway for reference)
    pl.ampl.frozen = True

    sflux.emin = 1
    sflux.emax = 5
    sflux.lg10Flux = -12.3027

    pl.gamma = 2.03
    gal.nh = 0.039

    ui.set_xsabund('angr')
    ui.set_xsxsect('vern')

    # check we get the "expected" statistic (so this is a regression
    # test).
    #
    ui.set_stat('chi2gehrels')
    sinfo = ui.get_stat_info()

    assert len(sinfo) == 1
    sinfo = sinfo[0]
    assert sinfo.numpoints == 40
    assert sinfo.dof == 37
    assert sinfo.statval == pytest.approx(21.25762265234619)

    # Do we get the same flux from Sherpa's calc_energy_flux?
    #
    cflux = ui.calc_energy_flux(id='random', model=sflux(pl), lo=1, hi=5)
    lcflux = np.log10(cflux)
    assert lcflux == pytest.approx(sflux.lg10Flux.val)
Example #16
# 
# $TS = Cstat(source) - Cstat(no source)$
# 
# The criterion for a significant source detection is typically that it improves the test statistic by at least 25 or 30. We have added only 3 sources to save time, but you should keep adding sources until delstat drops below the required threshold.



from astropy.stats import gaussian_fwhm_to_sigma
from astropy.table import Table

rows = []
for g in gs:
    ampl = g.ampl.val
    g.ampl = 0
    stati = sh.get_stat_info()[0].statval
    g.ampl = ampl
    statf = sh.get_stat_info()[0].statval
    delstat = stati - statf

    geom = resid.geom
    # sherpa uses 1 based indexing
    coord = geom.pix_to_coord((g.xpos.val - 1, g.ypos.val - 1))
    pix_scale = geom.pixel_scales.mean().deg
    sigma = g.fwhm.val * pix_scale * gaussian_fwhm_to_sigma
    rows.append(
        dict(delstat=delstat, glon=coord[0], glat=coord[1], sigma=sigma)
    )

table = Table(rows=rows, names=rows[0])
for name in table.colnames:
    # The original snippet is truncated here; a typical (assumed)
    # continuation sets a compact display format for each column:
    table[name].format = ".5g"
Example #17
import sherpa.astro.ui as sau

sau.load_pha("3c273.pi")
sau.set_source(sau.powlaw1d.p1)
p1 = sau.get_model_component("p1")  # the bare name p1 is not defined in a plain script
sau.guess(p1)
sau.set_stat("wstat")
sau.fit()
stats = sau.get_stat_info()

Example #18
    def test_chi2(self):

        # Case 1: the first ds has no errors, the second does; chi2-derived
        # (chi2gehrels) statistic. I expect stat.name to be chi2gehrels for
        # ds1, chi2 for ds2, chi2gehrels for ds1,2
        ui.load_data(1, self.data)
        ui.load_data(2, self.data, use_errors=True)

        ui.set_source(1, "gauss1d.g1")
        ui.set_source(2, "gauss1d.g1")

        ui.set_stat("chi2gehrels")

        si = ui.get_stat_info()

        stat1 = si[0].statname
        stat2 = si[1].statname
        stat12 = si[2].statname

        self.assertEqual('chi2gehrels', stat1)
        self.assertEqual('chi2', stat2)
        self.assertEqual('chi2gehrels', stat12)

        # Case 2: the first ds has errors, the second does not; chi2-derived
        # (chi2gehrels) statistic. I expect stat.name to be chi2 for ds1,
        # chi2gehrels for ds2, chi2gehrels for ds1,2
        ui.load_data(2, self.data)
        ui.load_data(1, self.data, use_errors=True)

        si = ui.get_stat_info()

        stat1 = si[0].statname
        stat2 = si[1].statname
        stat12 = si[2].statname

        self.assertEqual('chi2gehrels', stat2)
        self.assertEqual('chi2', stat1)
        self.assertEqual('chi2gehrels', stat12)

        # Case 3: both datasets have errors, chi2-derived (chi2gehrels)
        # statistic. I expect stat.name to be chi2 for all of them.
        ui.load_data(2, self.data, use_errors=True)
        ui.load_data(1, self.data, use_errors=True)

        si = ui.get_stat_info()

        stat1 = si[0].statname
        stat2 = si[1].statname
        stat12 = si[2].statname

        self.assertEqual('chi2', stat2)
        self.assertEqual('chi2', stat1)
        self.assertEqual('chi2', stat12)

        # Case 4: the first ds has errors, the second does not; LeastSq statistic.
        # I expect stat.name to be leastsq for all of them.
        ui.load_data(2, self.data)
        ui.load_data(1, self.data, use_errors=True)

        ui.set_stat("leastsq")

        si = ui.get_stat_info()

        stat1 = si[0].statname
        stat2 = si[1].statname
        stat12 = si[2].statname

        self.assertEqual('leastsq', stat2)
        self.assertEqual('leastsq', stat1)
        self.assertEqual('leastsq', stat12)

        # Case 5: both ds have errors; LeastSq statistic.
        # I expect stat.name to be leastsq for all of them.
        ui.load_data(2, self.data, use_errors=True)
        ui.load_data(1, self.data, use_errors=True)

        ui.set_stat("leastsq")

        si = ui.get_stat_info()

        stat1 = si[0].statname
        stat2 = si[1].statname
        stat12 = si[2].statname

        self.assertEqual('leastsq', stat2)
        self.assertEqual('leastsq', stat1)
        self.assertEqual('leastsq', stat12)

        # Case 6: the first ds has errors, the second does not; CStat statistic.
        # I expect stat.name to be cstat for all of them.
        ui.load_data(2, self.data)
        ui.load_data(1, self.data, use_errors=True)

        ui.set_stat("cstat")

        si = ui.get_stat_info()

        stat1 = si[0].statname
        stat2 = si[1].statname
        stat12 = si[2].statname

        self.assertEqual('cstat', stat2)
        self.assertEqual('cstat', stat1)
        self.assertEqual('cstat', stat12)

        # Case 7: select chi2 as the statistic. One of the ds does not provide
        # errors. I expect Sherpa to raise a StatErr exception.
        ui.set_stat('chi2')

        caught = False

        from sherpa.utils.err import StatErr
        try:
            ui.get_stat_info()
        except StatErr:
            caught = True

        self.assertTrue(caught, msg='StatErr was not caught')

        # Case 8: select chi2 as the statistic. Both datasets provide errors.
        # I expect stat.name to be 'chi2'.
        ui.load_data(2, self.data, use_errors=True)
        si = ui.get_stat_info()

        stat1 = si[0].statname
        stat2 = si[1].statname
        stat12 = si[2].statname

        self.assertEqual('chi2', stat2)
        self.assertEqual('chi2', stat1)
        self.assertEqual('chi2', stat12)
Example #19
def test_can_use_swift_data(make_data_path, is_known_warning):
    """A basic check that we can read in and use the Swift data.

    Unlike the previous tests, which directly access the io module,
    this one uses the ui interface.
    """

    # QUS are there pytest fixtures that ensure the state is
    # clean on entry and exit?
    ui.clean()

    # The Swift PHA file does not have the ANCRFILE/RESPFILE keywords
    # set up, so the responses have to be manually added.
    #
    ui.load_pha(make_data_path(PHAFILE))

    rmffile = make_data_path(RMFFILE)
    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        ui.load_rmf(rmffile)

    validate_replacement_warning(ws, 'RMF', rmffile, is_known_warning)

    arffile = make_data_path(ARFFILE)
    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        ui.load_arf(arffile)

    validate_replacement_warning(ws, 'ARF', arffile, is_known_warning)

    assert ui.get_analysis() == 'energy'

    arf = ui.get_arf()
    rmf = ui.get_rmf()
    assert arf.energ_lo[0] == EMIN
    assert rmf.energ_lo[0] == EMIN
    assert rmf.e_min[0] == 0.0

    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.ampl', 0.0003)

    stat = ui.calc_stat()

    # This check is purely a regression test, so the value has
    # not been externally validated.
    #
    assert_allclose(stat, 58.2813692358182)

    # Pick an energy range which isn't affected by the first
    # bin.
    #
    # Unfortunately, using a range of 0.3-8.0 gives 771 bins
    # in XSPEC - channels 30 to 800 - but 772 bins in Sherpa.
    # If I use ignore(None, 0.3); ignore(8.0, None) instead
    # then the result is 771 bins. This is because the e_min/max
    # of the RMF has channel widths of 0.01 keV, starting at 0,
    # so both 0.3 and 8.0 fall on a bin boundary. So, it's either
    # a difference in < or <= (or > vs >=), or a rounding issue
    # due to floating-point conversion leading to one bin boundary
    # being slightly different in Sherpa vs XSPEC.
    #
    # When using ui.notice(0.3, 8.0); ui.get_indep(filter=True)
    # returns 772 channels, 30 to 801.
    #
    # Using ui.notice(0.3, 7.995) selects channels 30 to 800. So
    # this range is used. Alternatively, channel 801 could have been
    # excluded explicitly.
    #
    # ui.notice(0.3, 8.0)
    ui.notice(0.3, 7.995)

    # XSPEC 12.9.1b calculation of the statistic:
    #   chi sq = 203.88 from 771 bins with 769 dof
    #   cstat  = 568.52
    #
    # There are known differences between XSPEC and Sherpa
    # with chi2xspecvar. This only affects data sets where
    # there is background subtraction, which is not the case
    # here. See https://github.com/sherpa/sherpa/issues/356
    #
    ui.set_stat('chi2xspecvar')
    stat_xvar = ui.get_stat_info()

    assert len(stat_xvar) == 1
    stat_xvar = stat_xvar[0]
    assert stat_xvar.numpoints == 771
    assert stat_xvar.dof == 769
    assert_allclose(stat_xvar.statval, 203.88,
                    rtol=0, atol=0.005)

    ui.set_stat('cstat')
    stat_cstat = ui.get_stat_info()

    assert len(stat_cstat) == 1
    stat_cstat = stat_cstat[0]
    assert stat_cstat.numpoints == 771
    assert stat_cstat.dof == 769
    assert_allclose(stat_cstat.statval, 568.52,
                    rtol=0, atol=0.005)

    ui.clean()