Example #1
def test_filter_notice_bad_361(make_data_path):
    """Test out issue 361: notice then ignore bad.

    Since the data is grouped, the ignore_bad call is expected to
    drop the filter expression, with a warning message.
    """

    logger = logging.getLogger('sherpa')
    hdlr = MockLoggingHandler(level='WARNING')
    logger.addHandler(hdlr)

    stats = setup_model(make_data_path)

    ui.notice(0.5, 8.0)
    ui.ignore_bad()
    s1 = ui.calc_stat()
    assert s1 == pytest.approx(stats['bad'])

    msgs = hdlr.messages
    assert msgs['warning'] == [
        'filtering grouped data with quality ' +
        'flags, previous filters deleted'
    ]
    for k in ['debug', 'info', 'error', 'critical']:
        assert msgs[k] == []
Example #2
def test_filter_notice_bad_361(make_data_path, caplog):
    """Test out issue 361: notice then ignore bad.

    Since the data is grouped, the ignore_bad call is expected to
    drop the filter expression, with a warning message.
    """

    stats = setup_model(make_data_path)

    # We don't care about these warnings, so I could just
    # store the number and check that we get an extra one
    # below, but use this as a canary to check when the
    # system changes.
    #
    assert len(caplog.records) == 5

    ui.notice(0.5, 8.0)
    with caplog.at_level(logging.INFO, logger='sherpa'):
        ui.ignore_bad()

    assert len(caplog.records) == 6

    lname, lvl, msg = caplog.record_tuples[5]
    assert lname == 'sherpa.astro.data'
    assert lvl == logging.WARNING
    assert msg == 'filtering grouped data with quality ' + \
        'flags, previous filters deleted'

    s1 = ui.calc_stat()
    assert s1 == pytest.approx(stats['bad'])
Example #3
def check_stat2(expected):
    """Is calc_stat() == expected?"""

    stat = ui.calc_stat()

    # this used to check to a given number of decimal places
    # rather than relative
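    # (pytest.approx defaults to a relative tolerance of 1e-6)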
    assert stat == pytest.approx(expected)
Example #4
    def _check_stat(self, nbins, expected):

        # check the filter sizes (mainly so that these tests
        # get flagged as in need of a look if anything changes
        # in other parts of the code, such as filtering and binning
        #
        self.assertEqual(nbins, ui.get_data().get_dep(True).size)

        stat = ui.calc_stat()
        self.assertAlmostEqual(expected, stat, places=7)
Example #5
    def _check_stat(self, nbins, expected):

        # check the filter sizes (mainly so that these tests
        # get flagged as in need of a look if anything changes
        # in other parts of the code, such as filtering and binning
        #
        self.assertEqual(nbins, ui.get_data().get_dep(True).size)

        stat = ui.calc_stat()
        self.assertAlmostEqual(expected, stat, places=7)
Example #6
def test_fit_profile(model, stat, pars, reset_seed, clean_astro_ui):
    """Regression test simple 1D fits"""

    setup_data1d_fit()
    ui.set_source(model('mdl'))
    ui.guess()
    ui.fit()

    assert ui.calc_stat() == pytest.approx(stat)
    assert np.asarray(mdl.thawedpars) == pytest.approx(np.asarray(pars))
Example #7
def check_stat(nbins, expected, idval=None):

    # check the filter sizes (mainly so that these tests
    # get flagged as in need of a look if anything changes
    # in other parts of the code, such as filtering and binning
    #
    assert ui.get_data(idval).get_dep(True).size == nbins

    stat = ui.calc_stat(idval)

    # this used to check to a given number of decimal places
    # rather than relative
    assert stat == pytest.approx(expected)
Example #8
def test_filter_basic(make_data_path):
    """Test out issue 361 without ignore_bad calls.

    This should not trigger the bug behind issue 361.
    """

    stats = setup_model(make_data_path)

    s1 = ui.calc_stat()
    assert s1 == pytest.approx(stats['all'])

    ui.ignore_bad()
    s2 = ui.calc_stat()
    assert s2 == pytest.approx(stats['bad'])

    ui.notice(None, None)
    s3 = ui.calc_stat()
    assert s3 == pytest.approx(stats['all'])

    ui.notice(0.5, 8.0)
    s4 = ui.calc_stat()
    assert s4 == pytest.approx(stats['0.5-8.0'])
Example #9
def test_filter_bad_notice_361(make_data_path):
    """Test out issue 361: ignore bad then notice.

    This is expected to fail with NumPy version 1.13 and should cause
    a DeprecationWarning with earlier versions (I do not know when
    the warning was added).
    """

    stats = setup_model(make_data_path)

    ui.ignore_bad()
    ui.notice(0.5, 8.0)
    s1 = ui.calc_stat()
    assert s1 == pytest.approx(stats['0.5-8.0'])
Example #10
def setup_imgdata_source():
    """Set the model and fit setup"""
    ui.set_source(ui.gauss2d.g1 + ui.const2d.c1)

    c1.c0 = 10
    g1.fwhm = 4
    g1.xpos = 4
    g1.ypos = 3
    g1.ampl = 100

    ui.set_stat('cstat')
    ui.set_method("simplex")

    # Fit and check we are in the expected place (so this is a
    # pre-condition for the tests).
    #
    ui.fit()
    assert ui.calc_stat() == pytest.approx(362.9264257040166)
Example #11
File: qq.py Project: amasini90/BXA
def qq_export(id=None, bkg=False, outfile='qq.txt', elow=None, ehigh=None):
    """
	Export Q-Q plot into a file for plotting.

	:param id: spectrum id to use (see get_bkg_plot/get_data_plot)
	:param bkg: whether to use get_bkg_plot or get_data_plot
	:param outfile: filename to write results into
	:param elow: low energy limit
	:param ehigh: high energy limit

	Example::

		qq.qq_export('bg', outfile='my_bg_qq', elow=0.2, ehigh=10)

	"""
    # data
    d = ui.get_bkg_plot(id=id) if bkg else ui.get_data_plot(id=id)
    e = d.x
    mask = logical_and(e >= elow, e <= ehigh)
    data = d.y[mask].cumsum()
    d = ui.get_bkg_model_plot(id=id) if bkg else ui.get_model_plot(id=id)
    e = d.xlo
    mask = logical_and(e >= elow, e <= ehigh)
    e = e[mask]
    model = d.y[mask].cumsum()
    last_stat = ui.get_stat()
    ui.set_stat(ksstat)
    ks = ui.calc_stat()
    ui.set_stat(cvmstat)
    cvm = ui.calc_stat()
    ui.set_stat(adstat)
    ad = ui.calc_stat()
    ui.set_stat(last_stat)

    ui.set_stat('chi2gehrels')
    chi2 = ui.calc_stat()
    ui.set_stat('cstat')
    cstat = ui.calc_stat()
    ui.set_stat(last_stat)
    stats = dict(ks=ks, cvm=cvm, ad=ad, cstat=cstat, chi2=chi2)

    numpy.savetxt(outfile, numpy.transpose([e, data, model]))
    json.dump(stats, open(outfile + '.json', 'w'), indent=4)
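# A minimal sketch (not part of qq.py) of reading back and plotting the
# exported Q-Q data; it assumes matplotlib is available and that the default
# outfile='qq.txt' was used.
import json
import numpy
import matplotlib.pyplot as plt

e, data, model = numpy.loadtxt('qq.txt', unpack=True)
stats = json.load(open('qq.txt.json'))

plt.plot(model, data, label='data')
plt.plot([0, model.max()], [0, model.max()], ':', label='x = y')
plt.xlabel('cumulative model counts')
plt.ylabel('cumulative data counts')
plt.title('cstat = {:.1f}'.format(stats['cstat']))
plt.legend()
plt.savefig('qq_check.png')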
Example #12
def qq_export(id=None, bkg=False, outfile='qq.txt', elow=None, ehigh=None):
	"""
	Export Q-Q plot into a file for plotting.

	:param id: spectrum id to use (see get_bkg_plot/get_data_plot)
	:param bkg: whether to use get_bkg_plot or get_data_plot
	:param outfile: filename to write results into
	:param elow: low energy limit
	:param ehigh: high energy limit

	Example::

		qq.qq_export('bg', outfile='my_bg_qq', elow=0.2, ehigh=10)
	"""
	# data
	d = ui.get_bkg_plot(id=id) if bkg else ui.get_data_plot(id=id)
	e = d.x
	mask = logical_and(e >= elow, e <= ehigh)
	data = d.y[mask].cumsum()
	d = ui.get_bkg_model_plot(id=id) if bkg else ui.get_model_plot(id=id)
	e = d.xlo
	mask = logical_and(e >= elow, e <= ehigh)
	e = e[mask]
	model = d.y[mask].cumsum()
	last_stat = ui.get_stat()
	ui.set_stat(ksstat)
	ks = ui.calc_stat()
	ui.set_stat(cvmstat)
	cvm = ui.calc_stat()
	ui.set_stat(adstat)
	ad = ui.calc_stat()
	ui.set_stat(last_stat)
	
	ui.set_stat('chi2gehrels')
	chi2 = ui.calc_stat()
	ui.set_stat('cstat')
	cstat = ui.calc_stat()
	ui.set_stat(last_stat)
	stats = dict(ks=ks, cvm=cvm, ad=ad, cstat=cstat, chi2=chi2)
	
	numpy.savetxt(outfile, numpy.transpose([e, data, model]))
	json.dump(stats, open(outfile + '.json', 'w'), indent=4)
Example #13
def test_load_pha2_compare_meg_order1(make_data_path):
    """Do we read in the MEG +/-1 orders?"""

    # The MEG -1 order is dataset 9
    # The MEG +1 order is dataset 10
    #
    pha2file = make_data_path('3c120_pha2')
    meg_p1file = make_data_path('3c120_meg_1.pha')
    meg_m1file = make_data_path('3c120_meg_-1.pha')

    ui.load_pha('meg_p1', meg_p1file)
    ui.load_pha('meg_m1', meg_m1file)

    orig_ids = set(ui.list_data_ids())
    assert 'meg_p1' in orig_ids
    assert 'meg_m1' in orig_ids

    ui.load_pha(pha2file)

    for n, lbl in zip([9, 10], ["-1", "1"]):
        h = '3c120_meg_{}'.format(lbl)
        ui.load_arf(n, make_data_path(h + '.arf'))
        ui.load_rmf(n, make_data_path(h + '.rmf'))

    # check that loading the pha2 file doesn't overwrite existing
    # data
    new_ids = set(ui.list_data_ids())

    for i in range(1, 13):
        orig_ids.add(i)

    assert orig_ids == new_ids

    # Check that the same model gives the same statistic
    # value; this should check that the data and response are
    # read in, that grouping and filtering work, and that
    # model evaluation is the same, without having to
    # check these steps individually.
    #
    # The model is not meant to be physically meaningful,
    # just one that reasonably represents the data and
    # can be evaluated without requiring XSPEC.
    #
    pmdl = ui.create_model_component('powlaw1d', 'pmdl')
    pmdl.gamma = 0.318
    pmdl.ampl = 2.52e-3

    ncts = 20
    for i in [9, 10, "meg_m1", "meg_p1"]:
        ui.set_analysis(i, 'wave')
        ui.group_counts(i, ncts)
        ui.notice_id(i, 2, 12)
        ui.set_source(i, pmdl)

    ui.set_stat('chi2datavar')
    s9 = ui.calc_stat(9)
    s10 = ui.calc_stat(10)

    sm1 = ui.calc_stat('meg_m1')
    sp1 = ui.calc_stat('meg_p1')

    # Since these should be the same, we use an equality test
    # rather than approximation. At least until it becomes
    # a problem.
    #
    assert s9 == sm1
    assert s10 == sp1

    # The values were calculated using CIAO 4.9, Linux64, with
    # Python 3.5.
    #
    assert s9 == pytest.approx(1005.4378559390879)
    assert s10 == pytest.approx(1119.980439489647)
Example #14
# Results are automatically printed to the screen
sau.fit()
sau.covar()

# Sherpa uses fwhm instead of sigma as extension parameter ... need to convert
# http://cxc.harvard.edu/sherpa/ahelp/gauss2d.html
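# For a Gaussian, FWHM = 2 * sqrt(2 * ln(2)) * sigma, so
# sigma = FWHM / sqrt(8 * ln(2)) ~= 0.4247 * FWHM.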
fwhm_to_sigma = 1. / np.sqrt(8 * np.log(2))
cov = sau.get_covar_results()
sigma = fwhm_to_sigma * cov.parvals[0]
sigma_err = fwhm_to_sigma * cov.parmaxes[0]
print('sigma: {0} +- {1}'.format(sigma, sigma_err))

# Compute correlation coefficient for sigma and norm
c = cov.extra_output
c_norm = c[3, 3]
c_sigma = fwhm_to_sigma**2 * c[0, 0]
c_norm_sigma = fwhm_to_sigma * c[0, 3]
corr_norm_sigma = c_norm_sigma / np.sqrt(c_norm * c_sigma)
print('corr_norm_sigma: {0}'.format(corr_norm_sigma))

# Save model excess image
sau.save_model('model_sherpa.fits.gz', clobber=True)

# Compute TS
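# (L1: best-fit statistic of the current source model; L0: best-fit
#  statistic after switching to the background-only const2d model)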
L1 = sau.calc_stat()
sau.set_source('const2d.background')
sau.fit()
L0 = sau.calc_stat()
TS = 2 * (L0 - L1)
print('TS: {:.5f}'.format(TS))
Example #15
def test_fake_pha_issue_1209(make_data_path, clean_astro_ui, tmp_path):
    """Check issue #1209.

    See also sherpa/astro/tests/test_fake_pha.py for

        test_fake_pha_has_valid_ogip_keywords_all_fake
        test_fake_pha_has_valid_ogip_keywords_from_real

    The session fake_pha command includes quite a lot of logic, which
    means that the test for #1209 should be done at this level, to
    complement the tests mentioned above.

    """

    infile = make_data_path("acisf01575_001N001_r0085_pha3.fits.gz")
    ui.load_pha(infile)
    ui.set_source(ui.powlaw1d.pl)
    pl.gamma = 1.8
    pl.ampl = 1e-4

    arf = ui.get_arf()
    rmf = ui.get_rmf()

    # check the TOTCTS setting in the input file
    d1 = ui.get_data()
    assert d1.header["TOTCTS"] == 855
    assert d1.counts.sum() == 855

    ui.set_source("newid", pl)
    ui.fake_pha("newid",
                exposure=ui.get_exposure(),
                bkg=ui.get_bkg(),
                rmf=rmf,
                arf=arf,
                backscal=ui.get_backscal())
    stat = ui.calc_stat("newid")

    outfile = tmp_path / "sim.pha"
    ui.save_pha("newid", str(outfile))

    ui.load_pha(3, str(outfile))
    d3 = ui.get_data(3)
    assert isinstance(d3, ui.DataPHA)

    assert d3.exposure == pytest.approx(37664.157219191)
    assert d3.areascal == pytest.approx(1.0)
    assert d3.backscal == pytest.approx(2.2426552620567e-06)

    assert d3.background_ids == []
    assert d3.response_ids == []

    # check the header
    hdr = d3.header
    assert hdr["TELESCOP"] == "CHANDRA"
    assert hdr["INSTRUME"] == "ACIS"
    assert hdr["FILTER"] == "none"

    # check some other values related to #1209 and #488 (OGIP)
    #
    assert "TOTCTS" not in hdr
    assert hdr["GROUPING"] == 0
    assert hdr["QUALITY"] == 0
    assert hdr["SYS_ERR"] == 0

    # We should get the same value - the responses are not written
    # to the temporary directory and so we need to load them
    # directly.
    #
    ui.set_rmf(3, rmf)
    ui.set_arf(3, arf)
    ui.set_source(3, pl)
    assert ui.calc_stat(3) == pytest.approx(stat)
Example #16
    def _check_stat2(self, expected):
        """Is calc_stat() == expected?"""

        stat = ui.calc_stat()
        self.assertAlmostEqual(expected, stat, places=7)
Example #17
def test_can_use_swift_data(make_data_path):
    """A basic check that we can read in and use the Swift data.

    Unlike the previous tests, which directly access the io module,
    this uses the ui interface.
    """

    # QUS are there pytest fixtures that ensure the state is
    # clean on entry and exit?
    ui.clean()

    # The Swift PHA file does not have the ANCRFILE/RESPFILE keywords
    # set up, so the responses have to be manually added.
    #
    ui.load_pha(make_data_path(PHAFILE))
    ui.load_rmf(make_data_path(RMFFILE))
    ui.load_arf(make_data_path(ARFFILE))

    assert ui.get_analysis() == 'energy'

    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.ampl', 0.0003)

    # The responses have the first bin start at an energy of 0,
    # which causes issues for Sherpa. There should be a
    # RuntimeWarning due to a divide by zero.
    #
    with pytest.warns(RuntimeWarning) as record:
        stat = ui.calc_stat()

    # The exact form of the message depends on the Python version;
    # this could be checked, but it feels excessive for this
    # particular test, which is just a regression check, so use a
    # more lax approach.
    #
    assert len(record) == 1
    assert record[0].message.args[0] in \
        ['divide by zero encountered in divide',
         'divide by zero encountered in true_divide']

    # The stat value depends on what power-law model is used. With
    # xspowerlaw it is NaN, but with powlaw1d it is finite.
    #
    # This check is purely a regression test, so the value has
    # not been externally validated.
    #
    # assert np.isnan(stat)
    assert_allclose(stat, 58.2813692358182)

    # Manually adjust the first bin to avoid this problem.
    # Add in asserts just in case this gets "fixed" in the
    # I/O layer (as XSPEC does).
    #
    arf = ui.get_arf()
    rmf = ui.get_rmf()
    assert arf.energ_lo[0] == 0.0
    assert rmf.energ_lo[0] == 0.0
    assert rmf.e_min[0] == 0.0

    # The bin widths are ~ 0.005 or ~ 0.01 keV, so pick a value
    # smaller than this.
    #
    ethresh = 1e-6
    arf.energ_lo[0] = ethresh
    rmf.energ_lo[0] = ethresh
    rmf.e_min[0] = ethresh

    # Pick an energy range which isn't affected by the first
    # bin.
    #
    # Unfortunately, using a range of 0.3-8.0 gives 771 bins
    # in XSPEC - channels 30 to 800 - but 772 bins in Sherpa.
    # If I use ignore(None, 0.3); ignore(8.0, None) instead
    # then the result is 771 bins. This is because the e_min/max
    # of the RMF has channel widths of 0.01 keV, starting at 0,
    # so both 0.3 and 8.0 fall on a bin boundary. So, it's either
    # a difference in < or <= (or > vs >=), or a rounding issue
    # due to floating-point conversion leading to one bin boundary
    # being slightly different in Sherpa vs XSPEC.
    #
    # When using ui.notice(0.3, 8.0); ui.get_indep(filter=True)
    # returns 772 channels, 30 to 801.
    #
    # Using ui.notice(0.3, 7.995) selects channels 30 to 800. So
    # this range is used. Alternatively, channel 801 could have been
    # excluded explicitly.
    #
    # ui.notice(0.3, 8.0)
    ui.notice(0.3, 7.995)

    # XSPEC 12.9.1b calculation of the statistic:
    #   chi sq = 203.88 from 771 bins with 769 dof
    #   cstat  = 568.52
    #
    # There are known differences between XSPEC and Sherpa
    # with chi2xspecvar. This only affects data sets where
    # there is background subtraction, which is not the case
    # here. See https://github.com/sherpa/sherpa/issues/356
    #
    ui.set_stat('chi2xspecvar')
    stat_xvar = ui.get_stat_info()

    assert len(stat_xvar) == 1
    stat_xvar = stat_xvar[0]
    assert stat_xvar.numpoints == 771
    assert stat_xvar.dof == 769
    assert_allclose(stat_xvar.statval, 203.88,
                    rtol=0, atol=0.005)

    ui.set_stat('cstat')
    stat_cstat = ui.get_stat_info()

    assert len(stat_cstat) == 1
    stat_cstat = stat_cstat[0]
    assert stat_cstat.numpoints == 771
    assert stat_cstat.dof == 769
    assert_allclose(stat_cstat.statval, 568.52,
                    rtol=0, atol=0.005)

    ui.clean()
Example #18
def mht(parnames, mu, sigma, niter=1000, id=None,
        file=None, verbose=True, normalize=True,
        draw=draw_t, accept=accept_tcash, **kwargs):
    """Metropolis-Hastings.

    The default distribution is the t distribution, and the statistic is
    assumed to be the Cash statistic.

    The kwargs are passed through to the draw and accept routines.

    The draw routine is used to create a new proposal.
    The accept routine is used to determine whether to accept the proposal.

    If verbose is True then the iteration results are printed to STDOUT
    after each iteration. If file is not None then the iteration results
    are written to the given file each iteration. If normalize is True
    then the displayed results (whether to STDOUT or file) are relative
    to the best-fit values rather than absolute ones (so the values for
    the xpos parameter are written out as xpos-xpos_0 where xpos_0 is
    the value from the input mu argument). This also holds for the
    statistic value (so the results are statistic-statistic_0). The
    reason for normalize is to try to avoid loss of information
    without having to display numbers to 15 decimal places.
    """

    # Should we just change to cash here instead of throwing an error?
    #
    if ui.get_stat_name() != "cash":
        raise RuntimeError, "Statistic must be cash, not %s" % ui.get_stat_name()

    if id is None:
        idval = ui.get_default_id()
    else:
        idval = id

    # Output storage
    #
    nelem = niter + 1
    npars = mu.size
    if npars != len(parnames):
        raise RuntimeError, "mu.size = %d  len(parnames) = %d!" % (npars, len(parnames))

    params = np.zeros((nelem,npars))
    stats  = np.zeros(nelem)
    alphas = np.zeros(nelem)

    # Using a bool is technically nicer, but stick with an int8 here for easier processing
    # of the output.
    #
    ##acceptflag = np.zeros(nelem, dtype=np.bool)
    acceptflag = np.zeros(nelem, dtype=np.int8)

    params[0] = mu.copy()
    current = mu.copy()
    alphas[0] = 0

    _set_par_vals(parnames, current)
    stats[0] = ui.calc_stat(id=idval)

    if normalize:
        outstr = "# iteration accept d_statistic %s" % " d_".join(parnames)
    else:
        outstr = "# iteration accept statistic %s" % " ".join(parnames)
    if verbose:
        print(outstr)
    if file is not None:
        fout = open(file, "w")
        fout.write(outstr)
        fout.write("\n")

    def draw_to_string(idx):
        "Return the given draw as a string for display/output"
        if normalize:
            outstr = "%-6d %1d %g %s" % (idx, acceptflag[idx], stats[idx]-stats[0], " ".join(["%g" % (v-v0) for (v,v0) in zip(params[idx],params[0])]))
        else:
            outstr = "%-6d %1d %g %s" % (idx, acceptflag[idx], stats[idx], alphas[idx], " ".join(["%g" % v for v in params[idx]]))
        return outstr

    # Iterations
    # - no burn in at present
    # - the 0th element of the params array is the input value
    # - we loop until all parameters are within the allowable
    #   range; should there be some check to ensure we are not
    #   rejecting a huge number of proposals, which would indicate
    #   that the limits need increasing or very low s/n data?
    #
    for i in range(1,nelem,1):

        current = params[i-1]

        # Create a proposal and set the parameter values. If any lie
        # outside the allowed range then catch this (ParameterErr)
        # and create a new proposal.
        #
        while True:
            try:
                proposal = draw(mu, current, sigma, **kwargs)
                _set_par_vals(parnames, proposal)
                break
            except ParameterErr:
                pass

        # Do we accept this proposal?
        #
        stat_temp = ui.calc_stat(id=idval)
        alphas[i] = np.exp( -0.5*stat_temp + dmvt(current, mu, sigma, 4) + 0.5*stats[i-1] - dmvt(proposal, mu, sigma, 4) )

        if accept(current, stats[i-1], proposal, stat_temp,
                  mu, sigma, **kwargs):
            params[i] = proposal.copy()
            stats[i]  = stat_temp
            acceptflag[i] = 1
        else:
            params[i] = params[i-1]
            stats[i]  = stats[i-1]
            acceptflag[i] = 0

        outstr = draw_to_string(i)
        if verbose:
            print(outstr)
        if file is not None:
            fout.write(outstr)
            fout.write("\n")

    if file is not None:
        fout.close()
        print("Created: %s" % file)

    # Return a dictionary containing the draws
    #
    out = { "parnames": parnames, "statistic": stats, "accept": acceptflag, "alphas": alphas, "iteration": np.arange(0,nelem,1) }
    for (idx, name) in zip(range(npars), parnames):
        if name in out:
            raise RuntimeError("Unexpected name clash: parameter '%s'" % name)
        out[name] = params[:, idx]
    return out
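# A minimal usage sketch (not part of mht itself): the parameter names and
# values below are hypothetical, and it assumes set_stat("cash"), a fit and
# ui.covar() have already been run, so that mu holds the best-fit values and
# sigma a proposal covariance matrix.
if __name__ == "__main__":
    parnames = ["g1.xpos", "g1.ypos", "g1.ampl"]
    mu = np.array([ui.get_par(p).val for p in parnames])
    sigma = ui.get_covar_results().extra_output
    draws = mht(parnames, mu, sigma, niter=1000, file="draws.txt")
    print(draws["statistic"].min(), draws["g1.ampl"].mean())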
Example #19
def test_can_use_swift_data(make_data_path):
    """A basic check that we can read in and use the Swift data.

    Unlike the previous tests, which directly access the io module,
    this uses the ui interface.
    """

    # QUS are there pytest fixtures that ensure the state is
    # clean on entry and exit?
    ui.clean()

    # The Swift PHA file does not have the ANCRFILE/RESPFILE keywords
    # set up, so the responses have to be manually added.
    #
    ui.load_pha(make_data_path(PHAFILE))

    rmffile = make_data_path(RMFFILE)
    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        ui.load_rmf(rmffile)

    validate_replacement_warning(ws, 'RMF', rmffile)

    arffile = make_data_path(ARFFILE)
    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        ui.load_arf(arffile)

    validate_replacement_warning(ws, 'ARF', arffile)

    assert ui.get_analysis() == 'energy'

    arf = ui.get_arf()
    rmf = ui.get_rmf()
    assert arf.energ_lo[0] == EMIN
    assert rmf.energ_lo[0] == EMIN
    assert rmf.e_min[0] == 0.0

    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.ampl', 0.0003)

    stat = ui.calc_stat()

    # This check is purely a regression test, so the value has
    # not been externally validated.
    #
    assert_allclose(stat, 58.2813692358182)

    # Pick an energy range which isn't affected by the first
    # bin.
    #
    # Unfortunately, using a range of 0.3-8.0 gives 771 bins
    # in XSPEC - channels 30 to 800 - but 772 bins in Sherpa.
    # If I use ignore(None, 0.3); ignore(8.0, None) instead
    # then the result is 771 bins. This is because the e_min/max
    # of the RMF has channel widths of 0.01 keV, starting at 0,
    # so both 0.3 and 8.0 fall on a bin boundary. So, it's either
    # a difference in < or <= (or > vs >=), or a rounding issue
    # due to floating-point conversion leading to one bin boundary
    # being slightly different in Sherpa vs XSPEC.
    #
    # When using ui.notice(0.3, 8.0); ui.get_indep(filter=True)
    # returns 772 channels, 30 to 801.
    #
    # Using ui.notice(0.3, 7.995) selects channels 30 to 800. So
    # this range is used. Alternatively, channel 801 could have been
    # excluded explicitly.
    #
    # ui.notice(0.3, 8.0)
    ui.notice(0.3, 7.995)

    # XSPEC 12.9.1b calculation of the statistic:
    #   chi sq = 203.88 from 771 bins with 769 dof
    #   cstat  = 568.52
    #
    # There are known differences between XSPEC and Sherpa
    # with chi2xspecvar. This only affects data sets where
    # there is background subtraction, which is not the case
    # here. See https://github.com/sherpa/sherpa/issues/356
    #
    ui.set_stat('chi2xspecvar')
    stat_xvar = ui.get_stat_info()

    assert len(stat_xvar) == 1
    stat_xvar = stat_xvar[0]
    assert stat_xvar.numpoints == 771
    assert stat_xvar.dof == 769
    assert_allclose(stat_xvar.statval, 203.88,
                    rtol=0, atol=0.005)

    ui.set_stat('cstat')
    stat_cstat = ui.get_stat_info()

    assert len(stat_cstat) == 1
    stat_cstat = stat_cstat[0]
    assert stat_cstat.numpoints == 771
    assert stat_cstat.dof == 769
    assert_allclose(stat_cstat.statval, 568.52,
                    rtol=0, atol=0.005)

    ui.clean()
Example #20
set_model(id, model * galabso)
convmodel = get_model(id)
bkg_model = auto_background(id)
set_full_model(id, get_response(id)(model) + bkg_model * get_bkg_scale(id))
#plot_bkg_fit(id)

## we allow the background normalisation to be a free fitting parameter
p = bkg_model.pars[0]
p.max = p.val + 2
p.min = p.val - 2
parameters.append(p)
priors += [priorfuncs.create_uniform_prior_for(p)]

priorfunction = priorfuncs.create_prior_function(priors=priors)
assert not numpy.isnan(
    calc_stat(id)), 'NaN on calc_stat, probably a bad RMF/ARF file for PC'

print('running BXA ...')

solver = BXASolver(id=id, prior=priorfunction, parameters=parameters)

solver.run(resume=True,
           verbose=True,
           n_live_points=args.num_live_points,
           outputfiles_basename=prefix,
           frac_remain=0.01,
           min_ess=1000)

if not os.path.exists('%sfit.json' % prefix):
    print('collecting fit plot data')
    set_analysis(id, 'ener', 'counts')
Example #21
def test_pileup_model(make_data_path, clean_astro_ui):
    """Basic check of setting a pileup model.

    It is more to check we can set a pileup model, not to
    check the model works.
    """

    infile = make_data_path('3c273.pi')
    ui.load_pha('pileup', infile)
    ui.subtract('pileup')
    ui.notice(0.3, 7)

    ui.set_stat('chi2datavar')

    # pick xswabs as it is unlikely to change with XSPEC
    ui.set_source('pileup', ui.xswabs.amdl * ui.powlaw1d.pl)

    # get close to the best fit, but don't need to fit
    pl.ampl = 1.82e-4
    pl.gamma = 1.97
    amdl.nh = 0.012

    stat0 = ui.calc_stat('pileup')

    # We want to compare the data to the pileup model,
    # which should be run with the higher-energy bins included,
    # but that's not relevant here where I am just
    # checking the statistic value.

    ui.set_pileup_model('pileup', ui.jdpileup.jdp)

    # pick some values to make the model change the data
    jdp.ftime = 3.2
    jdp.fracexp = 1
    jdp.alpha = 0.95
    jdp.f = 0.91

    # Check pileup is added to get_model
    #
    mlines = str(ui.get_model('pileup')).split('\n')
    assert mlines[0] == 'apply_rmf(jdpileup.jdp((xswabs.amdl * powlaw1d.pl)))'
    assert mlines[3].strip() == \
        'jdp.alpha    thawed         0.95            0            1'
    assert mlines[5].strip() == \
        'jdp.f        thawed         0.91          0.9            1'
    assert mlines[11].strip() == \
        'pl.gamma     thawed         1.97          -10           10'

    # Ensure that the statistic has got worse (technically
    # it could get better, but not for this case).
    #
    stat1 = ui.calc_stat('pileup')
    assert stat1 > stat0

    # As a test, check the actual statistic values
    # (evaluated with XSPEC 12.11.0 and Sherpa with the
    # master branch 2020/07/29, on Linux).
    #
    assert stat0 == pytest.approx(35.99899827358692)
    assert stat1 == pytest.approx(36.58791181460404)

    # Can we remove the pileup model?
    #
    ui.delete_pileup_model('pileup')

    # Check pileup is not in get_model
    #
    mlines = str(ui.get_model('pileup')).split('\n')

    # Numeric display depends on Python and/or NumPy
    # for the exposure time. I should have set the exposure
    # time to an integer value.
    #
    # assert mlines[0] == 'apply_rmf(apply_arf((38564.608926889 * (xswabs.amdl * powlaw1d.pl))))'

    toks = mlines[0].split()
    assert len(toks) == 5
    assert toks[0].startswith('apply_rmf(apply_arf((38564.608')
    assert toks[1] == '*'
    assert toks[2] == '(xswabs.amdl'
    assert toks[3] == '*'
    assert toks[4] == 'powlaw1d.pl))))'

    assert mlines[4].strip() == \
        'pl.gamma     thawed         1.97          -10           10'

    stat2 = ui.calc_stat('pileup')
    assert stat2 == stat0
Example #22
    def _check_stat2(self, expected):
        """Is calc_stat() == expected?"""

        stat = ui.calc_stat()
        self.assertAlmostEqual(expected, stat, places=7)
Example #23
def test_can_use_swift_data(make_data_path, clean_astro_ui):
    """A basic check that we can read in and use the Swift data.

    Unlike the previous tests, which directly access the io module,
    this uses the ui interface.
    """

    # The Swift PHA file does not have the ANCRFILE/RESPFILE keywords
    # set up, so the responses have to be manually added.
    #
    ui.load_pha(make_data_path(PHAFILE))

    rmffile = make_data_path(RMFFILE)
    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        ui.load_rmf(rmffile)

    validate_replacement_warning(ws, 'RMF', rmffile)

    arffile = make_data_path(ARFFILE)
    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        ui.load_arf(arffile)

    validate_replacement_warning(ws, 'ARF', arffile)

    assert ui.get_analysis() == 'energy'

    arf = ui.get_arf()
    rmf = ui.get_rmf()
    assert arf.energ_lo[0] == EMIN
    assert rmf.energ_lo[0] == EMIN
    assert rmf.e_min[0] == 0.0

    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.ampl', 0.0003)

    stat = ui.calc_stat()

    # This check is purely a regression test, so the value has
    # not been externally validated.
    #
    assert_allclose(stat, 58.2813692358182)

    # Pick an energy range which isn't affected by the first
    # bin.
    #
    # Unfortunately, using a range of 0.3-8.0 gives 771 bins
    # in XSPEC - channels 30 to 800 - but 770 bins in Sherpa,
    # channels 31 to 800.
    #
    # Note that the channel numbering starts at 0:
    # % dmlist target_sr.pha header,clean,raw | grep TLMIN
    # TLMIN1       = 0                    / Lowest legal channel number
    #
    # and so it's not clear when XSPEC says 30-800 what it
    # means. From https://github.com/sherpa/sherpa/issues/1211#issuecomment-881647128
    # we have that the first bin it is using is
    #     0.29-0.30
    # and the last bin is
    #     7.99-8.00
    # and I've checked with iplot that it has renumbered the
    # channels to 1-1024 from 0-1023
    #
    # % dmlist swxpc0to12s6_20130101v014.rmf.gz"[ebounds][channel=28:31]" data,clean
    #  CHANNEL    E_MIN                E_MAX
    #         28     0.28000000119209     0.28999999165535
    #         29     0.28999999165535     0.30000001192093
    #         30     0.30000001192093     0.31000000238419
    #         31     0.31000000238419     0.31999999284744
    # % dmlist swxpc0to12s6_20130101v014.rmf.gz"[ebounds][channel=798:801]" data,clean
    #  CHANNEL    E_MIN                E_MAX
    #        798         7.9800000191         7.9899997711
    #        799         7.9899997711                  8.0
    #        800                  8.0         8.0100002289
    #        801         8.0100002289         8.0200004578
    #
    # If I use ignore(None, 0.3); ignore(8.0, None) instead then the
    # result is 771 bins (channels 31 to 800). This is because the
    # e_min/max of the RMF has channel widths of 0.01 keV, starting at
    # 0, so both 0.3 and 8.0 fall on a bin boundary. So, it's either a
    # difference in < or <= (or > vs >=), or a rounding issue due to
    # floating-point conversion leading to one bin boundary being
    # slightly different in Sherpa vs XSPEC.
    #
    # When using ui.notice(0.3, 8.0); ui.get_indep(filter=True)
    # returns 770 channels, 31 to 800.
    #
    # Using ui.notice(0.3, 7.995) selects channels 31 to 800.
    # Using ui.notice(0.299, 8.0) selects channels 30 to 800.
    # Using ui.notice(0.299, 7.995) selects channels 30 to 800.
    #
    ui.notice(0.299, 8.0)

    # Check the selected range
    pha = ui.get_data()
    expected = np.zeros(1024, dtype=bool)
    expected[29:800] = True
    assert pha.mask == pytest.approx(expected)
    assert pha.get_mask() == pytest.approx(expected)

    # XSPEC 12.9.1b calculation of the statistic:
    #   chi sq = 203.88 from 771 bins with 769 dof
    #   cstat  = 568.52
    #
    # There are known differences between XSPEC and Sherpa
    # with chi2xspecvar. This only affects data sets where
    # there is background subtraction, which is not the case
    # here. See https://github.com/sherpa/sherpa/issues/356
    #
    ui.set_stat('chi2xspecvar')
    stat_xvar = ui.get_stat_info()

    assert len(stat_xvar) == 1
    stat_xvar = stat_xvar[0]
    assert stat_xvar.numpoints == 771
    assert stat_xvar.dof == 769
    assert_allclose(stat_xvar.statval, 203.88, rtol=0, atol=0.005)

    ui.set_stat('cstat')
    stat_cstat = ui.get_stat_info()

    assert len(stat_cstat) == 1
    stat_cstat = stat_cstat[0]
    assert stat_cstat.numpoints == 771
    assert stat_cstat.dof == 769
    assert_allclose(stat_cstat.statval, 568.52, rtol=0, atol=0.005)
Example #24
sau.fit()
sau.covar()

# Sherpa uses fwhm instead of sigma as extension parameter ... need to convert
# http://cxc.harvard.edu/sherpa/ahelp/gauss2d.html
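# For a Gaussian, FWHM = 2 * sqrt(2 * ln(2)) * sigma, so
# sigma = FWHM / sqrt(8 * ln(2)) ~= 0.4247 * FWHM.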
fwhm_to_sigma = 1. / np.sqrt(8 * np.log(2))
cov = sau.get_covar_results()
sigma = fwhm_to_sigma * cov.parvals[0]
sigma_err = fwhm_to_sigma * cov.parmaxes[0]
print('sigma: {0} +- {1}'.format(sigma, sigma_err))

# Compute correlation coefficient for sigma and norm
c = cov.extra_output
c_norm = c[3, 3]
c_sigma = fwhm_to_sigma ** 2 * c[0, 0]
c_norm_sigma = fwhm_to_sigma * c[0, 3]
corr_norm_sigma = c_norm_sigma / np.sqrt(c_norm * c_sigma)
print('corr_norm_sigma: {0}'.format(corr_norm_sigma))

# Save model excess image
sau.save_model('model_sherpa.fits.gz', clobber=True)

# Compute TS
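# (L1: best-fit statistic of the current source model; L0: best-fit
#  statistic after switching to the background-only const2d model)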
L1 = sau.calc_stat()
sau.set_source('const2d.background')
sau.fit()
L0 = sau.calc_stat()
TS = 2 * (L0 - L1)
print('TS: {:.5f}'.format(TS))

Example #25
def test_can_use_swift_data(make_data_path, is_known_warning):
    """A basic check that we can read in and use the Swift data.

    Unlike the previous tests, which directly access the io module,
    this uses the ui interface.
    """

    # QUS are there pytest fixtures that ensure the state is
    # clean on entry and exit?
    ui.clean()

    # The Swift PHA file does not have the ANCRFILE/RESPFILE keywords
    # set up, so the responses have to be manually added.
    #
    ui.load_pha(make_data_path(PHAFILE))

    rmffile = make_data_path(RMFFILE)
    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        ui.load_rmf(rmffile)

    validate_replacement_warning(ws, 'RMF', rmffile, is_known_warning)

    arffile = make_data_path(ARFFILE)
    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        ui.load_arf(arffile)

    validate_replacement_warning(ws, 'ARF', arffile, is_known_warning)

    assert ui.get_analysis() == 'energy'

    arf = ui.get_arf()
    rmf = ui.get_rmf()
    assert arf.energ_lo[0] == EMIN
    assert rmf.energ_lo[0] == EMIN
    assert rmf.e_min[0] == 0.0

    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.ampl', 0.0003)

    stat = ui.calc_stat()

    # This check is purely a regression test, so the value has
    # not been externally validated.
    #
    assert_allclose(stat, 58.2813692358182)

    # Pick an energy range which isn't affected by the first
    # bin.
    #
    # Unfortunately, using a range of 0.3-8.0 gives 771 bins
    # in XSPEC - channels 30 to 800 - but 772 bins in Sherpa.
    # If I use ignore(None, 0.3); ignore(8.0, None) instead
    # then the result is 771 bins. This is because the e_min/max
    # of the RMF has channel widths of 0.01 keV, starting at 0,
    # so both 0.3 and 8.0 fall on a bin boundary. So, it's either
    # a difference in < or <= (or > vs >=), or a rounding issue
    # due to floating-point conversion leading to one bin boundary
    # being slightly different in Sherpa vs XSPEC.
    #
    # When using ui.notice(0.3, 8.0); ui.get_indep(filter=True)
    # returns 772 channels, 30 to 801.
    #
    # Using ui.notice(0.3, 7.995) selects channels 30 to 800. So
    # this range is used. Alternatively, channel 801 could have been
    # excluded explicitly.
    #
    # ui.notice(0.3, 8.0)
    ui.notice(0.3, 7.995)

    # XSPEC 12.9.1b calculation of the statistic:
    #   chi sq = 203.88 from 771 bins with 769 dof
    #   cstat  = 568.52
    #
    # There are known differences between XSPEC and Sherpa
    # with chi2xspecvar. This only affects data sets where
    # there is background subtraction, which is not the case
    # here. See https://github.com/sherpa/sherpa/issues/356
    #
    ui.set_stat('chi2xspecvar')
    stat_xvar = ui.get_stat_info()

    assert len(stat_xvar) == 1
    stat_xvar = stat_xvar[0]
    assert stat_xvar.numpoints == 771
    assert stat_xvar.dof == 769
    assert_allclose(stat_xvar.statval, 203.88,
                    rtol=0, atol=0.005)

    ui.set_stat('cstat')
    stat_cstat = ui.get_stat_info()

    assert len(stat_cstat) == 1
    stat_cstat = stat_cstat[0]
    assert stat_cstat.numpoints == 771
    assert stat_cstat.dof == 769
    assert_allclose(stat_cstat.statval, 568.52,
                    rtol=0, atol=0.005)

    ui.clean()
Example #26
def test_746(make_data_path, clean_astro_ui):
    """Test https://github.com/sherpa/sherpa/issues/746

    Something in #444 (reverted in #759) caused:

      - the fit to fail (niter=2)
      - the line amplitude not to change significantly
      - the statistic reported by the fit to be different to
        that returned by calc_stat

    Something with how the cache code handles analysis=wave
    appears to be the problem. This test takes the line data
    from 746 and adds it into the existing PHA2 file we have
    (3c120) to replicate the problem. Fortunately this
    wavelength range contains essentially no counts, so we
    can just add in the counts to make a fake line and check
    we can fit it.
    """

    ui.load_pha(make_data_path('3c120_pha2.gz'))
    ui.load_arf(10, make_data_path('3c120_meg_1.arf.gz'))
    ui.load_rmf(10, make_data_path('3c120_meg_1.rmf.gz'))

    # Add in the line
    d10 = ui.get_data(10)
    idx = np.arange(4068, 4075, dtype=int)
    d10.counts[idx] = [1, 1, 1, 2, 3, 1, 1]

    # group the data
    ui.group_width(id=10, num=2)

    ui.set_analysis('wave')
    ui.notice(21.4, 21.7)

    # internal check that we are getting the expected data
    expected = np.zeros(32)
    expected[[9, 10, 11, 12]] = [2, 3, 4, 1]
    expected[26] = 1  # this count is from the 3c120 data
    d = d10.get_dep(filter=True)
    assert d == pytest.approx(expected)

    line = ui.create_model_component('gauss1d', 'name')

    # treat the line as a delta function
    line.fwhm.val = 0.00001
    line.fwhm.frozen = True
    line.pos = 21.6
    line.pos.frozen = True

    line.ampl = 2

    ui.set_source(10, line)

    # the original fit used levmar, so use that here
    # (it looks like it also fails with simplex)
    ui.set_method('levmar')
    ui.set_stat('cstat')

    sinit = ui.calc_stat(10)
    ui.fit(10)
    fr = ui.get_fit_results()
    sfinal = ui.calc_stat(10)

    # Did the
    #   - fit improve
    #   - take more than two iterations
    #   - report the same statistic values as calc_stat
    #   - change the fit parameter
    #
    assert sfinal < sinit

    assert fr.succeeded
    assert fr.nfev > 2

    assert fr.istatval == sinit
    assert fr.statval == sfinal

    assert line.ampl.val > 100
    assert len(fr.parvals) == 1
    assert fr.parvals[0] == line.ampl.val

    # some simple checks to throw in because we can
    assert fr.parnames == ('name.ampl', )
    assert fr.datasets == (10,)
    assert fr.statname == 'cstat'
    assert fr.methodname == 'levmar'
    assert fr.itermethodname == 'none'
    assert fr.numpoints == 32
    assert fr.dof == 31

    # Now add in some "absolute" checks to act as regression tests.
    # If these fail then it doesn't necessarily mean something bad
    # has happened.
    #
    assert fr.nfev == 15
    assert sinit == pytest.approx(82.72457294394245)
    assert sfinal == pytest.approx(15.39963248224592)
    assert line.ampl.val == pytest.approx(113.95646989927054)