Example #1
    def setUp(self):

        self._old_logger_level = logger.getEffectiveLevel()
        logger.setLevel(logging.ERROR)

        ui.set_stat('wstat')

        infile = self.make_path('3c273.pi')
        ui.load_pha(1, infile)

        # Change the backscale value slightly so that the
        # results are different to other runs with this file.
        #
        nbins = ui.get_data(1).get_dep(False).size
        bscal = 0.9 * np.ones(nbins) * ui.get_backscal(1)
        ui.set_backscal(1, backscale=bscal)

        ui.set_source(1, ui.powlaw1d.pl)

        # The powerlaw slope and normalization are
        # intended to be "a reasonable approximation"
        # to the data, just to make sure that any statistic
        # calculation doesn't blow-up too much.
        #
        ui.set_par("pl.gamma", 1.7)
        ui.set_par("pl.ampl", 1.7e-4)
Example #2
def fit_coeffs(method='simplex'):
    dummy_data = np.zeros(100)
    dummy_times = np.arange(100)
    ui.load_arrays(1, dummy_times, dummy_data)

    ui.set_method(method)
    ui.get_method().config.update(SHERPA_CONFIGS.get(method, {}))

    calc_model = CalcModel()
    ui.load_user_model(calc_model, 'axo_mod')  # sets global axo_mod

    parnames = []
    for row in range(N_ROWS):
        for col in range(N_COLS):
            parnames.append('adj_{}_{}'.format(row, col))
    ui.add_user_pars('axo_mod', parnames)
    ui.set_model(1, 'axo_mod')

    calc_stat = CalcStat(axo_mod, M_2d, displ_x)
    ui.load_user_stat('axo_stat', calc_stat, lambda x: np.ones_like(x))
    ui.set_stat(axo_stat)
    calc_model.set_calc_stat(calc_stat)

    # Set frozen, min, and max attributes for each axo_mod parameter
    for par in axo_mod.pars:
        par.val = 0.0
        par.min = -5
        par.max = 5

    ui.fit(1)

    coeffs = np.array([par.val for par in axo_mod.pars])
    return coeffs
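
The `CalcModel` and `CalcStat` helpers used above are project-specific and not shown. What Sherpa requires of them follows the `load_user_model`/`load_user_stat` conventions also seen in Examples #6 and #16: the model object is called as `f(pars, x)` and returns an array, and the statistic object is called as `f(data, model, staterror, syserror=None, weight=None)` and returns `(statval, fvec)`. A minimal sketch of that shape (class names and internals here are illustrative assumptions, not the original project's code):

import numpy as np


class CalcModel:
    """Callable suitable for ui.load_user_model: Sherpa calls it as f(pars, x)."""

    def __init__(self):
        self.calc_stat = None

    def set_calc_stat(self, calc_stat):
        self.calc_stat = calc_stat

    def __call__(self, pars, x):
        # Hand the current parameter values to the statistic object and return
        # a dummy model; the real data/model comparison happens in CalcStat.
        if self.calc_stat is not None:
            self.calc_stat.pars = np.asarray(pars)
        return np.ones_like(x)


class CalcStat:
    """Callable suitable for ui.load_user_stat: must return (statval, fvec)."""

    def __init__(self, coeff_matrix, target):
        self.coeff_matrix = coeff_matrix  # maps coefficients to predictions
        self.target = target              # measurements to reproduce
        self.pars = None

    def __call__(self, data, model, staterror=None, syserror=None, weight=None):
        resid = self.coeff_matrix.dot(self.pars) - self.target
        fvec = resid ** 2
        return float(fvec.sum()), fvec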
Example #3
def test_cstat_comparison_xspec(make_data_path, l, h, ndp, ndof, statval):
    """Compare CSTAT values for a data set to XSPEC.

    This checks that the "UI layer" works, although ideally there
    should be a file that can be read in rather than having to
    manipulate it (the advantage here is that it means there is
    no messing around with adding a file to the test data set).

    The XSPEC version used was 12.9.0o.
    """

    dset = create_xspec_comparison_dataset(make_data_path,
                                           keep_background=False)

    ui.clean()
    ui.set_data(dset)
    # use powlaw1d rather than xspowerlaw so do not need XSPEC
    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.ampl', 1e-4)

    ui.set_stat('cstat')
    ui.set_analysis('channel')

    validate_xspec_result(l, h, ndp, ndof, statval)
    ui.clean()
Example #4
def test_xspecvar_no_grouping_no_bg_comparison_xspec(make_data_path,
                                                     l, h, ndp, ndof, statval):
    """Compare chi2xspecvar values for a data set to XSPEC.

    The data set has no background.

    See test_cstat_comparison_xspec. Note that at present
    Sherpa and XSPEC treat bins with 0 values in them differently:
    see https://github.com/sherpa/sherpa/issues/356
    so for this test all bins are forced to have at least one
    count in them (source -> 5 is added per channel, background ->
    3 is added per channel).

    The XSPEC version used was 12.9.0o.
    """

    dset = create_xspec_comparison_dataset(make_data_path,
                                           keep_background=False)

    # Lazy, so add it to "bad" channels too
    dset.counts += 5

    ui.clean()
    ui.set_data(dset)

    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.ampl', 5e-4)

    ui.set_stat('chi2xspecvar')
    ui.set_analysis('energy')

    validate_xspec_result(l, h, ndp, ndof, statval)
    ui.clean()
Example #5
    def test_chi2datavar(self):
        num = 3
        xy = numpy.array(range(num))
        ui.load_arrays(1, xy, xy, Data1D)
        ui.set_stat('chi2datavar')
        err = ui.get_staterror()
        numpy.testing.assert_allclose(err, numpy.sqrt(xy), rtol=1e-7, atol=1e-7)
Example #6
def test_user_stat_unit():
    given_stat_error = [1.1, 2.2, 3.3]
    given_sys_error = [10.1, 10.2, 10.3]

    def calc_stat(data, _model, staterror, syserror=None, weight=None):
        # Make sure values are being injected correctly
        np.testing.assert_array_equal(given_stat_error, staterror)
        np.testing.assert_array_equal(given_sys_error, syserror)
        return 3.235, np.ones_like(data)

    xdata = [1, 2, 3]
    ydata = xdata

    ui.load_arrays(1, xdata, ydata, None, given_sys_error, Data1D)

    ui.set_model(1, 'polynom1d.p')

    ui.load_user_stat('customstat', calc_stat, lambda x: given_stat_error)
    ui.set_stat(eval('customstat'))

    try:
        ui.fit(1)
    except StatErr:
        pytest.fail("Call should not be throwing any exception (bug #341)")

    # Test the result is what we made the user stat return
    assert 3.235 == ui.get_fit_results().statval
Example #7
def test_eqwith_err1(make_data_path, restore_xspec_settings):

    def check1(e0, e1, e2):
        assert e0 == approx(0.028335201547206704, rel=1.0e-3)
        assert e1 == approx(-0.00744118799274448756, rel=1.0e-3)
        assert e2 == approx(0.0706249544851336, rel=1.0e-3)

    ui.set_xsabund('angr')
    ui.set_xsxsect('bcmc')

    ui.load_pha(make_data_path('3c273.pi'))
    ui.notice(0.5, 7.0)
    ui.set_stat("chi2datavar")
    ui.set_method("simplex")
    ui.set_model('powlaw1d.p1+gauss1d.g1')
    g1.fwhm = 0.1
    g1.pos = 2.0
    ui.freeze(g1.pos, g1.fwhm)
    ui.fit()

    numpy.random.seed(2345)
    e = ui.eqwidth(p1, p1 + g1, error=True, niter=100)
    check1(e[0], e[1], e[2])
    params = e[3]

    numpy.random.seed(2345)
    e = ui.eqwidth(p1, p1 + g1, error=True, params=params, niter=100)
    check1(e[0], e[1], e[2])

    parvals = ui.get_fit_results().parvals
    assert parvals[0] == approx(1.9055272902160334, rel=1.0e-3)
    assert parvals[1] == approx(0.00017387966749772638, rel=1.0e-3)
    assert parvals[2] == approx(1.279415076070516e-05, rel=1.0e-3)
Example #8
def test_xspecvar_no_grouping_comparison_xspec(make_data_path,
                                               l, h, ndp, ndof, statval):
    """Compare chi2xspecvar values for a data set to XSPEC.

    The data set has a background. See
    test_xspecvar_no_grouping_no_bg_comparison_xspec

    The XSPEC version used was 12.9.0o.
    """

    dset = create_xspec_comparison_dataset(make_data_path,
                                           keep_background=True)

    # Lazy, so add it to "bad" channels too
    dset.counts += 5
    dset.get_background().counts += 3

    ui.clean()
    ui.set_data(dset)
    ui.subtract()

    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.ampl', 5e-4)

    ui.set_stat('chi2xspecvar')
    ui.set_analysis('energy')

    validate_xspec_result(l, h, ndp, ndof, statval)
    ui.clean()
Example #9
def test_background():

    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)

    kT_sim = 1.0
    Z_sim = 0.0
    norm_sim = 4.0e-2
    nH_sim = 0.04
    redshift = 0.01

    exp_time = (200., "ks")
    area = (1000., "cm**2")
    fov = (10.0, "arcmin")

    prng = 24

    agen = ApecGenerator(0.05, 12.0, 5000, broadening=False)
    spec = agen.get_spectrum(kT_sim, Z_sim, redshift, norm_sim)
    spec.apply_foreground_absorption(norm_sim)

    events = make_background(area, exp_time, fov, (30.0, 45.0), spec, prng=prng)
    events.write_simput_file("bkgnd", overwrite=True)

    instrument_simulator("bkgnd_simput.fits", "bkgnd_evt.fits", 
                         exp_time, "sq_acisi_cy19", [30.0, 45.0],
                         overwrite=True, foreground=False, ptsrc_bkgnd=False,
                         instr_bkgnd=False,
                         prng=prng)

    write_spectrum("bkgnd_evt.fits", "background_evt.pi", overwrite=True)

    os.system("cp %s %s ." % (arf.filename, rmf.filename))

    load_user_model(mymodel, "wapec")
    add_user_pars("wapec", ["nH", "kT", "metallicity", "redshift", "norm"],
                  [0.01, 4.0, 0.2, redshift, norm_sim*0.8],
                  parmins=[0.0, 0.1, 0.0, -20.0, 0.0],
                  parmaxs=[10.0, 20.0, 10.0, 20.0, 1.0e9],
                  parfrozen=[False, False, False, True, False])

    load_pha("background_evt.pi")
    set_stat("cstat")
    set_method("simplex")
    ignore(":0.5, 8.0:")
    set_model("wapec")
    fit()
    res = get_fit_results()

    assert np.abs(res.parvals[0]-nH_sim)/nH_sim < 0.1
    assert np.abs(res.parvals[1]-kT_sim)/kT_sim < 0.05
    assert np.abs(res.parvals[2]-Z_sim) < 0.05
    assert np.abs(res.parvals[3]-norm_sim)/norm_sim < 0.05

    os.chdir(curdir)
    shutil.rmtree(tmpdir)
Example #10
def test_point_source():

    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)

    nH_sim = 0.02
    norm_sim = 1.0e-4
    alpha_sim = 0.95
    redshift = 0.02

    exp_time = (100., "ks")
    area = (3000., "cm**2")

    spec = Spectrum.from_powerlaw(alpha_sim, redshift, norm_sim, 
                                  emin=0.1, emax=11.5, nbins=2000)

    spec.apply_foreground_absorption(nH_sim, model="tbabs")

    positions = [(30.01, 45.0)]

    events = make_point_sources(area, exp_time, positions, (30.0, 45.0),
                                spec, prng=prng)

    events.write_simput_file("ptsrc", overwrite=True)

    instrument_simulator("ptsrc_simput.fits", "ptsrc_evt.fits",
                         exp_time, "sq_aciss_cy19", [30.0, 45.0],
                         overwrite=True, foreground=False, ptsrc_bkgnd=False,
                         instr_bkgnd=False,
                         prng=prng)

    write_spectrum("ptsrc_evt.fits", "point_source_evt.pi", overwrite=True)

    os.system("cp %s %s ." % (arf.filename, rmf.filename))

    load_user_model(mymodel, "tplaw")
    add_user_pars("tplaw", ["nH", "norm", "redshift", "alpha"],
                  [0.02, norm_sim*0.8, redshift, 0.9],
                  parmins=[0.0, 0.0, 0.0, 0.1],
                  parmaxs=[10.0, 1.0e9, 10.0, 10.0],
                  parfrozen=[True, False, True, False])

    load_pha("point_source_evt.pi")
    set_stat("cstat")
    set_method("simplex")
    ignore(":0.4, 9.0:")
    set_model("tplaw")
    fit()
    res = get_fit_results()

    assert np.abs(res.parvals[0]-norm_sim)/norm_sim < 0.05
    assert np.abs(res.parvals[1]-alpha_sim)/alpha_sim < 0.05

    os.chdir(curdir)
    shutil.rmtree(tmpdir)
Example #11
    def test_get_stat_info(self):
        fname_3c273 = self.make_path("3c273.pi")
        ui.load_pha(fname_3c273)
        src = ui.xspowerlaw.pl
        ui.set_source(src)
        ui.guess('pl')
        ui.set_stat('wstat')
        stat_info = ui.get_stat_info()[0]
        assert stat_info.dof == 44
        assert stat_info.numpoints == 46
Example #12
    def test_xspec(self):
        ui.load_arrays(1, self.x, self.y)
        ui.set_source("xspowerlaw.p")
        ui.set_method("moncar")
        ui.set_stat("chi2xspecvar")
        ui.fit()
        model = ui.get_model_component("p")
        expected = [-1.3686404, 0.5687635]
        observed = [model.PhoIndex.val, model.norm.val]
        assert_almost_equal(observed, expected)
Example #13
def fit_adjuster_set(coeffs, adj_idxs, method='simplex'):
    """
    Find best fit parameters for an arbitrary subset of adjustors
    specified by the array ``adj_idxs``.  The input ``coeffs`` are
    the best-fit adjustor coefficients for the last iteration.
    """
    import sherpa.astro.ui as ui

    dummy_data = np.zeros(100)
    dummy_times = np.arange(100)
    ui.load_arrays(1, dummy_times, dummy_data)

    ui.set_method(method)
    ui.get_method().config.update(SHERPA_CONFIGS.get(method, {}))

    calc_model = CalcModel()
    ui.load_user_model(calc_model, 'axo_mod')  # sets global axo_mod

    parnames = []
    for adj_idx in adj_idxs:
        parnames.append('adj_{}'.format(adj_idx))
    ui.add_user_pars('axo_mod', parnames)
    ui.set_model(1, 'axo_mod')

    coeffs = coeffs.copy()  # Don't modify input coeffs
    coeffs[coeffs < 0] = 0  # Don't allow negative coeffs

    # Set frozen, min, and max attributes for each axo_mod parameter
    for adj_idx, par in zip(adj_idxs, axo_mod.pars):
        par.min = -1000
        par.max = 1000
        par.val = coeffs[adj_idx]
        print('Setting {} to {}'.format(adj_idx, par.val))

    # Compute base adjusted displacements assuming all the fitted actuators
    # have zero drive level.
    coeffs[adj_idxs] = 0
    base_adj_displ = M_2d.dot(coeffs)

    m_2d = M_2d[:, adj_idxs].copy()
    print(m_2d.shape)
    calc_stat = CalcStat(base_adj_displ, m_2d, DISPL_X)
    ui.load_user_stat('axo_stat', calc_stat, lambda x: np.ones_like(x))
    ui.set_stat(axo_stat)
    calc_model.set_calc_stat(calc_stat)

    ui.fit(1)

    # Update coeffs with the values determined in fitting
    for adj_idx, par in zip(adj_idxs, axo_mod.pars):
        coeffs[adj_idx] = abs(par.val)

    return coeffs, ui.get_fit_results()
Example #14
File: test_fit.py Project: mirca/gammapy
def test_sherpa_fit(tmpdir):
    # this is to make sure that the written PHA files work with sherpa
    pha1 = gammapy_extra.filename("datasets/hess-crab4_pha/pha_obs23592.fits")

    import sherpa.astro.ui as sau
    from sherpa.models import PowLaw1D
    sau.load_pha(pha1)
    sau.set_stat('wstat')
    model = PowLaw1D('powlaw1d.default')
    model.ref = 1e9
    model.ampl = 1
    model.gamma = 2
    sau.set_model(model * 1e-20)
    sau.fit()
    assert_allclose(model.pars[0].val, 2.0281484215403616, atol=1e-4)
    assert_allclose(model.pars[2].val, 2.3528406790143097, atol=1e-4)
Example #15
File: test_fit.py Project: cdeil/gammapy
    def test_sherpa_fit(self, tmpdir):
        # this is to make sure that the written PHA files work with sherpa
        import sherpa.astro.ui as sau
        from sherpa.models import PowLaw1D

        self.obs_list.write(tmpdir, use_sherpa=True)
        filename = tmpdir / 'pha_obs23523.fits'
        sau.load_pha(str(filename))
        sau.set_stat('wstat')
        model = PowLaw1D('powlaw1d.default')
        model.ref = 1e9
        model.ampl = 1
        model.gamma = 2
        sau.set_model(model * 1e-20)
        sau.fit()
        assert_allclose(model.pars[0].val, 2.0881699260935838)
        assert_allclose(model.pars[2].val, 1.6234222129479836)
Example #16
def test_user_model_stat_docs():
    """
    This test reproduces the documentation shown at:
    http://cxc.harvard.edu/sherpa4.4/statistics/#userstat

    and:
    http://cxc.harvard.edu/sherpa/threads/user_model/

    I tried to be as faithful as possible to the original, although the examples in the docs
    are not completely self-contained, so some changes were necessary. I changed the numpy
    reference, as it is imported as `np` here, and added a clean-up of the environment
    before doing anything.

    For the model, the difference is that I am not importing the function from an
    external module, plus the dataset is different.

    Also, the stats docs do not perform a fit.
    """
    def my_stat_func(data, model, staterror, syserror=None, weight=None):
        # A simple function to replicate χ2
        fvec = ((data - model) / staterror)**2
        stat = fvec.sum()
        return (stat, fvec)

    def my_staterr_func(data):
        # A simple staterror function
        return np.sqrt(data)

    def myline(pars, x):
        return pars[0]*x + pars[1]

    x = [1, 2, 3]
    y = [4, 5, 6.01]

    ui.clean()
    ui.load_arrays(1, x, y)
    ui.load_user_stat("mystat", my_stat_func, my_staterr_func)
    ui.set_stat(eval('mystat'))
    ui.load_user_model(myline, "myl")
    ui.add_user_pars("myl", ["m", "b"])
    ui.set_model(eval('myl'))

    ui.fit()

    assert ui.get_par("myl.m").val == approx(1, abs=0.01)
    assert ui.get_par("myl.b").val == approx(3, abs=0.01)
Example #17
File: wstat.py Project: tibaldo/gammapy
def wfit(dataids=None):
    listids = ()
    if dataids is None:
        listids = sau.list_data_ids()
    else:
        listids = dataids

    wstat = w_statistic(listids)
    sau.load_user_stat("mystat", wstat, wstat.CATstat_err_LV)
    sau.set_stat(mystat)
    sau.set_method("neldermead")  # set_method("moncar")
    sau.set_conf_opt("max_rstat", 1000)  # We don't use a specific maximum reduced statistic value
    # since we don't expect the cstat to be anywhere near the
    # large number limit
    sau.fit(*listids)

    sau.conf()
Example #18
File: smoke.py Project: DougBurke/sherpa
    def test_xspec(self):
        """
        Perform a very simple fit with an xspec model.

        Also check that the results make sense.

        This test proves that the xspec extension properly works, and that there are no obvious building, linking, or
        environment issues that would prevent the xspec model from running.
        """
        ui.load_arrays(1, self.x, self.y)
        ui.set_source("xspowerlaw.p")
        ui.set_method("moncar")
        ui.set_stat("chi2xspecvar")
        ui.fit()
        model = ui.get_model_component("p")
        expected = [-1.3686404, 0.5687635]
        observed = [model.PhoIndex.val, model.norm.val]
        assert_almost_equal(observed, expected)
Example #19
def test_eqwith_err(make_data_path, restore_xspec_settings):

    def check(a0, a1, a2):
        assert a0 == approx(0.16443033244310976, rel=1e-3)
        assert a1 == approx(0.09205564216156815, rel=1e-3)
        assert a2 == approx(0.23933118287470895, rel=1e-3)

    ui.set_method('neldermead')
    ui.set_stat('cstat')
    ui.set_xsabund('angr')
    ui.set_xsxsect('bcmc')

    ui.load_data(make_data_path('12845.pi'))
    ui.notice(0.5, 7)

    ui.set_model("xsphabs.gal*xszphabs.zabs*(powlaw1d.p1+xszgauss.g1)")
    ui.set_par(gal.nh, 0.08)
    ui.freeze(gal)

    ui.set_par(zabs.redshift, 0.518)
    ui.set_par(g1.redshift, 0.518)
    ui.set_par(g1.Sigma, 0.01)
    ui.freeze(g1.Sigma)
    ui.set_par(g1.LineE, min=6.0, max=7.0)

    ui.fit()

    numpy.random.seed(12345)
    result = ui.eqwidth(p1, p1 + g1, error=True, niter=100)
    check(result[0], result[1], result[2])
    params = result[3]

    numpy.random.seed(12345)
    result = ui.eqwidth(p1, p1 + g1, error=True, params=params, niter=100)
    check(result[0], result[1], result[2])

    parvals = ui.get_fit_results().parvals
    assert parvals[0] == approx(0.6111340686157877, rel=1.0e-3)
    assert parvals[1] == approx(1.6409785803466297, rel=1.0e-3)
    assert parvals[2] == approx(8.960926761312153e-05, rel=1.0e-3)
    assert parvals[3] == approx(6.620017726014523, rel=1.0e-3)
    assert parvals[4] == approx(1.9279114810359657e-06, rel=1.0e-3)
Example #20
    def setUp(self):

        self._old_logger_level = logger.getEffectiveLevel()
        logger.setLevel(logging.ERROR)

        ui.set_stat('wstat')

        infile1 = self.make_path('3c273.pi')
        infile2 = self.make_path('9774.pi')
        ui.load_pha(1, infile1)
        ui.load_pha(2, infile2)

        # Since 9774.pi isn't grouped, group it. Note that this
        # call groups the background to 20 counts per bin. In this
        # case we do not want that; instead we want to use the same
        # grouping scheme as the source file.
        #
        # Note: this is related to issue 227
        #
        ui.group_counts(2, 20)
        ui.set_grouping(2, bkg_id=1, val=ui.get_grouping(2))

        # There's no need to have the same model in both datasets,
        # but assume the same source model can be used, with a
        # normalization difference.
        #
        ui.set_source(1, ui.powlaw1d.pl1)
        ui.set_source(2, ui.const1d.c2 * ui.get_source(1))

        # The powerlaw slope and normalization are
        # intended to be "a reasonable approximation"
        # to the data, just to make sure that any statistic
        # calculation doesn't blow-up too much.
        #
        # Note: the model values for 3c273 are slightly different
        #       to the single-PHA-file case, so stat results are
        #       slightly different
        #
        ui.set_par("pl1.gamma", 1.7)
        ui.set_par("pl1.ampl", 1.6e-4)
        ui.set_par("c2.c0", 45)
Example #21
def test_wstat_comparison_xspec(make_data_path, l, h, ndp, ndof, statval):
    """Compare WSTAT values for a data set to XSPEC.

    See test_cstat_comparison_xspec.

    The XSPEC version used was 12.9.0o.
    """

    dset = create_xspec_comparison_dataset(make_data_path,
                                           keep_background=True)

    ui.clean()
    ui.set_data(dset)
    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.ampl', 1e-4)

    ui.set_stat('wstat')
    ui.set_analysis('channel')

    validate_xspec_result(l, h, ndp, ndof, statval)
    ui.clean()
Example #22
    def setUp(self):

        # defensive programming (one of the tests has been seen to fail
        # when the whole test suite is run without this)
        ui.clean()

        self._old_logger_level = logger.getEffectiveLevel()
        logger.setLevel(logging.ERROR)

        ui.set_stat('wstat')

        infile = self.make_path('3c273.pi')
        ui.load_pha(1, infile)

        ui.set_source(1, ui.powlaw1d.pl)

        # The powerlaw slope and normalization are
        # intended to be "a reasonable approximation"
        # to the data, just to make sure that any statistic
        # calculation doesn't blow-up too much.
        #
        ui.set_par("pl.gamma", 1.782)
        ui.set_par("pl.ampl", 1.622e-4)
Example #23
    def test_sherpa_fit(self, tmpdir):
        # this is to make sure that the written PHA files work with sherpa
        import sherpa.astro.ui as sau
        from sherpa.models import PowLaw1D

        # TODO: this works a little bit, but some info and warnings
        # from Sherpa remain. Not sure what to do, OK as-is for now.
        import logging

        logging.getLogger("sherpa").setLevel("ERROR")

        self.obs_list.write(tmpdir, use_sherpa=True)
        filename = tmpdir / "pha_obs23523.fits"
        sau.load_pha(str(filename))
        sau.set_stat("wstat")
        model = PowLaw1D("powlaw1d.default")
        model.ref = 1e9
        model.ampl = 1
        model.gamma = 2
        sau.set_model(model * 1e-20)
        sau.fit()
        assert_allclose(model.pars[0].val, 2.732, rtol=1e-3)
        assert_allclose(model.pars[2].val, 4.647, rtol=1e-3)
Example #24
    def setUp(self):

        self._old_logger_level = logger.getEffectiveLevel()
        logger.setLevel(logging.ERROR)

        ui.set_stat('wstat')

        infile = self.make_path('9774.pi')
        ui.load_pha(1, infile)

        ui.group_counts(1, 20)

        # Unlike the test_wstat_two_scalar case, the grouping
        # is not copied over.
        # ui.set_grouping(1, bkg_id=1, val=ui.get_grouping(1))

        ui.set_source(1, ui.const1d.c1 * ui.powlaw1d.pl1)

        # These should be the same as test_wstat_two_scalar
        #
        ui.set_par("pl1.gamma", 1.7)
        ui.set_par("pl1.ampl", 1.6e-4)
        ui.set_par("c1.c0", 45)
Example #25
def fit_draws(draws, parname, nbins=50, params=None, plot=True, verbose=True):
    """Fit a gaussian to the histogram of the given parameter.

    Before using this routine you should use get_parameter_info()
    to extract the parameter info for use by get_draws(). This is
    because using this routine will invalidate the internal
    data structures that get_draws() uses when its params argument
    is None.

    If params is not None then it should be the return value of
    get_parameter_info().

    If plot is True then a plot of the histogram and fit will be
    made.

    If verbose is True then a quick comparison of the fit
    results will be displayed.
    """

    if parname not in draws["parnames"]:
        raise RuntimeError("Unknown parameter '%s'" % parname)

    # Exclude any point with an iteration number of 0
    #
    idx = draws["iteration"] > 0
    parvals = draws[parname][idx]

    (hy, hx) = np.histogram(parvals, bins=nbins)
    xlo = hx[:-1]
    xhi = hx[1:]

    id = parname
    ui.load_arrays(id, 0.5 * (xlo + xhi), hy)

    # We can guess the amplitude and position fairly reliably;
    # for the FWHM we just use the inter-quartile range of the
    # X axis.
    #
    ui.set_source(id, ui.gauss1d.gparam)
    gparam.pos = xlo[xlo.size // 2]
    gparam.ampl = hy[xlo.size // 2]
    gparam.fwhm = xlo[xlo.size * 3 // 4] - xlo[xlo.size // 4]

    # Get the best-fit value if available
    if params is not None:
        p0 = dict(zip(params["parnames"], params["parvals"]))[parname]

    logger = logging.getLogger("sherpa")
    olvl = logger.level
    logger.setLevel(40)

    ostat = ui.get_stat_name()
    ui.set_stat("leastsq")
    ui.fit(id)
    ui.set_stat(ostat)

    logger.setLevel(olvl)

    if plot:
        # We manually create the plot since we want to use a histogram for the
        # data and the Sherpa plots use curves.
        #
        ##dplot = ui.get_data_plot(id)
        mplot = ui.get_model_plot(id)

        chips.lock()
        try:
            chips.open_undo_buffer()
            chips.erase()
            chips.add_histogram(xlo, xhi, hy)
            ##chips.add_histogram(xlo, xhi, mplot.y, ["line.color", "red", "line.style", "dot"])
            chips.add_curve(mplot.x, mplot.y,
                            ["line.color", "red", "symbol.style", "none"])

            if params is not None:
                chips.add_vline(
                    p0, ["line.color", "green", "line.style", "longdash"])

            chips.set_plot_xlabel(parname)
        except:
            chips.discard_undo_buffer()
            chips.unlock()
            raise
        chips.close_undo_buffer()
        chips.unlock()

    sigma = gparam.fwhm.val / (2.0 * np.sqrt(2 * np.log(2)))

    if verbose:
        print ""
        print "Fit to histogram of draws for parameter %s gives" % parname
        print "     mean     = %g" % gparam.pos.val
        print "     sigma    = %g" % sigma
        print ""

        if params != None:
            idx = params["parnames"] == parname
            print "     best fit = %g" % p0
            print "  covar sigma = %g" % params["parmaxes"][idx][0]
            print ""

    return (gparam.pos.val, sigma, gparam.ampl.val)
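
A typical call sequence, based on the docstring above (everything other than fit_draws itself is sketched from that description, and the parameter name is hypothetical):

pinfo = get_parameter_info()      # snapshot the parameter info first
draws = get_draws(params=pinfo)   # then pull the draws
mean, sigma, ampl = fit_draws(draws, "p1.gamma", nbins=50,
                              params=pinfo, plot=False)
print("p1.gamma = %g +/- %g" % (mean, sigma))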
Example #26
dep.set_par('xswabs.nh', 0.0255)
dep.freeze('xswabs.nh')

# Initialize abundance to 0.5 and thaw
dep.set_par('xsmekal.abundanc', 0.5)
dep.thaw('xsmekal.abundanc')

# Set redshift
dep.set_par('xsmekal.redshift', redshift)

# Do the initial onion-peeling fit with
#   Levenberg-Marquardt optimization method
#   XSPEC variance with a Chi^2 fit statistic
#
set_method("levmar")
set_stat("chi2xspecvar")

dep.guess()

# Display the central annulus (using Sherpa routines)
set_xlog()
plot_fit(0)
plt.savefig('m87_ann0_guess.png')

onion = dep.fit()

# Display the central annulus (using Sherpa routines)
plot_fit_delchi(0)
plt.savefig('m87_ann0_fit.png')

# Create basic plots
Example #27
## Define source models
abs1 = ui.xsphabs.abs1
pow1 = ui.powlaw1d.pow1
src_model1 = abs1 * pow1
src_models = [src_model1, src_model1 * ui.const1d.ratio_12]

vals_iter = izip(count(1), src_models, ds.get_response(), bkg_models, ds.get_bkg_scale())
for i, src_model, rsp, bkg_model, bkg_scale in vals_iter:
    ds[i].set_full_model(rsp(src_model) + bkg_scale * bkg_model)
    ds[i].set_bkg_full_model(bkg_model)

## Fit background
ds.notice(0.5, 8.0)
ui.set_method("neldermead")
ui.set_stat("cash")

ui.thaw(c1.c0)
ui.thaw(c2.c0)
ui.fit_bkg()
ui.freeze(c1.c0)
ui.freeze(c2.c0)

ui.set_par(abs1.nh, 0.0398)
ui.freeze(abs1)
ui.fit()
ds.plot_fit()
# ds.plot_bkg_fit()

# FIT OUTPUT:
#
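
The background models referenced above (`bkg_models`, with components `c1` and `c2`) and the datastack `ds` are set up before this snippet. Given that the background fit thaws and then freezes `c1.c0` and `c2.c0`, a plausible definition (an assumption for illustration, not the original script) is one flat component per dataset; note also that `izip`/`count` come from `itertools` on Python 2, and the built-in `zip` replaces `izip` on Python 3:

# Hypothetical setup implied by the thaw/freeze calls above: one constant
# background model per dataset in the stack.
bkg_models = [ui.const1d.c1, ui.const1d.c2]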
Example #28
def test_xspec_con_ui_cflux(make_data_path, clean_astro_ui, restore_xspec_settings):
    """Check cflux from the UI layer with a response."""

    from sherpa.astro import xspec

    infile = make_data_path('3c273.pi')
    ui.load_pha('random', infile)
    ui.subtract('random')
    ui.ignore(None, 0.5)
    ui.ignore(7, None)

    ui.set_source('random', 'xsphabs.gal * xscflux.sflux(powlaw1d.pl)')
    mdl = ui.get_source('random')

    assert mdl.name == '(xsphabs.gal * xscflux.sflux(powlaw1d.pl))'
    assert len(mdl.pars) == 7
    assert mdl.pars[0].fullname == 'gal.nH'
    assert mdl.pars[1].fullname == 'sflux.Emin'
    assert mdl.pars[2].fullname == 'sflux.Emax'
    assert mdl.pars[3].fullname == 'sflux.lg10Flux'
    assert mdl.pars[4].fullname == 'pl.gamma'
    assert mdl.pars[5].fullname == 'pl.ref'
    assert mdl.pars[6].fullname == 'pl.ampl'

    assert isinstance(mdl.lhs, xspec.XSphabs)
    assert isinstance(mdl.rhs, xspec.XSConvolutionModel)

    gal = ui.get_model_component('gal')
    sflux = ui.get_model_component('sflux')
    pl = ui.get_model_component('pl')
    assert isinstance(gal, xspec.XSphabs)
    assert isinstance(sflux, xspec.XScflux)
    assert isinstance(pl, PowLaw1D)

    # the convolution model needs the normalization to be fixed
    # (not for this example, as we are not fitting, but do this
    # anyway for reference)
    pl.ampl.frozen = True

    sflux.emin = 1
    sflux.emax = 5
    sflux.lg10Flux = -12.3027

    pl.gamma = 2.03
    gal.nh = 0.039

    ui.set_xsabund('angr')
    ui.set_xsxsect('vern')

    # check we get the "expected" statistic (so this is a regression
    # test).
    #
    ui.set_stat('chi2gehrels')
    sinfo = ui.get_stat_info()

    assert len(sinfo) == 1
    sinfo = sinfo[0]
    assert sinfo.numpoints == 40
    assert sinfo.dof == 37
    assert sinfo.statval == pytest.approx(21.25762265234619)

    # Do we get the same flux from Sherpa's calc_energy_flux?
    #
    cflux = ui.calc_energy_flux(id='random', model=sflux(pl), lo=1, hi=5)
    lcflux = np.log10(cflux)
    assert lcflux == pytest.approx(sflux.lg10Flux.val)
Example #29
File: xagnfitter.py Project: neobar/BXA
id = 1
filename = args.filenames[0]
elo, ehi = args.energyrange.split(':')
elo, ehi = float(elo), float(ehi)
load_pha(id, filename)
try:
    assert get_rmf(id).energ_lo[0] > 0
    assert get_arf(id).energ_lo[0] > 0
    assert (get_bkg(id).counts > 0).sum() > 0
except Exception:
    traceback.print_exc()
    sys.exit(0)

set_xlog()
set_ylog()
set_stat('cstat')
set_xsabund('wilm')
set_xsxsect('vern')
set_analysis(id, 'ener', 'counts')
ignore(None, elo)
ignore(ehi, None)
notice(elo, ehi)

prefix = filename + '_xagnfitter_out_'

#import json
#z = float(open(filename + '.z').read())
#galnh_value = float(open(filename + '.nh').read())

galabso = auto_galactic_absorption()
galabso.nH.freeze()
Example #30
def test_background():

    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)

    kT_sim = 1.0
    Z_sim = 0.0
    norm_sim = 4.0e-2
    nH_sim = 0.04
    redshift = 0.01

    exp_time = (200., "ks")
    area = (1000., "cm**2")
    fov = (10.0, "arcmin")

    prng = 24

    agen = ApecGenerator(0.05, 12.0, 5000, broadening=False)
    spec = agen.get_spectrum(kT_sim, Z_sim, redshift, norm_sim)
    spec.apply_foreground_absorption(norm_sim)

    events = make_background(area,
                             exp_time,
                             fov, (30.0, 45.0),
                             spec,
                             prng=prng)
    events.write_simput_file("bkgnd", overwrite=True)

    instrument_simulator("bkgnd_simput.fits",
                         "bkgnd_evt.fits",
                         exp_time,
                         "sq_acisi_cy19", [30.0, 45.0],
                         overwrite=True,
                         foreground=False,
                         ptsrc_bkgnd=False,
                         instr_bkgnd=False,
                         prng=prng)

    write_spectrum("bkgnd_evt.fits", "background_evt.pi", overwrite=True)

    os.system("cp %s %s ." % (arf.filename, rmf.filename))

    load_user_model(mymodel, "wapec")
    add_user_pars("wapec", ["nH", "kT", "metallicity", "redshift", "norm"],
                  [0.01, 4.0, 0.2, redshift, norm_sim * 0.8],
                  parmins=[0.0, 0.1, 0.0, -20.0, 0.0],
                  parmaxs=[10.0, 20.0, 10.0, 20.0, 1.0e9],
                  parfrozen=[False, False, False, True, False])

    load_pha("background_evt.pi")
    set_stat("cstat")
    set_method("simplex")
    ignore(":0.5, 8.0:")
    set_model("wapec")
    fit()
    res = get_fit_results()

    assert np.abs(res.parvals[0] - nH_sim) / nH_sim < 0.1
    assert np.abs(res.parvals[1] - kT_sim) / kT_sim < 0.05
    assert np.abs(res.parvals[2] - Z_sim) < 0.05
    assert np.abs(res.parvals[3] - norm_sim) / norm_sim < 0.05

    os.chdir(curdir)
    shutil.rmtree(tmpdir)
Example #31
File: bb_sherpa.py Project: pyfit/pyfit
# @todo: It is possible to give no staterror, but I'm not sure
# what Sherpa does then. fr.statval is no longer the correct chi2.

# Set up the model and load it as a Sherpa model
def boxbod_func(pars, x):
    b1, b2 = pars
    return BoxBOD.model(x, b1, b2)

ui.load_user_model(boxbod_func, "boxbod")
ui.add_user_pars("boxbod", ["b1", "b2"])
bb = boxbod
ui.set_model(bb)
bb.b1, bb.b2 = BoxBOD.p0[0], BoxBOD.p0[1]

# Perform fit
ui.set_stat('chi2datavar')
#ui.set_method('levmar') #  ['levmar', 'moncar', 'neldermead', 'simplex']
ui.set_method_opt('xtol', 1e-10)
ui.fit() # Compute best-fit parameters
ui.set_covar_opt('eps', 1e-5) # @todo: Why does this parameter have no effect
ui.covariance() # Compute covariance matrix (i.e. errors)
#ui.conf() # Compute profile errors
#ui.show_all() # Print a very nice summary of your session to less

# Report results
fr = ui.get_fit_results()
cr = ui.get_covar_results()

# Report results (we have to apply the s factor ourselves)
popt = np.array(fr.parvals)
chi2 = fr.statval
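
The covariance run above also makes per-parameter error estimates available through get_covar_results() (its parmins/parmaxes fields, used the same way in the radial-profile examples below); for symmetric errors the upper bounds suffice:

perr = np.array(cr.parmaxes)   # 1-sigma upper bounds from covariance()
print(popt, perr, chi2)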
Example #32
def test_chi2(make_data_path, clean_astro_ui):
    "bugs #11400, #13297, #12365"

    data = make_data_path('3c273.pi')

    # Case 1: first ds has no error, second has, chi2-derived (chi2gehrels)
    # statistic. I expect stat.name to be chi2gehrels for ds1, chi2 for
    # ds2, chi2gehrels for ds1,2
    ui.load_data(1, data)
    ui.load_data(2, data, use_errors=True)

    ui.set_source(1, "gauss1d.g1")
    ui.set_source(2, "gauss1d.g1")

    ui.set_stat("chi2gehrels")

    si = ui.get_stat_info()

    stat1 = si[0].statname
    stat2 = si[1].statname
    stat12 = si[2].statname

    assert stat1 == 'chi2gehrels'
    assert stat2 == 'chi2'
    assert stat12 == 'chi2gehrels'

    # Case 2: first ds has errors, second has not, chi2-derived
    # (chi2gehrels) statistic. I expect stat.name to be chi2 for ds1,
    # chi2gehrels for ds2, chi2gehrels for ds1,2
    ui.load_data(2, data)
    ui.load_data(1, data, use_errors=True)

    si = ui.get_stat_info()

    stat1 = si[0].statname
    stat2 = si[1].statname
    stat12 = si[2].statname

    assert stat1 == 'chi2'
    assert stat2 == 'chi2gehrels'
    assert stat12 == 'chi2gehrels'

    # Case 3: both datasets have errors, chi2-derived (chi2gehrels)
    # statistic. I expect stat.name to be chi2 for all of them.
    ui.load_data(2, data, use_errors=True)
    ui.load_data(1, data, use_errors=True)

    si = ui.get_stat_info()

    stat1 = si[0].statname
    stat2 = si[1].statname
    stat12 = si[2].statname

    assert stat1 == 'chi2'
    assert stat2 == 'chi2'
    assert stat12 == 'chi2'

    # Case 4: first ds has errors, second has not, LeastSq statistic
    # I expect stat.name to be leastsq for all of them.
    ui.load_data(2, data)
    ui.load_data(1, data, use_errors=True)

    ui.set_stat("leastsq")

    si = ui.get_stat_info()

    stat1 = si[0].statname
    stat2 = si[1].statname
    stat12 = si[2].statname

    assert stat1 == 'leastsq'
    assert stat2 == 'leastsq'
    assert stat12 == 'leastsq'

    # Case 5: both ds have errors, LeastSq statistic
    # I expect stat.name to be leastsq for all of them.
    ui.load_data(2, data, use_errors=True)
    ui.load_data(1, data, use_errors=True)

    ui.set_stat("leastsq")

    si = ui.get_stat_info()

    stat1 = si[0].statname
    stat2 = si[1].statname
    stat12 = si[2].statname

    assert stat1 == 'leastsq'
    assert stat2 == 'leastsq'
    assert stat12 == 'leastsq'

    # Case 6: first ds has errors, second has not, CStat statistic
    # I expect stat.name to be cstat for all of them.
    ui.load_data(2, data)
    ui.load_data(1, data, use_errors=True)

    ui.set_stat("cstat")

    si = ui.get_stat_info()

    stat1 = si[0].statname
    stat2 = si[1].statname
    stat12 = si[2].statname

    assert stat1 == 'cstat'
    assert stat2 == 'cstat'
    assert stat12 == 'cstat'

    # Case7: select chi2 as statistic. One of the ds does not provide
    # errors. I expect sherpa to raise a StatErr exception.
    ui.set_stat('chi2')

    with pytest.raises(StatErr):
        ui.get_stat_info()

    # Case8: select chi2 as statistic. Both datasets provide errors
    # I expect stat to be 'chi2'
    ui.load_data(2, data, use_errors=True)
    si = ui.get_stat_info()

    stat1 = si[0].statname
    stat2 = si[1].statname
    stat12 = si[2].statname

    assert stat1 == 'chi2'
    assert stat2 == 'chi2'
    assert stat12 == 'chi2'
Example #33
def test_load_multi_arfsrmfs(make_data_path, clean_astro_ui):
    """Added in #728 to ensure cache parameter is sent along by
    MultiResponseSumModel (fix #717).

    This has since been simplified to switch from xsapec to
    powlaw1d as it drops the need for XSPEC and is a simpler
    model, so is less affected by changes in the model code.

    A fit of the Sherpa powerlaw-model to 3c273.pi with a
    single response in CIAO 4.11 (background subtracted,
    0.5-7 keV) returns gamma = 1.9298, ampl = 1.73862e-4
    so doubling the response should halve the amplitude but
    leave the gamma value the same when using two responses,
    as below. This is with chi2datavar.
    """

    pha_pi = make_data_path("3c273.pi")
    ui.load_pha(1, pha_pi)
    ui.load_pha(2, pha_pi)

    arf = make_data_path("3c273.arf")
    rmf = make_data_path("3c273.rmf")

    ui.load_multi_arfs(1, [arf, arf], [1, 2])
    ui.load_multi_arfs(2, [arf, arf], [1, 2])

    ui.load_multi_rmfs(1, [rmf, rmf], [1, 2])
    ui.load_multi_rmfs(2, [rmf, rmf], [1, 2])

    # Check multiple responses have been loaded
    #
    d1 = ui.get_data(1)
    d2 = ui.get_data(2)
    assert d1.response_ids == [1, 2]
    assert d2.response_ids == [1, 2]

    # Unfortunately we load the same response so it's hard
    # to tell the difference here!
    #
    assert ui.get_arf(resp_id=1).name == arf
    assert ui.get_arf(resp_id=2).name == arf
    assert ui.get_rmf(2, resp_id=1).name == rmf
    assert ui.get_rmf(2, resp_id=2).name == rmf

    ui.notice(0.5, 7)
    ui.subtract(1)
    ui.subtract(2)

    src = ui.create_model_component('powlaw1d', 'src')
    ui.set_model(1, src)
    ui.set_model(2, src)

    # ensure the test is repeatable by running with a known
    # statistic and method
    #
    ui.set_method('levmar')
    ui.set_stat('chi2datavar')

    # Really what we care about for fixing #717 is that
    # fit does not error out, but it's useful to know that
    # the fit has changed the parameter values (which were
    # both 1 before the fit).
    #
    ui.fit()
    fr = ui.get_fit_results()
    assert fr.succeeded
    assert fr.datasets == (1, 2)

    assert src.gamma.val == pytest.approx(1.9298, rel=1.0e-4)
    assert src.ampl.val == pytest.approx(1.73862e-4 / 2, rel=1.0e-4)
Example #34
def load_chi2asym_stat():
    """"Load and set the chi2asym statistic"""
    import sherpa.astro.ui as sau
    sau.load_user_stat("chi2asym", chi2asym_stat_func, chi2asym_err_func)
    sau.set_stat(chi2asym)
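
The `chi2asym_stat_func` and `chi2asym_err_func` callbacks are defined elsewhere in that module; `load_user_stat` only requires the standard user-statistic signatures used throughout these examples. A sketch of what an asymmetric chi-square pair could look like (the body here is an illustrative assumption, not the actual gammapy implementation):

import numpy as np

# Hypothetical upper-error array; in real use this would come from the data.
UPPER_ERRORS = None  # same length as the data, or None to fall back to staterror


def chi2asym_stat_func(data, model, staterror, syserror=None, weight=None):
    """Chi-square with asymmetric errors: use the upper error when the model
    falls below the data and the lower error otherwise."""
    resid = data - model
    upper = staterror if UPPER_ERRORS is None else UPPER_ERRORS
    sigma = np.where(resid >= 0, upper, staterror)
    fvec = (resid / sigma) ** 2
    return fvec.sum(), fvec


def chi2asym_err_func(data):
    """Default per-bin error estimate (Gaussian approximation)."""
    return np.sqrt(data)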
Example #35
def test_annulus():

    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)

    r_in = 10.0
    r_out = 30.0

    e = spec.generate_energies(exp_time, area, prng=prng)

    ann_src = AnnulusModel(ra0, dec0, r_in, r_out, e.size, prng=prng)

    write_photon_list("ann",
                      "ann",
                      e.flux,
                      ann_src.ra,
                      ann_src.dec,
                      e,
                      overwrite=True)

    instrument_simulator("ann_simput.fits",
                         "ann_evt.fits",
                         exp_time,
                         "hdxi", [ra0, dec0],
                         ptsrc_bkgnd=False,
                         instr_bkgnd=False,
                         foreground=False,
                         prng=prng)

    inst = get_instrument_from_registry("hdxi")
    arf = AuxiliaryResponseFile(inst["arf"])
    cspec = ConvolvedSpectrum(spec, arf)
    ph_flux = cspec.get_flux_in_band(0.5, 7.0)[0].value
    S0 = ph_flux / (np.pi * (r_out**2 - r_in**2))

    write_radial_profile("ann_evt.fits",
                         "ann_evt_profile.fits", [ra0, dec0],
                         1.1 * r_in,
                         0.9 * r_out,
                         100,
                         ctr_type="celestial",
                         emin=0.5,
                         emax=7.0,
                         overwrite=True)

    load_data(1, "ann_evt_profile.fits", 3, ["RMID", "SUR_BRI", "SUR_BRI_ERR"])
    set_stat("chi2")
    set_method("levmar")
    set_source("const1d.src")
    src.c0 = 0.8 * S0

    fit()
    set_covar_opt("sigma", 1.645)
    covar()
    res = get_covar_results()

    assert np.abs(res.parvals[0] - S0) < res.parmaxes[0]

    os.chdir(curdir)
    shutil.rmtree(tmpdir)
Example #36
def test_beta_model_flux():
    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)

    r_c = 20.0
    beta = 1.0

    prng = 34

    e = spec.generate_energies(exp_time, area, prng=prng)

    beta_src = BetaModel(ra0, dec0, r_c, beta, e.size, prng=prng)

    write_photon_list("beta",
                      "beta",
                      e.flux,
                      beta_src.ra,
                      beta_src.dec,
                      e,
                      overwrite=True)

    instrument_simulator("beta_simput.fits",
                         "beta_evt.fits",
                         exp_time,
                         "acisi_cy0", [ra0, dec0],
                         ptsrc_bkgnd=False,
                         instr_bkgnd=False,
                         foreground=False,
                         roll_angle=37.0,
                         prng=prng)

    ph_flux = spec.get_flux_in_band(0.5, 7.0)[0].value
    S0 = 3.0 * ph_flux / (2.0 * np.pi * r_c * r_c)

    wspec = spec.new_spec_from_band(0.5, 7.0)

    make_exposure_map("beta_evt.fits",
                      "beta_expmap.fits",
                      wspec.emid.value,
                      weights=wspec.flux.value,
                      overwrite=True)

    write_radial_profile("beta_evt.fits",
                         "beta_evt_profile.fits", [ra0, dec0],
                         0.0,
                         100.0,
                         200,
                         ctr_type="celestial",
                         emin=0.5,
                         emax=7.0,
                         expmap_file="beta_expmap.fits",
                         overwrite=True)

    load_data(1, "beta_evt_profile.fits", 3,
              ["RMID", "SUR_FLUX", "SUR_FLUX_ERR"])
    set_stat("chi2")
    set_method("levmar")
    set_source("beta1d.src")
    src.beta = 1.0
    src.r0 = 10.0
    src.ampl = 0.8 * S0
    freeze(src.xpos)

    fit()
    set_covar_opt("sigma", 1.645)
    covar()
    res = get_covar_results()

    assert np.abs(res.parvals[0] - r_c) < res.parmaxes[0]
    assert np.abs(res.parvals[1] - beta) < res.parmaxes[1]
    assert np.abs(res.parvals[2] - S0) < res.parmaxes[2]

    os.chdir(curdir)
    shutil.rmtree(tmpdir)
Example #37
def test_can_use_swift_data(make_data_path):
    """A basic check that we can read in and use the Swift data.

    Unlike the previous tests, that directly access the io module,
    this uses the ui interface.
    """

    # QUS are there pytest fixtures that ensure the state is
    # clean on entry and exit?
    ui.clean()

    # The Swift PHA file does not have the ANCRFILE/RESPFILE keywords
    # set up, so the responses have to be manually added.
    #
    ui.load_pha(make_data_path(PHAFILE))
    ui.load_rmf(make_data_path(RMFFILE))
    ui.load_arf(make_data_path(ARFFILE))

    assert ui.get_analysis() == 'energy'

    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.ampl', 0.0003)

    # The responses have the first bin start at an energy of 0,
    # which causes issues for Sherpa. There should be a
    # RuntimeWarning due to a divide by zero.
    #
    with pytest.warns(RuntimeWarning) as record:
        stat = ui.calc_stat()

    # The exact form of the message depends on the Python version;
    # this could be checked, but it feels excessive for this
    # particular test, which is just a regression check, so use a
    # more lax approach.
    #
    assert len(record) == 1
    assert record[0].message.args[0] in \
        ['divide by zero encountered in divide',
         'divide by zero encountered in true_divide']

    # The stat value depends on what power-law model is used. With
    # xspowerlaw it is NaN, but with powlaw1d it is finite.
    #
    # This check is purely a regression test, so the value has
    # not been externally validated.
    #
    # assert np.isnan(stat)
    assert_allclose(stat, 58.2813692358182)

    # Manually adjust the first bin to avoid this problem.
    # Add in asserts just in case this gets "fixed" in the
    # I/O layer (as XSPEC does).
    #
    arf = ui.get_arf()
    rmf = ui.get_rmf()
    assert arf.energ_lo[0] == 0.0
    assert rmf.energ_lo[0] == 0.0
    assert rmf.e_min[0] == 0.0

    # The bin widths are ~ 0.005 or ~ 0.01 keV, so pick a value
    # smaller than this.
    #
    ethresh = 1e-6
    arf.energ_lo[0] = ethresh
    rmf.energ_lo[0] = ethresh
    rmf.e_min[0] = ethresh

    # Pick an energy range which isn't affected by the first
    # bin.
    #
    # Unfortunately, using a range of 0.3-8.0 gives 771 bins
    # in XSPEC - channels 30 to 800 - but 772 bins in Sherpa.
    # If I use ignore(None, 0.3); ignore(8.0, None) instead
    # then the result is 771 bins. This is because the e_min/max
    # of the RMF has channel widths of 0.01 keV, starting at 0,
    # so both 0.3 and 8.0 fall on a bin boundary. So, it's either
    # a difference in < or <= (or > vs >=), or a rounding issue
    # due to floating-point conversion leading to one bin boundary
    # being slightly different in Sherpa vs XSPEC.
    #
    # When using ui.notice(0.3, 8.0); ui.get_indep(filter=True)
    # returns 772 channels, 30 to 801.
    #
    # Using ui.notice(0.3, 7.995) selects channels 30 to 800. So
    # this range is used. Alternatively, channel 801 could have been
    # excluded explicitly.
    #
    # ui.notice(0.3, 8.0)
    ui.notice(0.3, 7.995)

    # XSPEC 12.9.1b calculation of the statistic:
    #   chi sq = 203.88 from 771 bins with 769 dof
    #   cstat  = 568.52
    #
    # There are known differences between XSPEC and Sherpa
    # with chi2xspecvar. This only affects data sets where
    # there is background subtraction, which is not the case
    # here. See https://github.com/sherpa/sherpa/issues/356
    #
    ui.set_stat('chi2xspecvar')
    stat_xvar = ui.get_stat_info()

    assert len(stat_xvar) == 1
    stat_xvar = stat_xvar[0]
    assert stat_xvar.numpoints == 771
    assert stat_xvar.dof == 769
    assert_allclose(stat_xvar.statval, 203.88,
                    rtol=0, atol=0.005)

    ui.set_stat('cstat')
    stat_cstat = ui.get_stat_info()

    assert len(stat_cstat) == 1
    stat_cstat = stat_cstat[0]
    assert stat_cstat.numpoints == 771
    assert stat_cstat.dof == 769
    assert_allclose(stat_cstat.statval, 568.52,
                    rtol=0, atol=0.005)

    ui.clean()
Example #38
    def test_chi2(self):

        # Case 1: first ds has no error, second has, chi2-derived (chi2gehrels)
        # statistic. I expect stat.name to be chi2gehrels for ds1, chi2 for
        # ds2, chi2gehrels for ds1,2
        ui.load_data(1, self.data)
        ui.load_data(2, self.data, use_errors=True)

        ui.set_source(1, "gauss1d.g1")
        ui.set_source(2, "gauss1d.g1")

        ui.set_stat("chi2gehrels")

        si = ui.get_stat_info()

        stat1 = si[0].statname
        stat2 = si[1].statname
        stat12 = si[2].statname

        self.assertEqual('chi2gehrels', stat1)
        self.assertEqual('chi2', stat2)
        self.assertEqual('chi2gehrels', stat12)

        # Case 2: first ds has errors, second has not, chi2-derived
        # (chi2gehrels) statistic. I expect stat.name to be chi2 for ds1,
        # chi2gehrels for ds2, chi2gehrels for ds1,2
        ui.load_data(2, self.data)
        ui.load_data(1, self.data, use_errors=True)

        si = ui.get_stat_info()

        stat1 = si[0].statname
        stat2 = si[1].statname
        stat12 = si[2].statname

        self.assertEqual('chi2gehrels', stat2)
        self.assertEqual('chi2', stat1)
        self.assertEqual('chi2gehrels', stat12)

        # Case 3: both datasets have errors, chi2-derived (chi2gehrels)
        # statistic. I expect stat.name to be chi2 for all of them.
        ui.load_data(2, self.data, use_errors=True)
        ui.load_data(1, self.data, use_errors=True)

        si = ui.get_stat_info()

        stat1 = si[0].statname
        stat2 = si[1].statname
        stat12 = si[2].statname

        self.assertEqual('chi2', stat2)
        self.assertEqual('chi2', stat1)
        self.assertEqual('chi2', stat12)

        # Case 4: first ds has errors, second has not, LeastSq statistic
        # I expect stat.name to be leastsq for all of them.
        ui.load_data(2, self.data)
        ui.load_data(1, self.data, use_errors=True)

        ui.set_stat("leastsq")

        si = ui.get_stat_info()

        stat1 = si[0].statname
        stat2 = si[1].statname
        stat12 = si[2].statname

        self.assertEqual('leastsq', stat2)
        self.assertEqual('leastsq', stat1)
        self.assertEqual('leastsq', stat12)

        # Case 5: both ds have errors, LeastSq statistic
        # I expect stat.name to be leastsq for all of them.
        ui.load_data(2, self.data, use_errors=True)
        ui.load_data(1, self.data, use_errors=True)

        ui.set_stat("leastsq")

        si = ui.get_stat_info()

        stat1 = si[0].statname
        stat2 = si[1].statname
        stat12 = si[2].statname

        self.assertEqual('leastsq', stat2)
        self.assertEqual('leastsq', stat1)
        self.assertEqual('leastsq', stat12)

        # Case 6: first ds has errors, second has not, CStat statistic
        # I expect stat.name to be cstat for all of them.
        ui.load_data(2, self.data)
        ui.load_data(1, self.data, use_errors=True)

        ui.set_stat("cstat")

        si = ui.get_stat_info()

        stat1 = si[0].statname
        stat2 = si[1].statname
        stat12 = si[2].statname

        self.assertEqual('cstat', stat2)
        self.assertEqual('cstat', stat1)
        self.assertEqual('cstat', stat12)

        # Case7: select chi2 as statistic. One of the ds does not provide
        # errors. I expect sherpa to raise a StatErr exception.
        ui.set_stat('chi2')

        caught = False

        from sherpa.utils.err import StatErr
        try:
            ui.get_stat_info()
        except StatErr:
            caught = True

        self.assertTrue(caught, msg='StatErr was not caught')

        # Case8: select chi2 as statistic. Both datasets provide errors
        # I expect stat to be 'chi2'
        ui.load_data(2, self.data, use_errors=True)
        si = ui.get_stat_info()

        stat1 = si[0].statname
        stat2 = si[1].statname
        stat12 = si[2].statname

        self.assertEqual('chi2', stat2)
        self.assertEqual('chi2', stat1)
        self.assertEqual('chi2', stat12)
Example #39
import sherpa.astro.ui as sau

sau.load_pha("3c273.pi")
sau.set_source(sau.powlaw1d.p1)
sau.guess(p1)
sau.set_stat("wstat")
sau.fit()
stats = sau.get_stat_info()
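
get_stat_info() returns one entry per dataset (plus a combined entry when several datasets are loaded); the fields used throughout these examples are statname, statval, numpoints, and dof:

info = stats[0]
print(info.statname, info.statval, info.numpoints, info.dof)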

Example #40
def test_can_use_swift_data(make_data_path):
    """A basic check that we can read in and use the Swift data.

    Unlike the previous tests, that directly access the io module,
    this uses the ui interface.
    """

    # QUS are there pytest fixtures that ensure the state is
    # clean on entry and exit?
    ui.clean()

    # The Swift PHA file does not have the ANCRFILE/RESPFILE keywords
    # set up, so the responses have to be manually added.
    #
    ui.load_pha(make_data_path(PHAFILE))

    rmffile = make_data_path(RMFFILE)
    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        ui.load_rmf(rmffile)

    validate_replacement_warning(ws, 'RMF', rmffile)

    arffile = make_data_path(ARFFILE)
    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        ui.load_arf(arffile)

    validate_replacement_warning(ws, 'ARF', arffile)

    assert ui.get_analysis() == 'energy'

    arf = ui.get_arf()
    rmf = ui.get_rmf()
    assert arf.energ_lo[0] == EMIN
    assert rmf.energ_lo[0] == EMIN
    assert rmf.e_min[0] == 0.0

    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.ampl', 0.0003)

    stat = ui.calc_stat()

    # This check is purely a regression test, so the value has
    # not been externally validated.
    #
    assert_allclose(stat, 58.2813692358182)

    # Pick an energy range which isn't affected by the first
    # bin.
    #
    # Unfortunately, using a range of 0.3-8.0 gives 771 bins
    # in XSPEC - channels 30 to 800 - but 772 bins in Sherpa.
    # If I use ignore(None, 0.3); ignore(8.0, None) instead
    # then the result is 771 bins. This is because the e_min/max
    # of the RMF has channel widths of 0.01 keV, starting at 0,
    # so both 0.3 and 8.0 fall on a bin boundary. So, it's either
    # a difference in < or <= (or > vs >=), or a rounding issue
    # due to floating-point conversion leading to one bin boundary
    # being slightly different in Sherpa vs XSPEC.
    #
    # When using ui.notice(0.3, 8.0); ui.get_indep(filter=True)
    # returns 772 channels, 30 to 801.
    #
    # Using ui.notice(0.3, 7.995) selects channels 30 to 800. So
    # this range is used. Alternatively, channel 801 could have been
    # excluded explicitly.
    #
    # ui.notice(0.3, 8.0)
    ui.notice(0.3, 7.995)

    # XSPEC 12.9.1b calculation of the statistic:
    #   chi sq = 203.88 from 771 bins with 769 dof
    #   cstat  = 568.52
    #
    # There are known differences between XSPEC and Sherpa
    # with chi2xspecvar. This only affects data sets where
    # there is background subtraction, which is not the case
    # here. See https://github.com/sherpa/sherpa/issues/356
    #
    ui.set_stat('chi2xspecvar')
    stat_xvar = ui.get_stat_info()

    assert len(stat_xvar) == 1
    stat_xvar = stat_xvar[0]
    assert stat_xvar.numpoints == 771
    assert stat_xvar.dof == 769
    assert_allclose(stat_xvar.statval, 203.88, rtol=0, atol=0.005)

    ui.set_stat('cstat')
    stat_cstat = ui.get_stat_info()

    assert len(stat_cstat) == 1
    stat_cstat = stat_cstat[0]
    assert stat_cstat.numpoints == 771
    assert stat_cstat.dof == 769
    assert_allclose(stat_cstat.statval, 568.52, rtol=0, atol=0.005)

    ui.clean()
Example #41
def test_beta_model():
    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)

    r_c = 20.0
    beta = 1.0

    exp_time = Quantity(500.0, "ks")

    e = spec.generate_energies(exp_time, area, prng=prng)

    beta_src = BetaModel(ra0, dec0, r_c, beta, e.size, prng=prng)

    write_photon_list("beta",
                      "beta",
                      e.flux,
                      beta_src.ra,
                      beta_src.dec,
                      e,
                      overwrite=True)

    instrument_simulator("beta_simput.fits",
                         "beta_evt.fits",
                         exp_time,
                         "hdxi", [ra0, dec0],
                         ptsrc_bkgnd=False,
                         instr_bkgnd=False,
                         foreground=False,
                         prng=prng)

    inst = get_instrument_from_registry("hdxi")
    arf = AuxiliaryResponseFile(inst["arf"])
    cspec = ConvolvedSpectrum(spec, arf)
    ph_flux = cspec.get_flux_in_band(0.5, 7.0)[0].value
    S0 = 3.0 * ph_flux / (2.0 * np.pi * r_c * r_c)

    write_radial_profile("beta_evt.fits",
                         "beta_evt_profile.fits", [ra0, dec0],
                         0.0,
                         100.0,
                         200,
                         ctr_type="celestial",
                         emin=0.5,
                         emax=7.0,
                         overwrite=True)

    load_data(1, "beta_evt_profile.fits", 3,
              ["RMID", "SUR_BRI", "SUR_BRI_ERR"])
    set_stat("chi2")
    set_method("levmar")
    set_source("beta1d.src")
    src.beta = 1.0
    src.r0 = 10.0
    src.ampl = 0.8 * S0
    freeze(src.xpos)

    fit()
    set_covar_opt("sigma", 1.645)
    covar()
    res = get_covar_results()

    assert np.abs(res.parvals[0] - r_c) < res.parmaxes[0]
    assert np.abs(res.parvals[1] - beta) < res.parmaxes[1]
    assert np.abs(res.parvals[2] - S0) < res.parmaxes[2]

    os.chdir(curdir)
    shutil.rmtree(tmpdir)
Example #42
0
def test_load_pha2_compare_meg_order1(make_data_path, clean_astro_ui):
    """Do we read in the MEG +/-1 orders?"""

    # The MEG -1 order is dataset 9
    # The MEG +1 order is dataset 10
    #
    pha2file = make_data_path('3c120_pha2')
    meg_p1file = make_data_path('3c120_meg_1.pha')
    meg_m1file = make_data_path('3c120_meg_-1.pha')

    ui.load_pha('meg_p1', meg_p1file)
    ui.load_pha('meg_m1', meg_m1file)

    orig_ids = set(ui.list_data_ids())
    assert 'meg_p1' in orig_ids
    assert 'meg_m1' in orig_ids

    ui.load_pha(pha2file)

    for n, lbl in zip([9, 10], ["-1", "1"]):
        h = '3c120_meg_{}'.format(lbl)
        ui.load_arf(n, make_data_path(h + '.arf'))
        ui.load_rmf(n, make_data_path(h + '.rmf'))

    # check that loading the pha2 file doesn't overwrite existing
    # data
    new_ids = set(ui.list_data_ids())

    for i in range(1, 13):
        orig_ids.add(i)

    assert orig_ids == new_ids

    # Check that the same model gives the same statistic
    # value; this should check that the data and response are
    # read in, that grouping and filtering work, and that
    # model evaluation is the same, without having to
    # check these steps individually.
    #
    # The model is not meant to be physically meaningful,
    # just one that reasonably represents the data and
    # can be evaluated without requiring XSPEC.
    #
    pmdl = ui.create_model_component('powlaw1d', 'pmdl')
    pmdl.gamma = 0.318
    pmdl.ampl = 2.52e-3

    ncts = 20
    for i in [9, 10, "meg_m1", "meg_p1"]:
        ui.set_analysis(i, 'wave')
        ui.group_counts(i, ncts)
        ui.notice_id(i, 2, 12)
        ui.set_source(i, pmdl)

    ui.set_stat('chi2datavar')
    s9 = ui.calc_stat(9)
    s10 = ui.calc_stat(10)

    sm1 = ui.calc_stat('meg_m1')
    sp1 = ui.calc_stat('meg_p1')

    # Since these should be the same, we use an equality test
    # rather than approximation. At least until it becomes
    # a problem.
    #
    assert s9 == sm1
    assert s10 == sp1

    # The values were calculated using CIAO 4.9, Linux64, with
    # Python 3.5.
    #
    assert s9 == pytest.approx(1005.4378559390879)
    assert s10 == pytest.approx(1119.980439489647)
Example #43
0
def test_341():
    """
    The original reporter of bug #341 had a special implementation that should be captured
    by this test. The implementation has a proxy model that takes care of updating the actual
    model when it is evaluated. During a recent refactoring of the Stat and Fit code
    (PR #287) a regression was introduced by short-circuiting the evaluation of the model.

    """
    class ExampleModel(object):
        """ Class to define model
        """
        def __init__(self, x, y):
            self.x = np.array(x)
            self.y = np.array(y)
            self.parvals = [1, 2]
            self.parnames = ("m", "b")

        def calc_stat(self):
            return float(np.sum(np.abs(self.y - self.model())))

        def model(self):
            return self.parvals[0] * self.x + self.parvals[1]

    class CalcModel(object):
        """ Class to update model parameters
        """
        def __init__(self, model):
            self.model = model

        def __call__(self, pars, x):
            self.model.parvals = pars
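            # The array returned here is ignored: CalcStat below computes the
            # statistic directly from the proxy ExampleModel, so this user
            # model only exists to push the fit parameters into that object.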
            return np.ones_like(x)

    class CalcStat(object):
        """ Class to determine fit statistic
        """
        def __init__(self, model):
            self.model = model

        def __call__(self, _data, _model, *args, **kwargs):
            fit_stat = self.model.calc_stat()

            return fit_stat, np.ones(1)

    xdata = [1, 2, 3]
    ydata = [4, 5, 6]
    newmodel = ExampleModel(xdata, ydata)

    dummy_data = np.zeros(1)
    dummy_times = np.arange(1)
    ui.load_arrays(1, dummy_times, dummy_data)

    method = 'simplex'
    ui.set_method(method)

    ui.load_user_model(CalcModel(newmodel), 'simplemodel')
    ui.add_user_pars('simplemodel', newmodel.parnames)
    ui.set_model(1, 'simplemodel')

    calc_stat = CalcStat(newmodel)
    ui.load_user_stat('customstat', calc_stat, lambda x: np.ones_like(x))
    ui.set_stat(eval('customstat'))

    ui.fit(1)

    assert ui.get_par("simplemodel.m").val == approx(1, abs=0.00001)
    assert ui.get_par("simplemodel.b").val == approx(3, abs=0.00001)
Example #44
0
def test_746(make_data_path, clean_astro_ui):
    """Test https://github.com/sherpa/sherpa/issues/746

    Something in #444 (reverted in #759) caused:

      - the fit to fail (niter=2)
      - the line amplitude not to change significantly
      - the statistic reported by the fit to be different to
        that returned by calc_stat

    Something with how the cache code handles analysis=wave
    appears to be the problem. This test takes the line data
    from 746 and adds it into the existing PHA2 file we have
    (3c120) to replicate the problem. Fortunately this
    wavelength range contains essentially no counts, so we
    can just add in the counts to make a fake line and check
    we can fit it.
    """

    ui.load_pha(make_data_path('3c120_pha2.gz'))
    ui.load_arf(10, make_data_path('3c120_meg_1.arf.gz'))
    ui.load_rmf(10, make_data_path('3c120_meg_1.rmf.gz'))

    # Add in the line
    d10 = ui.get_data(10)
    idx = np.arange(4068, 4075, dtype=int)
    d10.counts[idx] = [1, 1, 1, 2, 3, 1, 1]

    # group the data
    ui.group_width(id=10, num=2)

    ui.set_analysis('wave')
    ui.notice(21.4, 21.7)

    # internal check that getting the expected data
    expected = np.zeros(32)
    expected[[9, 10, 11, 12]] = [2, 3, 4, 1]
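    # (The seven injected counts [1, 1, 1, 2, 3, 1, 1] end up in four width-2
    # groups of 2, 3, 4 and 1 counts, which is what the expected array encodes.)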
    expected[26] = 1  # this count is from the 3c120 data
    d = d10.get_dep(filter=True)
    assert d == pytest.approx(expected)

    line = ui.create_model_component('gauss1d', 'name')

    # treat the line as a delta function
    line.fwhm.val = 0.00001
    line.fwhm.frozen = True
    line.pos = 21.6
    line.pos.frozen = True

    line.ampl = 2

    ui.set_source(10, line)

    # the original fit used levmar, so use that here
    # (it looks like it also fails with simplex)
    ui.set_method('levmar')
    ui.set_stat('cstat')

    sinit = ui.calc_stat(10)
    ui.fit(10)
    fr = ui.get_fit_results()
    sfinal = ui.calc_stat(10)

    # Did the
    #   - fit improve
    #   - take more than two iterations
    #   - report the same statistic values as calc_stat
    #   - change the fit parameter
    #
    assert sfinal < sinit

    assert fr.succeeded
    assert fr.nfev > 2

    assert fr.istatval == sinit
    assert fr.statval == sfinal

    assert line.ampl.val > 100
    assert len(fr.parvals) == 1
    assert fr.parvals[0] == line.ampl.val

    # some simple checks to throw in because we can
    assert fr.parnames == ('name.ampl', )
    assert fr.datasets == (10,)
    assert fr.statname == 'cstat'
    assert fr.methodname == 'levmar'
    assert fr.itermethodname == 'none'
    assert fr.numpoints == 32
    assert fr.dof == 31

    # Now add in some "absolute" checks to act as regression tests.
    # If these fail then it doesn't necessarily mean something bad
    # has happened.
    #
    assert fr.nfev == 15
    assert sinit == pytest.approx(82.72457294394245)
    assert sfinal == pytest.approx(15.39963248224592)
    assert line.ampl.val == pytest.approx(113.95646989927054)
Example #45
0
def test_can_use_swift_data(make_data_path, clean_astro_ui):
    """A basic check that we can read in and use the Swift data.

    Unlike the previous tests, that directly access the io module,
    this uses the ui interface.
    """

    # The Swift PHA file does not have the ANCRFILE/RESPFILE keywords
    # set up, so the responses have to be manually added.
    #
    ui.load_pha(make_data_path(PHAFILE))

    rmffile = make_data_path(RMFFILE)
    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        ui.load_rmf(rmffile)

    validate_replacement_warning(ws, 'RMF', rmffile)

    arffile = make_data_path(ARFFILE)
    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        ui.load_arf(arffile)

    validate_replacement_warning(ws, 'ARF', arffile)

    assert ui.get_analysis() == 'energy'

    arf = ui.get_arf()
    rmf = ui.get_rmf()
    assert arf.energ_lo[0] == EMIN
    assert rmf.energ_lo[0] == EMIN
    assert rmf.e_min[0] == 0.0

    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.ampl', 0.0003)

    stat = ui.calc_stat()

    # This check is purely a regression test, so the value has
    # not been externally validated.
    #
    assert_allclose(stat, 58.2813692358182)

    # Pick an energy range which isn't affected by the first
    # bin.
    #
    # Unfortunately, using a range of 0.3-8.0 gives 771 bins
    # in XSPEC - channels 30 to 800 - but 770 bins in Sherpa,
    # channels 31 to 800.
    #
    # Note that the channel numbering starts at 0:
    # % dmlist target_sr.pha header,clean,raw | grep TLMIN
    # TLMIN1       = 0                    / Lowest legal channel number
    #
    # and so it's not clear when XSPEC says 30-800 what it
    # means. From https://github.com/sherpa/sherpa/issues/1211#issuecomment-881647128
    # we have that the first bin it is using is
    #     0.29-0.30
    # and the last bin is
    #     7.99-8.00
    # and I've checked with iplot that it has renumbered the
    # channels to 1-1024 from 0-1023
    #
    # % dmlist swxpc0to12s6_20130101v014.rmf.gz"[ebounds][channel=28:31]" data,clean
    #  CHANNEL    E_MIN                E_MAX
    #         28     0.28000000119209     0.28999999165535
    #         29     0.28999999165535     0.30000001192093
    #         30     0.30000001192093     0.31000000238419
    #         31     0.31000000238419     0.31999999284744
    # % dmlist swxpc0to12s6_20130101v014.rmf.gz"[ebounds][channel=798:801]" data,clean
    #  CHANNEL    E_MIN                E_MAX
    #        798         7.9800000191         7.9899997711
    #        799         7.9899997711                  8.0
    #        800                  8.0         8.0100002289
    #        801         8.0100002289         8.0200004578
    #
    # If I use ignore(None, 0.3); ignore(8.0, None) instead then the
    # result is 771 bins (channels 31 to 800). This is because the
    # e_min/max of the RMF has channel widths of 0.01 keV, starting at
    # 0, so both 0.3 and 8.0 fall on a bin boundary. So, it's either a
    # difference in < or <= (or > vs >=), or a rounding issue due to
    # floating-point conversion leading to one bin boundary being
    # slightly different in Sherpa vs XSPEC.
    #
    # When using ui.notice(0.3, 8.0); ui.get_indep(filter=True)
    # returns 770 channels, 31 to 800.
    #
    # Using ui.notice(0.3, 7.995) selects channels 31 to 800.
    # Using ui.notice(0.299, 8.0) selects channels 30 to 800.
    # Using ui.notice(0.299, 7.995) selects channels 30 to 800.
    #
    ui.notice(0.299, 8.0)

    # Check the selected range
    pha = ui.get_data()
    expected = np.zeros(1024, dtype=bool)
    expected[29:800] = True
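    # (Mask indices 29..799 cover the 0.29-8.0 keV bins, i.e. channels 30-800
    # in the 1-based numbering quoted in the comment above.)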
    assert pha.mask == pytest.approx(expected)
    assert pha.get_mask() == pytest.approx(expected)

    # XSPEC 12.9.1b calculation of the statistic:
    #   chi sq = 203.88 from 771 bins with 769 dof
    #   cstat  = 568.52
    #
    # There are known differences between XSPEC and Sherpa
    # with chi2xspecvar. This only affects data sets where
    # there is background subtraction, which is not the case
    # here. See https://github.com/sherpa/sherpa/issues/356
    #
    ui.set_stat('chi2xspecvar')
    stat_xvar = ui.get_stat_info()

    assert len(stat_xvar) == 1
    stat_xvar = stat_xvar[0]
    assert stat_xvar.numpoints == 771
    assert stat_xvar.dof == 769
    assert_allclose(stat_xvar.statval, 203.88,
                    rtol=0, atol=0.005)

    ui.set_stat('cstat')
    stat_cstat = ui.get_stat_info()

    assert len(stat_cstat) == 1
    stat_cstat = stat_cstat[0]
    assert stat_cstat.numpoints == 771
    assert stat_cstat.dof == 769
    assert_allclose(stat_cstat.statval, 568.52,
                    rtol=0, atol=0.005)
Example #46
0
def test_plot_pvalue(make_data_path, clean_astro_ui, hide_logging):
    """Check plot_pvalue with PHA data."""

    fname = make_data_path("qso.pi")
    ui.load_pha(fname)

    ui.set_stat('cstat')
    ui.set_method("neldermead")

    ui.group_counts(10)
    ui.notice(0.3, 8)

    ui.set_model("xsphabs.abs1*(xspowerlaw.p1 +gauss1d.g1)")

    # move the fit close to the best fit to save a small amount
    # of time.
    abs1.nh = 0.05
    p1.phoindex = 1.28
    p1.norm = 2e-4
    g1.ampl = 1.8e-5

    g1.pos = 3.
    ui.freeze(g1.pos)
    g1.fwhm = 0.1
    ui.freeze(g1.fwhm)

    # Could we reduce the number of bins to save evaluation time?
    # We do want a non-default num value when checking the shapes
    # of the output attributes.
    #
    ui.fit()
    ui.plot_pvalue(p1, p1 + g1, num=100, bins=20)

    tmp = ui.get_pvalue_results()

    assert tmp.null == pytest.approx(210.34566845619273)
    assert tmp.alt == pytest.approx(207.66618095925094)
    assert tmp.lr == pytest.approx(2.679487496941789)

    # Have we returned the correct info?
    #
    # Is it worth checking the stored data (aka how randomised is this
    # output)?
    #
    assert tmp.samples.shape == (100, 2)
    assert tmp.stats.shape == (100, 2)
    assert tmp.ratios.shape == (100, )
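
    # Added check (not in the original test): the p-value reported by
    # plot_pvalue is essentially the fraction of simulated likelihood-ratio
    # values that exceed the observed ratio, which can be recomputed from the
    # stored fields.
    pval_check = float((tmp.ratios >= tmp.lr).mean())
    assert 0.0 <= pval_check <= 1.0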

    # Check the plot
    #
    tmp = ui.get_pvalue_plot()

    assert tmp.lr == pytest.approx(2.679487496941789)

    assert tmp.xlabel == 'Likelihood Ratio'
    assert tmp.ylabel == 'Frequency'
    assert tmp.title == 'Likelihood Ratio Distribution'

    # It would be nice to check the values here
    #
    assert tmp.ratios.shape == (100, )
    assert tmp.xlo.shape == (21, )
    assert tmp.xhi.shape == (21, )
    assert tmp.y.shape == (21, )
maps["background"].write("analysis_3d/background_2D.fits", overwrite=True)
maps["exposure"].write("analysis_3d/exposure_2D.fits", overwrite=True)
fits.writeto("analysis_3d/psf_2D.fits", psf2D.data, overwrite=True)

# ## 2. Analysis using sherpa
#
# ### Read the maps and store them in a sherpa model
#
# We now have the prepared files which sherpa can read.
# This part of the notebook shows how to do image analysis using sherpa

# In[ ]:

import sherpa.astro.ui as sh

sh.set_stat("cash")
sh.set_method("simplex")

sh.load_image("analysis_3d/counts_2D.fits")
sh.set_coord("logical")

sh.load_table_model("expo", "analysis_3d/exposure_2D.fits")
sh.load_table_model("bkg", "analysis_3d/background_2D.fits")
sh.load_psf("psf", "analysis_3d/psf_2D.fits")

# To speed up this tutorial, we change the fit optimization method to Levenberg-Marquardt and set a required tolerance. This can make the fitting less robust, and in practice you can skip this step unless you understand what is going on.

# In[ ]:

sh.set_method("levmar")
sh.set_method_opt("xtol", 1e-5)
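
# A possible continuation (sketch only, not from the original notebook): combine
# the exposure and background table models with the PSF into a full model for
# the counts image before fitting. The Gaussian component name ("src") is a
# placeholder.

# In[ ]:

sh.set_full_model(bkg + psf(sh.gauss2d.src) * expo)
# sh.fit() would then optimize the source parameters with the Cash statistic
# and Levenberg-Marquardt method selected above.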
Example #48
0
    def test_wstat_calc_stat_info(self):
        ui.load_pha("stat", self.make_path("3c273.pi"))
        ui.set_source("stat", ui.powlaw1d.p1)
        ui.set_stat("wstat")
        ui.fit("stat")
        ui.get_stat_info()
Example #49
0
    def test_chi2(self):

        # Case 1: first ds has no error, second has, chi2-derived (chi2gehrels)
        # statistic. I expect stat.name to be chi2gehrels for ds1, chi2 for
        # ds2, chi2gehrels for ds1,2
        ui.load_data(1, self.data)
        ui.load_data(2, self.data, use_errors=True)

        ui.set_source(1, "gauss1d.g1")
        ui.set_source(2, "gauss1d.g1")

        ui.set_stat("chi2gehrels")

        si = ui.get_stat_info()

        stat1 = si[0].statname
        stat2 = si[1].statname
        stat12 = si[2].statname

        self.assertEqual('chi2gehrels', stat1)
        self.assertEqual('chi2', stat2)
        self.assertEqual('chi2gehrels', stat12)

        # Case 2: first ds has errors, second has not, chi2-derived
        # (chi2gehrels) statistic. I expect stat.name to be chi2 for ds1,
        # chi2gehrels for ds2, chi2gehrels for ds1,2
        ui.load_data(2, self.data)
        ui.load_data(1, self.data, use_errors=True)

        si = ui.get_stat_info()

        stat1 = si[0].statname
        stat2 = si[1].statname
        stat12 = si[2].statname

        self.assertEqual('chi2gehrels', stat2)
        self.assertEqual('chi2', stat1)
        self.assertEqual('chi2gehrels', stat12)

        # Case 3: both datasets have errors, chi2-derived (chi2gehrels)
        # statistic. I expect stat.name to be chi2 for all of them.
        ui.load_data(2, self.data, use_errors=True)
        ui.load_data(1, self.data, use_errors=True)

        si = ui.get_stat_info()

        stat1 = si[0].statname
        stat2 = si[1].statname
        stat12 = si[2].statname

        self.assertEqual('chi2', stat2)
        self.assertEqual('chi2', stat1)
        self.assertEqual('chi2', stat12)

        # Case 4: first ds has errors, second has not, LeastSq statistic
        # I expect stat.name to be leastsq for all of them.
        ui.load_data(2, self.data)
        ui.load_data(1, self.data, use_errors=True)

        ui.set_stat("leastsq")

        si = ui.get_stat_info()

        stat1 = si[0].statname
        stat2 = si[1].statname
        stat12 = si[2].statname

        self.assertEqual('leastsq', stat2)
        self.assertEqual('leastsq', stat1)
        self.assertEqual('leastsq', stat12)

        # Case 5: both ds have errors, LeastSq statistic
        # I expect stat.name to be leastsq for all of them.
        ui.load_data(2, self.data, use_errors=True)
        ui.load_data(1, self.data, use_errors=True)

        ui.set_stat("leastsq")

        si = ui.get_stat_info()

        stat1 = si[0].statname
        stat2 = si[1].statname
        stat12 = si[2].statname

        self.assertEqual('leastsq', stat2)
        self.assertEqual('leastsq', stat1)
        self.assertEqual('leastsq', stat12)

        # Case 6: first ds has errors, second has not, CStat statistic
        # I expect stat.name to be cstat for all of them.
        ui.load_data(2, self.data)
        ui.load_data(1, self.data, use_errors=True)

        ui.set_stat("cstat")

        si = ui.get_stat_info()

        stat1 = si[0].statname
        stat2 = si[1].statname
        stat12 = si[2].statname

        self.assertEqual('cstat', stat2)
        self.assertEqual('cstat', stat1)
        self.assertEqual('cstat', stat12)

        # Case 7: select chi2 as statistic. One of the ds does not provide
        # errors. I expect sherpa to raise a StatErr exception.
        ui.set_stat('chi2')

        caught = False

        from sherpa.utils.err import StatErr
        try:
            ui.get_stat_info()
        except StatErr:
            caught = True

        self.assertTrue(caught, msg='StatErr was not caught')

        # Case 8: select chi2 as statistic. Both datasets provide errors
        # I expect stat to be 'chi2'
        ui.load_data(2, self.data, use_errors=True)
        si = ui.get_stat_info()

        stat1 = si[0].statname
        stat2 = si[1].statname
        stat12 = si[2].statname

        self.assertEqual('chi2', stat2)
        self.assertEqual('chi2', stat1)
        self.assertEqual('chi2', stat12)
Example #50
0
def plaw_fit(alpha_sim):

    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)

    bms = BetaModelSource()
    ds = bms.ds

    def _hard_emission(field, data):
        return YTQuantity(1.0e-18, "s**-1*keV**-1")*data["density"]*data["cell_volume"]/mp
    ds.add_field(("gas", "hard_emission"), function=_hard_emission, units="keV**-1*s**-1")

    nH_sim = 0.02
    abs_model = WabsModel(nH_sim)

    A = YTQuantity(2000., "cm**2")
    exp_time = YTQuantity(2.0e5, "s")
    redshift = 0.01

    sphere = ds.sphere("c", (100.,"kpc"))

    plaw_model = PowerLawSourceModel(1.0, 0.01, 11.0, "hard_emission", 
                                     alpha_sim, prng=prng)

    photons = PhotonList.from_data_source(sphere, redshift, A, exp_time,
                                          plaw_model)

    D_A = photons.parameters["FiducialAngularDiameterDistance"]
    dist_fac = 1.0/(4.*np.pi*D_A*D_A*(1.+redshift)**3).in_cgs()
    norm_sim = float((sphere["hard_emission"]).sum()*dist_fac.in_cgs())*(1.+redshift)

    events = photons.project_photons("z", absorb_model=abs_model,
                                     prng=bms.prng,
                                     no_shifting=True)
    events = ACIS_I(events, rebin=False, convolve_psf=False, prng=bms.prng)
    events.write_spectrum("plaw_model_evt.pi", clobber=True)

    os.system("cp %s ." % events.parameters["ARF"])
    os.system("cp %s ." % events.parameters["RMF"])

    load_user_model(mymodel, "wplaw")
    add_user_pars("wplaw", ["nH", "norm", "redshift", "alpha"],
                  [0.01, norm_sim*1.1, redshift, 0.9], 
                  parmins=[0.0, 0.0, 0.0, 0.1],
                  parmaxs=[10.0, 1.0e9, 10.0, 10.0],
                  parfrozen=[False, False, True, False])

    load_pha("plaw_model_evt.pi")
    set_stat("cstat")
    set_method("simplex")
    ignore(":0.6, 7.0:")
    set_model("wplaw")
    fit()
    set_covar_opt("sigma", 1.645)
    covar()
    res = get_covar_results()

    assert np.abs(res.parvals[0]-nH_sim) < res.parmaxes[0]
    assert np.abs(res.parvals[1]-norm_sim) < res.parmaxes[1]
    assert np.abs(res.parvals[2]-alpha_sim) < res.parmaxes[2]

    os.chdir(curdir)
    shutil.rmtree(tmpdir)
Example #51
0
    def simulate_null_images(img_file: str,
                             psf_file: str,
                             n_null_sims: int,
                             no_core: bool = False,
                             mcmciter: int = 5000,
                             **kwargs) -> None:
        """
        Simulates a specified number of baseline images for a given input observation and a psf file

        :param img_file: Path to the input image file
        :param psf_file: Path to the psf image file
        :param n_null_sims: Number of baseline replicates to be simulated
        :param no_core: If True, only generate baseline replicates with a flat background; by default a point source is included at the location of the core
        :param mcmciter: The number of MCMC samples to draw for simulating the baselines
        """
        print("Creating the null file")
        clean()
        set_stat("cstat")
        set_method("simplex")
        load_image(img_file)
        load_psf("mypsf", psf_file)
        set_psf(mypsf)

        if no_core:
            set_model(const2d.c0)
            set_par(c0.c0, min=0)
        else:
            set_model(gauss2d.q1 + const2d.c0)
            set_par(c0.c0, min=0)
            # set_par(q1.fwhm,max=0.5)
            guess(q1)
        fit()
        results = get_fit_results()
        save("core_source_fit.save", clobber=True)
        save_source("null_q1_c1.fits", clobber=True)
        covar()

        if no_core:
            for i in range(n_null_sims):
                fake()
                save_image("sim_null_{}.fits".format(i), clobber=True)
            clean()
            return

        # Accessing normgauss1d.g1 creates the g1 model component; it is used
        # below as the prior on q1.fwhm, centred on the fitted value with a
        # width taken from the covariance upper bound.
        normgauss1d.g1
        g1.pos = q1.fwhm
        g1.fwhm = get_covar_results().parmaxes[0]

        # check if there is a valid upper bound.
        print(get_covar_results())
        if (get_covar_results().parmaxes[0] is None
                or get_covar_results().parmins[1] is None
                or get_covar_results().parmins[0] is None):
            for i in range(n_null_sims):
                fake()
                save_image("sim_null_{}.fits".format(i), clobber=True)
            clean()
            return
        # if not go for the regular
        set_prior(q1.fwhm, g1)
        set_sampler_opt("defaultprior", False)
        set_sampler_opt("priorshape", [True, False, False, False, False])
        set_sampler_opt("originalscale", [True, True, True, True, True])
        if mcmciter < n_null_sims * 100:
            mcmciter = n_null_sims * 100

        # get_draws sometimes throws an error here (known bug); fall back to
        # repeating the fitted FWHM value if that happens.
        try:
            stats, accept, params = get_draws(1, niter=mcmciter)
        except Exception:
            params = [np.repeat(q1.fwhm.val, mcmciter)]

        # print('Simulating the null files')
        for i in range(n_null_sims):
            set_par(q1.fwhm, params[0][(i + 1) * 100 - 1])
            fake()
            save_image("sim_null_{}.fits".format(i), clobber=True)
        save_all(outfile="lira_input_baseline_sim.log", clobber=True)
        clean()
Example #52
0
def plaw_fit(alpha_sim):

    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)

    nH_sim = 0.02
    norm_sim = 1.0e-4
    redshift = 0.01

    exp_time = 5.0e4
    area = 40000.0
    inst_name = "hdxi"

    spec = Spectrum.from_powerlaw(alpha_sim, redshift, norm_sim)
    spec.apply_foreground_absorption(nH_sim)
    e = spec.generate_energies(exp_time, area)

    pt_src = PointSourceModel(30.0, 45.0, e.size)

    write_photon_list("plaw_model",
                      "plaw_model",
                      e.flux,
                      pt_src.ra,
                      pt_src.dec,
                      e,
                      clobber=True)

    instrument_simulator("plaw_model_simput.fits",
                         "plaw_model_evt.fits",
                         exp_time,
                         inst_name, [30.0, 45.0],
                         astro_bkgnd=None,
                         instr_bkgnd_scale=0.0)

    inst = get_instrument_from_registry(inst_name)
    arf = AuxiliaryResponseFile(inst["arf"])
    rmf = RedistributionMatrixFile(inst["rmf"])
    os.system("cp %s ." % arf.filename)
    os.system("cp %s ." % rmf.filename)

    write_spectrum("plaw_model_evt.fits", "plaw_model_evt.pha", clobber=True)

    load_user_model(mymodel, "wplaw")
    add_user_pars("wplaw", ["nH", "norm", "redshift", "alpha"],
                  [0.01, norm_sim * 0.8, redshift, 0.9],
                  parmins=[0.0, 0.0, 0.0, 0.1],
                  parmaxs=[10.0, 1.0e9, 10.0, 10.0],
                  parfrozen=[False, False, True, False])

    load_pha("plaw_model_evt.pha")
    set_stat("cstat")
    set_method("simplex")
    ignore(":0.5, 9.0:")
    set_model("wplaw")
    fit()
    set_covar_opt("sigma", 1.645)
    covar()
    res = get_covar_results()

    assert np.abs(res.parvals[0] - nH_sim) < res.parmaxes[0]
    assert np.abs(res.parvals[1] - norm_sim) < res.parmaxes[1]
    assert np.abs(res.parvals[2] - alpha_sim) < res.parmaxes[2]

    os.chdir(curdir)
    shutil.rmtree(tmpdir)
Example #53
0
dep.set_par('xswabs.nh', 0.0255)
dep.freeze('xswabs.nh')

# Initialize abundance to 0.5 and thaw
dep.set_par('xsmekal.abundanc', 0.5)
dep.thaw('xsmekal.abundanc')

# Set redshift
dep.set_par('xsmekal.redshift', redshift)

# Do the initial onion-peeling fit with
#   Levenberg-Marquardt optimization method
#   XSPEC variance with a Chi^2 fit statistic
#
set_method("levmar")
set_stat("chi2xspecvar")

dep.guess()

# Display the central annulus (using Sherpa routines)
set_xlog()
plot_fit(0)
plt.savefig('m87_ann0_guess.png')

onion = dep.fit()

# Display the central annulus fit and residuals (using Sherpa routines)
plot_fit_delchi(0)
plt.savefig('m87_ann0_fit.png')

# Create basic plots
Example #54
0
File: qq.py  Project: neobar/BXA
def qq_export(id=None, bkg=False, outfile='qq.txt', elow=None, ehigh=None):
    """
	Export Q-Q plot into a file for plotting.

	:param id: spectrum id to use (see get_bkg_plot/get_data_plot)
	:param bkg: whether to use get_bkg_plot or get_data_plot
	:param outfile: filename to write results into
	:param elow: low energy limit
	:param ehigh: low energy limit

	Example::

		qq.qq_export('bg', outfile='my_bg_qq', elow=0.2, ehigh=10)
	"""
    # data
    d = ui.get_bkg_plot(id=id) if bkg else ui.get_data_plot(id=id)
    e = d.x
    mask = logical_and(e >= elow, e <= ehigh)
    data = d.y[mask].cumsum()
    d = ui.get_bkg_model_plot(id=id) if bkg else ui.get_model_plot(id=id)
    e = d.xlo
    mask = logical_and(e >= elow, e <= ehigh)
    e = e[mask]
    model = d.y[mask].cumsum()
    last_stat = ui.get_stat()
    ui.set_stat(ksstat)
    ks = ui.calc_stat()
    ui.set_stat(cvmstat)
    cvm = ui.calc_stat()
    ui.set_stat(adstat)
    ad = ui.calc_stat()
    ui.set_stat(last_stat)

    ui.set_stat('chi2gehrels')
    chi2 = ui.calc_stat()
    ui.set_stat('cstat')
    cstat = ui.calc_stat()
    ui.set_stat(last_stat)
    stats = dict(ks=ks, cvm=cvm, ad=ad, cstat=cstat, chi2=chi2)

    numpy.savetxt(outfile, numpy.transpose([e, data, model]))
    json.dump(stats, open(outfile + '.json', 'w'), indent=4)
Example #55
0
def load_chi2asym_stat():
    """"Load and set the chi2asym statistic"""
    import sherpa.astro.ui as sau
    sau.load_user_stat("chi2asym", chi2asym_stat_func, chi2asym_err_func)
    sau.set_stat(chi2asym)
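
# Added sketch (an assumption, not from the original project): load_user_stat
# expects a statistic function that takes the data and model arrays and returns
# the total statistic together with the per-bin contributions, as the other
# user-stat examples here do. A minimal chi-square-like stand-in of that shape
# could look like the following; the error handling is purely illustrative.
import numpy as np

def chi2asym_stat_func_sketch(data, model, staterror=None, *args, **kwargs):
    """Illustrative stand-in for chi2asym_stat_func (not the real one)."""
    resid = np.asarray(data) - np.asarray(model)
    err = staterror if staterror is not None else np.sqrt(np.abs(np.asarray(model)))
    fvec = (resid / err) ** 2
    return float(fvec.sum()), fvec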
Example #56
0
def do_beta_model(source, v_field, em_field, axis="z", 
                  prng=None):

    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)

    if prng is None:
        prng = source.prng

    ds = source.ds

    A = 30000.
    exp_time = 1.0e4
    redshift = 0.05
    nH_sim = 0.02

    sphere = ds.sphere("c", (0.5, "Mpc"))

    kT_sim = source.kT
    Z_sim = source.Z

    thermal_model = ThermalSourceModel("apec", 0.1, 11.5, 20000,
                                       Zmet=Z_sim, prng=prng)
    photons = PhotonList.from_data_source(sphere, redshift, A, exp_time,
                                          thermal_model)

    D_A = photons.parameters["fid_d_a"]

    norm_sim = sphere.quantities.total_quantity(em_field)
    norm_sim *= 1.0e-14/(4*np.pi*D_A*D_A*(1.+redshift)*(1.+redshift))
    norm_sim = float(norm_sim.in_cgs())
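    # (norm_sim follows the standard XSPEC APEC normalization,
    # 1e-14 / (4 * pi * [D_A * (1 + z)]**2) * int(n_e * n_H dV), evaluated here
    # from the simulated emission-measure field.)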

    v1, v2 = sphere.quantities.weighted_variance(v_field, em_field)

    if isinstance(axis, string_types):
        if axis == "z":
            fac = 1.0
        else:
            fac = 0.0
    else:
        axis /= np.sqrt(np.dot(axis, axis))
        fac = np.dot(axis, [0.0, 0.0, 1.0])

    sigma_sim = fac*float(v1.in_units("km/s"))
    mu_sim = -fac*float(v2.in_units("km/s"))

    events = photons.project_photons(axis, [30.0, 45.0], absorb_model="tbabs",
                                     nH=nH_sim, prng=prng)

    events.write_simput_file("beta_model", overwrite=True)

    instrument_simulator("beta_model_simput.fits", "beta_model_evt.fits",
                         exp_time, "mucal", [30.0, 45.0],
                         overwrite=True, foreground=False, ptsrc_bkgnd=False,
                         instr_bkgnd=False, 
                         prng=prng)

    write_spectrum("beta_model_evt.fits", "beta_model_evt.pi", overwrite=True)

    os.system("cp %s %s ." % (arf.filename, rmf.filename))

    load_user_model(mymodel, "tbapec")
    add_user_pars("tbapec", ["nH", "kT", "metallicity", "redshift", "norm", "velocity"],
                  [0.02, 4.0, 0.2, 0.04, norm_sim*0.8, 300.0],
                  parmins=[0.0, 0.1, 0.0, -200.0, 0.0, 0.0],
                  parmaxs=[10.0, 20.0, 10.0, 200.0, 1.0e9, 20000.0],
                  parfrozen=[True, False, False, False, False, False])

    load_pha("beta_model_evt.pi")
    set_stat("cstat")
    set_method("levmar")
    ignore(":0.6, 8.0:")
    set_model("tbapec")
    fit()
    res = get_fit_results()

    redshift_sim = (1.0+mu_sim/ckms)*(1.0+redshift) - 1.0

    assert np.abs(res.parvals[0]-kT_sim)/kT_sim < 0.05
    assert np.abs(res.parvals[1]-Z_sim)/Z_sim < 0.05
    assert np.abs(res.parvals[2]-redshift_sim)/redshift_sim < 0.05
    assert np.abs(res.parvals[3]-norm_sim) < 0.05
    assert np.abs(res.parvals[4]-sigma_sim) < 30.0

    os.chdir(curdir)
    shutil.rmtree(tmpdir)
Example #58
0
def test_vapec_beta_model():

    bms = BetaModelSource()

    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)

    prng = 45

    ds = bms.ds

    A = 30000.
    exp_time = 1.0e4
    redshift = 0.05
    nH_sim = 0.02

    sphere = ds.sphere("c", (0.5, "Mpc"))

    kT_sim = bms.kT
    Z_sim = bms.Z
    O_sim = bms.O
    Ca_sim = bms.Ca

    var_elem = {"O": ("stream", "oxygen"),
                "Ca": ("stream", "calcium")}

    thermal_model = ThermalSourceModel("apec", 0.1, 11.5, 20000,
                                       var_elem=var_elem,
                                       Zmet=("gas","metallicity"), 
                                       prng=prng)

    photons = PhotonList.from_data_source(sphere, redshift, A, exp_time,
                                          thermal_model)

    D_A = photons.parameters["fid_d_a"]

    norm_sim = sphere.quantities.total_quantity("emission_measure")
    norm_sim *= 1.0e-14/(4*np.pi*D_A*D_A*(1.+redshift)*(1.+redshift))
    norm_sim = float(norm_sim.in_cgs())

    events = photons.project_photons("z", [30.0, 45.0], absorb_model="tbabs",
                                     nH=nH_sim, prng=prng, no_shifting=True)

    new_events = Lynx_Calorimeter(events, prng=prng)

    os.system("cp %s %s ." % (arf.filename, rmf.filename))

    new_events.write_channel_spectrum("var_abund_beta_model_evt.pha", overwrite=True)

    load_user_model(mymodel_var, "tbapec")
    add_user_pars("tbapec", ["nH", "kT", "abund", "redshift", "norm", "O", "Ca"],
                  [nH_sim, 4.0, Z_sim, redshift, norm_sim*0.8, 0.3, 0.5],
                  parmins=[0.0, 0.1, 0.0, -20.0, 0.0, 0.0, 0.0],
                  parmaxs=[10.0, 20.0, 10.0, 20.0, 1.0e9, 10.0, 10.0],
                  parfrozen=[True, False, True, True, False, False, False])

    load_pha("var_abund_beta_model_evt.pha")
    set_stat("cstat")
    set_method("levmar")
    ignore(":0.6, 8.0:")
    set_model("tbapec")
    fit()
    res = get_fit_results()

    assert np.abs(res.parvals[0]-kT_sim)/kT_sim < 0.05
    assert np.abs(res.parvals[1]-norm_sim)/norm_sim < 0.05
    assert np.abs(res.parvals[2]-O_sim)/O_sim < 0.05
    assert np.abs(res.parvals[3]-Ca_sim)/Ca_sim < 0.15

    os.chdir(curdir)
    shutil.rmtree(tmpdir)
Example #59
0
def test_pileup_model(make_data_path, clean_astro_ui):
    """Basic check of setting a pileup model.

    It is more to check we can set a pileup model, not to
    check the model works.
    """

    infile = make_data_path('3c273.pi')
    ui.load_pha('pileup', infile)
    ui.subtract('pileup')
    ui.notice(0.3, 7)

    ui.set_stat('chi2datavar')

    # pick xswabs as it is unlikely to change with XSPEC
    ui.set_source('pileup', ui.xswabs.amdl * ui.powlaw1d.pl)

    # get close to the best fit, but don't need to fit
    pl.ampl = 1.82e-4
    pl.gamma = 1.97
    amdl.nh = 0.012

    stat0 = ui.calc_stat('pileup')

    # We want to compare the data to the pileup model,
    # which should be run with the higher-energy bins included,
    # but that's not relevant here where I am just
    # checking the statistic value.

    ui.set_pileup_model('pileup', ui.jdpileup.jdp)

    # pick some values to make the model change the data
    jdp.ftime = 3.2
    jdp.fracexp = 1
    jdp.alpha = 0.95
    jdp.f = 0.91

    # Check pileup is added to get_model
    #
    mlines = str(ui.get_model('pileup')).split('\n')
    assert mlines[0] == 'apply_rmf(jdpileup.jdp((xswabs.amdl * powlaw1d.pl)))'
    assert mlines[3].strip() == 'jdp.alpha    thawed         0.95            0            1'
    assert mlines[5].strip() == 'jdp.f        thawed         0.91          0.9            1'
    assert mlines[11].strip() == 'pl.gamma     thawed         1.97          -10           10'
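
    # (Note added for clarity: the mlines indices checked above depend on the
    # order in which get_model prints the jdpileup, xswabs and powlaw1d
    # parameters for this combined expression.)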

    # Ensure that the statistic has got worse (technically
    # it could get better, but not for this case).
    #
    stat1 = ui.calc_stat('pileup')
    assert stat1 > stat0

    # As a test, check the actual statistic values
    # (evaluated with XSPEC 12.11.0 and Sherpa with the
    # master branch 2020/07/29, on Linux).
    #
    assert stat0 == pytest.approx(35.99899827358692)
    assert stat1 == pytest.approx(36.58791181460404)

    # Can we remove the pileup model?
    #
    ui.delete_pileup_model('pileup')

    # Check pileup is not in get_model
    #
    mlines = str(ui.get_model('pileup')).split('\n')

    # Numeric display depends on Python and/or NumPy
    # for the exposure time. I should have set the exposure
    # time to an integer value.
    #
    # assert mlines[0] == 'apply_rmf(apply_arf((38564.608926889 * (xswabs.amdl * powlaw1d.pl))))'

    toks = mlines[0].split()
    assert len(toks) == 5
    assert toks[0].startswith('apply_rmf(apply_arf((38564.608')
    assert toks[1] == '*'
    assert toks[2] == '(xswabs.amdl'
    assert toks[3] == '*'
    assert toks[4] == 'powlaw1d.pl))))'

    assert mlines[4].strip() == 'pl.gamma     thawed         1.97          -10           10'

    stat2 = ui.calc_stat('pileup')
    assert stat2 == stat0
Example #60
0
def plaw_fit(alpha_sim, prng=None):

    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)

    bms = BetaModelSource()
    ds = bms.ds

    if prng is None:
        prng = bms.prng

    def _hard_emission(field, data):
        return YTQuantity(
            1.0e-18,
            "s**-1*keV**-1") * data["density"] * data["cell_volume"] / mp

    ds.add_field(("gas", "hard_emission"),
                 function=_hard_emission,
                 units="keV**-1*s**-1")

    nH_sim = 0.02

    A = YTQuantity(2000., "cm**2")
    exp_time = YTQuantity(2.0e5, "s")
    redshift = 0.01

    sphere = ds.sphere("c", (100., "kpc"))

    plaw_model = PowerLawSourceModel(1.0,
                                     0.01,
                                     11.0,
                                     "hard_emission",
                                     alpha_sim,
                                     prng=prng)

    photons = PhotonList.from_data_source(sphere, redshift, A, exp_time,
                                          plaw_model)

    D_A = photons.parameters["fid_d_a"]
    dist_fac = 1.0 / (4. * np.pi * D_A * D_A * (1. + redshift)**3).in_cgs()
    norm_sim = float(
        (sphere["hard_emission"]).sum() * dist_fac.in_cgs()) * (1. + redshift)

    events = photons.project_photons("z", [30., 45.],
                                     absorb_model="wabs",
                                     nH=nH_sim,
                                     prng=bms.prng,
                                     no_shifting=True)

    events.write_simput_file("plaw", overwrite=True)

    instrument_simulator("plaw_simput.fits",
                         "plaw_evt.fits",
                         exp_time,
                         "sq_acisi_cy20", [30.0, 45.0],
                         overwrite=True,
                         foreground=False,
                         ptsrc_bkgnd=False,
                         instr_bkgnd=False,
                         prng=prng)

    write_spectrum("plaw_evt.fits", "plaw_model_evt.pi", overwrite=True)

    os.system("cp %s %s ." % (arf.filename, rmf.filename))

    load_user_model(mymodel, "wplaw")
    add_user_pars("wplaw", ["nH", "norm", "redshift", "alpha"],
                  [0.01, norm_sim * 1.1, redshift, 0.9],
                  parmins=[0.0, 0.0, 0.0, 0.1],
                  parmaxs=[10.0, 1.0e9, 10.0, 10.0],
                  parfrozen=[False, False, True, False])

    load_pha("plaw_model_evt.pi")
    set_stat("cstat")
    set_method("simplex")
    ignore(":0.6, 7.0:")
    set_model("wplaw")
    fit()
    res = get_fit_results()

    assert np.abs(res.parvals[0] - nH_sim) / nH_sim < 0.1
    assert np.abs(res.parvals[1] - norm_sim) / norm_sim < 0.05
    assert np.abs(res.parvals[2] - alpha_sim) / alpha_sim < 0.05

    os.chdir(curdir)
    shutil.rmtree(tmpdir)