Example #1
def test_eqwith_err1(make_data_path, restore_xspec_settings):

    def check1(e0, e1, e2):
        assert e0 == pytest.approx(0.028335201547206704, rel=1.0e-3)
        assert e1 == pytest.approx(-0.00744118799274448756, rel=1.0e-3)
        assert e2 == pytest.approx(0.0706249544851336, rel=1.0e-3)

    ui.set_xsabund('angr')
    ui.set_xsxsect('bcmc')

    ui.load_pha(make_data_path('3c273.pi'))
    ui.notice(0.5, 7.0)
    ui.set_stat("chi2datavar")
    ui.set_method("simplex")
    ui.set_model('powlaw1d.p1+gauss1d.g1')
    g1.fwhm = 0.1
    g1.pos = 2.0
    ui.freeze(g1.pos, g1.fwhm)
    ui.fit()

    np.random.seed(2345)
    e = ui.eqwidth(p1, p1 + g1, error=True, niter=100)
    check1(e[0], e[1], e[2])
    params = e[3]

    np.random.seed(2345)
    e = ui.eqwidth(p1, p1 + g1, error=True, params=params, niter=100)
    check1(e[0], e[1], e[2])

    parvals = ui.get_fit_results().parvals
    assert parvals[0] == pytest.approx(1.9055272902160334, rel=1.0e-3)
    assert parvals[1] == pytest.approx(0.00017387966749772638, rel=1.0e-3)
    assert parvals[2] == pytest.approx(1.279415076070516e-05, rel=1.0e-3)
Example #2
    def test_ARFModelPHA(self):
        from sherpa.astro import ui
        ui.load_pha(self.make_path("3c120_meg_1.pha"))

        # remove the RMF to ensure this is an ARF-only analysis
        # (which is what is needed to trigger the bug that led to #699)
        ui.get_data().set_rmf(None)

        ui.group_counts(20)
        ui.notice(0.5, 6)
        ui.subtract()
        ui.set_model(ui.xsphabs.abs1 * (ui.xsapec.bubble + ui.powlaw1d.p1))
        ui.set_xsabund('angr')
        ui.set_xsxsect('vern')
        abs1.nh = 0.163
        abs1.nh.freeze()
        p1.ampl = 0.017
        p1.gamma = 1.9
        bubble.kt = 0.5
        bubble.norm = 4.2e-5
        tol = 1.0e-2
        ui.set_method_opt('ftol', tol)
        ui.fit()
        result = ui.get_fit_results()
        assert result.numpoints == self._fit_using_ARFModelPHA['numpoints']
        assert result.dof == self._fit_using_ARFModelPHA['dof']
Example #3
def test_eqwith_err1(make_data_path, restore_xspec_settings):

    def check1(e0, e1, e2):
        assert e0 == approx(0.028335201547206704, rel=1.0e-3)
        assert e1 == approx(-0.00744118799274448756, rel=1.0e-3)
        assert e2 == approx(0.0706249544851336, rel=1.0e-3)

    ui.set_xsabund('angr')
    ui.set_xsxsect('bcmc')

    ui.load_pha(make_data_path('3c273.pi'))
    ui.notice(0.5, 7.0)
    ui.set_stat("chi2datavar")
    ui.set_method("simplex")
    ui.set_model('powlaw1d.p1+gauss1d.g1')
    g1.fwhm = 0.1
    g1.pos = 2.0
    ui.freeze(g1.pos, g1.fwhm)
    ui.fit()

    numpy.random.seed(2345)
    e = ui.eqwidth(p1, p1 + g1, error=True, niter=100)
    check1(e[0], e[1], e[2])
    params = e[3]

    numpy.random.seed(2345)
    e = ui.eqwidth(p1, p1 + g1, error=True, params=params, niter=100)
    check1(e[0], e[1], e[2])

    parvals = ui.get_fit_results().parvals
    assert parvals[0] == approx(1.9055272902160334, rel=1.0e-3)
    assert parvals[1] == approx(0.00017387966749772638, rel=1.0e-3)
    assert parvals[2] == approx(1.279415076070516e-05, rel=1.0e-3)
Example #4
    def fit(self, conf=False):

        ui.ignore(None, None)
        ui.notice(self.start, self.stop)
        self.set_source()
        ui.fit(1)
        if conf:
            ui.conf()
        res = ui.get_fit_results()

        for line in (self.H2lines + self.nonH2lines):
            sourcename = line['source'].split('.')[1]
            print(sourcename)
            for p in ['pos', 'fwhm', 'ampl']:
                n = '{0}.{1}'.format(sourcename, p)
                _place_val(line, p, ui.get_par(n).val)

        self.const = ui.get_par('c1.c0').val
        self.redchi2 = res.rstat


        if conf:
            res = ui.get_conf_results()
            for line in (self.H2lines + self.nonH2lines):
                sourcename = line['source'].split('.')[1]
                for p in ['pos', 'fwhm', 'ampl']:
                    n = '{0}.{1}'.format(sourcename, p)
                    parmin, parmax = _parminmax(res, n)
                    line[p+'_max'] = parmax
                    line[p+'_min'] = parmin
            # deal with error on const
            parmin, parmax = _parminmax(res, 'c1.c0')
            self.const_min = parmin
            self.const_max = parmax
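The method above depends on two module-level helpers, `_place_val` and `_parminmax`, that are not shown on this page. A minimal sketch of plausible implementations, assuming `line` is a plain dict and that the confidence results expose `parnames`, `parmins`, and `parmaxes` (as Sherpa's `get_conf_results()` object does):

def _place_val(line, key, val):
    # store a fitted parameter value under the given key (assumed behavior)
    line[key] = val

def _parminmax(res, name):
    # look up the confidence limits for a named parameter (assumed behavior)
    i = list(res.parnames).index(name)
    return res.parmins[i], res.parmaxes[i]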
Example #5
def test_filter_notice_bad_361(make_data_path):
    """Test out issue 361: notice then ignore bad.

    Since the data is grouped, the ignore_bad call is expected to
    drop the filter expression, with a warning message.
    """

    logger = logging.getLogger('sherpa')
    hdlr = MockLoggingHandler(level='WARNING')
    logger.addHandler(hdlr)

    stats = setup_model(make_data_path)

    ui.notice(0.5, 8.0)
    ui.ignore_bad()
    s1 = ui.calc_stat()
    assert s1 == pytest.approx(stats['bad'])

    msgs = hdlr.messages
    assert msgs['warning'] == [
        'filtering grouped data with quality ' +
        'flags, previous filters deleted'
    ]
    for k in ['debug', 'info', 'error', 'critical']:
        assert msgs[k] == []
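`MockLoggingHandler` is a test helper that is not shown here. A minimal sketch of the usual pattern, assuming `messages` maps lower-case level names to lists of formatted messages (the real helper in the test suite may differ):

import logging

class MockLoggingHandler(logging.Handler):
    """Collect log records by level so tests can inspect them."""

    def __init__(self, level='DEBUG'):
        super().__init__(getattr(logging, level))
        self.messages = {name: [] for name in
                         ('debug', 'info', 'warning', 'error', 'critical')}

    def emit(self, record):
        # bucket each record's formatted message under its level name
        self.messages[record.levelname.lower()].append(record.getMessage())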
Example #6
def test_filter_notice_bad_361(make_data_path, caplog):
    """Test out issue 361: notice then ignore bad.

    Since the data is grouped, the ignore_bad call is expected to
    drop the filter expression, with a warning message.
    """

    stats = setup_model(make_data_path)

    # We don't care about these warnings, so I could just
    # store the number and check that we get an extra one
    # below, but use this as a canary to check when the
    # system changes.
    #
    assert len(caplog.records) == 5

    ui.notice(0.5, 8.0)
    with caplog.at_level(logging.INFO, logger='sherpa'):
        ui.ignore_bad()

    assert len(caplog.records) == 6

    lname, lvl, msg = caplog.record_tuples[5]
    assert lname == 'sherpa.astro.data'
    assert lvl == logging.WARNING
    assert msg == 'filtering grouped data with quality ' + \
        'flags, previous filters deleted'

    s1 = ui.calc_stat()
    assert s1 == pytest.approx(stats['bad'])
Example #7
def test_plot_pvalue(make_data_path, clean_astro_ui, hide_log_output):

    fname = make_data_path("qso.pi")
    ui.load_pha(fname)

    ui.set_stat('cstat')
    ui.set_method("neldermead")

    ui.group_counts(10)
    ui.notice(0.3, 8)

    ui.set_model("xsphabs.abs1*xspowerlaw.p1")
    ui.set_model("abs1*(p1+gauss1d.g1)")

    # move the fit close to the best fit to save a small amount
    # of time.
    abs1.nh = 0.05
    p1.phoindex = 1.28
    p1.norm = 2e-4
    g1.ampl = 1.8e-5

    g1.pos = 3.
    ui.freeze(g1.pos)
    g1.fwhm = 0.1
    ui.freeze(g1.fwhm)

    ui.fit()
    ui.plot_pvalue(p1, p1 + g1, num=100)

    tmp = ui.get_pvalue_results()

    assert tmp.null == pytest.approx(210.34566845619273)
    assert tmp.alt == pytest.approx(207.66618095925094)
    assert tmp.lr == pytest.approx(2.679487496941789)
Example #8
def test_load_multi_arfsrmfs(make_data_path, clean_astro_ui):
    """Added in #728 to ensure cache parameter is sent along by
    MultiResponseSumModel (fix #717).

    This has since been simplified to switch from xsapec to
    powlaw1d as it drops the need for XSPEC and is a simpler
    model, so is less affected by changes in the model code.

    A fit of the Sherpa powerlaw-model to 3c273.pi with a
    single response in CIAO 4.11 (background subtracted,
    0.5-7 keV) returns gamma = 1.9298, ampl = 1.73862e-4
    so doubling the response should halve the amplitude but
    leave the gamma value the same when using two responses,
    as below. This is with chi2datavar.
    """

    pha_pi = make_data_path("3c273.pi")
    ui.load_pha(1, pha_pi)
    ui.load_pha(2, pha_pi)

    arf = make_data_path("3c273.arf")
    rmf = make_data_path("3c273.rmf")

    ui.load_multi_arfs(1, [arf, arf], [1, 2])
    ui.load_multi_arfs(2, [arf, arf], [1, 2])

    ui.load_multi_rmfs(1, [rmf, rmf], [1, 2])
    ui.load_multi_rmfs(2, [rmf, rmf], [1, 2])

    ui.notice(0.5, 7)
    ui.subtract(1)
    ui.subtract(2)

    src = ui.create_model_component('powlaw1d', 'src')
    ui.set_model(1, src)
    ui.set_model(2, src)

    # ensure the test is repeatable by running with a known
    # statistic and method
    #
    ui.set_method('levmar')
    ui.set_stat('chi2datavar')

    # Really what we care about for fixing #717 is that
    # fit does not error out, but it's useful to know that
    # the fit has changed the parameter values (which were
    # both 1 before the fit).
    #
    ui.fit()
    fr = ui.get_fit_results()
    assert fr.succeeded
    assert fr.datasets == (1, 2)

    assert src.gamma.val == pytest.approx(1.9298, rel=1.0e-4)
    assert src.ampl.val == pytest.approx(1.73862e-4 / 2, rel=1.0e-4)
Example #9
def basic_pha1(make_data_path):
    """Create a basic PHA-1 data set/setup"""

    ui.set_default_id('tst')
    ui.load_pha(make_data_path('3c273.pi'))
    ui.subtract()
    ui.notice(0.5, 7)
    ui.set_source(ui.powlaw1d.pl)
    pl = ui.get_model_component('pl')
    pl.gamma = 1.93
    pl.ampl = 1.74e-4
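`basic_pha1` reads like the body of a pytest fixture. A hypothetical test consuming it; because the fixture sets the default id to 'tst', bare `ui` calls act on that dataset (the test name and assertion are illustrative):

def test_fit_basic_pha1(basic_pha1):
    # fit the 'tst' dataset prepared by the fixture
    ui.fit()
    assert ui.get_fit_results().succeeded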
Example #10
def fit_sherpa():
    sh.load_data(obs_path)

    sh.set_source("powlaw1d.model")
    model.ref = 1e9  # 1 TeV = 1e9 keV
    model.ampl = 1.23e-20  # in cm**-2 s**-1 keV**-1
    model.gamma = 2.8

    sh.set_stat("cash")
    sh.notice(energy_range[0].to("keV").value, energy_range[1].to("keV").value)
    sh.fit()
    print(sh.get_fit_results())
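This function relies on module-level names (`sh`, `obs_path`, `energy_range`) defined elsewhere in the original script. A hypothetical setup consistent with the keV conversion above (the path and range are placeholders, not values from the source):

import astropy.units as u
from sherpa.astro import ui as sh

obs_path = "spectrum_obs.pha"              # placeholder file name
energy_range = [0.5 * u.TeV, 30 * u.TeV]   # assumed astropy Quantities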
Example #11
def test_filter_bad_notice_361(make_data_path):
    """Test out issue 361: ignore bad then notice.

    This is expected to fail with NumPy version 1.13 and should cause
    a DeprecationWarning with earlier versions (I do not know when
    the warning was added).
    """

    stats = setup_model(make_data_path)

    ui.ignore_bad()
    ui.notice(0.5, 8.0)
    s1 = ui.calc_stat()
    assert s1 == pytest.approx(stats['0.5-8.0'])
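Several of the issue-361 filter tests on this page call a `setup_model` helper that is not included. A hedged sketch of what it plausibly does, given that the tests compare against reference statistics keyed by 'all', 'bad', and '0.5-8.0' (the data file, model, and statistic are assumptions; `ui` is sherpa.astro.ui as elsewhere on this page):

def setup_model(make_data_path):
    """Load a grouped PHA dataset, attach a model, and record reference stats."""
    ui.load_pha(make_data_path('q1127_src1_grp30.pi'))  # assumed data file
    ui.set_source(ui.powlaw1d.pl)                       # assumed model
    ui.set_stat('chi2datavar')                          # assumed statistic

    stats = {'all': ui.calc_stat()}

    ui.ignore_bad()
    stats['bad'] = ui.calc_stat()

    ui.notice(None, None)
    ui.notice(0.5, 8.0)
    stats['0.5-8.0'] = ui.calc_stat()

    # leave the dataset unfiltered for the caller
    ui.notice(None, None)
    return stats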
Example #12
def test_filter_bad_grouped(make_data_path, clean_astro_ui):
    """Check behavior when the data is grouped.

    This is a test of the current behavior, to check that
    values still hold. It may be necessary to change this
    test if we change the quality handling.
    """

    infile = make_data_path('q1127_src1_grp30.pi')
    ui.load_pha(infile)
    pha = ui.get_data()
    assert pha.quality_filter is None
    assert pha.mask is True

    assert ui.get_dep().shape == (439, )
    assert pha.quality_filter is None
    assert pha.mask is True

    # The last group is marked as quality=2 and so calling
    # ignore_bad means we lose that group.
    #
    ui.ignore_bad()
    assert ui.get_dep().shape == (438, )
    assert pha.mask is True

    expected = np.ones(1024, dtype=bool)
    expected[996:1025] = False
    assert pha.quality_filter == pytest.approx(expected)

    # What happens when we filter the data? Unlike #1169
    # we do change the noticed range.
    #
    ui.notice(0.5, 7)
    assert pha.quality_filter == pytest.approx(expected)

    # The mask has been filtered to remove the bad channels
    # (this is grouped data)
    expected = np.ones(438, dtype=bool)
    expected[0:15] = False
    expected[410:438] = False
    assert pha.mask == pytest.approx(expected)

    expected = np.ones(996, dtype=bool)
    expected[0:34] = False
    expected[481:996] = False
    assert pha.get_mask() == pytest.approx(expected)
Example #13
 def test_rsp(self):
     fname = self.make_path("qso.pi")
     ui.load_pha(fname)
     ui.set_stat("chi2xspecvar")
     ui.set_method("neldermead")
     ui.group_counts(10)
     ui.notice(0.3, 8)
     ui.set_model("xsphabs.abs1*xspowerlaw.p1")
     ui.set_model("abs1*(p1+gauss1d.g1)")
     g1.pos = 3.
     ui.freeze(g1.pos)
     g1.fwhm = 0.1
     ui.freeze(g1.fwhm)
     ui.set_stat('cstat')
     ui.fit()
     ui.plot_pvalue(p1, p1 + g1, num=100)
     tmp = ui.get_pvalue_results()
     expected = [210.34566845619273, 207.66618095925094, 2.679487496941789]
     self.compare_results(expected, [tmp.null, tmp.alt, tmp.lr])
Example #14
def test_filter_bad_ungrouped(make_data_path, clean_astro_ui):
    """Check behavior when the data is ungrouped.

    This is a test of the current behavior, to check that
    values still hold. It may be necessary to change this
    test if we change the quality handling.
    """

    infile = make_data_path('q1127_src1_grp30.pi')
    ui.load_pha(infile)
    pha = ui.get_data()
    assert pha.quality_filter is None
    assert pha.mask is True

    assert ui.get_dep().shape == (439, )
    ui.ungroup()
    assert ui.get_dep().shape == (1024, )
    assert pha.quality_filter is None
    assert pha.mask is True

    ui.ignore_bad()
    assert ui.get_dep().shape == (1024, )
    assert pha.quality_filter is None

    expected = np.ones(1024, dtype=bool)
    expected[996:1025] = False
    assert pha.mask == pytest.approx(expected)

    # At this point we've changed the mask array so Sherpa thinks
    # we've applied a filter, so a notice is not going to change
    # anything. See issue #1169
    #
    ui.notice(0.5, 7)
    assert pha.mask == pytest.approx(expected)

    # We need to ignore to change the mask.
    #
    ui.ignore(None, 0.5)
    ui.ignore(7, None)
    expected[0:35] = False
    expected[479:1025] = False
    assert pha.mask == pytest.approx(expected)
Example #15
def test_eqwith_err(make_data_path, restore_xspec_settings):

    def check(a0, a1, a2):
        assert a0 == approx(0.16443033244310976, rel=1e-3)
        assert a1 == approx(0.09205564216156815, rel=1e-3)
        assert a2 == approx(0.23933118287470895, rel=1e-3)

    ui.set_method('neldermead')
    ui.set_stat('cstat')
    ui.set_xsabund('angr')
    ui.set_xsxsect('bcmc')

    ui.load_data(make_data_path('12845.pi'))
    ui.notice(0.5, 7)

    ui.set_model("xsphabs.gal*xszphabs.zabs*(powlaw1d.p1+xszgauss.g1)")
    ui.set_par(gal.nh, 0.08)
    ui.freeze(gal)

    ui.set_par(zabs.redshift, 0.518)
    ui.set_par(g1.redshift, 0.518)
    ui.set_par(g1.Sigma, 0.01)
    ui.freeze(g1.Sigma)
    ui.set_par(g1.LineE, min=6.0, max=7.0)

    ui.fit()

    numpy.random.seed(12345)
    result = ui.eqwidth(p1, p1 + g1, error=True, niter=100)
    check(result[0], result[1], result[2])
    params = result[3]

    numpy.random.seed(12345)
    result = ui.eqwidth(p1, p1 + g1, error=True, params=params, niter=100)
    check(result[0], result[1], result[2])

    parvals = ui.get_fit_results().parvals
    assert parvals[0] == approx(0.6111340686157877, rel=1.0e-3)
    assert parvals[1] == approx(1.6409785803466297, rel=1.0e-3)
    assert parvals[2] == approx(8.960926761312153e-05, rel=1.0e-3)
    assert parvals[3] == approx(6.620017726014523, rel=1.0e-3)
    assert parvals[4] == approx(1.9279114810359657e-06, rel=1.0e-3)
Example #16
def test_eqwith_err(make_data_path, restore_xspec_settings):

    def check(a0, a1, a2):
        assert a0 == pytest.approx(0.16443033244310976, rel=1e-3)
        assert a1 == pytest.approx(0.09205564216156815, rel=1e-3)
        assert a2 == pytest.approx(0.23933118287470895, rel=1e-3)

    ui.set_method('neldermead')
    ui.set_stat('cstat')
    ui.set_xsabund('angr')
    ui.set_xsxsect('bcmc')

    ui.load_data(make_data_path('12845.pi'))
    ui.notice(0.5, 7)

    ui.set_model("xsphabs.gal*xszphabs.zabs*(powlaw1d.p1+xszgauss.g1)")
    ui.set_par(gal.nh, 0.08)
    ui.freeze(gal)

    ui.set_par(zabs.redshift, 0.518)
    ui.set_par(g1.redshift, 0.518)
    ui.set_par(g1.Sigma, 0.01)
    ui.freeze(g1.Sigma)
    ui.set_par(g1.LineE, min=6.0, max=7.0)

    ui.fit()

    np.random.seed(12345)
    result = ui.eqwidth(p1, p1 + g1, error=True, niter=100)
    check(result[0], result[1], result[2])
    params = result[3]

    np.random.seed(12345)
    result = ui.eqwidth(p1, p1 + g1, error=True, params=params, niter=100)
    check(result[0], result[1], result[2])

    parvals = ui.get_fit_results().parvals
    assert parvals[0] == pytest.approx(0.6111340686157877, rel=1.0e-3)
    assert parvals[1] == pytest.approx(1.6409785803466297, rel=1.0e-3)
    assert parvals[2] == pytest.approx(8.960926761312153e-05, rel=1.0e-3)
    assert parvals[3] == pytest.approx(6.620017726014523, rel=1.0e-3)
    assert parvals[4] == pytest.approx(1.9279114810359657e-06, rel=1.0e-3)
Example #17
def validate_xspec_result(l, h, npts, ndof, statval):
    """Check that the stat results match those from XSPEC.

    This assumes the first data set returned by get_stat_info
    should be used.
    """

    ui.notice(None, None)
    ui.ignore(None, l)
    ui.ignore(h, None)
    ui.ignore_bad()
    sinfo = ui.get_stat_info()[0]
    assert sinfo.numpoints == npts
    assert sinfo.dof == ndof

    # XSPEC displays results to ~ 2dp, so the tolerance
    # is quite forgiving here. Or I could use pyxspec to
    # calculate and display this.
    #
    assert_allclose(sinfo.statval, statval, rtol=0, atol=0.005)
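A hypothetical call to the validator above, using the XSPEC-style cstat figures quoted in the Swift tests further down this page (the numbers are illustrative, not fresh XSPEC output):

# keep 0.3-8.0 keV, drop bad-quality channels, then compare the statistic
validate_xspec_result(0.3, 8.0, 771, 769, 568.52)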
Example #18
def loadBkgPCA(id=1, readFrom='bkgPCA.json'):
    """
    load PCA background from
    """
    filter0 = ui.get_filter()

    with open(readFrom, 'r') as f:
        parDict = json.load(f)
        parDictRed = {
            k.split('.')[1]: v
            for k, v in parDict.items() if k.startswith('pca')
        }
    fitter = PCAFitter(id=id)
    bkgModel = PCAModel('pca{:d}'.format(id), data=fitter.pca)
    for p in bkgModel.pars:
        p.val = parDictRed[p.name]
    idrsp = get_identity_response(id)
    ui.set_bkg_full_model(id, idrsp(bkgModel))
    ui.ignore()
    ui.notice(filter0)
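The closing `ignore()` / `notice(filter0)` pair restores whatever filter was active when the function was entered: `get_filter` returns a string expression and `notice` accepts one back, so the save/restore idiom looks like this minimal sketch:

filter0 = ui.get_filter()   # e.g. '0.5000:7.0000'
ui.ignore()                 # clear every noticed range
# ... temporarily work with a different filter ...
ui.notice(filter0)          # re-apply the saved expression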
Example #19
    def load(self, filename):
        self.modelFile = filename
        with open(filename, 'r') as f:
            self.pca = json.load(f)
        for k, v in self.pca.items():
            self.pca[k] = np.array(v)
        nactivedata = self.pca['ihi'] - self.pca['ilo']
        assert self.pca['hi'].shape == (nactivedata,), \
            'spectrum has different number of channels: %d vs %s' % (
                len(self.pca['hi']), self.ndata)
        assert self.pca['lo'].shape == self.pca['hi'].shape
        assert self.pca['mean'].shape == self.pca['hi'].shape
        assert len(self.pca['components']) == nactivedata
        assert nactivedata <= self.ndata
        ilo = int(self.pca['ilo'])
        ihi = int(self.pca['ihi'])
        self.cts = self.data[ilo:ihi]
        self.ncts = self.cts.sum()  # number of background counts available for deconvolution
        self.x = np.arange(ihi - ilo)
        self.ilo = ilo
        self.ihi = ihi

        # Only notice the channels between ilo + 1 and ihi (channels start
        # from 1, while indices start from 0). The stat value will be
        # affected, which matters when assessing the goodness-of-fit of
        # the background.
        self.grouping0 = ui.get_grouping()
        ui.set_analysis('channel')
        # The channel filters, filter0 and filter_chan, are all in native channels.
        # ui.get_filter() would instead return the binned channels if the spectrum were grouped.
        if self.grouping0 is not None:
            ui.ungroup()
        self.filter0 = ui.get_filter()
        ui.ignore()
        # ui.notice(a, b) notices from channel a to channel b, inclusive.
        ui.notice(self.ilo + 1, self.ihi)
        self.filter_chan = ui.get_filter()
        ui.set_analysis('energy')
        if self.grouping0 is not None:
            ui.group()
Example #20
def setup_order_plot(make_data_path):
    """Set up a faked dataset with multiple orders."""

    pha = make_data_path('3c273.pi')
    ui.load_pha(pha)

    # It has already loaded in one response
    arf = ui.get_arf(resp_id=1)
    arf.specresp *= 0.5

    for order, scale in enumerate([0.4, 0.25], 2):
        ui.load_arf(make_data_path('3c273.arf'), resp_id=order)
        ui.load_rmf(make_data_path('3c273.rmf'), resp_id=order)

        arf = ui.get_arf(resp_id=order)
        arf.specresp *= scale

    ui.set_source(ui.powlaw1d.pl)

    ui.notice(0.5, 7)
    ui.ignore(3, 4)
Example #21
def test_filter_basic(make_data_path):
    """Test out issue 361 without ignore_bad calls.

    This should not trigger the bug behind issue 361.
    """

    stats = setup_model(make_data_path)

    s1 = ui.calc_stat()
    assert s1 == pytest.approx(stats['all'])

    ui.ignore_bad()
    s2 = ui.calc_stat()
    assert s2 == pytest.approx(stats['bad'])

    ui.notice(None, None)
    s3 = ui.calc_stat()
    assert s3 == pytest.approx(stats['all'])

    ui.notice(0.5, 8.0)
    s4 = ui.calc_stat()
    assert s4 == pytest.approx(stats['0.5-8.0'])
Example #22
def fitBkgPCA(id=1):
    """
    The source model should be specified.

    Known Issues:
        While the photon indexes resulting from Wstat and PCA-based background fitting are consistent
        for MOS1 and MOS2, the photon index resulting from PCA-based background fitting for pn
        seems to be biased (steeper) by 0.04-0.05. The PCA-based model overestimates
        the background in the hard band.
    """
    bkgmodel = PCAFitter(id=id)  # An id has to be assigned.
    bkgmodel.fit()
    ui.set_analysis('channel')
    ui.ignore()
    ui.notice(bkgmodel.filter_chan)
    ui.set_analysis('energy')
    m, sig = cstatDist(id=id, bkg_id=1)
    bkgmodel.cstatM = m
    bkgmodel.cstatS = sig
    ui.set_analysis('channel')
    ui.ignore()
    ui.notice(bkgmodel.filter0)
    ui.set_analysis('energy')
    return bkgmodel
Example #23
def fit_lines(linelist, id=None, delta_lam=0.2, plot=False, outfile=None):
    mymodel = filili.multilinemanager.GaussLines('const1d', id=id, baseline=baseline)
    linelist['fililiname'] = [''] * len(linelist['linename'])
    for i in range(len(linelist['linename'])):
        lname = linelist['linename'][i]
        lwave = linelist['wave'][i]
        previouslines = set(mymodel.line_name_list())
        mymodel.add_line(linename=''.join(c for c in lname if c.isalnum()), pos=lwave)
        linenamelist = mymodel.line_name_list()
        newline = (set(linenamelist) - previouslines).pop()
        linelist['fililiname'][i] = newline
        if i == 0:
            firstline = linenamelist[0]
            ui.get_model_component(firstline).pos.max = lwave + delta_lam/10.
            ui.get_model_component(firstline).pos.min = lwave - delta_lam/10.
        else:
            dl = lwave - linelist['wave'][0]
            ui.link(ui.get_model_component(newline).pos, ui.get_model_component(firstline).pos + dl)
    
    #ui.set_analysis("wave")
    ui.ignore(None, None) #ignores all data
    ui.notice(min(linelist['wave'])-delta_lam, max(linelist['wave']) + delta_lam)
    ui.fit(id)
    if plot:
        ui.plot_fit(id)
        if has_chips:
            pychips.set_curve("crv1",["err.*","true"])
            pychips.set_plot_title(linelist['name'])
            if outfile is not None:
                pychips.print_window(outfile, ['clobber','true'])
        elif has_mpl:
            plt.title(linelist['name'])
            if outfile is not None:
                plt.savefig(outfile)
        else:
            raise NoPlottingSystemError("Neither pychips nor matplotlib are found.")
Example #24
def test_plot_pvalue(make_data_path, clean_astro_ui, hide_logging):
    """Check plot_pvalue with PHA data."""

    fname = make_data_path("qso.pi")
    ui.load_pha(fname)

    ui.set_stat('cstat')
    ui.set_method("neldermead")

    ui.group_counts(10)
    ui.notice(0.3, 8)

    ui.set_model("xsphabs.abs1*(xspowerlaw.p1 +gauss1d.g1)")

    # move the fit close to the best fit to save a small amount
    # of time.
    abs1.nh = 0.05
    p1.phoindex = 1.28
    p1.norm = 2e-4
    g1.ampl = 1.8e-5

    g1.pos = 3.
    ui.freeze(g1.pos)
    g1.fwhm = 0.1
    ui.freeze(g1.fwhm)

    # Could we reduce the number of bins to save evaluation time?
    # We do want a non-default num value when checking the shapes
    # of the output attributes.
    #
    ui.fit()
    ui.plot_pvalue(p1, p1 + g1, num=100, bins=20)

    tmp = ui.get_pvalue_results()

    assert tmp.null == pytest.approx(210.34566845619273)
    assert tmp.alt == pytest.approx(207.66618095925094)
    assert tmp.lr == pytest.approx(2.679487496941789)

    # Have we returned the correct info?
    #
    # Is it worth checking the stored data (aka how randomised is this
    # output)?
    #
    assert tmp.samples.shape == (100, 2)
    assert tmp.stats.shape == (100, 2)
    assert tmp.ratios.shape == (100, )

    # Check the plot
    #
    tmp = ui.get_pvalue_plot()

    assert tmp.lr == pytest.approx(2.679487496941789)

    assert tmp.xlabel == 'Likelihood Ratio'
    assert tmp.ylabel == 'Frequency'
    assert tmp.title == 'Likelihood Ratio Distribution'

    # It would be nice to check the values here
    #
    assert tmp.ratios.shape == (100, )
    assert tmp.xlo.shape == (21, )
    assert tmp.xhi.shape == (21, )
    assert tmp.y.shape == (21, )
Example #25
def test_can_use_swift_data(make_data_path):
    """A basic check that we can read in and use the Swift data.

    Unlike the previous tests, that directly access the io module,
    this uses the ui interface.
    """

    # QUS are there pytest fixtures that ensure the state is
    # clean on entry and exit?
    ui.clean()

    # The Swift PHA file does not have the ANCRFILE/RESPFILE keywords
    # set up, so the responses have to be manually added.
    #
    ui.load_pha(make_data_path(PHAFILE))
    ui.load_rmf(make_data_path(RMFFILE))
    ui.load_arf(make_data_path(ARFFILE))

    assert ui.get_analysis() == 'energy'

    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.ampl', 0.0003)

    # The responses have the first bin start at an energy of 0,
    # which causes issues for Sherpa. There should be a
    # RuntimeWarning due to a divide by zero.
    #
    with pytest.warns(RuntimeWarning) as record:
        stat = ui.calc_stat()

    # The exact form of the message depends on the Python version;
    # this could be checked, but it feels excessive for this
    # particular test, which is just a regression check, so use a
    # more lax approach.
    #
    assert len(record) == 1
    assert record[0].message.args[0] in \
        ['divide by zero encountered in divide',
         'divide by zero encountered in true_divide']

    # The stat value depends on what power-law model is used. With
    # xspowerlaw it is NaN, but with powlaw1d it is finite.
    #
    # This check is purely a regression test, so the value has
    # not been externally validated.
    #
    # assert np.isnan(stat)
    assert_allclose(stat, 58.2813692358182)

    # Manually adjust the first bin to avoid this problem.
    # Add in asserts just in case this gets "fixed" in the
    # I/O layer (as XSPEC does).
    #
    arf = ui.get_arf()
    rmf = ui.get_rmf()
    assert arf.energ_lo[0] == 0.0
    assert rmf.energ_lo[0] == 0.0
    assert rmf.e_min[0] == 0.0

    # The bin widths are ~ 0.005 or ~ 0.01 keV, so pick a value
    # smaller than this.
    #
    ethresh = 1e-6
    arf.energ_lo[0] = ethresh
    rmf.energ_lo[0] = ethresh
    rmf.e_min[0] = ethresh

    # Pick an energy range which isn't affected by the first
    # bin.
    #
    # Unfortunately, using a range of 0.3-8.0 gives 771 bins
    # in XSPEC - channels 30 to 800 - but 772 bins in Sherpa.
    # If I use ignore(None, 0.3); ignore(8.0, None) instead
    # then the result is 771 bins. This is because the e_min/max
    # of the RMF has channel widths of 0.01 keV, starting at 0,
    # so both 0.3 and 8.0 fall on a bin boundary. So, it's either
    # a difference in < or <= (or > vs >=), or a rounding issue
    # due to floating-point conversion leading to one bin boundary
    # being slightly different in Sherpa vs XSPEC.
    #
    # When using ui.notice(0.3, 8.0); ui.get_indep(filter=True)
    # returns 772 channels, 30 to 801.
    #
    # Using ui.notice(0.3, 7.995) selects channels 30 to 800. So
    # this range is used. Alternatively, channel 801 could have been
    # excluded explicitly.
    #
    # ui.notice(0.3, 8.0)
    ui.notice(0.3, 7.995)

    # XSPEC 12.9.1b calculation of the statistic:
    #   chi sq = 203.88 from 771 bins with 769 dof
    #   cstat  = 568.52
    #
    # There are known differences between XSPEC and Sherpa
    # with chi2xspecvar. This only affects data sets where
    # there is background subtraction, which is not the case
    # here. See https://github.com/sherpa/sherpa/issues/356
    #
    ui.set_stat('chi2xspecvar')
    stat_xvar = ui.get_stat_info()

    assert len(stat_xvar) == 1
    stat_xvar = stat_xvar[0]
    assert stat_xvar.numpoints == 771
    assert stat_xvar.dof == 769
    assert_allclose(stat_xvar.statval, 203.88,
                    rtol=0, atol=0.005)

    ui.set_stat('cstat')
    stat_cstat = ui.get_stat_info()

    assert len(stat_cstat) == 1
    stat_cstat = stat_cstat[0]
    assert stat_cstat.numpoints == 771
    assert stat_cstat.dof == 769
    assert_allclose(stat_cstat.statval, 568.52,
                    rtol=0, atol=0.005)

    ui.clean()
Example #26
    def fit(self):
        # try a PCA decomposition of this spectrum
        initial = self.decompose()
        ui.set_method('neldermead')
        bkgmodel = PCAModel('pca%s' % self.id, data=self.pca)
        self.bkgmodel = bkgmodel
        response = get_identity_response(self.id)
        convbkgmodel = response(bkgmodel)
        ui.set_bkg_full_model(self.id, convbkgmodel)
        for p, v in zip(bkgmodel.pars, initial):
            p.val = v
        srcmodel = ui.get_model(self.id)
        ui.set_full_model(self.id, srcmodel)
        initial_v = self.calc_bkg_stat()
        # print('before fit: stat: %s' % (initial_v))
        ui.fit_bkg(id=self.id)
        # print('fit: first full fit done')
        final = [p.val for p in ui.get_bkg_model(self.id).pars]
        # print('fit: parameters: %s' % (final))
        initial_v = self.calc_bkg_stat()
        # print('fit: stat: %s' % (initial_v))

        # lets try from zero
        # logf.info('fit: second full fit from zero')
        for p in bkgmodel.pars:
            p.val = 0
        ui.fit_bkg(id=self.id)
        initial_v0 = self.calc_bkg_stat()
        # logf.info('fit: parameters: %s' % (final))
        # logf.info('fit: stat: %s' % (initial_v0))

        # pick the better starting point
        if initial_v0 < initial_v:
            # logf.info('fit: using zero-fit')
            initial_v = initial_v0
            final = [p.val for p in ui.get_bkg_model(self.id).pars]
        else:
            # logf.info('fit: using decomposed-fit')
            for p, v in zip(bkgmodel.pars, final):
                p.val = v

        # start with the full fit and remove(freeze) parameters
        print('%d parameters, stat=%.2f' % (len(initial), initial_v))
        results = [(2 * len(final) + initial_v, final, len(final), initial_v)]
        for i in range(len(initial) - 1, 0, -1):
            bkgmodel.pars[i].val = 0
            bkgmodel.pars[i].freeze()
            ui.fit_bkg(id=self.id)
            final = [p.val for p in ui.get_bkg_model(self.id).pars]
            v = self.calc_bkg_stat()
            print('--> %d parameters, stat=%.2f' % (i, v))
            results.insert(0, (v + 2 * i, final, i, v))

        print()
        print('Background PCA fitting AIC results:')
        print('-----------------------------------')
        print()
        print('stat Ncomp AIC')
        for aic, params, nparams, val in results:
            print('%-05.1f %2d %-05.1f' % (val, nparams, aic))
        aic, final, nparams, val = min(results)
        for p, v in zip(bkgmodel.pars, final):
            p.val = v
        for i in range(nparams):
            bkgmodel.pars[i].thaw()

        print()
        print('Increasing parameters again...')
        # now increase the number of parameters again
        # results = [(aic, final, nparams, val)]
        last_aic, last_final, last_nparams, last_val = aic, final, nparams, val
        for i in range(last_nparams, len(bkgmodel.pars)):
            next_nparams = i + 1
            bkgmodel.pars[i].thaw()
            for p, v in zip(bkgmodel.pars, last_final):
                p.val = v
            ui.fit_bkg(id=self.id)
            next_final = [p.val for p in ui.get_bkg_model(self.id).pars]
            v = self.calc_bkg_stat()
            next_aic = v + 2 * next_nparams
            if next_aic < last_aic:  # accept
                print('%d parameters, aic=%.2f ** accepting' %
                      (next_nparams, next_aic))
                last_aic, last_final, last_nparams, last_val = next_aic, next_final, next_nparams, v
            else:
                print('%d parameters, aic=%.2f' % (next_nparams, next_aic))
            # stop if we are 3 parameters ahead what we needed
            if next_nparams >= last_nparams + 3:
                break

        print('Final choice: %d parameters, aic=%.2f' %
              (last_nparams, last_aic))
        # reset to the last good solution
        for p, v in zip(bkgmodel.pars, last_final):
            p.val = v

        last_model = convbkgmodel
        for i in range(10):
            print('Adding Gaussian#%d' % (i + 1))
            # find largest discrepancy
            ui.set_analysis(self.id, "ener", "rate")
            m = ui.get_bkg_fit_plot(self.id)
            y = m.dataplot.y.cumsum()
            z = m.modelplot.y.cumsum()
            diff_rate = np.abs(y - z)
            ui.set_analysis(self.id, "ener", "counts")
            m = ui.get_bkg_fit_plot(self.id)
            x = m.dataplot.x
            y = m.dataplot.y.cumsum()
            z = m.modelplot.y.cumsum()
            diff = np.abs(y - z)
            i = np.argmax(diff)
            energies = x
            e = x[i]
            print(
                'largest remaining discrepancy at %.3fkeV[%d], need %d counts'
                % (x[i], i, diff[i]))
            # e = x[i]
            power = diff_rate[i]
            # lets try to inject a gaussian there

            g = ui.xsgaussian('g_%d_%d' % (self.id, i))
            print('placing gaussian at %.2fkeV, with power %s' % (e, power))
            # we work in energy bins, not energy
            g.LineE.min = energies[0]
            g.LineE.max = energies[-1]
            g.LineE.val = e
            if i > len(diff) - 2:
                i = len(diff) - 2
            if i < 2:
                i = 2
            g.Sigma = (x[i + 1] - x[i - 1])
            g.Sigma.min = (x[i + 1] - x[i - 1]) / 3
            g.Sigma.max = x[-1] - x[0]
            g.norm.min = power * 1e-6
            g.norm.val = power
            convbkgmodel2 = response(g)
            next_model = last_model + convbkgmodel2
            ui.set_bkg_full_model(self.id, next_model)
            ui.fit_bkg(id=self.id)
            next_final = [p.val for p in ui.get_bkg_model(self.id).pars]
            next_nparams = len(next_final)
            v = self.calc_bkg_stat()
            next_aic = v + 2 * next_nparams
            print('with Gaussian:', next_aic,
                  '; change: %.1f (negative is good)' % (next_aic - last_aic))
            if next_aic < last_aic:
                print('accepting')
                last_model = next_model
                last_aic, last_final, last_nparams, last_val = next_aic, next_final, next_nparams, v
            else:
                print('not significant, rejecting')
                ui.set_bkg_full_model(self.id, last_model)
                for p, v in zip(last_model.pars, last_final):
                    p.val = v
                    if v == 0:  # the parameter was frozen.
                        ui.freeze(p)
                break

        # Save the final cstat and dof (dof = ihi - ilo).
        self.cstat, self.dof = self.calc_bkg_stat(dof=True)
        # Save the filter for background fitting.
        self.filter_energy = ui.get_filter()
        ui.set_analysis('channel')
        ui.ignore()
        ui.notice(self.filter0)  # restore filter
        ui.set_analysis('energy')
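The component-selection loops above rank candidates by the Akaike information criterion, with the fit statistic standing in for -2 ln L, i.e. AIC = stat + 2 * nparams. A minimal sketch of that bookkeeping (the candidate tuples are placeholders):

def aic(stat, nparams):
    # Akaike information criterion; the fit statistic plays the role of -2 ln L
    return stat + 2 * nparams

# keep the candidate with the smallest AIC, as the fit() method above does
candidates = [(104.2, 5), (103.9, 6), (105.0, 4)]  # (stat, nparams) placeholders
best_stat, best_n = min(candidates, key=lambda c: aic(*c))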
Example #27
def test_746(make_data_path, clean_astro_ui):
    """Test https://github.com/sherpa/sherpa/issues/746

    Something in #444 (reverted in #759) caused:

      - the fit to fail (niter=2)
      - the line amplitude not to change significantly
      - the statistic reported by the fit to be different to
        that returned by calc_stat

    Something with how the cache code handles analysis=wave
    appears to be the problem. This test takes the line data
    from 746 and adds it into the existing PHA2 file we have
    (3c120) to replicate the problem. Fortunately this
    wavelength range contains essentially no counts, so we
    can just add in the counts to make a fake line and check
    we can fit it.
    """

    ui.load_pha(make_data_path('3c120_pha2.gz'))
    ui.load_arf(10, make_data_path('3c120_meg_1.arf.gz'))
    ui.load_rmf(10, make_data_path('3c120_meg_1.rmf.gz'))

    # Add in the line
    d10 = ui.get_data(10)
    idx = np.arange(4068, 4075, dtype=int)
    d10.counts[idx] = [1, 1, 1, 2, 3, 1, 1]

    # group the data
    ui.group_width(id=10, num=2)

    ui.set_analysis('wave')
    ui.notice(21.4, 21.7)

    # internal check that we are getting the expected data
    expected = np.zeros(32)
    expected[[9, 10, 11, 12]] = [2, 3, 4, 1]
    expected[26] = 1  # this count is from the 3c120 data
    d = d10.get_dep(filter=True)
    assert d == pytest.approx(expected)

    line = ui.create_model_component('gauss1d', 'name')

    # treat the line as a delta function
    line.fwhm.val = 0.00001
    line.fwhm.frozen = True
    line.pos = 21.6
    line.pos.frozen = True

    line.ampl = 2

    ui.set_source(10, line)

    # the original fit used levmar, so use that here
    # (it looks like it also fails with simplex)
    ui.set_method('levmar')
    ui.set_stat('cstat')

    sinit = ui.calc_stat(10)
    ui.fit(10)
    fr = ui.get_fit_results()
    sfinal = ui.calc_stat(10)

    # Did the
    #   - fit improve
    #   - take more than two iterations
    #   - report the same statistic values as calc_stat
    #   - change the fit parameter
    #
    assert sfinal < sinit

    assert fr.succeeded
    assert fr.nfev > 2

    assert fr.istatval == sinit
    assert fr.statval == sfinal

    assert line.ampl.val > 100
    assert len(fr.parvals) == 1
    assert fr.parvals[0] == line.ampl.val

    # some simple checks to throw in because we can
    assert fr.parnames == ('name.ampl', )
    assert fr.datasets == (10,)
    assert fr.statname == 'cstat'
    assert fr.methodname == 'levmar'
    assert fr.itermethodname == 'none'
    assert fr.numpoints == 32
    assert fr.dof == 31

    # Now add in some "absolute" checks to act as regression tests.
    # If these fail then it doesn't necessarily mean something bad
    # has happened.
    #
    assert fr.nfev == 15
    assert sinit == pytest.approx(82.72457294394245)
    assert sfinal == pytest.approx(15.39963248224592)
    assert line.ampl.val == pytest.approx(113.95646989927054)
Example #28
def test_can_use_swift_data(make_data_path):
    """A basic check that we can read in and use the Swift data.

    Unlike the previous tests, that directly access the io module,
    this uses the ui interface.
    """

    # QUS are there pytest fixtures that ensure the state is
    # clean on entry and exit?
    ui.clean()

    # The Swift PHA file does not have the ANCRFILE/RESPFILE keywords
    # set up, so the responses have to be manually added.
    #
    ui.load_pha(make_data_path(PHAFILE))

    rmffile = make_data_path(RMFFILE)
    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        ui.load_rmf(rmffile)

    validate_replacement_warning(ws, 'RMF', rmffile)

    arffile = make_data_path(ARFFILE)
    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        ui.load_arf(arffile)

    validate_replacement_warning(ws, 'ARF', arffile)

    assert ui.get_analysis() == 'energy'

    arf = ui.get_arf()
    rmf = ui.get_rmf()
    assert arf.energ_lo[0] == EMIN
    assert rmf.energ_lo[0] == EMIN
    assert rmf.e_min[0] == 0.0

    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.ampl', 0.0003)

    stat = ui.calc_stat()

    # This check is purely a regression test, so the value has
    # not been externally validated.
    #
    assert_allclose(stat, 58.2813692358182)

    # Pick an energy range which isn't affected by the first
    # bin.
    #
    # Unfortunately, using a range of 0.3-8.0 gives 771 bins
    # in XSPEC - channels 30 to 800 - but 772 bins in Sherpa.
    # If I use ignore(None, 0.3); ignore(8.0, None) instead
    # then the result is 771 bins. This is because the e_min/max
    # of the RMF has channel widths of 0.01 keV, starting at 0,
    # so both 0.3 and 8.0 fall on a bin boundary. So, it's either
    # a difference in < or <= (or > vs >=), or a rounding issue
    # due to floating-point conversion leading to one bin boundary
    # being slightly different in Sherpa vs XSPEC.
    #
    # When using ui.notice(0.3, 8.0); ui.get_indep(filter=True)
    # returns 772 channels, 30 to 801.
    #
    # Using ui.notice(0.3, 7.995) selects channels 30 to 800. So
    # this range is used. Alternatively, channel 801 could have been
    # excluded explicitly.
    #
    # ui.notice(0.3, 8.0)
    ui.notice(0.3, 7.995)

    # XSPEC 12.9.1b calculation of the statistic:
    #   chi sq = 203.88 from 771 bins with 769 dof
    #   cstat  = 568.52
    #
    # There are known differences between XSPEC and Sherpa
    # with chi2xspecvar. This only affects data sets where
    # there is background subtraction, which is not the case
    # here. See https://github.com/sherpa/sherpa/issues/356
    #
    ui.set_stat('chi2xspecvar')
    stat_xvar = ui.get_stat_info()

    assert len(stat_xvar) == 1
    stat_xvar = stat_xvar[0]
    assert stat_xvar.numpoints == 771
    assert stat_xvar.dof == 769
    assert_allclose(stat_xvar.statval, 203.88,
                    rtol=0, atol=0.005)

    ui.set_stat('cstat')
    stat_cstat = ui.get_stat_info()

    assert len(stat_cstat) == 1
    stat_cstat = stat_cstat[0]
    assert stat_cstat.numpoints == 771
    assert stat_cstat.dof == 769
    assert_allclose(stat_cstat.statval, 568.52,
                    rtol=0, atol=0.005)

    ui.clean()
Example #29
def check_bad_grouping(exp_xlo, exp_xhi, exp_counts, lo1, hi1, lo2, hi2):
    """Common tests from test_grouped_pha_all_badXXX

    Sending in two ranges is a bit excessive, but it is the
    easiest thing to implement.
    """

    cts = ui.get_counts()
    assert cts == pytest.approx([exp_counts])

    dplot = ui.get_data_plot()
    assert dplot.xlo == pytest.approx([exp_xlo])
    assert dplot.xhi == pytest.approx([exp_xhi])
    assert dplot.y == pytest.approx([exp_counts])

    # ignore all the data
    ui.ignore(lo1, hi1)

    # can still plot
    cts = ui.get_counts()
    assert cts == pytest.approx([exp_counts])

    cts = ui.get_counts(filter=True)
    assert len(cts) == 0

    dplot = ui.get_data_plot()
    assert len(dplot.xlo) == 0
    assert len(dplot.xhi) == 0
    assert len(dplot.y) == 0

    # ignore does not fail
    #
    ui.ignore(lo2, hi2)

    # we can restore the data
    ui.notice(None, None)

    cts = ui.get_counts()
    assert cts == pytest.approx([exp_counts])

    dplot = ui.get_data_plot()
    assert dplot.xlo == pytest.approx([exp_xlo])
    assert dplot.xhi == pytest.approx([exp_xhi])
    assert dplot.y == pytest.approx([exp_counts])

    # now ignore the bad channels (ie everything)
    #
    ui.ignore_bad()

    cts = ui.get_counts()
    assert len(cts) == 0

    dplot = ui.get_data_plot()
    assert len(dplot.xlo) == 0
    assert len(dplot.xhi) == 0
    assert len(dplot.y) == 0

    # there's nothing to notice (this line is an example of #790)
    ui.notice(lo1, hi1)

    cts = ui.get_counts()
    assert len(cts) == 0

    dplot = ui.get_data_plot()
    assert len(dplot.xlo) == 0
    assert len(dplot.xhi) == 0
    assert len(dplot.y) == 0
Example #30
def test_can_use_swift_data(make_data_path, clean_astro_ui):
    """A basic check that we can read in and use the Swift data.

    Unlike the previous tests, that directly access the io module,
    this uses the ui interface.
    """

    # The Swift PHA file does not have the ANCRFILE/RESPFILE keywords
    # set up, so the responses have to be manually added.
    #
    ui.load_pha(make_data_path(PHAFILE))

    rmffile = make_data_path(RMFFILE)
    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        ui.load_rmf(rmffile)

    validate_replacement_warning(ws, 'RMF', rmffile)

    arffile = make_data_path(ARFFILE)
    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        ui.load_arf(arffile)

    validate_replacement_warning(ws, 'ARF', arffile)

    assert ui.get_analysis() == 'energy'

    arf = ui.get_arf()
    rmf = ui.get_rmf()
    assert arf.energ_lo[0] == EMIN
    assert rmf.energ_lo[0] == EMIN
    assert rmf.e_min[0] == 0.0

    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.ampl', 0.0003)

    stat = ui.calc_stat()

    # This check is purely a regression test, so the value has
    # not been externally validated.
    #
    assert_allclose(stat, 58.2813692358182)

    # Pick an energy range which isn't affected by the first
    # bin.
    #
    # Unfortunately, using a range of 0.3-8.0 gives 771 bins
    # in XSPEC - channels 30 to 800 - but 770 bins in Sherpa,
    # channels 31 to 800.
    #
    # Note that the channel numbering starts at 0:
    # % dmlist target_sr.pha header,clean,raw | grep TLMIN
    # TLMIN1       = 0                    / Lowest legal channel number
    #
    # and so it's not clear when XSPEC says 30-800 what it
    # means. From https://github.com/sherpa/sherpa/issues/1211#issuecomment-881647128
    # we have that the first bin it is using is
    #     0.29-0.30
    # and the last bin is
    #     7.99-8.00
    # and I've checked with iplot that it has renumbered the
    # channels to 1-1024 from 0-1023
    #
    # % dmlist swxpc0to12s6_20130101v014.rmf.gz"[ebounds][channel=28:31]" data,clean
    #  CHANNEL    E_MIN                E_MAX
    #         28     0.28000000119209     0.28999999165535
    #         29     0.28999999165535     0.30000001192093
    #         30     0.30000001192093     0.31000000238419
    #         31     0.31000000238419     0.31999999284744
    # % dmlist swxpc0to12s6_20130101v014.rmf.gz"[ebounds][channel=798:801]" data,clean
    #  CHANNEL    E_MIN                E_MAX
    #        798         7.9800000191         7.9899997711
    #        799         7.9899997711                  8.0
    #        800                  8.0         8.0100002289
    #        801         8.0100002289         8.0200004578
    #
    # If I use ignore(None, 0.3); ignore(8.0, None) instead then the
    # result is 771 bins (channels 31 to 800). This is because the
    # e_min/max of the RMF has channel widths of 0.01 keV, starting at
    # 0, so both 0.3 and 8.0 fall on a bin boundary. So, it's either a
    # difference in < or <= (or > vs >=), or a rounding issue due to
    # floating-point conversion leading to one bin boundary being
    # slightly different in Sherpa vs XSPEC.
    #
    # When using ui.notice(0.3, 8.0); ui.get_indep(filter=True)
    # returns 770 channels, 31 to 800.
    #
    # Using ui.notice(0.3, 7.995) selects channels 31 to 800.
    # Using ui.notice(0.299, 8.0) selects channels 30 to 800.
    # Using ui.notice(0.299, 7.995) selects channels 30 to 800.
    #
    ui.notice(0.299, 8.0)

    # Check the selected range
    pha = ui.get_data()
    expected = np.zeros(1024, dtype=bool)
    expected[29:800] = True
    assert pha.mask == pytest.approx(expected)
    assert pha.get_mask() == pytest.approx(expected)

    # XSPEC 12.9.1b calculation of the statistic:
    #   chi sq = 203.88 from 771 bins with 769 dof
    #   cstat  = 568.52
    #
    # There are known differences between XSPEC and Sherpa
    # with chi2xspecvar. This only affects data sets where
    # there is background subtraction, which is not the case
    # here. See https://github.com/sherpa/sherpa/issues/356
    #
    ui.set_stat('chi2xspecvar')
    stat_xvar = ui.get_stat_info()

    assert len(stat_xvar) == 1
    stat_xvar = stat_xvar[0]
    assert stat_xvar.numpoints == 771
    assert stat_xvar.dof == 769
    assert_allclose(stat_xvar.statval, 203.88, rtol=0, atol=0.005)

    ui.set_stat('cstat')
    stat_cstat = ui.get_stat_info()

    assert len(stat_cstat) == 1
    stat_cstat = stat_cstat[0]
    assert stat_cstat.numpoints == 771
    assert stat_cstat.dof == 769
    assert_allclose(stat_cstat.statval, 568.52, rtol=0, atol=0.005)
Example #31
def test_pileup_model(make_data_path, clean_astro_ui):
    """Basic check of setting a pileup model.

    It is more to check we can set a pileup model, not to
    check the model works.
    """

    infile = make_data_path('3c273.pi')
    ui.load_pha('pileup', infile)
    ui.subtract('pileup')
    ui.notice(0.3, 7)

    ui.set_stat('chi2datavar')

    # pick xswabs as it is unlikely to change with XSPEC
    ui.set_source('pileup', ui.xswabs.amdl * ui.powlaw1d.pl)

    # get close to the best fit, but don't need to fit
    pl.ampl = 1.82e-4
    pl.gamma = 1.97
    amdl.nh = 0.012

    stat0 = ui.calc_stat('pileup')

    # We want to compare the data to the pileup model,
    # which should be run with the higher-energy bins included,
    # but that's not relevant here where I am just
    # checking the statistic value.

    ui.set_pileup_model('pileup', ui.jdpileup.jdp)

    # pick some values to make the model change the data
    jdp.ftime = 3.2
    jdp.fracexp = 1
    jdp.alpha = 0.95
    jdp.f = 0.91

    # Check pileup is added to get_model
    #
    mlines = str(ui.get_model('pileup')).split('\n')
    assert mlines[0] == 'apply_rmf(jdpileup.jdp((xswabs.amdl * powlaw1d.pl)))'
    assert mlines[3].strip() == 'jdp.alpha    thawed         0.95            0            1'
    assert mlines[5].strip() == 'jdp.f        thawed         0.91          0.9            1'
    assert mlines[11].strip() == 'pl.gamma     thawed         1.97          -10           10'

    # Ensure that the statistic has got worse (technically
    # it could get better, but not for this case).
    #
    stat1 = ui.calc_stat('pileup')
    assert stat1 > stat0

    # As a test, check the actual statistic values
    # (evaluated with XSPEC 12.11.0 and Sherpa with the
    # master branch 2020/07/29, on Linux).
    #
    assert stat0 == pytest.approx(35.99899827358692)
    assert stat1 == pytest.approx(36.58791181460404)

    # Can we remove the pileup model?
    #
    ui.delete_pileup_model('pileup')

    # Check pileup is not in get_model
    #
    mlines = str(ui.get_model('pileup')).split('\n')

    # Numeric display depends on Python and/or NumPy
    # for the exposure time. I should have set the exposure
    # time to an integer value.
    #
    # assert mlines[0] == 'apply_rmf(apply_arf((38564.608926889 * (xswabs.amdl * powlaw1d.pl))))'

    toks = mlines[0].split()
    assert len(toks) == 5
    assert toks[0].startswith('apply_rmf(apply_arf((38564.608')
    assert toks[1] == '*'
    assert toks[2] == '(xswabs.amdl'
    assert toks[3] == '*'
    assert toks[4] == 'powlaw1d.pl))))'

    assert mlines[4].strip() == 'pl.gamma     thawed         1.97          -10           10'

    stat2 = ui.calc_stat('pileup')
    assert stat2 == stat0
Example #32
def test_plot_order_multi(make_data_path, clean_astro_ui):
    """Rather than fake data, use a known dataset.

    Here we pretend we have three orders but with the same
    response (except that the ARF is 0.5, 0.4, 0.25 of the
    normal ARF).
    """

    pha = make_data_path('3c273.pi')
    ui.load_pha(pha)

    # It has already loaded in one response
    arf = ui.get_arf(resp_id=1)
    arf.specresp *= 0.5

    for order, scale in enumerate([0.4, 0.25], 2):
        ui.load_arf(make_data_path('3c273.arf'), resp_id=order)
        ui.load_rmf(make_data_path('3c273.rmf'), resp_id=order)

        arf = ui.get_arf(resp_id=order)
        arf.specresp *= scale

    ui.set_source(ui.powlaw1d.pl)

    ui.notice(0.5, 7)
    ui.ignore(3, 4)

    fplot = ui.get_fit_plot()
    oplot = ui.get_order_plot()

    # The idea is to compare the X range of plot_fit to plot_order
    # (but accessed just using the plot objects rather than creating
    # an actual plot).
    #
    # First some safety checks
    assert fplot.dataplot.xlo == pytest.approx(fplot.modelplot.xlo)
    assert fplot.dataplot.xhi == pytest.approx(fplot.modelplot.xhi)

    assert len(oplot.xlo) == 3
    assert len(oplot.xhi) == 3
    assert len(oplot.y) == 3

    assert oplot.xlo[1] == pytest.approx(oplot.xlo[0])
    assert oplot.xlo[2] == pytest.approx(oplot.xlo[0])

    assert oplot.xhi[1] == pytest.approx(oplot.xhi[0])
    assert oplot.xhi[2] == pytest.approx(oplot.xhi[0])

    # We know the y values are 0.5, 0.4, 0.25 times the original arf
    # so we can compare them.
    #
    assert oplot.y[1] == pytest.approx(oplot.y[0] * 0.4 / 0.5)
    assert oplot.y[2] == pytest.approx(oplot.y[0] * 0.25 / 0.5)

    xlo = oplot.xlo[0]
    xhi = oplot.xhi[0]
    assert len(xlo) == 564
    assert xlo[0] == pytest.approx(0.46720001101493835)
    assert xhi[-1] == pytest.approx(9.869600296020508)

    # The model plot is technically drawn the same way as the order plot
    # (ungrouped) but it uses different code (sherpa.astro.plot.ModelHistogram)
    # so let's compare.
    #
    mplot = ui.get_model_plot()
    assert mplot.xlo[0] == pytest.approx(0.46720001101493835)
    assert mplot.xhi[-1] == pytest.approx(9.869600296020508)

    # Also compare to the fit plot (which is grouped)
    #
    assert fplot.modelplot.xlo[0] == pytest.approx(0.46720001101493835)
    assert fplot.modelplot.xhi[-1] == pytest.approx(9.869600296020508)
Example #33
def test_can_use_swift_data(make_data_path, is_known_warning):
    """A basic check that we can read in and use the Swift data.

    Unlike the previous tests, that directly access the io module,
    this uses the ui interface.
    """

    # QUS are there pytest fixtures that ensure the state is
    # clean on entry and exit?
    ui.clean()

    # The Swift PHA file does not have the ANCRFILE/RESPFILE keywords
    # set up, so the responses have to be manually added.
    #
    ui.load_pha(make_data_path(PHAFILE))

    rmffile = make_data_path(RMFFILE)
    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        ui.load_rmf(rmffile)

    validate_replacement_warning(ws, 'RMF', rmffile, is_known_warning)

    arffile = make_data_path(ARFFILE)
    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        ui.load_arf(arffile)

    validate_replacement_warning(ws, 'ARF', arffile, is_known_warning)

    assert ui.get_analysis() == 'energy'

    arf = ui.get_arf()
    rmf = ui.get_rmf()
    assert arf.energ_lo[0] == EMIN
    assert rmf.energ_lo[0] == EMIN
    assert rmf.e_min[0] == 0.0

    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.ampl', 0.0003)

    stat = ui.calc_stat()

    # This check is purely a regression test, so the value has
    # not been externally validated.
    #
    assert_allclose(stat, 58.2813692358182)

    # Pick an energy range which isn't affected by the first
    # bin.
    #
    # Unfortunately, using a range of 0.3-8.0 gives 771 bins
    # in XSPEC - channels 30 to 800 - but 772 bins in Sherpa.
    # If I use ignore(None, 0.3); ignore(8.0, None) instead
    # then the result is 771 bins. This is because the e_min/max
    # of the RMF has channel widths of 0.01 keV, starting at 0,
    # so both 0.3 and 8.0 fall on a bin boundary. So, it's either
    # a difference in < or <= (or > vs >=), or a rounding issue
    # due to floating-point conversion leading to one bin boundary
    # being slightly different in Sherpa vs XSPEC.
    #
    # When using ui.notice(0.3, 8.0); ui.get_indep(filter=True)
    # returns 772 channels, 30 to 801.
    #
    # Using ui.notice(0.3, 7.995) selects channels 30 to 800. So
    # this range is used. Alternatively, channel 801 could have been
    # excluded explicitly.
    #
    # ui.notice(0.3, 8.0)
    ui.notice(0.3, 7.995)

    # XSPEC 12.9.1b calculation of the statistic:
    #   chi sq = 203.88 from 771 bins with 769 dof
    #   cstat  = 568.52
    #
    # There are known differences between XSPEC and Sherpa
    # with chi2xspecvar. This only affects data sets where
    # there is background subtraction, which is not the case
    # here. See https://github.com/sherpa/sherpa/issues/356
    #
    ui.set_stat('chi2xspecvar')
    stat_xvar = ui.get_stat_info()

    assert len(stat_xvar) == 1
    stat_xvar = stat_xvar[0]
    assert stat_xvar.numpoints == 771
    assert stat_xvar.dof == 769
    assert_allclose(stat_xvar.statval, 203.88,
                    rtol=0, atol=0.005)

    ui.set_stat('cstat')
    stat_cstat = ui.get_stat_info()

    assert len(stat_cstat) == 1
    stat_cstat = stat_cstat[0]
    assert stat_cstat.numpoints == 771
    assert stat_cstat.dof == 769
    assert_allclose(stat_cstat.statval, 568.52,
                    rtol=0, atol=0.005)

    ui.clean()
Example #34
    assert get_rmf(id).energ_lo[0] > 0
    assert get_arf(id).energ_lo[0] > 0
    assert (get_bkg(id).counts > 0).sum() > 0
except:
    traceback.print_exc()
    sys.exit(0)

set_xlog()
set_ylog()
set_stat('cstat')
set_xsabund('wilm')
set_xsxsect('vern')
set_analysis(id, 'ener', 'counts')
ignore(None, elo)
ignore(ehi, None)
notice(elo, ehi)

prefix = filename + '_xagnfitter_out_'

#import json
#z = float(open(filename + '.z').read())
#galnh_value = float(open(filename + '.nh').read())

galabso = auto_galactic_absorption()
galabso.nH.freeze()

if args.backgroundmodel == 'chandra':
    print('calling singlefitter...')
    fitter = SingleFitter(id, filename, ChandraBackground)
    try:
        fitter.tryload()