Example #1
def test_cstat_comparison_xspec(make_data_path, l, h, ndp, ndof, statval):
    """Compare CSTAT values for a data set to XSPEC.

    This checks that the "UI layer" works, although ideally there
    should be a file that can be read in rather than having to
    manipulate it (the advantage here is that it means there is
    no messing around with adding a file to the test data set).

    The XSPEC version used was 12.9.0o.
    """

    dset = create_xspec_comparison_dataset(make_data_path,
                                           keep_background=False)

    ui.clean()
    ui.set_data(dset)
    # use powlaw1d rather than xspowerlaw so do not need XSPEC
    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.ampl', 1e-4)

    ui.set_stat('cstat')
    ui.set_analysis('channel')

    validate_xspec_result(l, h, ndp, ndof, statval)
    ui.clean()
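The validate_xspec_result helper is not included in this listing. The following is a minimal sketch of what it could look like, assuming pytest is imported in the test module and that l and h are the limits to notice; everything beyond the names in the call above is hypothetical:

def validate_xspec_result(l, h, ndp, ndof, statval):
    """Hypothetical sketch: filter the data and compare with XSPEC.

    ndp, ndof, and statval are the values quoted from the XSPEC run.
    """
    ui.notice(None, None)  # clear any previous filter
    ui.notice(l, h)
    sinfo = ui.get_stat_info()[0]
    assert sinfo.numpoints == ndp
    assert sinfo.dof == ndof
    assert sinfo.statval == pytest.approx(statval, rel=1e-4)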
Example #2
def test_xspecvar_no_grouping_no_bg_comparison_xspec(make_data_path, l, h, ndp,
                                                     ndof, statval):
    """Compare chi2xspecvar values for a data set to XSPEC.

    The data set has no background.

    See test_cstat_comparison_xspec. Note that at present
    Sherpa and XSPEC treat bins with 0 values in them differently:
    see https://github.com/sherpa/sherpa/issues/356
    so for this test all bins are forced to have at least one
    count in them (source -> 5 is added per channel, background ->
    3 is added per channel).

    The XSPEC version used was 12.9.0o.
    """

    dset = create_xspec_comparison_dataset(make_data_path,
                                           keep_background=False)

    # Lazy, so add it to "bad" channels too
    dset.counts += 5

    ui.clean()
    ui.set_data(dset)

    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.ampl', 5e-4)

    ui.set_stat('chi2xspecvar')
    ui.set_analysis('energy')

    validate_xspec_result(l, h, ndp, ndof, statval)
    ui.clean()
Example #3
    def setUp(self):

        self._old_logger_level = logger.getEffectiveLevel()
        logger.setLevel(logging.ERROR)

        ui.set_stat('wstat')

        infile = self.make_path('3c273.pi')
        ui.load_pha(1, infile)

        # Change the backscale value slightly so that the
        # results are different to other runs with this file.
        #
        nbins = ui.get_data(1).get_dep(False).size
        bscal = 0.9 * np.ones(nbins) * ui.get_backscal(1)
        ui.set_backscal(1, backscale=bscal)

        ui.set_source(1, ui.powlaw1d.pl)

        # The powerlaw slope and normalization are
        # intended to be "a reasonable approximation"
        # to the data, just to make sure that any statistic
        # calculation doesn't blow-up too much.
        #
        ui.set_par("pl.gamma", 1.7)
        ui.set_par("pl.ampl", 1.7e-4)
Example #4
def test_xspecvar_no_grouping_comparison_xspec(make_data_path, l, h, ndp, ndof,
                                               statval):
    """Compare chi2xspecvar values for a data set to XSPEC.

    The data set has a background. See
    test_xspecvar_no_grouping_no_bg_comparison_xspec

    The XSPEC version used was 12.9.0o.
    """

    dset = create_xspec_comparison_dataset(make_data_path,
                                           keep_background=True)

    # Lazy, so add it to "bad" channels too
    dset.counts += 5
    dset.get_background().counts += 3

    ui.clean()
    ui.set_data(dset)
    ui.subtract()

    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.ampl', 5e-4)

    ui.set_stat('chi2xspecvar')
    ui.set_analysis('energy')

    validate_xspec_result(l, h, ndp, ndof, statval)
    ui.clean()
Example #5
def test_missmatch_arf(make_data_path):
    ui.load_pha(1, make_data_path("source1.pi"))
    ui.load_bkg(1, make_data_path("back1.pi"))
    ui.load_arf(1, make_data_path("arf_1024.fits"))
    ui.load_rmf(1, make_data_path("rmf_1024.fits"))
    ui.set_method('levmar')
    ui.set_model(ui.powlaw1d.p1 * ui.xswabs.abs1)
    ui.set_par('p1.ampl', 0.0001)
    ui.set_stat('cash')
    ui.fit()
    parvals = ui.get_fit_results().parvals
    assert parvals[0] == approx(1.47969, rel=1.0e-3)
    assert parvals[1] == approx(0.0019491, rel=1.0e-3)
    assert parvals[2] == approx(2.35452, rel=1.0e-3)
Example #6
def single_scalar_setup(make_data_path):

    ui.set_stat('wstat')

    infile = make_data_path('3c273.pi')
    ui.load_pha(1, infile)

    ui.set_source(1, ui.powlaw1d.pl)

    # The powerlaw slope and normalization are
    # intended to be "a reasonable approximation"
    # to the data, just to make sure that any statistic
    # calculation doesn't blow-up too much.
    #
    ui.set_par("pl.gamma", 1.782)
    ui.set_par("pl.ampl", 1.622e-4)
Example #7
def set_model_powerlaw(ds, nhgal, z, fixgamma):
    ds.set_source("xsphabs.absgal * xszphabs.abs1 * xszpowerlw.po1")

    shp.set_par("absgal.nH", val=nhgal, frozen=True)
    shp.set_par("abs1.redshift", val=z, frozen=True)
    shp.set_par("po1.redshift", val=z, frozen=True)

    if fixgamma:
        shp.set_par("po1.PhoIndex", val=fixgamma, frozen=True)
Example #8
def test_can_use_pspc_data(make_data_path):
    """A basic check that we can read in and use the ROSAT PSPC data.

    Unlike the previous tests, which directly access the io module,
    this uses the ui interface.
    """

    if not six.PY2 and (backend == "crates"):
        pytest.skip('Python3 and Crates: known to fail')

    # The PSPC PHA file does not have the ANCRFILE/RESPFILE keywords
    # set up, so the responses have to be manually added.
    #
    ui.load_pha(make_data_path(PHAFILE), use_errors=True)
    assert ui.get_analysis() == 'channel'

    ui.load_rmf(make_data_path(RMFFILE))
    assert ui.get_analysis() == 'energy'

    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.gamma', 1.7)
    ui.set_par('pl.ampl', 2e-6)

    s = ui.get_stat_info()[0]
    assert s.numpoints == 63
    assert s.dof == 61

    # Value obtained from XSPEC 12.9.1p; Sherpa returns
    # sexpected = 973.2270845920297
    sexpected = 973.23
    assert_allclose(s.statval, sexpected, rtol=0, atol=0.005)

    # apply an energy filter to remove the "bogus" points
    ui.ignore(None, 0.05)

    s = ui.get_stat_info()[0]
    assert s.numpoints == 62
    assert s.dof == 60
    assert_allclose(s.statval, sexpected, rtol=0, atol=0.005)

    ui.ignore(2.01, None)

    s = ui.get_stat_info()[0]
    assert s.numpoints == 7
    assert s.dof == 5

    assert_allclose(s.statval, sexpected, rtol=0, atol=0.005)
Example #9
def test_can_use_pspc_data(make_data_path):
    """A basic check that we can read in and use the ROSAT PSPC data.

    Unlike the previous tests, which directly access the io module,
    this uses the ui interface.
    """

    # The PSPC PHA file does not have the ANCRFILE/RESPFILE keywords
    # set up, so the responses have to be manually added.
    #
    ui.load_pha(make_data_path(PHAFILE), use_errors=True)
    assert ui.get_analysis() == 'channel'

    ui.load_rmf(make_data_path(RMFFILE))
    assert ui.get_analysis() == 'energy'

    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.gamma', 1.7)
    ui.set_par('pl.ampl', 2e-6)

    s = ui.get_stat_info()[0]
    assert s.numpoints == 63
    assert s.dof == 61

    # Value obtained from XSPEC 12.9.1p; Sherpa returns
    # sexpected = 973.2270845920297
    sexpected = 973.23
    assert_allclose(s.statval, sexpected, rtol=0, atol=0.005)

    # apply an energy filter to remove the "bogus" points
    ui.ignore(None, 0.05)

    s = ui.get_stat_info()[0]
    assert s.numpoints == 62
    assert s.dof == 60
    assert_allclose(s.statval, sexpected, rtol=0, atol=0.005)

    ui.ignore(2.01, None)

    s = ui.get_stat_info()[0]
    assert s.numpoints == 7
    assert s.dof == 5

    assert_allclose(s.statval, sexpected, rtol=0, atol=0.005)
Example #10
def test_eqwith_err(make_data_path, restore_xspec_settings):

    def check(a0, a1, a2):
        assert a0 == approx(0.16443033244310976, rel=1e-3)
        assert a1 == approx(0.09205564216156815, rel=1e-3)
        assert a2 == approx(0.23933118287470895, rel=1e-3)

    ui.set_method('neldermead')
    ui.set_stat('cstat')
    ui.set_xsabund('angr')
    ui.set_xsxsect('bcmc')

    ui.load_data(make_data_path('12845.pi'))
    ui.notice(0.5, 7)

    ui.set_model("xsphabs.gal*xszphabs.zabs*(powlaw1d.p1+xszgauss.g1)")
    ui.set_par(gal.nh, 0.08)
    ui.freeze(gal)

    ui.set_par(zabs.redshift, 0.518)
    ui.set_par(g1.redshift, 0.518)
    ui.set_par(g1.Sigma, 0.01)
    ui.freeze(g1.Sigma)
    ui.set_par(g1.LineE, min=6.0, max=7.0)

    ui.fit()

    numpy.random.seed(12345)
    result = ui.eqwidth(p1, p1 + g1, error=True, niter=100)
    check(result[0], result[1], result[2])
    params = result[3]

    numpy.random.seed(12345)
    result = ui.eqwidth(p1, p1 + g1, error=True, params=params, niter=100)
    check(result[0], result[1], result[2])

    parvals = ui.get_fit_results().parvals
    assert parvals[0] == approx(0.6111340686157877, rel=1.0e-3)
    assert parvals[1] == approx(1.6409785803466297, rel=1.0e-3)
    assert parvals[2] == approx(8.960926761312153e-05, rel=1.0e-3)
    assert parvals[3] == approx(6.620017726014523, rel=1.0e-3)
    assert parvals[4] == approx(1.9279114810359657e-06, rel=1.0e-3)
Example #11
def test_eqwith_err(make_data_path, restore_xspec_settings):

    def check(a0, a1, a2):
        assert a0 == pytest.approx(0.16443033244310976, rel=1e-3)
        assert a1 == pytest.approx(0.09205564216156815, rel=1e-3)
        assert a2 == pytest.approx(0.23933118287470895, rel=1e-3)

    ui.set_method('neldermead')
    ui.set_stat('cstat')
    ui.set_xsabund('angr')
    ui.set_xsxsect('bcmc')

    ui.load_data(make_data_path('12845.pi'))
    ui.notice(0.5, 7)

    ui.set_model("xsphabs.gal*xszphabs.zabs*(powlaw1d.p1+xszgauss.g1)")
    ui.set_par(gal.nh, 0.08)
    ui.freeze(gal)

    ui.set_par(zabs.redshift, 0.518)
    ui.set_par(g1.redshift, 0.518)
    ui.set_par(g1.Sigma, 0.01)
    ui.freeze(g1.Sigma)
    ui.set_par(g1.LineE, min=6.0, max=7.0)

    ui.fit()

    np.random.seed(12345)
    result = ui.eqwidth(p1, p1 + g1, error=True, niter=100)
    check(result[0], result[1], result[2])
    params = result[3]

    np.random.seed(12345)
    result = ui.eqwidth(p1, p1 + g1, error=True, params=params, niter=100)
    check(result[0], result[1], result[2])

    parvals = ui.get_fit_results().parvals
    assert parvals[0] == pytest.approx(0.6111340686157877, rel=1.0e-3)
    assert parvals[1] == pytest.approx(1.6409785803466297, rel=1.0e-3)
    assert parvals[2] == pytest.approx(8.960926761312153e-05, rel=1.0e-3)
    assert parvals[3] == pytest.approx(6.620017726014523, rel=1.0e-3)
    assert parvals[4] == pytest.approx(1.9279114810359657e-06, rel=1.0e-3)
Example #12
def test_wstat_comparison_xspec(make_data_path, l, h, ndp, ndof, statval):
    """Compare WSTAT values for a data set to XSPEC.

    See test_cstat_comparison_xspec.

    The XSPEC version used was 12.9.0o.
    """

    dset = create_xspec_comparison_dataset(make_data_path,
                                           keep_background=True)

    ui.clean()
    ui.set_data(dset)
    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.ampl', 1e-4)

    ui.set_stat('wstat')
    ui.set_analysis('channel')

    validate_xspec_result(l, h, ndp, ndof, statval)
    ui.clean()
Example #13
    def setUp(self):

        # defensive programming (one of the tests has been seen to fail
        # when the whole test suite is run without this)
        ui.clean()

        self._old_logger_level = logger.getEffectiveLevel()
        logger.setLevel(logging.ERROR)

        ui.set_stat('wstat')

        infile = self.make_path('3c273.pi')
        ui.load_pha(1, infile)

        ui.set_source(1, ui.powlaw1d.pl)

        # The powerlaw slope and normalization are
        # intended to be "a reasonable approximation"
        # to the data, just to make sure that any statistic
        # calculation doesn't blow-up too much.
        #
        ui.set_par("pl.gamma", 1.782)
        ui.set_par("pl.ampl", 1.622e-4)
Example #14
def single_array_setup(make_data_path):

    ui.set_stat('wstat')

    infile = make_data_path('3c273.pi')
    ui.load_pha(1, infile)

    # Change the backscale value slightly so that the
    # results are different to other runs with this file.
    #
    nbins = ui.get_data(1).get_dep(False).size
    bscal = 0.9 * np.ones(nbins) * ui.get_backscal(1)
    ui.set_backscal(1, backscale=bscal)

    ui.set_source(1, ui.powlaw1d.pl)

    # The powerlaw slope and normalization are
    # intended to be "a reasonable approximation"
    # to the data, just to make sure that any statistic
    # calculation doesn't blow-up too much.
    #
    ui.set_par("pl.gamma", 1.7)
    ui.set_par("pl.ampl", 1.7e-4)
Example #15
def _nH_uplimit(fixgamma, sigma_level=0.955):
    # Fix nH to 1e19 cm-2 and find the best fit
    shp.set_par("abs1.nH", val=0.001, frozen=True)
    properFit()
    dsmod.freeze("po1.PhoIndex")

    fitresult = properFit()
    chi2_min = fitresult.statval

    # Estimate the probability distribution of NH using chi2
    NHtest = np.logspace(-3, 3, num=120)
    PNHtest = np.full(len(NHtest), np.nan)
    cumNHtest = np.full(len(NHtest), np.nan)

    for i, nh in enumerate(NHtest):
        shp.set_par("abs1.nH", val=nh)
        fitresult = properFit()

        Dchi2 = fitresult.statval - chi2_min
        PNHtest[i] = np.exp(-Dchi2 / 2)
        cumNHtest[i] = np.trapz(PNHtest[:i + 1], NHtest[:i + 1])

    # Normalize
    Pnorm = 1 / np.trapz(PNHtest, NHtest)
    C = Pnorm * cumNHtest
    nH = 10**np.interp(np.log10(sigma_level),
                       np.log10(C),
                       np.log10(NHtest),
                       left=0,
                       right=1)

    #    fig = plt.figure()
    #    ax = fig.add_subplot(111)
    #    ax.loglog(NHtest, P, linewidth=0, marker='o')
    #    ax.loglog(NHtest, C, linewidth=0, marker='o')
    #    ax.axvline(nH)
    #    plt.show()

    # Restore original fit (NH=0)
    if not fixgamma:
        dsmod.thaw("po1.PhoIndex")

    shp.set_par("abs1.nH", val=0)
    properFit()

    return nH
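The loop above is the standard likelihood-profile recipe: the fit statistic is profiled over a grid of NH values and converted to a probability density, whose normalized cumulative distribution gives the upper limit. In LaTeX form:

P(N_H) \propto \exp\left(-\Delta\chi^2(N_H) / 2\right),
\qquad
C(N_H) = \frac{\int_0^{N_H} P(N_H')\,dN_H'}{\int_0^{\infty} P(N_H')\,dN_H'}

with the upper limit defined by C(N_H^{\mathrm{up}}) equal to sigma_level (0.955 by default, roughly the two-sided 2-sigma level).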
Example #16
def group_setup(make_data_path):

    ui.set_stat('wstat')

    infile = make_data_path('9774.pi')
    ui.load_pha(1, infile)

    ui.group_counts(1, 20)

    # Unlike the test_wstat_two_scalar case, the grouping
    # is not copied over.
    # ui.set_grouping(1, bkg_id=1, val=ui.get_grouping(1))

    ui.set_source(1, ui.const1d.c1 * ui.powlaw1d.pl1)

    # These should be the same as test_wstat_two_scalar
    #
    ui.set_par("pl1.gamma", 1.7)
    ui.set_par("pl1.ampl", 1.6e-4)
    ui.set_par("c1.c0", 45)
Example #17
    def setUp(self):

        self._old_logger_level = logger.getEffectiveLevel()
        logger.setLevel(logging.ERROR)

        ui.set_stat('wstat')

        infile1 = self.make_path('3c273.pi')
        infile2 = self.make_path('9774.pi')
        ui.load_pha(1, infile1)
        ui.load_pha(2, infile2)

        # Since 9774.pi isn't grouped, group it. Note that this
        # call groups the background to 20 counts per bin. In this
        # case we do not want that; instead we want to use the same
        # grouping scheme as the source file.
        #
        # Note: this is related to issue 227
        #
        ui.group_counts(2, 20)
        ui.set_grouping(2, bkg_id=1, val=ui.get_grouping(2))

        # There's no need to have the same model in both datasets,
        # but assume the same source model can be used, with a
        # normalization difference.
        #
        ui.set_source(1, ui.powlaw1d.pl1)
        ui.set_source(2, ui.const1d.c2 * ui.get_source(1))

        # The powerlaw slope and normalization are
        # intended to be "a reasonable approximation"
        # to the data, just to make sure that any statistic
        # calculation doesn't blow-up too much.
        #
        # Note: the model values for 3c273 are slightly different
        #       to the single-PHA-file case, so stat results are
        #       slightly different
        #
        ui.set_par("pl1.gamma", 1.7)
        ui.set_par("pl1.ampl", 1.6e-4)
        ui.set_par("c2.c0", 45)
Example #18
    def setUp(self):

        self._old_logger_level = logger.getEffectiveLevel()
        logger.setLevel(logging.ERROR)

        ui.set_stat('wstat')

        infile = self.make_path('9774.pi')
        ui.load_pha(1, infile)

        ui.group_counts(1, 20)

        # Unlike the test_wstat_two_scalar case, the grouping
        # is not copied over.
        # ui.set_grouping(1, bkg_id=1, val=ui.get_grouping(1))

        ui.set_source(1, ui.const1d.c1 * ui.powlaw1d.pl1)

        # These should be the same as test_wstat_two_scalar
        #
        ui.set_par("pl1.gamma", 1.7)
        ui.set_par("pl1.ampl", 1.6e-4)
        ui.set_par("c1.c0", 45)
Example #19
def _set(name, par, val):
    """Set a source parameter."""
    import sherpa.astro.ui as sau
    sau.set_par('{name}.{par}'.format(**locals()), val)
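A hypothetical usage of the helper, assuming a powlaw1d component named pl has been created first:

import sherpa.astro.ui as sau

sau.set_source(sau.powlaw1d.pl)  # creates (and registers) a powlaw1d named "pl"
_set('pl', 'gamma', 1.7)         # same as sau.set_par('pl.gamma', 1.7)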
Example #20
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Compute results with Sherpa"""
from __future__ import print_function, division
import numpy as np
import sherpa.astro.ui as sau

sau.load_data("counts.fits.gz")
sau.set_source("normgauss2d.source + const2d.background")
sau.set_stat("cstat")
# Ask for high-precision results
sau.set_method_opt("ftol", 1e-20)
sau.set_covar_opt("eps", 1e-20)

# Set start parameters close to simulation values to make the fit converge
sau.set_par("source.xpos", 101)
sau.set_par("source.ypos", 101)
sau.set_par("source.ampl", 1.1e3)
sau.set_par("source.fwhm", 10)
sau.set_par("background.c0", 1.1)

# Run fit and covariance estimation
# Results are automatically printed to the screen
sau.fit()
sau.covar()

# Sherpa uses fwhm instead of sigma as extension parameter ... need to convert
# http://cxc.harvard.edu/sherpa/ahelp/gauss2d.html
fwhm_to_sigma = 1.0 / np.sqrt(8 * np.log(2))
cov = sau.get_covar_results()
sigma = fwhm_to_sigma * cov.parvals[0]
sigma_err = fwhm_to_sigma * cov.parmaxes[0]
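For reference, 1 / sqrt(8 ln 2) is approximately 0.42466, so a fitted FWHM of 10 corresponds to sigma of about 4.247. A small illustrative check and printout (hypothetical additions, not part of the original script):

# Illustrative sanity check of the FWHM -> sigma factor.
assert abs(10 * fwhm_to_sigma - 4.2466) < 1e-3
print("sigma = {0:.3f} +/- {1:.3f}".format(sigma, sigma_err))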
Example #21
def test_can_use_swift_data(make_data_path):
    """A basic check that we can read in and use the Swift data.

    Unlike the previous tests, which directly access the io module,
    this uses the ui interface.
    """

    # QUS are there pytest fixtures that ensure the state is
    # clean on entry and exit?
    ui.clean()

    # The Swift PHA file does not have the ANCRFILE/RESPFILE keywords
    # set up, so the responses have to be manually added.
    #
    ui.load_pha(make_data_path(PHAFILE))
    ui.load_rmf(make_data_path(RMFFILE))
    ui.load_arf(make_data_path(ARFFILE))

    assert ui.get_analysis() == 'energy'

    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.ampl', 0.0003)

    # The responses have the first bin start at an energy of 0,
    # which causes issues for Sherpa. There should be a
    # RuntimeWarning due to a divide by zero.
    #
    with pytest.warns(RuntimeWarning) as record:
        stat = ui.calc_stat()

    # The exact form of the message depends on the Python version;
    # this could be checked, but it feels excessive for this
    # particular test, which is just a regression check, so use a
    # more lax approach.
    #
    assert len(record) == 1
    assert record[0].message.args[0] in \
        ['divide by zero encountered in divide',
         'divide by zero encountered in true_divide']

    # The stat value depends on what power-law model is used. With
    # xspowerlaw it is NaN, but with powlaw1d it is finite.
    #
    # This check is purely a regression test, so the value has
    # not been externally validated.
    #
    # assert np.isnan(stat)
    assert_allclose(stat, 58.2813692358182)

    # Manually adjust the first bin to avoid this problem.
    # Add in asserts just in case this gets "fixed" in the
    # I/O layer (as XSPEC does).
    #
    arf = ui.get_arf()
    rmf = ui.get_rmf()
    assert arf.energ_lo[0] == 0.0
    assert rmf.energ_lo[0] == 0.0
    assert rmf.e_min[0] == 0.0

    # The bin widths are ~ 0.005 or ~ 0.01 keV, so pick a value
    # smaller than this.
    #
    ethresh = 1e-6
    arf.energ_lo[0] = ethresh
    rmf.energ_lo[0] = ethresh
    rmf.e_min[0] = ethresh

    # Pick an energy range which isn't affected by the first
    # bin.
    #
    # Unfortunately, using a range of 0.3-8.0 gives 771 bins
    # in XSPEC - channels 30 to 800 - but 772 bins in Sherpa.
    # If I use ignore(None, 0.3); ignore(8.0, None) instead
    # then the result is 771 bins. This is because the e_min/max
    # of the RMF has channel widths of 0.01 keV, starting at 0,
    # so both 0.3 and 8.0 fall on a bin boundary. So, it's either
    # a difference in < or <= (or > vs >=), or a rounding issue
    # due to floating-point conversion leading to one bin boundary
    # being slightly different in Sherpa vs XSPEC.
    #
    # When using ui.notice(0.3, 8.0); ui.get_indep(filter=True)
    # returns 772 channels, 30 to 801.
    #
    # Using ui.notice(0.3, 7.995) selects channels 30 to 800. So
    # this range is used. Alternatively, channel 801 could have been
    # excluded explicitly.
    #
    # ui.notice(0.3, 8.0)
    ui.notice(0.3, 7.995)

    # XSPEC 12.9.1b calculation of the statistic:
    #   chi sq = 203.88 from 771 bins with 769 dof
    #   cstat  = 568.52
    #
    # There are known differences between XSPEC and Sherpa
    # with chi2xspecvar. This only affects data sets where
    # there is background subtraction, which is not the case
    # here. See https://github.com/sherpa/sherpa/issues/356
    #
    ui.set_stat('chi2xspecvar')
    stat_xvar = ui.get_stat_info()

    assert len(stat_xvar) == 1
    stat_xvar = stat_xvar[0]
    assert stat_xvar.numpoints == 771
    assert stat_xvar.dof == 769
    assert_allclose(stat_xvar.statval, 203.88,
                    rtol=0, atol=0.005)

    ui.set_stat('cstat')
    stat_cstat = ui.get_stat_info()

    assert len(stat_cstat) == 1
    stat_cstat = stat_cstat[0]
    assert stat_cstat.numpoints == 771
    assert stat_cstat.dof == 769
    assert_allclose(stat_cstat.statval, 568.52,
                    rtol=0, atol=0.005)

    ui.clean()
Example #22
def _calc_fluxes(ids, z=0, dataScale=None, nsims=10):
    flux = np.full((len(ids), 3), np.nan)
    rf_flux = np.full((len(ids), 3), np.nan)
    rf_int_flux = np.full((len(ids), 3), np.nan)
    fx = np.full((3, 3), np.nan)

    if dataScale is None:
        # Estimate average flux of all detectors, with no errors
        for i, sid in enumerate(ids):
            # Observed flux at 0.2-12 keV obs. frame (no abs. corr.)
            flux[i, 0] = shp.calc_energy_flux(id=sid, lo=0.2, hi=12.0)

            # Observed flux at 2-10 keV rest frame (only Gal. abs. corr.)
            shp.set_par("absgal.nH", val=0)
            rf_flux[i, 0] = shp.calc_energy_flux(id=sid,
                                                 lo=2.0 / (1 + z),
                                                 hi=10.0 / (1 + z))

            # Observed flux at 2-10 keV rest frame (abs. corr.)
            shp.set_par("abs1.nH", val=0)
            rf_int_flux[i, 0] = shp.calc_energy_flux(id=sid,
                                                     lo=2.0 / (1 + z),
                                                     hi=10.0 / (1 + z))
        fx[0, 0] = np.mean(flux[:, 0])
        fx[1, 0] = np.mean(rf_flux[:, 0])
        fx[2, 0] = np.mean(rf_int_flux[:, 0])

    else:
        # Estimate average flux of all detectors sampling num times the
        # distribution of parameters assuming a multigaussian
        int_component = po1
        obs_component = abs1 * po1

        for i, sid in enumerate(ids):
            # Observed flux at 0.2-12 keV obs. frame (no abs. corr.)
            F = shp.sample_flux(lo=0.2,
                                hi=12,
                                id=sid,
                                num=nsims,
                                scales=dataScale,
                                Xrays=True)
            flux[i, :] = F[0]

            # Observed flux at 2-10 keV rest frame (only Gal. abs. corr.)
            F = shp.sample_flux(
                obs_component,
                lo=2 / (1 + z),
                hi=10 / (1 + z),
                id=sid,
                num=nsims,
                scales=dataScale,
                Xrays=True,
            )
            rf_flux[i, :] = F[1]

            # Observed flux at 2-10 keV rest frame (abs. corr.)
            F = shp.sample_flux(
                int_component,
                lo=2 / (1 + z),
                hi=10 / (1 + z),
                id=sid,
                num=nsims,
                scales=dataScale,
                Xrays=True,
            )
            rf_int_flux[i, :] = F[1]

        fx[0, 0] = np.average(flux[:, 0], weights=flux[:, 1]**-1)
        fx[1, 0] = np.average(rf_flux[:, 0], weights=rf_flux[:, 1]**-1)
        fx[2, 0] = np.average(rf_int_flux[:, 0], weights=rf_int_flux[:, 1]**-1)

        k = np.sqrt(len(ids))
        fx[0, 1:] = k / np.sum(flux[:, 2]**-1), k / np.sum(flux[:, 1]**-1)
        fx[1, 1:] = k / np.sum(rf_flux[:, 2]**-1), k / np.sum(rf_flux[:, 1]**-1)
        fx[2, 1:] = (
            k / np.sum(rf_int_flux[:, 2]**-1),
            k / np.sum(rf_int_flux[:, 1]**-1),
        )

    return fx
Example #23
def _set_par_vals(parnames, parvals):
    "Sets the parameters to the given values"

    for parname, parval in zip(parnames, parvals):
        ui.set_par(parname, parval)
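A hypothetical call, assuming a powlaw1d component named pl is part of the current source model:

_set_par_vals(['pl.gamma', 'pl.ampl'], [1.7, 1.6e-4])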
Example #24
def test_can_use_swift_data(make_data_path):
    """A basic check that we can read in and use the Swift data.

    Unlike the previous tests, which directly access the io module,
    this uses the ui interface.
    """

    # QUS are there pytest fixtures that ensure the state is
    # clean on entry and exit?
    ui.clean()

    # The Swift PHA file does not have the ANCRFILE/RESPFILE keywords
    # set up, so the responses have to be manually added.
    #
    ui.load_pha(make_data_path(PHAFILE))

    rmffile = make_data_path(RMFFILE)
    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        ui.load_rmf(rmffile)

    validate_replacement_warning(ws, 'RMF', rmffile)

    arffile = make_data_path(ARFFILE)
    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        ui.load_arf(arffile)

    validate_replacement_warning(ws, 'ARF', arffile)

    assert ui.get_analysis() == 'energy'

    arf = ui.get_arf()
    rmf = ui.get_rmf()
    assert arf.energ_lo[0] == EMIN
    assert rmf.energ_lo[0] == EMIN
    assert rmf.e_min[0] == 0.0

    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.ampl', 0.0003)

    stat = ui.calc_stat()

    # This check is purely a regression test, so the value has
    # not been externally validated.
    #
    assert_allclose(stat, 58.2813692358182)

    # Pick an energy range which isn't affected by the first
    # bin.
    #
    # Unfortunately, using a range of 0.3-8.0 gives 771 bins
    # in XSPEC - channels 30 to 800 - but 772 bins in Sherpa.
    # If I use ignore(None, 0.3); ignore(8.0, None) instead
    # then the result is 771 bins. This is because the e_min/max
    # of the RMF has channel widths of 0.01 keV, starting at 0,
    # so both 0.3 and 8.0 fall on a bin boundary. So, it's either
    # a difference in < or <= (or > vs >=), or a rounding issue
    # due to floating-point conversion leading to one bin boundary
    # being slightly different in Sherpa vs XSPEC.
    #
    # When using ui.notice(0.3, 8.0); ui.get_indep(filter=True)
    # returns 772 channels, 30 to 801.
    #
    # Using ui.notice(0.3, 7.995) selects channels 30 to 800. So
    # this range is used. Alternatively, channel 801 could have been
    # excluded explicitly.
    #
    # ui.notice(0.3, 8.0)
    ui.notice(0.3, 7.995)

    # XSPEC 12.9.1b calculation of the statistic:
    #   chi sq = 203.88 from 771 bins with 769 dof
    #   cstat  = 568.52
    #
    # There are known differences between XSPEC and Sherpa
    # with chi2xspecvar. This only affects data sets where
    # there is background subtraction, which is not the case
    # here. See https://github.com/sherpa/sherpa/issues/356
    #
    ui.set_stat('chi2xspecvar')
    stat_xvar = ui.get_stat_info()

    assert len(stat_xvar) == 1
    stat_xvar = stat_xvar[0]
    assert stat_xvar.numpoints == 771
    assert stat_xvar.dof == 769
    assert_allclose(stat_xvar.statval, 203.88,
                    rtol=0, atol=0.005)

    ui.set_stat('cstat')
    stat_cstat = ui.get_stat_info()

    assert len(stat_cstat) == 1
    stat_cstat = stat_cstat[0]
    assert stat_cstat.numpoints == 771
    assert stat_cstat.dof == 769
    assert_allclose(stat_cstat.statval, 568.52,
                    rtol=0, atol=0.005)

    ui.clean()
Example #25
import numpy as np
import sherpa.astro.ui as sau

# Define width of the source and the PSF
sigma_psf, sigma_source = 3, 4
# for relation of sigma and fwhm see
# http://cxc.harvard.edu/sherpa/ahelp/gauss2d.html
sigma_to_fwhm = np.sqrt(8 * np.log(2))  # ~ 2.35
sigma = np.sqrt(sigma_psf ** 2 + sigma_source ** 2)
fwhm = sigma_to_fwhm * sigma

# Seed the random number generator to make the output reproducible
np.random.seed(0)

sau.dataspace2d((200, 200))
sau.set_source('normgauss2d.source + const2d.background')
sau.set_par('source.xpos', 100)
sau.set_par('source.ypos', 100)
sau.set_par('source.ampl', 1e3)
sau.set_par('source.fwhm', fwhm)
sau.set_par('background.c0', 1)

sau.fake()
sau.save_model('model.fits.gz', clobber=True)
sau.save_data('counts.fits.gz', clobber=True)

sau.set_source('source')
sau.save_model('source.fits.gz', clobber=True)

sau.set_source('background')
sau.save_model('background.fits.gz', clobber=True)
Example #26
for i, src_model, rsp, bkg_model, bkg_scale in vals_iter:
    ds[i].set_full_model(rsp(src_model) + bkg_scale * bkg_model)
    ds[i].set_bkg_full_model(bkg_model)

## Fit background
ds.notice(0.5, 8.0)
ui.set_method("neldermead")
ui.set_stat("cash")

ui.thaw(c1.c0)
ui.thaw(c2.c0)
ui.fit_bkg()
ui.freeze(c1.c0)
ui.freeze(c2.c0)

ui.set_par(abs1.nh, 0.0398)
ui.freeze(abs1)
ui.fit()
ds.plot_fit()
# ds.plot_bkg_fit()

# FIT OUTPUT:
#
# Datasets              = 1, 2
# Method                = neldermead
# Statistic             = cash
# Initial fit statistic = -11105.6
# Final fit statistic   = -11105.6 at function evaluation 64
# Data points           = 1028
# Degrees of freedom    = 1026
# Change in statistic   = 0
Example #27
"""Compute results with Sherpa"""
from __future__ import print_function, division
# __doctest_skip__
__doctest_skip__ = ['*']
import numpy as np
import sherpa.astro.ui as sau

sau.load_data('counts.fits.gz')
sau.set_source('normgauss2d.source + const2d.background')
sau.set_stat('cstat')
# Ask for high-precision results
sau.set_method_opt('ftol', 1e-20)
sau.set_covar_opt('eps', 1e-20)

# Set start parameters close to simulation values to make the fit converge
sau.set_par('source.xpos', 101)
sau.set_par('source.ypos', 101)
sau.set_par('source.ampl', 1.1e3)
sau.set_par('source.fwhm', 10)
sau.set_par('background.c0', 1.1)

# Run fit and covariance estimation
# Results are automatically printed to the screen
sau.fit()
sau.covar()

# Sherpa uses fwhm instead of sigma as extension parameter ... need to convert
# http://cxc.harvard.edu/sherpa/ahelp/gauss2d.html
fwhm_to_sigma = 1. / np.sqrt(8 * np.log(2))
cov = sau.get_covar_results()
sigma = fwhm_to_sigma * cov.parvals[0]
Example #28
shp.set_method_opt('verbose', 1)

utils.setup('numosaic_fin_combined_40-50keV.img')

utils.load_components('40-50 keV/bg 150-212.5.model')

# CHXE model
shp.set_full_model(psf(shp.gauss2d.chxe * emap) + bg * emap)

# inner radii chosen
for r in [20, 24, 28, 32]:
    # from KP15, (ra, dec) = (266.4172 deg, -29.00716 deg)
    # systematic shifting error = 6" = 2.4 px
    # ellip = .52
    # theta = 57 deg (from positive north)
    shp.set_par(chxe.xpos, 497.4, 497.4 - 4, 497.4 + 4)
    shp.set_par(chxe.ypos, 499.1, 499.1 - 4, 499.1 + 4)
    shp.set_par(chxe.fwhm, 30, 1, 200)
    shp.set_par(chxe.ellip, .5)
    shp.set_par(chxe.theta, -.9948)
    shp.set_par(chxe.ampl, 1e-5)
    shp.thaw(chxe.ellip, chxe.xpos, chxe.ypos)

    print(shp.get_model())

    shp.ignore2d('circle(500,500,1000)')
    shp.notice2d('circle(500,500,60)')
    shp.ignore2d('circle(500,500,' + str(r) + ')')
    shp.fit()

    print(shp.get_model())
Example #29
def test_can_use_swift_data(make_data_path, clean_astro_ui):
    """A basic check that we can read in and use the Swift data.

    Unlike the previous tests, which directly access the io module,
    this uses the ui interface.
    """

    # The Swift PHA file does not have the ANCRFILE/RESPFILE keywords
    # set up, so the responses have to be manually added.
    #
    ui.load_pha(make_data_path(PHAFILE))

    rmffile = make_data_path(RMFFILE)
    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        ui.load_rmf(rmffile)

    validate_replacement_warning(ws, 'RMF', rmffile)

    arffile = make_data_path(ARFFILE)
    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        ui.load_arf(arffile)

    validate_replacement_warning(ws, 'ARF', arffile)

    assert ui.get_analysis() == 'energy'

    arf = ui.get_arf()
    rmf = ui.get_rmf()
    assert arf.energ_lo[0] == EMIN
    assert rmf.energ_lo[0] == EMIN
    assert rmf.e_min[0] == 0.0

    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.ampl', 0.0003)

    stat = ui.calc_stat()

    # This check is purely a regression test, so the value has
    # not been externally validated.
    #
    assert_allclose(stat, 58.2813692358182)

    # Pick an energy range which isn't affected by the first
    # bin.
    #
    # Unfortunately, using a range of 0.3-8.0 gives 771 bins
    # in XSPEC - channels 30 to 800 - but 770 bins in Sherpa,
    # channels 31 to 800.
    #
    # Note that the channel numbering starts at 0:
    # % dmlist target_sr.pha header,clean,raw | grep TLMIN
    # TLMIN1       = 0                    / Lowest legal channel number
    #
    # and so it's not clear what XSPEC means when it says 30-800.
    # From https://github.com/sherpa/sherpa/issues/1211#issuecomment-881647128
    # we have that the first bin it is using is
    #     0.29-0.30
    # and the last bin is
    #     7.99-8.00
    # and I've checked with iplot that it has renumbered the
    # channels to 1-1024 from 0-1023
    #
    # % dmlist swxpc0to12s6_20130101v014.rmf.gz"[ebounds][channel=28:31]" data,clean
    #  CHANNEL    E_MIN                E_MAX
    #         28     0.28000000119209     0.28999999165535
    #         29     0.28999999165535     0.30000001192093
    #         30     0.30000001192093     0.31000000238419
    #         31     0.31000000238419     0.31999999284744
    # % dmlist swxpc0to12s6_20130101v014.rmf.gz"[ebounds][channel=798:801]" data,clean
    #  CHANNEL    E_MIN                E_MAX
    #        798         7.9800000191         7.9899997711
    #        799         7.9899997711                  8.0
    #        800                  8.0         8.0100002289
    #        801         8.0100002289         8.0200004578
    #
    # If I use ignore(None, 0.3); ignore(8.0, None) instead then the
    # result is 771 bins (channels 31 to 800). This is because the
    # e_min/max of the RMF has channel widths of 0.01 keV, starting at
    # 0, so both 0.3 and 8.0 fall on a bin boundary. So, it's either a
    # difference in < or <= (or > vs >=), or a rounding issue due to
    # floating-point conversion leading to one bin boundary being
    # slightly different in Sherpa vs XSPEC.
    #
    # When using ui.notice(0.3, 8.0); ui.get_indep(filter=True)
    # returns 770 channels, 31 to 800.
    #
    # Using ui.notice(0.3, 7.995) selects channels 31 to 800.
    # Using ui.notice(0.299, 8.0) selects channels 30 to 800.
    # Using ui.notice(0.299, 7.995) selects channels 30 to 800.
    #
    ui.notice(0.299, 8.0)

    # Check the selected range
    pha = ui.get_data()
    expected = np.zeros(1024, dtype=bool)
    expected[29:800] = True
    assert pha.mask == pytest.approx(expected)
    assert pha.get_mask() == pytest.approx(expected)

    # XSPEC 12.9.1b calculation of the statistic:
    #   chi sq = 203.88 from 771 bins with 769 dof
    #   cstat  = 568.52
    #
    # There are known differences between XSPEC and Sherpa
    # with chi2xspecvar. This only affects data sets where
    # there is background subtraction, which is not the case
    # here. See https://github.com/sherpa/sherpa/issues/356
    #
    ui.set_stat('chi2xspecvar')
    stat_xvar = ui.get_stat_info()

    assert len(stat_xvar) == 1
    stat_xvar = stat_xvar[0]
    assert stat_xvar.numpoints == 771
    assert stat_xvar.dof == 769
    assert_allclose(stat_xvar.statval, 203.88, rtol=0, atol=0.005)

    ui.set_stat('cstat')
    stat_cstat = ui.get_stat_info()

    assert len(stat_cstat) == 1
    stat_cstat = stat_cstat[0]
    assert stat_cstat.numpoints == 771
    assert stat_cstat.dof == 769
    assert_allclose(stat_cstat.statval, 568.52, rtol=0, atol=0.005)
Example #30
    def simulate_null_images(img_file: str,
                             psf_file: str,
                             n_null_sims: int,
                             no_core: bool = False,
                             mcmciter: int = 5000,
                             **kwargs) -> None:
        """
        Simulates a specified number of baseline images for a given input observation and a psf file

        :param img_file: Path to the input image file
        :param psf_file: Path to the psf image file
        :param n_null_sims: Number of baseline replicates to be simulated
        :param no_core: Setting this to True will only generate baseline replicates with a flat background, while the default value includes a point source at the location of the core
        :param mcmciter: The number of MCMC samples to draw for simulating the baselines
        """
        print("Creating the null file")
        clean()
        set_stat("cstat")
        set_method("simplex")
        load_image(img_file)
        load_psf("mypsf", psf_file)
        set_psf(mypsf)

        if no_core:
            set_model(const2d.c0)
            set_par(c0.c0, min=0)
        else:
            set_model(gauss2d.q1 + const2d.c0)
            set_par(c0.c0, min=0)
            # set_par(q1.fwhm,max=0.5)
            guess(q1)
        fit()
        results = get_fit_results()
        save("core_source_fit.save", clobber=True)
        save_source("null_q1_c1.fits", clobber=True)
        covar()

        if no_core:
            for i in range(n_null_sims):
                fake()
                save_image("sim_null_{}.fits".format(i), clobber=True)
            clean()
            return

        normgauss1d.g1
        g1.pos = q1.fwhm
        g1.fwhm = get_covar_results().parmaxes[0]

        # check if there is a valid upper bound.
        print(get_covar_results())
        if (get_covar_results().parmaxes[0] is None
                or get_covar_results().parmins[1] is None
                or get_covar_results().parmins[0] is None):
            for i in range(n_null_sims):
                fake()
                save_image("sim_null_{}.fits".format(i), clobber=True)
            clean()
            return
        # if not go for the regular
        set_prior(q1.fwhm, g1)
        set_sampler_opt("defaultprior", False)
        set_sampler_opt("priorshape", [True, False, False, False, False])
        set_sampler_opt("originalscale", [True, True, True, True, True])
        if mcmciter < n_null_sims * 100:
            mcmciter = n_null_sims * 100

        # the following code throws an error sometimes #bug
        try:
            stats, accept, params = get_draws(1, niter=mcmciter)
        except Exception:
            params = [np.repeat(q1.fwhm.val, mcmciter)]

        # print('Simulating the null files')
        for i in range(n_null_sims):
            set_par(q1.fwhm, params[0][(i + 1) * 100 - 1])
            fake()
            save_image("sim_null_{}.fits".format(i), clobber=True)
        save_all(outfile="lira_input_baseline_sim.log", clobber=True)
        clean()
Example #31
def test_can_use_swift_data(make_data_path, is_known_warning):
    """A basic check that we can read in and use the Swift data.

    Unlike the previous tests, which directly access the io module,
    this uses the ui interface.
    """

    # QUS are there pytest fixtures that ensure the state is
    # clean on entry and exit?
    ui.clean()

    # The Swift PHA file does not have the ANCRFILE/RESPFILE keywords
    # set up, so the responses have to be manually added.
    #
    ui.load_pha(make_data_path(PHAFILE))

    rmffile = make_data_path(RMFFILE)
    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        ui.load_rmf(rmffile)

    validate_replacement_warning(ws, 'RMF', rmffile, is_known_warning)

    arffile = make_data_path(ARFFILE)
    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        ui.load_arf(arffile)

    validate_replacement_warning(ws, 'ARF', arffile, is_known_warning)

    assert ui.get_analysis() == 'energy'

    arf = ui.get_arf()
    rmf = ui.get_rmf()
    assert arf.energ_lo[0] == EMIN
    assert rmf.energ_lo[0] == EMIN
    assert rmf.e_min[0] == 0.0

    ui.set_source(ui.powlaw1d.pl)
    ui.set_par('pl.ampl', 0.0003)

    stat = ui.calc_stat()

    # This check is purely a regression test, so the value has
    # not been externally validated.
    #
    assert_allclose(stat, 58.2813692358182)

    # Pick an energy range which isn't affected by the first
    # bin.
    #
    # Unfortunately, using a range of 0.3-8.0 gives 771 bins
    # in XSPEC - channels 30 to 800 - but 772 bins in Sherpa.
    # If I use ignore(None, 0.3); ignore(8.0, None) instead
    # then the result is 771 bins. This is because the e_min/max
    # of the RMF has channel widths of 0.01 keV, starting at 0,
    # so both 0.3 and 8.0 fall on a bin boundary. So, it's either
    # a difference in < or <= (or > vs >=), or a rounding issue
    # due to floating-point conversion leading to one bin boundary
    # being slightly different in Sherpa vs XSPEC.
    #
    # When using ui.notice(0.3, 8.0); ui.get_indep(filter=True)
    # returns 772 channels, 30 to 801.
    #
    # Using ui.notice(0.3, 7.995) selects channels 30 to 800. So
    # this range is used. Alternatively, channel 801 could have been
    # excluded explicitly.
    #
    # ui.notice(0.3, 8.0)
    ui.notice(0.3, 7.995)

    # XSPEC 12.9.1b calculation of the statistic:
    #   chi sq = 203.88 from 771 bins with 769 dof
    #   cstat  = 568.52
    #
    # There are known differences between XSPEC and Sherpa
    # with chi2xspecvar. This only affects data sets where
    # there is background subtraction, which is not the case
    # here. See https://github.com/sherpa/sherpa/issues/356
    #
    ui.set_stat('chi2xspecvar')
    stat_xvar = ui.get_stat_info()

    assert len(stat_xvar) == 1
    stat_xvar = stat_xvar[0]
    assert stat_xvar.numpoints == 771
    assert stat_xvar.dof == 769
    assert_allclose(stat_xvar.statval, 203.88,
                    rtol=0, atol=0.005)

    ui.set_stat('cstat')
    stat_cstat = ui.get_stat_info()

    assert len(stat_cstat) == 1
    stat_cstat = stat_cstat[0]
    assert stat_cstat.numpoints == 771
    assert stat_cstat.dof == 769
    assert_allclose(stat_cstat.statval, 568.52,
                    rtol=0, atol=0.005)

    ui.clean()
Example #32
def renorm(id=None, cpt=None, bkg_id=None, names=None, limscale=1000.0):
    """Change the normalization of a model to match the data.

    The idea is to change the normalization to be a better match to
    the data, so that the search can be quicker. It can be considered
    to be like the `guess` command, but for the normalization. It
    is *only* intended to change the normalization to a value near
    the correct one; it *should not* be used for any sort of
    calculation without first doing a fit. It is also only going to
    give reasonable results for models where the predicted data of a
    model is linearly related to the normalization.

    Parameters
    ----------
    id : None, int, or str
       The data set identifier to use. A value of ``None`` uses the
       default identifier.
    cpt
       If not ``None``, the model component to use. When ``None``, the
       full source expression for the data set is used. There is no
       check that the ``id`` argument matches the component (i.e. that
       the component is included in the source model for the data set).
    bkg_id : None, int
       If not None then change the normalization of the model to the
       given background dataset.
    names : None or array of str
       The parameter names that should be changed (a case-insensitive
       comparison is made, and the name does not include the model
       name). If ``None`` then the default set of
       ``['ampl', 'norm']`` is used.
    limscale : float
       The min and max range of the normalization is set to the
       calculated value divided and multiplied by ``limscale``.
       These limits will be modified to match the hard limits of the
       parameter if they exceed them.

    See Also
    --------
    guess, ignore, notice, set_par

    Notes
    -----
    The normalization is computed so that the predicted model counts
    matches the observed counts for the currently-noticed data range,
    as long as parameter names match the ``names`` argument (or
    ['ampl', 'norm'] if that is ``None``) and the parameter is not
    frozen.

    If no matches are found, then no changes are made. Otherwise, a
    scale factor is created by summing up the data counts and dividing
    this by the model sum over the currently-noticed range. This scale
    factor is divided by the number of matching parameters, and then
    the parameter values are multiplied by this value. If a model
    contains multiple parameters matching the contents of the
    ``names`` argument then each one will be changed by this routine.

    It is not intended for use with source expressions
    created with `set_full_model`, and may not work well with
    image models that use a PSF (one set with `set_psf`).

    Examples
    --------

    Adjust the normalization of the gal component before fitting.

    >>> load_pha('src.pi')
    >>> subtract()
    >>> notice(0.5, 7)
    >>> set_source(xsphabs.galabs * xsapec.gal)
    >>> renorm()

    Change the normalization of a 2D model using the 'src' dataset.
    Only the ``src`` component is changed since the default value for
    the ``names`` parameter - that is ['ampl', 'norm'] - does not
    match the normalization parameter of the `const2d` model.

    >>> load_image('src', 'img.fits')
    >>> set_source('src', gauss2d.src + const2d.bgnd)
    >>> renorm('src')

    The names parameter is set so that both components are adjusted,
    and each component is assumed to contribute half the signal.

    >>> load_image(12, 'img.fits')
    >>> notice2d_id(12, 'srcfit.reg')
    >>> set_source(12, gauss2d.src12 + const2d.bgnd12)
    >>> renorm(12, names=['ampl', 'c0'])

    Change the minimum and maximum values of the normalization
    parameter to be the calculated value divided by and multiplied by
    1e4 respectively (these changes are made to the soft limits).

    >>> renorm(limscale=1e4)

    """

    if names is None:
        matches = ['ampl', 'norm']
    elif names == []:
        raise ArgumentErr('bad', 'names argument', '[]')
    else:
        matches = [n.lower() for n in names]

    if bkg_id is None:
        d = ui.get_data(id=id)
        m = ui.get_model(id=id)
    else:
        d = ui.get_bkg(id=id, bkg_id=bkg_id)
        m = ui.get_bkg_model(id=id, bkg_id=bkg_id)

    if cpt is not None:
        # In this case the get_[bkg_]model call is not needed above,
        # but leave in as it at least ensures there's a model defined
        # for the data set.
        m = cpt

    pars = [p for p in m.pars if p.name.lower() in matches and not p.frozen]
    npars = len(pars)
    if npars == 0:
        wmsg = "no thawed parameters found matching: {}".format(
            ", ".join(matches))
        warn(wmsg)
        return

    yd = d.get_dep(filter=True).sum()
    ym = d.eval_model_to_fit(m).sum()

    # argh; these are numpy floats, and they do not throw a
    # ZeroDivisionError, rather you get a RuntimeWarning message.
    # So explicitly convert to Python float.
    #
    try:
        scale = float(yd) / float(ym) / npars
    except ZeroDivisionError:
        error("model sum evaluated to 0; no re-scaling attempted")
        return

    for p in pars:
        newval = p.val * scale
        newmin = newval / limscale
        newmax = newval * limscale

        # Could do the limit/range checks and then call set_par,
        # but only do so if there's a problem.
        #
        try:
            ui.set_par(p, val=newval, min=newmin, max=newmax)

        except ParameterErr:
            # The following is not guaranteed to catch all cases;
            # e.g. if the new value is outside the hard limits.
            #
            minflag = newmin < p.hard_min
            maxflag = newmax > p.hard_max
            if minflag:
                newmin = p.hard_min
            if maxflag:
                newmax = p.hard_max

            ui.set_par(p, val=newval, min=newmin, max=newmax)

            # provide informational message after changing the
            # parameter
            if minflag and maxflag:
                reason = "to hard min and max limits"
            elif minflag:
                reason = "to the hard minimum limit"
            elif maxflag:
                reason = "to the hard maximum limit"
            else:
                # this should be impossible
                reason = "for an unknown reason"

            info("Parameter {} is restricted ".format(p.fullname) + reason)