Code example #1
def test_cstat_rsppha():
    """What does CSTAT calculate when there is an RSP+PHA instrument model.

    This includes the AREASCAL when evaluating the model.

    See Also
    --------
    test_cstat_nophamodel, test_cstat_arfpha, test_cstat_rmfpha
    """

    dset, mdl, expected = setup_likelihood(scale=True)

    # use the full channel grid; the energy grid has to be
    # "the same" as the channel values since the model
    # has a dependency on the independent axis
    #
    egrid = 1.0 * np.concatenate((dset.channel, [dset.channel.max() + 1]))
    arf = make_arf(energ_lo=egrid[:-1], energ_hi=egrid[1:])
    rmf = make_ideal_rmf(e_min=egrid[:-1], e_max=egrid[1:])

    mdl_ascal = RSPModelPHA(arf, rmf, dset, mdl)

    stat = CStat()
    sval_ascal = stat.calc_stat(dset, mdl_ascal)

    assert_allclose(sval_ascal[0], expected)
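For reference, CStat implements the Poisson maximum-likelihood statistic in the XSPEC cstat form. A minimal NumPy sketch of that formula, for orientation only (it is not Sherpa's implementation and ignores any AREASCAL handling):

import numpy as np

def cstat_sketch(data, model):
    """Illustrative C statistic: 2 * sum(m - d + d * ln(d / m)).

    The d * ln(d / m) term is dropped for zero-count bins, following
    the usual convention; this is a sketch, not Sherpa's code.
    """
    d = np.asarray(data, dtype=float)
    m = np.asarray(model, dtype=float)
    terms = m - d
    pos = d > 0
    terms[pos] += d[pos] * np.log(d[pos] / m[pos])
    return 2.0 * terms.sum()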
Code example #2
def test_cstat_rmfpha():
    """What does CSTAT calculate when there is an RMF+PHA instrument model.

    This includes the AREASCAL when evaluating the model.

    See Also
    --------
    test_cstat_nophamodel, test_cstat_arfpha, test_cstat_rsppha
    """

    dset, mdl, expected = setup_likelihood(scale=True)

    # use the full channel grid; the energy grid has to be
    # "the same" as the channel values since the model
    # has a dependency on the independent axis
    #
    egrid = 1.0 * np.concatenate((dset.channel,
                                  [dset.channel.max() + 1]))
    rmf = create_delta_rmf(egrid[:-1], egrid[1:])

    mdl_ascal = RMFModelPHA(rmf, dset, mdl)

    stat = CStat()
    sval_ascal = stat.calc_stat(dset, mdl_ascal)

    assert sval_ascal[0] == pytest.approx(expected)
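As an aside, the "ideal" (delta) RMF built above is conceptually an identity response: each energy bin maps to exactly one channel, so folding the model through it leaves the per-bin values unchanged. A toy illustration of that idea, not the create_delta_rmf helper itself:

import numpy as np

nbins = 4
response = np.eye(nbins)                          # delta RMF: a perfectly diagonal response matrix
model_per_energy_bin = np.array([1.0, 2.0, 3.0, 4.0])
model_per_channel = response @ model_per_energy_bin
assert np.allclose(model_per_channel, model_per_energy_bin)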
Code example #3
def setup(make_data_path):
    from sherpa.astro.io import read_pha
    from sherpa.astro.xspec import XSwabs, XSpowerlaw

    old_level = logger.getEffectiveLevel()
    logger.setLevel(logging.CRITICAL)

    pha = make_data_path("refake_0934_1_21_1e4.fak")

    simarf = make_data_path("aref_sample.fits")
    pcaarf = make_data_path("aref_Cedge.fits")

    data = read_pha(pha)
    data.ignore(None, 0.3)
    data.ignore(7.0, None)

    rsp = Response1D(data)
    abs1 = XSwabs('abs1')
    p1 = XSpowerlaw('p1')
    model = rsp(abs1 * p1)

    abs1.nh = 0.092886
    p1.phoindex = 0.994544
    p1.norm = 9.26369

    fit = Fit(data, model, CStat(), NelderMead(), Covariance())

    yield {'simarf': simarf,
           'pcaarf': pcaarf,
           'niter': 10,
           'fit': fit}

    # Reset the logger
    logger.setLevel(old_level)
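This generator is a pytest fixture (presumably decorated with @pytest.fixture in the original file): the code before the yield is the setup, the yielded dictionary is handed to the test, and the logger level is restored afterwards. A hypothetical consumer, shown only to illustrate the contract; the test name and assertion are not from the Sherpa test suite:

def test_fit_runs(setup):
    # 'setup' is the fixture above; the dict exposes the prepared Fit object.
    fit = setup['fit']
    res = fit.fit()
    assert res.succeeded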
Code example #4
    def setUp(self):
        try:
            from sherpa.astro.io import read_pha
            from sherpa.astro.xspec import XSwabs, XSpowerlaw
        except ImportError:
            return
        # self.startdir = os.getcwd()
        self.old_level = logger.getEffectiveLevel()
        logger.setLevel(logging.CRITICAL)

        pha = self.make_path("refake_0934_1_21_1e4.fak")
        # rmf = self.make_path("ccdid7_default.rmf")
        # arf = self.make_path("quiet_0934.arf")

        self.simarf = self.make_path("aref_sample.fits")
        self.pcaarf = self.make_path("aref_Cedge.fits")

        data = read_pha(pha)
        data.ignore(None, 0.3)
        data.ignore(7.0, None)

        rsp = Response1D(data)
        self.abs1 = XSwabs('abs1')
        self.p1 = XSpowerlaw('p1')
        model = rsp(self.abs1 * self.p1)

        self.fit = Fit(data, model, CStat(), NelderMead(), Covariance())
Code example #5
def test_cstat_nophamodel():
    """What does CSTAT calculate when there is no PHA instrument model.

    The value here is technically wrong, in that the AREASCAL value
    is not being included in the calculation, but is included as a
    test to validate the current approach.

    See Also
    --------
    test_cstat_arfpha, test_cstat_rmfpha, test_cstat_rsppha
    """

    dset, mdl, expected = setup_likelihood(scale=False)

    stat = CStat()
    sval_noascal = stat.calc_stat(dset, mdl)

    assert_allclose(sval_noascal[0], expected)
Code example #6
def test_cstat_arfpha():
    """What does CSTAT calculate when there is an ARF+PHA instrument model.

    The value here is technically wrong, in that the AREASCAL value
    is not being included in the calculation, but is included as a
    test to validate this use case.

    See Also
    --------
    test_cstat_nophamodel, test_cstat_rmfpha, test_cstat_rsppha
    """

    dset, mdl, expected = setup_likelihood(scale=True)

    # Use the channel grid as the "energy axis".
    #
    arf = make_arf(energ_lo=dset.channel, energ_hi=dset.channel + 1)
    mdl_ascal = ARFModelPHA(arf, dset, mdl)

    stat = CStat()
    sval_ascal = stat.calc_stat(dset, mdl_ascal)

    assert_allclose(sval_ascal[0], expected)
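Conceptually, applying an ARF multiplies the model by the effective area in each bin; with a flat unit-area ARF (which the make_arf helper presumably builds here) the folded model equals the unfolded one. A toy illustration, not the helper itself:

import numpy as np

specresp = np.ones(5)                    # flat ARF: unit effective area in every bin
model_flux = np.array([2.0, 3.0, 4.0, 5.0, 6.0])
folded = model_flux * specresp           # ARF application is a per-bin multiplication
assert np.allclose(folded, model_flux)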
Code example #7
    def test_metropolisMH(self):

        self.fit.method = NelderMead()
        self.fit.stat = CStat()
        results = self.fit.fit()
        results = self.fit.est_errors()
        cov = results.extra_output

        mcmc = MCMC()
        mcmc.set_sampler('MetropolisMH')
        #mcmc.set_sampler_opt('verbose', True)

        log = logging.getLogger("sherpa")
        level = log.level
        log.setLevel(logging.ERROR)
        stats, accept, params = mcmc.get_draws(self.fit, cov, niter=1e2)
        log.setLevel(level)
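The three arrays returned by get_draws can be summarised directly. A sketch of how that might look, assuming the usual Sherpa layout of stats and accept as 1-D arrays over the draws and params as one row per thawed parameter (that layout is an assumption here, not shown in the listing):

import numpy as np

acceptance_rate = np.mean(accept)            # fraction of accepted proposals
param_medians = np.median(params, axis=1)    # one summary value per thawed parameter
best_stat = np.min(stats)                    # best statistic value seen along the chain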
Code example #8
File: test_stats.py    Project: wmclaugh/sherpa
def test_cstat_stat(hide_logging, reset_xspec, setup):
    fit = Fit(setup['data'], setup['model'], CStat(), NelderMead())
    results = fit.fit()

    _fit_cstat_results_bench = {
        'succeeded': 1,
        'numpoints': 460,
        'dof': 457,
        'istatval': 21647.62293983995,
        'statval': 472.6585691450068,
        'parvals': numpy.array([1.75021021282262,
                                5.474614304244775,
                                -1.9985761873334102])
    }

    compare_results(_fit_cstat_results_bench, results)
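The compare_results helper is defined elsewhere in test_stats.py and is not part of this listing. A hypothetical minimal version, written here only to show what the benchmark dictionary is checked against:

import pytest

def compare_results(bench, results, tol=1e-6):
    # Hypothetical helper: compare the FitResults fields with the benchmark values.
    assert results.succeeded == bench['succeeded']
    assert results.numpoints == bench['numpoints']
    assert results.dof == bench['dof']
    assert results.istatval == pytest.approx(bench['istatval'], rel=tol)
    assert results.statval == pytest.approx(bench['statval'], rel=tol)
    assert results.parvals == pytest.approx(bench['parvals'], rel=tol)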
Code example #9
File: test_stats.py    Project: vorugantia/sherpa
    def test_cstat_stat(self):
        fit = Fit(self.data, self.model, CStat(), NelderMead())
        results = fit.fit()
        self.compare_results(self._fit_cstat_results_bench, results)
Code example #10
File: simulate.py    Project: spidersaint/sherpa
    def run(fit,
            null_comp,
            alt_comp,
            conv_mdl=None,
            stat=None,
            method=None,
            niter=500,
            numcores=None):

        if stat is None:
            stat = CStat()
        if method is None:
            method = NelderMead()

        if not isinstance(stat, (Cash, CStat)):
            raise TypeError("Sherpa fit statistic must be Cash or CStat" +
                            " for likelihood ratio test")

        niter = int(niter)

        alt = alt_comp
        null = null_comp

        oldaltvals = numpy.array(alt.thawedpars)
        oldnullvals = numpy.array(null.thawedpars)

        data = fit.data

        if conv_mdl is not None:
            # Copy the PSF
            null_conv_mdl = deepcopy(conv_mdl)

            alt = conv_mdl(alt_comp)
            if hasattr(conv_mdl, 'fold'):
                conv_mdl.fold(data)

            # Convolve the null model
            null = null_conv_mdl(null_comp)
            if hasattr(null_conv_mdl, 'fold'):
                null_conv_mdl.fold(data)

        nullfit = Fit(data, null, stat, method, Covariance())

        # Fit with null model
        nullfit_results = nullfit.fit()
        debug(nullfit_results.format())

        null_stat = nullfit_results.statval
        null_vals = nullfit_results.parvals

        # Calculate niter samples using null best-fit and covariance
        sampler = NormalParameterSampleFromScaleMatrix()
        samples = sampler.get_sample(nullfit, None, niter)

        # Fit with alt model, null component starts at null's best fit params.
        altfit = Fit(data, alt, stat, method, Covariance())
        altfit_results = altfit.fit()
        debug(altfit_results.format())

        alt_stat = altfit_results.statval
        alt_vals = altfit_results.parvals

        LR = -(alt_stat - null_stat)

        def worker(proposal, *args, **kwargs):
            return LikelihoodRatioTest.calculate(nullfit, altfit, proposal,
                                                 null_vals, alt_vals)

        olddep = data.get_dep(filter=False)
        try:
            #statistics = map(worker, samples)
            statistics = parallel_map(worker, samples, numcores)
        finally:
            data.set_dep(olddep)
            alt.thawedpars = list(oldaltvals)
            null.thawedpars = list(oldnullvals)

        debug("statistic null = " + repr(null_stat))
        debug("statistic alt = " + repr(alt_stat))
        debug("LR = " + repr(LR))

        statistics = numpy.asarray(statistics)

        pppvalue = numpy.sum(statistics[:, 2] > LR) / (1.0 * niter)

        debug('ppp value = ' + str(pppvalue))

        return LikelihoodRatioResults(statistics[:, 2], statistics[:, 0:2],
                                      samples, LR, pppvalue, null_stat,
                                      alt_stat)
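The final step above computes the p-value as the fraction of simulated likelihood-ratio statistics that exceed the observed one. A toy version of just that step, detached from Sherpa and using made-up numbers:

import numpy as np

simulated_lr = np.array([0.1, 2.3, 0.7, 4.1, 0.0])   # made-up simulated LR statistics
observed_lr = 1.5                                     # made-up observed LR
pppvalue = np.sum(simulated_lr > observed_lr) / float(len(simulated_lr))
print(pppvalue)                                       # 0.4 for these made-up numbers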