Example #1
def test_list_samplers():
    """Ensure list_samplers returns a list."""

    mcmc = sim.MCMC()
    samplers = mcmc.list_samplers()

    assert isinstance(samplers, list)
    assert len(samplers) > 0
Example #2
def test_list_samplers_contents():
    """Are the expected values included"""

    # Test that the expected values exist in this list,
    # but do not enforce these are the only values. This is
    # a slightly-different return list to the non-astro version.
    #
    samplers = sim.MCMC().list_samplers()
    for expected in ['mh', 'metropolismh', 'pragbayes', 'fullbayes']:
        assert expected in samplers
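The two tests above exercise the sampler registry. Here is a minimal interactive sketch of that API, assuming a working Sherpa installation; the mixed casing used across these examples ('PragBayes', 'pragBayes', 'FullBAYes') indicates that set_sampler matches names case-insensitively.

from sherpa.astro import sim

mcmc = sim.MCMC()
# The names asserted in test_list_samplers_contents, e.g.
# ['metropolismh', 'mh', 'pragbayes', 'fullbayes'] (order not guaranteed).
print(mcmc.list_samplers())
mcmc.set_sampler('MetropolisMH')  # matched case-insensitively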
Example #3
def test_pragbayes_simarf(setup):
    fit = setup['fit']

    mcmc = sim.MCMC()
    mcmc.set_sampler('PragBayes')
    mcmc.set_sampler_opt("simarf", setup['simarf'])
    mcmc.set_sampler_opt("p_M", 0.5)
    mcmc.set_sampler_opt("nsubiter", 7)

    covar_results = fit.est_errors()
    cov = covar_results.extra_output

    niter = setup['niter']
    stats, accept, params = mcmc.get_draws(fit, cov, niter=niter)

    assert params.shape == (3, niter + 1)
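None of the parametrized tests shows the setup fixture itself. The skeleton below is inferred purely from the keys the tests access ('fit', 'simarf', 'pcaarf', 'niter'); how each object is built is a placeholder, not Sherpa's actual fixture, so the stub skips rather than construct anything.

import pytest

@pytest.fixture
def setup():
    # Skeleton only: the keys below are the ones the tests access.
    # Building 'fit' (a fit with three thawed parameters, per the
    # shape assertion above), 'simarf', and 'pcaarf' requires the
    # Sherpa test data files, so this stub skips instead.
    pytest.skip("setup fixture requires the Sherpa test data files")
    return {'fit': None, 'simarf': None, 'pcaarf': None, 'niter': 10}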
Example #4
def test_fullbayes_simarf_fails(setup):
    fit = setup['fit']

    mcmc = sim.MCMC()
    mcmc.set_sampler('FullBAYes')
    mcmc.set_sampler_opt("simarf", setup['simarf'])
    mcmc.set_sampler_opt("p_M", 0.5)
    mcmc.set_sampler_opt("nsubiter", 7)

    covar_results = fit.est_errors()
    cov = covar_results.extra_output

    niter = setup['niter']
    with pytest.raises(TypeError) as exc:
        mcmc.get_draws(fit, cov, niter=niter)

    assert str(exc.value) == 'Simulation ARF must be PCA for FullBayes not SIM1DAdd'
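The failure above is specific to the ARF type: the error message states that FullBayes requires a PCA-based simulation ARF, while the PragBayes examples run with either form. A hedged sketch of the passing variant, reusing the same fixture:

mcmc = sim.MCMC()
mcmc.set_sampler('FullBayes')
# A PCA ARF (as used in the pcaarf tests below) is the form FullBayes accepts.
mcmc.set_sampler_opt("simarf", setup['pcaarf'])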
Example #5
    def test_pragbayes_pcaarf(self):
        mcmc = sim.MCMC()

        self.abs1.nh = 0.092886
        self.p1.phoindex = 0.994544
        self.p1.norm = 9.26369

        mcmc.set_sampler("pragBayes")
        mcmc.set_sampler_opt("simarf", self.pcaarf)
        mcmc.set_sampler_opt("p_M", 0.5)
        mcmc.set_sampler_opt("nsubiter", 5)

        covar_results = self.fit.est_errors()
        cov = covar_results.extra_output

        niter = 10
        stats, accept, params = mcmc.get_draws(self.fit, cov, niter=niter)
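Examples #3, #5, and #6 all configure PragBayes through the same three options. Their meanings are not spelled out in these snippets; the sketch below reads them back, assuming get_sampler_opt is available alongside set_sampler_opt, with the interpretations in the comments given as assumptions only.

mcmc = sim.MCMC()
mcmc.set_sampler('PragBayes')
mcmc.set_sampler_opt('p_M', 0.5)      # assumed: probability of perturbing the ARF per iteration
mcmc.set_sampler_opt('nsubiter', 5)   # assumed: sub-iterations per ARF draw
print(mcmc.get_sampler_opt('p_M'))    # -> 0.5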
Example #6
    def test_pragbayes_simarf(self):
        datadir = SherpaTestCase.datadir
        if datadir is None:
            return

        mcmc = sim.MCMC()

        self.abs1.nh = 0.092886
        self.p1.phoindex = 0.994544
        self.p1.norm = 9.26369

        mcmc.set_sampler("PragBayes")
        mcmc.set_sampler_opt("simarf", self.simarf)
        mcmc.set_sampler_opt("p_M", 0.5)
        mcmc.set_sampler_opt("nsubiter", 7)

        covar_results = self.fit.est_errors()
        cov = covar_results.extra_output

        niter = 10
        stats, accept, params = mcmc.get_draws(self.fit, cov, niter=niter)
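Each call to get_draws above unpacks three values. Their shapes can be inferred from the assertion in Example #3 (params.shape == (3, niter + 1)): the chain holds the starting point plus one entry per iteration. A sketch of inspecting a run, under that inference:

stats, accept, params = mcmc.get_draws(self.fit, cov, niter=10)
# stats:  fit-statistic value at each step, length niter + 1 (inferred)
# accept: acceptance flag for each step (inferred)
# params: one row per thawed parameter, shape (npar, niter + 1),
#         matching the assertion in Example #3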
Example #7
def test_pragbayes_pcaarf_limits(sampler, setup, caplog, reset_seed):
    """Try and trigger limit issues.

    """

    from sherpa.astro.xspec import XSAdditiveModel, XSMultiplicativeModel, \
        XSwabs, XSpowerlaw

    # Set the seed for the RNG. The seed was adjusted to try and make
    # sure the coverage was "good" (i.e. hits parts of
    # sherpa/astro/sim/*bayes.py) whilst still passing the test and
    # reducing the runtime.  This is not a guarantee that this is the
    # "fastest" seed, just that it's one of the better ones I've seen.
    #
    np.random.seed(0x723c)

    class HackAbs(XSwabs):
        """Restrict hard limits"""

        def __init__(self, name='wabs'):
            self.nH = Parameter(name, 'nH', 0.1, 0, 1, 0, 1, '10^22 atoms / cm^2')
            XSMultiplicativeModel.__init__(self, name, (self.nH, ))

    class HackPowerLaw(XSpowerlaw):
        """Restrict hard limits"""
        def __init__(self, name='powerlaw'):
            self.PhoIndex = Parameter(name, 'PhoIndex', 1., 0.95, 1.05, 0.95, 1.05)
            self.norm = Parameter(name, 'norm', 9.2, 8.8, 9.7, 8.8, 9.7)
            XSAdditiveModel.__init__(self, name, (self.PhoIndex, self.norm))

    fit = setup['fit']

    mcmc = sim.MCMC()
    mcmc.set_sampler(sampler)
    mcmc.set_sampler_opt("simarf", setup['pcaarf'])
    mcmc.set_sampler_opt("p_M", 0.5)
    mcmc.set_sampler_opt("nsubiter", 5)

    covar_results = fit.est_errors()
    cov = covar_results.extra_output

    # Restrict the parameter values to try and trigger some
    # invalid proposal steps. It's not obvious how the soft,
    # hard, and prior function values all interact.
    #
    myabs = HackAbs()
    mypl = HackPowerLaw()

    pvals = np.asarray(covar_results.parvals)
    pmins = np.asarray(covar_results.parmins)
    pmaxs = np.asarray(covar_results.parmaxes)

    fit.model = myabs * mypl

    fit.model.thawedpars = pvals
    fit.model.thawedparmins = pvals + 2 * pmins  # pmins are < 0
    fit.model.thawedparmaxes = pvals + 2 * pmaxs

    # weight values away from the best-fit (does this actually
    # help?)
    #
    for par in fit.model.pars:
        mcmc.set_prior(par, inverse2)

    niter = setup['niter']
    with caplog.at_level(logging.INFO, logger='sherpa'):
        # Do nothing with the warning at the moment, which could be
        # a RuntimeWarning about the covariance matrix not being
        # positive-semidefinite. This is just needed to make sure
        # we don't trigger the default warning check.
        #
        with pytest.warns(Warning):
            stats, accept, params = mcmc.get_draws(fit, cov, niter=niter)

    # This is a lower bound, in case there's any messages from
    # the sampling (either before or after displaying the
    # 'Using Priors' message).
    #
    nrecords = len(caplog.record_tuples)
    assert nrecords > 3

    i = 0
    while caplog.record_tuples[i][2] != 'Using Priors:':
        i += 1
        assert i < nrecords

    assert i < (nrecords - 3)

    assert caplog.record_tuples[i + 1][2].startswith('wabs.nH: <function inverse2 at ')
    assert caplog.record_tuples[i + 2][2].startswith('powerlaw.PhoIndex: <function inverse2 at ')
    assert caplog.record_tuples[i + 3][2].startswith('powerlaw.norm: <function inverse2 at ')

    # It is not guaranteed what limits/checks we hit
    #
    have_hard_limit = False
    have_reject = False
    for loc, lvl, msg in caplog.record_tuples[i + 4:]:
        if msg.startswith('Draw rejected: parameter boundary exception'):
            have_reject = True
            assert lvl == logging.INFO

        elif msg.startswith('hard '):
            have_hard_limit = True
            assert lvl == logging.WARNING

    assert have_hard_limit
    assert have_reject
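The inverse2 prior passed to set_prior in the last example is not defined anywhere in this listing. Given its name and the "weight values away from the best-fit" comment, a plausible (hypothetical) definition is:

def inverse2(x):
    # Hypothetical: the name suggests a 1 / x**2 weighting.
    return 1.0 / (x * x)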