Example #1
    def __init__(self, model, nwalkers, checkpoint_interval=None,
                 logpost_function=None, nprocesses=1, use_mpi=False):

        self.model = model
        # create a wrapper for calling the model
        if logpost_function is None:
            logpost_function = 'logposterior'
        model_call = models.CallModel(model, logpost_function)

        # Set up the pool
        if nprocesses > 1:
            # these are used to help parallelize over multiple cores / MPI
            models._global_instance = model_call
            model_call = models._call_global_model
        pool = choose_pool(mpi=use_mpi, processes=nprocesses)
        if pool is not None:
            pool.count = nprocesses

        # set up emcee
        self._nwalkers = nwalkers
        ndim = len(model.variable_params)
        self._sampler = emcee.EnsembleSampler(nwalkers, ndim, model_call,
                                              pool=pool)
        # emcee uses its own internal random number generator; we'll set it
        # to have the same state as the numpy generator
        rstate = numpy.random.get_state()
        self._sampler.random_state = rstate
        self._checkpoint_interval = checkpoint_interval
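
A minimal usage sketch for the constructor above. The class name
EmceeEnsembleSampler and the pre-built model instance are assumptions
here (neither is shown in the snippet):

# Hypothetical instantiation; model is assumed to be a configured
# pycbc.inference.models model exposing variable_params.
from pycbc.inference.sampler import EmceeEnsembleSampler

sampler = EmceeEnsembleSampler(model, nwalkers=200,
                               checkpoint_interval=1000,
                               nprocesses=4, use_mpi=False)
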
Example #2
def sampler_from_cli(opts, likelihood_evaluator, pool=None):
    """Parses the given command-line options to set up a sampler.

    Parameters
    ----------
    opts : object
        ArgumentParser options.
    likelihood_evaluator : LikelihoodEvaluator
        The likelihood evaluator to use with the sampler.
    pool : object, optional
        Ignored; the pool is rebuilt from the options via choose_pool.

    Returns
    -------
    pycbc.inference.sampler
        A sampler initialized based on the given arguments.
    """
    # Used to help parallelize over multiple cores / MPI
    if opts.nprocesses > 1:
        likelihood._global_instance = likelihood_evaluator
        likelihood_call = likelihood._call_global_likelihood
    else:
        likelihood_call = None

    sclass = pycbc.inference.sampler.samplers[opts.sampler]

    pool = choose_pool(mpi=opts.use_mpi, processes=opts.nprocesses)

    if pool is not None:
        pool.count = opts.nprocesses

    return sclass.from_cli(opts, likelihood_evaluator,
                           pool=pool, likelihood_call=likelihood_call)
Example #3
    def __init__(self, model, nwalkers,
                 checkpoint_interval=None, checkpoint_signal=None,
                 logpost_function=None, nprocesses=1, use_mpi=False):

        self.model = model
        # create a wrapper for calling the model
        if logpost_function is None:
            logpost_function = 'logposterior'
        model_call = models.CallModel(model, logpost_function)

        # Set up the pool
        if nprocesses > 1:
            # these are used to help parallelize over multiple cores / MPI
            models._global_instance = model_call
            model_call = models._call_global_model
        pool = choose_pool(mpi=use_mpi, processes=nprocesses)
        if pool is not None:
            pool.count = nprocesses

        # set up emcee
        self.nwalkers = nwalkers
        ndim = len(model.variable_params)
        self._sampler = emcee.EnsembleSampler(nwalkers, ndim, model_call,
                                              pool=pool)
        # emcee uses its own internal random number generator; we'll set it
        # to have the same state as the numpy generator
        rstate = numpy.random.get_state()
        self._sampler.random_state = rstate
        self._checkpoint_interval = checkpoint_interval
        self._checkpoint_signal = checkpoint_signal
Example #4
def sampler_from_cli(opts, likelihood_evaluator, pool=None):
    """Parses the given command-line options to set up a sampler.

    Parameters
    ----------
    opts : object
        ArgumentParser options.
    likelihood_evaluator : LikelihoodEvaluator
        The likelihood evaluator to use with the sampler.
    pool : object, optional
        Ignored; the pool is rebuilt from the options via choose_pool.

    Returns
    -------
    pycbc.inference.sampler
        A sampler initialized based on the given arguments.
    """
    # Used to help parallelize over multiple cores / MPI
    if opts.nprocesses > 1:
        likelihood._global_instance = likelihood_evaluator
        likelihood_call = likelihood._call_global_likelihood
    else:
        likelihood_call = None

    sclass = pycbc.inference.sampler.samplers[opts.sampler]
    # check for consistency
    if opts.skip_burn_in and opts.min_burn_in is not None:
        raise ValueError("both skip-burn-in and min-burn-in specified")

    pool = choose_pool(mpi=opts.use_mpi, processes=opts.nprocesses)

    if pool is not None:
        pool.count = opts.nprocesses

    return sclass.from_cli(opts, likelihood_evaluator,
                           pool=pool, likelihood_call=likelihood_call)
Example #5
    def __init__(self,
                 model,
                 nlive,
                 dlogz,
                 nprocesses=1,
                 loglikelihood_function=None,
                 use_mpi=False,
                 **kwargs):
        self.model = model
        # Set up the pool
        model_call = DynestyModel(model, loglikelihood_function)
        if nprocesses > 1:
            # these are used to help parallelize over multiple cores / MPI
            models._global_instance = model_call
            log_likelihood_call = _call_global_loglikelihood
            prior_call = _call_global_logprior
        else:
            prior_call = model_call.prior_transform
            log_likelihood_call = model_call.log_likelihood
        pool = choose_pool(mpi=use_mpi, processes=nprocesses)
        if pool is not None:
            pool.size = nprocesses

        self.nlive = nlive
        self.dlogz = dlogz
        self.names = model.sampling_params
        self.ndim = len(model.sampling_params)
        self.checkpoint_file = None
        self._sampler = dynesty.NestedSampler(log_likelihood_call,
                                              prior_call,
                                              self.ndim,
                                              nlive=self.nlive,
                                              dlogz=self.dlogz,
                                              pool=pool,
                                              **kwargs)
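
For reference, a hypothetical instantiation of the dynesty wrapper
above; the class name DynestySampler is an assumption. dlogz is the
evidence tolerance for the nested-sampling stopping criterion, so
smaller values give a longer, more precise run:

ns = DynestySampler(model, nlive=1000, dlogz=0.1, nprocesses=4)
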
Example #6
def sampler_from_cli(opts, model, pool=None):
    """Parses the given command-line options to set up a sampler.

    Parameters
    ----------
    opts : object
        ArgumentParser options.
    model : model
        The model to use with the sampler.
    pool : object, optional
        Ignored; the pool is rebuilt from the options via choose_pool.

    Returns
    -------
    gwin.sampler
        A sampler initialized based on the given arguments.
    """
    # create a wrapper for the model
    model = models.CallModel(model, opts.logpost_function)

    # Used to help parallelize over multiple cores / MPI
    if opts.nprocesses > 1:
        models._global_instance = model
        model_call = models._call_global_model
    else:
        model_call = None

    sclass = sampler.samplers[opts.sampler]

    pool = choose_pool(mpi=opts.use_mpi, processes=opts.nprocesses)

    if pool is not None:
        pool.count = opts.nprocesses

    return sclass.from_cli(opts, model, pool=pool, model_call=model_call)
Example #7
    def __init__(self, model, nlive, nprocesses=1,
                 loglikelihood_function=None, use_mpi=False, run_kwds=None,
                 **kwargs):
        self.model = model
        log_likelihood_call, prior_call = setup_calls(
            model,
            nprocesses=nprocesses,
            loglikelihood_function=loglikelihood_function)
        # Set up the pool
        pool = choose_pool(mpi=use_mpi, processes=nprocesses)
        if pool is not None:
            pool.size = nprocesses

        self.run_kwds = {} if run_kwds is None else run_kwds
        self.nlive = nlive
        self.names = model.sampling_params
        self.ndim = len(model.sampling_params)
        self.checkpoint_file = None
        if self.nlive < 0:
            # Interpret a negative input value for the number of live points
            # (which is clearly an invalid input in all senses)
            # as the desire to dynamically determine that number
            self._sampler = dynesty.DynamicNestedSampler(log_likelihood_call,
                                                         prior_call, self.ndim,
                                                         pool=pool, **kwargs)
        else:
            self._sampler = dynesty.NestedSampler(log_likelihood_call,
                                                  prior_call, self.ndim,
                                                  nlive=self.nlive,
                                                  pool=pool, **kwargs)
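
As coded above, a negative nlive is repurposed as a flag: it selects
dynesty's DynamicNestedSampler, which tunes the number of live points
on the fly, while any non-negative value requests the static
NestedSampler. A hypothetical call (the class name is an assumption):

dynamic = DynestySampler(model, nlive=-1, nprocesses=2)  # dynamic sampler
static = DynestySampler(model, nlive=500, nprocesses=2)  # fixed live points
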
Example #8
def sampler_from_cli(opts, likelihood_evaluator, pool=None):
    """Parses the given command-line options to set up a sampler.

    Parameters
    ----------
    opts : object
        ArgumentParser options.
    likelihood_evaluator : LikelihoodEvaluator
        The likelihood evaluator to use with the sampler.
    pool : object, optional
        Ignored; the pool is rebuilt from the options via choose_pool.

    Returns
    -------
    gwin.sampler
        A sampler initialized based on the given arguments.
    """
    # Used to help parallelize over multiple cores / MPI
    if opts.nprocesses > 1:
        likelihood._global_instance = likelihood_evaluator
        likelihood_call = likelihood._call_global_likelihood
    else:
        likelihood_call = None

    sclass = sampler.samplers[opts.sampler]

    pool = choose_pool(mpi=opts.use_mpi, processes=opts.nprocesses)

    if pool is not None:
        pool.count = opts.nprocesses

    return sclass.from_cli(opts,
                           likelihood_evaluator,
                           pool=pool,
                           likelihood_call=likelihood_call)
Example #9
    def __init__(self, model, ntemps, nwalkers, betas=None,
                 checkpoint_interval=None, checkpoint_signal=None,
                 loglikelihood_function=None,
                 nprocesses=1, use_mpi=False):

        self.model = model

        # create a wrapper for calling the model
        if loglikelihood_function is None:
            loglikelihood_function = 'loglikelihood'
        # frustratingly, emcee_pt does not support blob data, so we have to
        # turn it off
        model_call = models.CallModel(model, loglikelihood_function,
                                      return_all_stats=False)

        # these are used to help parallelize over multiple cores / MPI
        models._global_instance = model_call
        model_call = models._call_global_model
        prior_call = models._call_global_model_logprior
        self.pool = choose_pool(mpi=use_mpi, processes=nprocesses)

        # construct the sampler: PTSampler needs the likelihood and prior
        # functions separately
        ndim = len(model.variable_params)
        self._sampler = emcee.PTSampler(ntemps, nwalkers, ndim,
                                        model_call, prior_call, pool=self.pool,
                                        betas=betas)
        self.nwalkers = nwalkers
        self._ntemps = ntemps
        self._checkpoint_interval = checkpoint_interval
        self._checkpoint_signal = checkpoint_signal
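
A hedged instantiation of the parallel-tempered wrapper above; the
class name EmceePTSampler is an assumption. Per-sample stats (blob
data) are unavailable because emcee's PTSampler does not support them:

pt = EmceePTSampler(model, ntemps=4, nwalkers=200,
                    checkpoint_interval=2000, nprocesses=4)
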
Example #10
    def __init__(self,
                 model,
                 nlive,
                 nprocesses=1,
                 loglikelihood_function=None,
                 use_mpi=False,
                 run_kwds=None,
                 **kwargs):
        self.model = model
        log_likelihood_call, prior_call = setup_calls(
            model,
            nprocesses=nprocesses,
            loglikelihood_function=loglikelihood_function)
        # Set up the pool
        pool = choose_pool(mpi=use_mpi, processes=nprocesses)
        if pool is not None:
            pool.size = nprocesses

        self.run_kwds = {} if run_kwds is None else run_kwds
        self.nlive = nlive
        self.names = model.sampling_params
        self.ndim = len(model.sampling_params)
        self.checkpoint_file = None
        self._sampler = dynesty.NestedSampler(log_likelihood_call,
                                              prior_call,
                                              self.ndim,
                                              nlive=self.nlive,
                                              pool=pool,
                                              **kwargs)
Example #11
    def __init__(self, model, nchains, ntemps=None, betas=None,
                 proposals=None, default_proposal=None,
                 default_proposal_args=None, seed=None,
                 swap_interval=1,
                 checkpoint_interval=None, checkpoint_signal=None,
                 loglikelihood_function=None,
                 nprocesses=1, use_mpi=False):

        # create the betas if not provided
        if betas is None:
            betas = default_beta_ladder(len(model.variable_params),
                                        ntemps=ntemps)
        self.model = model
        # create a wrapper for calling the model
        model_call = _EpsieCallModel(model, loglikelihood_function)
        # Set up the pool
        if nprocesses > 1:
            # these are used to help parallelize over multiple cores / MPI
            models._global_instance = model_call
            model_call = models._call_global_model
        pool = choose_pool(mpi=use_mpi, processes=nprocesses)
        if pool is not None:
            pool.count = nprocesses
        # initialize the sampler
        self._sampler = ParallelTemperedSampler(
            model.sampling_params, model_call, nchains, betas=betas,
            swap_interval=swap_interval,
            proposals=proposals, default_proposal=default_proposal,
            default_proposal_args=default_proposal_args,
            seed=seed, pool=pool)
        # set other parameters
        self._nwalkers = nchains
        self._ntemps = ntemps
        self._checkpoint_interval = checkpoint_interval
        self._checkpoint_signal = checkpoint_signal
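
A hypothetical call for the epsie-based constructor above (the class
name EpsieSampler is an assumption); when betas is omitted, the ladder
is built by default_beta_ladder from ntemps and the model dimension:

ep = EpsieSampler(model, nchains=64, ntemps=4, swap_interval=2,
                  nprocesses=4)
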
Example #12
    def __init__(self, model, ntemps, nwalkers, betas=None,
                 checkpoint_interval=None, checkpoint_signal=None,
                 loglikelihood_function=None,
                 nprocesses=1, use_mpi=False):

        self.model = model

        # create a wrapper for calling the model
        if loglikelihood_function is None:
            loglikelihood_function = 'loglikelihood'
        # frustratingly, emcee_pt does not support blob data, so we have to
        # turn it off
        model_call = models.CallModel(model, loglikelihood_function,
                                      return_all_stats=False)

        # Set up the pool
        if nprocesses > 1:
            # these are used to help parallelize over multiple cores / MPI
            models._global_instance = model_call
            model_call = models._call_global_model
            prior_call = models._call_global_model_logprior
        else:
            prior_call = models.CallModel(model, 'logprior',
                                          return_all_stats=False)
        pool = choose_pool(mpi=use_mpi, processes=nprocesses)
        if pool is not None:
            pool.count = nprocesses

        # construct the sampler: PTSampler needs the likelihood and prior
        # functions separately
        ndim = len(model.variable_params)
        self._sampler = emcee.PTSampler(ntemps, nwalkers, ndim,
                                        model_call, prior_call, pool=pool,
                                        betas=betas)
        self._nwalkers = nwalkers
        self._ntemps = ntemps
        self._checkpoint_interval = checkpoint_interval
        self._checkpoint_signal = checkpoint_signal
Example #13
    def __init__(self,
                 model,
                 nlive,
                 nprocesses=1,
                 checkpoint_time_interval=None,
                 maxcall=None,
                 loglikelihood_function=None,
                 use_mpi=False,
                 no_save_state=False,
                 run_kwds=None,
                 extra_kwds=None,
                 internal_kwds=None,
                 **kwargs):

        self.model = model
        self.no_save_state = no_save_state
        log_likelihood_call, prior_call = setup_calls(
            model,
            loglikelihood_function=loglikelihood_function,
            copy_prior=True)
        # Set up the pool
        self.pool = choose_pool(mpi=use_mpi, processes=nprocesses)

        self.maxcall = maxcall
        self.checkpoint_time_interval = checkpoint_time_interval
        self.run_kwds = {} if run_kwds is None else run_kwds
        # rebind the local too, so the bare uses of extra_kwds below are
        # safe when None is passed
        extra_kwds = {} if extra_kwds is None else extra_kwds
        self.extra_kwds = extra_kwds
        self.internal_kwds = {} if internal_kwds is None else internal_kwds
        self.nlive = nlive
        self.names = model.sampling_params
        self.ndim = len(model.sampling_params)
        self.checkpoint_file = None
        # Enable checkpointing if checkpoint_time_interval is set in config
        # file in sampler section
        if self.checkpoint_time_interval:
            self.run_with_checkpoint = True
            if self.maxcall is None:
                self.maxcall = 5000 * self.pool.size
            logging.info(
                "Checkpointing enabled, will verify every %s calls"
                " and try to checkpoint every %s seconds", self.maxcall,
                self.checkpoint_time_interval)
        else:
            self.run_with_checkpoint = False

        # Check for cyclic boundaries
        periodic = []
        cyclic = self.model.prior_distribution.cyclic
        for i, param in enumerate(self.variable_params):
            if param in cyclic:
                logging.info('Param: %s will be cyclic', param)
                periodic.append(i)

        if len(periodic) == 0:
            periodic = None

        # Check for reflected boundaries. Dynesty only supports
        # reflection on both min and max of boundary.
        reflective = []
        reflect = self.model.prior_distribution.well_reflected
        for i, param in enumerate(self.variable_params):
            if param in reflect:
                logging.info("Param: %s will be well reflected", param)
                reflective.append(i)

        if len(reflective) == 0:
            reflective = None

        if 'sample' in extra_kwds:
            if 'rwalk2' in extra_kwds['sample']:
                dynesty.dynesty._SAMPLING["rwalk"] = sample_rwalk_mod
                dynesty.nestedsamplers._SAMPLING["rwalk"] = sample_rwalk_mod
                extra_kwds['sample'] = 'rwalk'

        if self.nlive < 0:
            # Interpret a negative input value for the number of live points
            # (which is clearly an invalid input in all senses)
            # as the desire to dynamically determine that number
            self._sampler = dynesty.DynamicNestedSampler(log_likelihood_call,
                                                         prior_call,
                                                         self.ndim,
                                                         pool=self.pool,
                                                         reflective=reflective,
                                                         periodic=periodic,
                                                         **extra_kwds)
            self.run_with_checkpoint = False
            logging.info("Checkpointing not currently supported with"
                         "DYNAMIC nested sampler")
        else:
            self._sampler = dynesty.NestedSampler(log_likelihood_call,
                                                  prior_call,
                                                  self.ndim,
                                                  nlive=self.nlive,
                                                  reflective=reflective,
                                                  periodic=periodic,
                                                  pool=self.pool,
                                                  **extra_kwds)
        self._sampler.kwargs.update(internal_kwds)

        # properties of the internal sampler which should not be pickled
        self.no_pickle = [
            'loglikelihood', 'prior_transform', 'propose_point',
            'update_proposal', '_UPDATE', '_PROPOSE', 'evolve_point',
            'use_pool', 'queue_size', 'use_pool_ptform', 'use_pool_logl',
            'use_pool_evolve', 'use_pool_update', 'pool', 'M'
        ]
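
A sketch of enabling time-based checkpointing in the wrapper above,
assuming the class name DynestySampler; per the log message, the
interval is in seconds, and leaving maxcall unset makes it default to
5000 calls per pool process:

smplr = DynestySampler(model, nlive=1500, nprocesses=8,
                       checkpoint_time_interval=1800)
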
Example #14
    def __init__(self,
                 model,
                 nwalkers,
                 ntemps=None,
                 Tmax=None,
                 betas=None,
                 adaptive=False,
                 adaptation_lag=None,
                 adaptation_time=None,
                 scale_factor=None,
                 loglikelihood_function=None,
                 checkpoint_interval=None,
                 checkpoint_signal=None,
                 nprocesses=1,
                 use_mpi=False):

        self.model = model
        ndim = len(model.variable_params)
        # create temperature ladder if needed
        if ntemps is None and Tmax is None and betas is None:
            raise ValueError("must provide either ntemps/Tmax or betas")
        if betas is None:
            betas = ptemcee.make_ladder(ndim, ntemps=ntemps, Tmax=Tmax)
        # construct the keyword arguments to pass; if a kwarg is None, we
        # won't pass it, resulting in ptemcee's defaults being used
        kwargs = {}
        kwargs['adaptive'] = adaptive
        kwargs['betas'] = betas
        if adaptation_lag is not None:
            kwargs['adaptation_lag'] = adaptation_lag
        if adaptation_time is not None:
            kwargs['adaptation_time'] = adaptation_time
        if scale_factor is not None:
            kwargs['scale_factor'] = scale_factor
        # create a wrapper for calling the model
        if loglikelihood_function is None:
            loglikelihood_function = 'loglikelihood'
        # frustratingly, ptemcee does not support blob data, so we have to
        # turn it off
        model_call = models.CallModel(model,
                                      loglikelihood_function,
                                      return_all_stats=False)
        # these are used to help parallelize over multiple cores / MPI
        models._global_instance = model_call
        model_call = models._call_global_model
        prior_call = models._call_global_model_logprior
        self.pool = choose_pool(mpi=use_mpi, processes=nprocesses)
        # construct the sampler
        self._sampler = ptemcee.Sampler(nwalkers=nwalkers,
                                        ndim=ndim,
                                        logl=model_call,
                                        logp=prior_call,
                                        mapper=self.pool.map,
                                        **kwargs)
        self.nwalkers = nwalkers
        self._ntemps = ntemps
        self._checkpoint_interval = checkpoint_interval
        self._checkpoint_signal = checkpoint_signal
        # we'll initialize ensemble and chain to None
        self._chain = None
        self._ensemble = None
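
Finally, a hedged example for the ptemcee wrapper above; at least one
of ntemps, Tmax, or betas must be supplied, otherwise the constructor
raises ValueError. The class name PtemceeSampler is an assumption:

pt = PtemceeSampler(model, nwalkers=100, ntemps=8, Tmax=50,
                    adaptive=True, nprocesses=4)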