Code example #1
def sample_vp(vparams, draws=1000, model=None, random_seed=20090425):
    """Draw samples from variational posterior. 

    Parameters
    ----------
    vparams : dict or pymc3.variational.ADVIFit
        Estimated variational parameters of the model. 
    draws : int
        Number of random samples. 
    model : pymc3.Model
        Probabilistic model. 
    random_seed : int
        Seed of random number generator. 

    Returns
    -------
    trace : pymc3.backends.base.MultiTrace
        Samples drawn from the variational posterior. 
    """
    model = modelcontext(model)

    if isinstance(vparams, ADVIFit):
        vparams = {
            'means': vparams.means, 
            'stds': vparams.stds
        }

    # Make dict for replacements of random variables
    r = MRG_RandomStreams(seed=random_seed)
    updates = {}
    for var in model.free_RVs:
        u = theano.shared(vparams['means'][str(var)]).ravel()
        w = theano.shared(vparams['stds'][str(var)]).ravel()
        n = r.normal(size=u.tag.test_value.shape)
        updates.update({var: (n * w + u).reshape(var.tag.test_value.shape)})
    # Replace some nodes of the graph with variational distributions
    vars = model.free_RVs
    samples = theano.clone(vars, updates)
    f = theano.function([], samples)

    varnames = [str(var) for var in model.unobserved_RVs]
    trace = NDArray(model=model, vars=model.unobserved_RVs)
    trace.setup(draws=draws, chain=0)

    for i in range(draws):
        # 'point' is like {'var1': np.array(0.1), 'var2': np.array(0.2), ...}
        point = {varname: value for varname, value in zip(varnames, f())}
        trace.record(point)

    return MultiTrace([trace])
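
A minimal usage sketch for this helper, assuming an older pymc3 (around 3.1) whose ``pm.variational.advi`` returns an ``ADVIFit``; the toy model and data below are hypothetical:

import numpy as np
import pymc3 as pm

# Hypothetical toy model; advi() returns an ADVIFit(means, stds, elbo_vals)
# in pymc3 ~3.1, which sample_vp accepts directly.
with pm.Model() as model:
    mu = pm.Normal('mu', mu=0.0, sd=10.0)
    pm.Normal('obs', mu=mu, sd=1.0, observed=np.random.randn(100))
    v_params = pm.variational.advi(n=5000)
    trace = sample_vp(v_params, draws=1000, model=model)

print(trace['mu'].mean())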
Code example #2
from pymc3.backends.base import MultiTrace   # import assumed for this snippet


def subtrace(trace, chains, reset_chains=True):
    """Return a MultiTrace containing a subset of chains from a larger MultiTrace."""
    if isinstance(chains, int):
        chains = [chains]
    elif not isinstance(chains, list):
        raise TypeError('chains is of type {}. Expected int or list of int'
                        .format(type(chains)))
    straces = []
    for chain in chains:
        if chain not in trace.chains:
            raise ValueError('Invalid chain {} not in {}'.format(
                chain, trace.chains))
        straces.append(trace._straces[chain])
    if reset_chains:
        for new_chain_i, strace in enumerate(straces):
            strace.chain = new_chain_i
    return MultiTrace(straces)
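
Usage sketch, with a hypothetical ``trace`` holding chains 0, 1 and 2:

# Keep only chains 0 and 2; with reset_chains=True they are renumbered 0 and 1.
sub = subtrace(trace, [0, 2])
assert sub.chains == [0, 1]

# Passing a single int also works and yields a one-chain MultiTrace.
single = subtrace(trace, 1, reset_chains=False)

Note that ``reset_chains=True`` mutates the ``chain`` attribute on the underlying straces, which are shared with the parent MultiTrace.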
Code example #3
File: ndarray.py  Project: themrzmaster/pymc3
# Imports assumed for this snippet (standard pymc3 3.x locations):
from typing import Dict, List, Optional

import numpy as np
from pymc3.backends.base import MultiTrace
from pymc3.backends.ndarray import NDArray
from pymc3.model import Model, modelcontext


def point_list_to_multitrace(point_list: List[Dict[str, np.ndarray]],
                             model: Optional[Model] = None) -> MultiTrace:
    """transform point list into MultiTrace"""
    _model = modelcontext(model)
    varnames = list(point_list[0].keys())
    with _model:
        chain = NDArray(model=_model, vars=[_model[vn] for vn in varnames])
        chain.setup(draws=len(point_list), chain=0)

        # Since we are simply loading a trace by hand, we only need a vacuous
        # function for chain.record() to use; this overrides the default.
        def point_fun(point):
            return [point[vn] for vn in varnames]

        chain.fn = point_fun
        for point in point_list:
            chain.record(point)
    return MultiTrace([chain])
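
A small usage sketch under the assumed pymc3 3.x imports above, with a hypothetical one-variable model:

import numpy as np
import pymc3 as pm

with pm.Model() as m:
    pm.Normal('mu', 0.0, 1.0)

# Two hand-written points become a two-draw, single-chain MultiTrace.
points = [{'mu': np.array(0.1)}, {'mu': np.array(0.2)}]
mtrace = point_list_to_multitrace(points, model=m)
print(mtrace['mu'])   # [0.1 0.2]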
Code example #4
    def posterior_to_trace(self):
        """
        Save results into a PyMC3 trace
        """
        length_pos = len(self.posterior)
        varnames = [v.name for v in self.variables]

        with self.model:
            strace = NDArray(self.model)
            strace.setup(length_pos, 0)
        for i in range(length_pos):
            value = []
            size = 0
            for var in varnames:
                shape, new_size = self.var_info[var]
                value.append(self.posterior[i][size: size + new_size].reshape(shape))
                size += new_size
            strace.record(dict(zip(varnames, value)))
        return MultiTrace([strace])
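
The inner loop unpacks one flat posterior row into named, shaped variables using the ``(shape, size)`` pairs stored in ``var_info``. A standalone sketch of just that reshaping step, with hypothetical values:

import numpy as np

# Hypothetical var_info: 'mu' is a scalar, 'sigma' has shape (3,).
var_info = {'mu': ((), 1), 'sigma': ((3,), 3)}
row = np.array([0.5, 1.0, 2.0, 3.0])   # one flattened posterior sample

size = 0
point = {}
for var, (shape, new_size) in var_info.items():
    point[var] = row[size: size + new_size].reshape(shape)
    size += new_size
# point == {'mu': array(0.5), 'sigma': array([1., 2., 3.])}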
Code example #5
def sample_vp(
        vparams, draws=1000, model=None, local_RVs=None, random_seed=None,
        hide_transformed=True, progressbar=True):
    """Draw samples from variational posterior.

    Parameters
    ----------
    vparams : dict or pymc3.variational.ADVIFit
        Estimated variational parameters of the model.
    draws : int
        Number of random samples.
    model : pymc3.Model
        Probabilistic model.
    local_RVs : dict or None
        Local random variables for mini-batch ADVI, mapping each local RV to
        ``(uw, scaling)``, where ``uw`` holds the variational mean and
        log standard deviation.
    random_seed : int or None
        Seed of random number generator. None to use current seed.
    hide_transformed : bool
        If False, transformed variables are also sampled. Default is True.
    progressbar : bool
        Whether to display a progress bar. Default is True.

    Returns
    -------
    trace : pymc3.backends.base.MultiTrace
        Samples drawn from the variational posterior.
    """
    model = pm.modelcontext(model)

    if isinstance(vparams, ADVIFit):
        vparams = {
            'means': vparams.means,
            'stds': vparams.stds
        }

    ds = model.deterministics

    def get_transformed(v):
        return v if v not in ds else v.transformed

    def rvs(x):
        return [get_transformed(v) for v in x] if x is not None else []

    global_RVs = list(set(model.free_RVs) - set(rvs(local_RVs)))

    # Make dict for replacements of random variables
    if random_seed is None:
        r = MRG_RandomStreams(gen_random_state())
    else:
        r = MRG_RandomStreams(seed=random_seed)
    updates = {}
    for v in global_RVs:
        u = theano.shared(vparams['means'][str(v)]).ravel()
        w = theano.shared(vparams['stds'][str(v)]).ravel()
        n = r.normal(size=u.tag.test_value.shape)
        updates.update({v: (n * w + u).reshape(v.tag.test_value.shape)})

    if local_RVs is not None:
        for v_, (uw, _) in local_RVs.items():
            v = get_transformed(v_)
            u = uw[0].ravel()
            w = uw[1].ravel()
            n = r.normal(size=u.tag.test_value.shape)
            updates.update(
                {v: (n * tt.exp(w) + u).reshape(v.tag.test_value.shape)})

    # Replace some nodes of the graph with variational distributions
    vars = model.free_RVs
    samples = theano.clone(vars, updates)
    f = theano.function([], samples)

    # Random variables which will be sampled
    if hide_transformed:
        vars_sampled = [v_ for v_ in model.unobserved_RVs
                        if not str(v_).endswith('_')]
    else:
        vars_sampled = [v_ for v_ in model.unobserved_RVs]

    varnames = [str(var) for var in model.unobserved_RVs]
    trace = pm.sampling.NDArray(model=model, vars=vars_sampled)
    trace.setup(draws=draws, chain=0)

    range_ = trange(draws) if progressbar else range(draws)

    for _ in range_:
        # 'point' is like {'var1': np.array(0.1), 'var2': np.array(0.2), ...}
        point = {varname: value for varname, value in zip(varnames, f())}
        trace.record(point)

    return MultiTrace([trace])
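
Both replacement rules above are instances of the Gaussian reparameterization trick: a draw z ~ N(u, w^2) is written as z = u + w * n with n ~ N(0, 1); for local RVs, w is stored as a log standard deviation, hence the ``tt.exp(w)``. A NumPy-only illustration:

import numpy as np

rng = np.random.default_rng(0)
u, w = 1.0, 0.5                    # variational mean and std
n = rng.standard_normal(100000)    # standard normal noise
z = u + w * n                      # reparameterized draws from N(u, w^2)
print(z.mean(), z.std())           # close to 1.0 and 0.5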
Code example #6
def opt_nfo(
        #Optimization parameters
        #initialization
        n0=10,  #int, n0 the initial number of draws
        init_samples=None,  #array, Whether to provide some pre-defined sequence or do pymc3 sampling
        #approximation
        k_trunc=np.inf,  #IW clipping, not used by default
        eps_z=0.01,  #float, tolerance on Z for q iter convergence (eps') #currently not used since not iterating SINF unless trainable
        nf_iter=1,  #int, number of NF iters -should always be 1 in our implementation
        #annealing
        N=10,  #int, N the TOTAL number of draws we want at each iteration - no longer used; left over from when we ran multiple fits
        t_ess=0.5,  #float, ESS<t_ess*n0 t threshold on ESS for ESS3 (no longer temperature)
        g_AF=0,  #float, size of gradient contribution to AF, not used now
        #exploration
        N_AF=1000,  #int, number of points to use in q_w sampling for AF
        expl_top_AF=1,  #int,cut for the top AF at a given temp level accepted at each beta
        expl_latent=0,  #int,latent draw from around top IW1 or around random draw from q_w, accepted at each step
        expl_top_qw=0,  #int,keep top q_w at this iteration
        beta_max=1,  #float>0,highest exponent on tempered posterior, support >1 for exploitation
        rel_beta=1,  #0<float<1, β2 = rel_beta*β, where β2 is the lower temp level used for sampling q_w, what we call 'X'
        frac_rel_beta_AF=1,  #int, the modifier to the AF used to up/down-weight the w vs uw contribution, what we call "Y"
        latent_sigma=None,  #float, the value of l
        use_latent_beta2=False,  #whether to get the latent sample from q_w(β2) or from q_uw
        use_pq_beta_IW1=False,  #whether to get the latent sample from near top IW1 or randomly from q_w
        bounds=None,  #array, size 2xd, bounding box for samples FIXME make this more obvious, needed for prior
        N_temp=25,  #int, cutoff on number of allowed temp iterations before giving up -> #FIXME eventually make this throw error
        #NF parameters
        model=None,
        frac_validate=0.0,
        iteration=None,
        alpha_w=(0, 0),
        alpha_uw=(0, 0),
        verbose=False,
        n_component=None,
        interp_nbin=None,
        KDE=True,
        bw_factor_min=1.0,
        bw_factor_max=1.0,
        bw_factor_num=1,
        rel_bw=1,
        edge_bins=None,
        ndata_wT=None,
        MSWD_max_iter=None,
        NBfirstlayer=True,
        logit=False,
        Whiten=False,
        trainable_qw=False,  #whether to improve our q_w at each beta iteration with SGD
        sgd_steps=0,  #number of steps used in Adam when training trainable q_w
        knots_trainable=5,
        batchsize=None,
        nocuda=False,
        patch=False,
        shape=[28, 28, 1],
        #Runtime
        random_seed=-1,
        parallel=False,
        cores=None):
    r"""
    Normalizing flow-based Bayesian Optimization.

    Parameters
    ----------
    n0: int
        Initial number of draws.
    init_samples: array
        Optional pre-defined sequence of initial samples; when None, the
        initial draws come from pymc3 sampling.
    nf_iter: int
        Number of NF fit iterations; should always be 1 in this implementation.
    t_ess: float
        Threshold on the effective sample size, as a fraction of ``n0``.
    model: Model (optional if in ``with`` context).
    frac_validate: float
        Fraction of the samples at each iteration that we use for validation of the NF fit.
    alpha_w, alpha_uw: tuple of floats
        Regularization parameters used for the NF fits.
    verbose: boolean
        Whether you want verbose output from the NF fit.
    random_seed: int
        random seed
    parallel: bool
        Distribute computations across cores if the number of cores is larger than 1.
        Defaults to False.
    cores : int
        Number of cores available for the optimization step. Defaults to None, in which case the CPU
        count is used.

    """

    _log = logging.getLogger("pymc3")
    _log.info("Initializing normalizing flow-based optimization...")

    model = modelcontext(model)
    if model.name:
        raise NotImplementedError(
            "The NS_NFO implementation currently does not support named models. "
            "See https://github.com/pymc-devs/pymc3/pull/4365.")
    if cores is None:
        cores = _cpu_count()
    chains = 1

    _log.info(f"Sampling {chains} chain{'s' if chains > 1 else ''} "
              f"Cores available for optimization: {cores}")
    if random_seed == -1:
        random_seed = None
    if chains == 1 and isinstance(random_seed, int):
        random_seed = [random_seed]
    if random_seed is None or isinstance(random_seed, int):
        if random_seed is not None:
            np.random.seed(random_seed)
        random_seed = [np.random.randint(2**30) for _ in range(chains)]
    if not isinstance(random_seed, Iterable):
        raise TypeError(
            "Invalid value for `random_seed`. Must be tuple, list or int")

    # Public names were chosen for end-user readability; internally we are more familiar with the short names.
    aN, bN, cN, dN = N_AF, expl_top_AF, expl_latent, expl_top_qw

    params = (
        n0,
        init_samples,
        k_trunc,
        eps_z,
        nf_iter,
        N,
        t_ess,
        g_AF,
        aN,
        bN,
        cN,
        dN,
        beta_max,
        rel_beta,
        frac_rel_beta_AF,
        latent_sigma,
        use_latent_beta2,
        use_pq_beta_IW1,
        bounds,
        N_temp,
        model,
        frac_validate,
        iteration,
        alpha_w,
        alpha_uw,
        cores,
        verbose,
        n_component,
        interp_nbin,
        KDE,
        bw_factor_min,
        bw_factor_max,
        bw_factor_num,
        rel_bw,
        edge_bins,
        ndata_wT,
        MSWD_max_iter,
        NBfirstlayer,
        logit,
        Whiten,
        trainable_qw,
        sgd_steps,
        knots_trainable,
        batchsize,
        nocuda,
        patch,
        shape,
        parallel,
    )

    t1 = time.time()

    results = []
    for i in range(chains):
        results.append(opt_nfo_int(*params, random_seed[i], i, _log))
    (
        traces,
        log_evidence,
        q_samples,
        importance_weights,
        logp,
        logq,
        train_logp,
        train_logq,
        logZ,
        q_models,
        q_ess,
        total_ess,
        min_var_bws,
        min_pq_bws,
        betas,
    ) = zip(*results)
    trace = MultiTrace(traces)
    trace.report.log_evidence = log_evidence
    trace.report.q_samples = q_samples
    trace.report.importance_weights = importance_weights
    trace.report.logp = logp
    trace.report.logq = logq
    trace.report.train_logp = train_logp
    trace.report.train_logq = train_logq
    trace.report.logZ = logZ
    trace.report.q_models = q_models
    trace.report.q_ess = q_ess
    trace.report.total_ess = total_ess
    trace.report.N = N
    trace.report.min_var_bws = min_var_bws
    trace.report.min_pq_bws = min_pq_bws
    trace.report._t_sampling = time.time() - t1
    trace.report.betas = betas
    return trace
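
The ``zip(*results)`` idiom above transposes the list of per-chain result tuples into per-field tuples before they are attached to ``trace.report``. Minimal illustration with dummy two-field results:

# Two chains, each returning a (trace, log_evidence) pair.
results = [('trace-0', -1.2), ('trace-1', -1.3)]
traces, log_evidence = zip(*results)
# traces == ('trace-0', 'trace-1'); log_evidence == (-1.2, -1.3)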
Code example #7
def sample_nfmc(draws=500,
                init_draws=500,
                resampling_draws=500,
                init_ess=100,
                init_method='prior',
                init_samples=None,
                start=None,
                sample_mode='reinit',
                finish_regularized=False,
                cull_lowp_tol=0.05,
                init_EL2O='adam',
                mean_field_EL2O=False,
                use_hess_EL2O=False,
                absEL2O=1e-10,
                fracEL2O=1e-2,
                EL2O_draws=100,
                maxiter_EL2O=500,
                EL2O_optim_method='L-BFGS-B',
                scipy_map_method='L-BFGS-B',
                adam_lr=1e-3,
                adam_b1=0.9,
                adam_b2=0.999,
                adam_eps=1.0e-8,
                adam_steps=1000,
                simulator=None,
                model_data=None,
                sim_data_cov=None,
                sim_size=None,
                sim_params=None,
                sim_start=None,
                sim_optim_method='lbfgs',
                sim_tol=0.01,
                local_thresh=3,
                local_step_size=0.1,
                local_grad=True,
                init_local=True,
                full_local=False,
                nf_local_iter=3,
                max_line_search=100,
                k_trunc=0.25,
                norm_tol=0.01,
                ess_tol=0.5,
                optim_iter=1000,
                ftol=2.220446049250313e-9,
                gtol=1.0e-5,
                nf_iter=3,
                model=None,
                frac_validate=0.1,
                iteration=None,
                final_iteration=None,
                alpha=(0, 0),
                final_alpha=(0.75, 0.75),
                verbose=False,
                n_component=None,
                interp_nbin=None,
                KDE=True,
                bw_factor_min=0.5,
                bw_factor_max=2.5,
                bw_factor_num=11,
                edge_bins=None,
                ndata_wT=None,
                MSWD_max_iter=None,
                NBfirstlayer=True,
                logit=False,
                Whiten=False,
                batchsize=None,
                nocuda=False,
                patch=False,
                shape=[28, 28, 1],
                redraw=True,
                random_seed=-1,
                parallel=False,
                chains=None,
                cores=None):
    r"""
    Normalizing flow-based Monte Carlo (NFMC) sampling.

    Parameters
    ----------
    draws: int
        The number of samples to draw from the posterior (i.e. last stage). This is also the
        number of independent chains. Defaults to 500.
    start: dict, or array of dict
        Starting point in parameter space. It should be a list of dict with length `chains`.
        When None (default) the starting point is sampled from the prior distribution.
    init_method: str
        Tells us how to initialize the NFMC fits. Default is 'prior'. If this is supplied along with init_samples
        we use those instead. Current options are 'prior', 'full_rank', 'lbfgs'.
    norm_tol: float
        Fractional difference in the evidence estimate between two steps. If it falls below this we
        stop iterating over the NF fits.
    optim_iter: int
        Maximum number of optimization steps to run during the initialization.
    nf_iter: int
        Number of NF fit iterations to go through after the optimization step.
    model: Model (optional if in ``with`` context).
    frac_validate: float
        Fraction of the live points at each NS iteration that we use for validation of the NF fit.
    alpha: tuple of floats
        Regularization parameters used for the NF fit.
    verbose: boolean
        Whether you want verbose output from the NF fit.
    random_seed: int
        random seed
    parallel: bool
        Distribute computations across cores if the number of cores is larger than 1.
        Defaults to False.
    cores : int
        Number of cores available for the optimization step. Defaults to None, in which case the CPU
        count is used.
    chains : int
        The number of chains to sample. Running independent chains is important for some
        convergence statistics. If ``None`` (default), a single chain is used.

    """

    _log = logging.getLogger("pymc3")
    _log.info("Initializing normalizing flow based sampling...")

    model = modelcontext(model)
    if model.name:
        raise NotImplementedError(
            "The NS_NFMC implementation currently does not support named models. "
            "See https://github.com/pymc-devs/pymc3/pull/4365.")
    if cores is None:
        cores = _cpu_count()
    if chains is None:
        # Guard: `chains` is used below in range(chains); default to one chain.
        chains = 1

    _log.info(f"Sampling {chains} chain{'s' if chains > 1 else ''} "
              f"Cores available for optimization: {cores}")

    if random_seed == -1:
        random_seed = None
    if chains == 1 and isinstance(random_seed, int):
        random_seed = [random_seed]
    if random_seed is None or isinstance(random_seed, int):
        if random_seed is not None:
            np.random.seed(random_seed)
        random_seed = [np.random.randint(2**30) for _ in range(chains)]
    if not isinstance(random_seed, Iterable):
        raise TypeError(
            "Invalid value for `random_seed`. Must be tuple, list or int")

    assert sample_mode in ('reinit', 'keep_local', 'function_approx')

    params = (
        draws,
        init_draws,
        resampling_draws,
        init_ess,
        init_method,
        init_samples,
        start,
        sample_mode,
        finish_regularized,
        cull_lowp_tol,
        init_EL2O,
        mean_field_EL2O,
        use_hess_EL2O,
        absEL2O,
        fracEL2O,
        EL2O_draws,
        maxiter_EL2O,
        EL2O_optim_method,
        scipy_map_method,
        adam_lr,
        adam_b1,
        adam_b2,
        adam_eps,
        adam_steps,
        simulator,
        model_data,
        sim_data_cov,
        sim_size,
        sim_params,
        sim_start,
        sim_optim_method,
        sim_tol,
        local_thresh,
        local_step_size,
        local_grad,
        init_local,
        full_local,
        nf_local_iter,
        max_line_search,
        k_trunc,
        norm_tol,
        ess_tol,
        optim_iter,
        ftol,
        gtol,
        nf_iter,
        model,
        frac_validate,
        iteration,
        final_iteration,
        alpha,
        final_alpha,
        cores,
        verbose,
        n_component,
        interp_nbin,
        KDE,
        bw_factor_min,
        bw_factor_max,
        bw_factor_num,
        edge_bins,
        ndata_wT,
        MSWD_max_iter,
        NBfirstlayer,
        logit,
        Whiten,
        batchsize,
        nocuda,
        patch,
        shape,
        redraw,
        parallel,
    )

    t1 = time.time()

    results = []
    for i in range(chains):
        results.append(sample_nfmc_int(*params, random_seed[i], i, _log))
    (traces, log_evidence, q_samples, importance_weights, total_samples,
     total_weights, logp, logq, train_logp, train_logq, logZ, q_models, q_ess,
     train_ess, total_ess, min_var_bws, min_pq_bws) = zip(*results)
    trace = MultiTrace(traces)
    trace.report.log_evidence = log_evidence
    trace.report.q_samples = q_samples
    trace.report.importance_weights = importance_weights
    trace.report.total_samples = total_samples
    trace.report.total_weights = total_weights
    trace.report.logp = logp
    trace.report.logq = logq
    trace.report.train_logp = train_logp
    trace.report.train_logq = train_logq
    trace.report.logZ = logZ
    trace.report.q_models = q_models
    trace.report.q_ess = q_ess
    trace.report.train_ess = train_ess
    trace.report.total_ess = total_ess
    trace.report._n_draws = draws
    trace.report.min_var_bws = min_var_bws
    trace.report.min_pq_bws = min_pq_bws
    trace.report._t_sampling = time.time() - t1

    return trace
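
The seed-handling block that recurs in each of these samplers expands a single seed (or -1, or None) into one independent seed per chain. Restated as a standalone helper for clarity (the function name is hypothetical):

import numpy as np

def expand_random_seed(random_seed, chains):
    """Return one seed per chain, mirroring the inline logic above."""
    if random_seed == -1:
        random_seed = None
    if chains == 1 and isinstance(random_seed, int):
        return [random_seed]
    if random_seed is None or isinstance(random_seed, int):
        if random_seed is not None:
            np.random.seed(random_seed)
        return [np.random.randint(2**30) for _ in range(chains)]
    return list(random_seed)   # assume an iterable of per-chain seeds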
Code example #8
def sample_ns_nfmc(
    draws=2000,
    start=None,
    rho=0.01,
    epsilon=0.01,
    model=None,
    frac_validate=0.8,
    alpha=(0,0),
    verbose=False,
    random_seed=-1,
    parallel=False,
    chains=None,
    cores=None,
):
    r"""
    Normalizing flow based nested sampling.

    Parameters
    ----------
    draws: int
        The number of samples to draw from the posterior (i.e. last stage). This is also the
        number of independent chains. Defaults to 2000.
    start: dict, or array of dict
        Starting point in parameter space. It should be a list of dict with length `chains`.
        When None (default) the starting point is sampled from the prior distribution.
    rho: float
        Sets fraction of points we want to be above the likelihood threshold at each iteration.
        Used to adaptively set the likelihood threshold during sampling.
    epsilon: float
        Stopping factor for the algorithm. At each iteration we compare the ratio of the evidences
        from the current and previous iterations. If it is less than 1-epsilon we stop.
    model: Model (optional if in ``with`` context).
    frac_validate: float
        Fraction of the live points at each NS iteration that we use for validation of the NF fit.
    alpha: tuple of floats
        Regularization parameters used for the NF fit. 
    verbose: boolean
        Whether you want verbose output from the NF fit.
    random_seed: int
        random seed
    parallel: bool
        Distribute computations across cores if the number of cores is larger than 1.
        Defaults to False.
    cores : int
        The number of chains to run in parallel. If ``None``, set to the number of CPUs in the
        system.
    chains : int
        The number of chains to sample. Running independent chains is important for some
        convergence statistics. If ``None`` (default), then set to either ``cores`` or 2, whichever
        is larger.

    """
    _log = logging.getLogger("pymc3")
    _log.info("Initializing normalizing flow based nested sampling...")

    model = modelcontext(model)
    if model.name:
        raise NotImplementedError(
            "The NS_NFMC implementation currently does not support named models. "
            "See https://github.com/pymc-devs/pymc3/pull/4365."
        )
    if cores is None:
        cores = _cpu_count()

    if chains is None:
        chains = max(2, cores)
    elif chains == 1:
        cores = 1

    _log.info(
        f"Sampling {chains} chain{'s' if chains > 1 else ''} "
        f"in {cores} job{'s' if cores > 1 else ''}"
    )

    if random_seed == -1:
        random_seed = None
    if chains == 1 and isinstance(random_seed, int):
        random_seed = [random_seed]
    if random_seed is None or isinstance(random_seed, int):
        if random_seed is not None:
            np.random.seed(random_seed)
        random_seed = [np.random.randint(2 ** 30) for _ in range(chains)]
    if not isinstance(random_seed, Iterable):
        raise TypeError("Invalid value for `random_seed`. Must be tuple, list or int")

    params = (
        draws,
        start,
        rho,
        epsilon,
        model,
        frac_validate,
        alpha,
        verbose,
    )

    t1 = time.time()
    if parallel and chains > 1:
        loggers = [_log] + [None] * (chains - 1)
        pool = mp.Pool(cores)
        results = pool.starmap(
            sample_ns_nfmc_int, [(*params, random_seed[i], i, loggers[i]) for i in range(chains)]
        )

        pool.close()
        pool.join()
    else:
        results = []
        for i in range(chains):
            results.append(sample_ns_nfmc_int(*params, random_seed[i], i, _log))

    (
        traces,
        log_evidence,
        log_evidences,
        likelihood_logp_thresh,
    ) = zip(*results)
    trace = MultiTrace(traces)
    trace.report._n_draws = draws
    trace.report.log_evidence = np.array(log_evidence)
    trace.report._t_sampling = time.time() - t1

    return trace
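
The parallel branch fans the chains out with ``multiprocessing.Pool.starmap``, one argument tuple per chain. A stripped-down version of that pattern, with a hypothetical worker standing in for ``sample_ns_nfmc_int``:

import multiprocessing as mp

def worker(params, seed, chain_idx):
    # Stand-in for sample_ns_nfmc_int: does one chain's work.
    return (f"trace-{chain_idx}", seed)

if __name__ == "__main__":
    params = ("shared-settings",)
    seeds = [11, 22]
    with mp.Pool(2) as pool:
        results = pool.starmap(worker, [(params, seeds[i], i) for i in range(2)])
    traces, used_seeds = zip(*results)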
Code example #9
File: sample_smc.py  Project: themrzmaster/pymc3
def sample_smc(
    draws=2000,
    kernel="metropolis",
    n_steps=25,
    start=None,
    tune_steps=True,
    p_acc_rate=0.85,
    threshold=0.5,
    save_sim_data=False,
    save_log_pseudolikelihood=True,
    model=None,
    random_seed=-1,
    parallel=False,
    chains=None,
    cores=None,
):
    r"""
    Sequential Monte Carlo based sampling.

    Parameters
    ----------
    draws: int
        The number of samples to draw from the posterior (i.e. last stage). This is also the
        number of independent chains. Defaults to 2000.
    kernel: str
        Kernel method for the SMC sampler. Available options are ``metropolis`` (default) and
        ``ABC``. Use ``ABC`` for likelihood-free inference together with a ``pm.Simulator``.
    n_steps: int
        The number of steps of each Markov Chain. If ``tune_steps == True``, ``n_steps`` will be
        used for the first stage; for the others it will be determined automatically based on
        the acceptance rate and ``p_acc_rate``. The maximum number of steps is ``n_steps``.
    start: dict, or array of dict
        Starting point in parameter space. It should be a list of dict with length `chains`.
        When None (default) the starting point is sampled from the prior distribution.
    tune_steps: bool
        Whether to compute the number of steps automatically or not. Defaults to True
    p_acc_rate: float
        Used to compute ``n_steps`` when ``tune_steps == True``. The higher the value of
        ``p_acc_rate`` the higher the number of steps computed automatically. Defaults to 0.85.
        It should be between 0 and 1.
    threshold: float
        Determines the change of beta from stage to stage, i.e. indirectly the number of stages.
        The higher the value of ``threshold``, the higher the number of stages. Defaults to 0.5.
        It should be between 0 and 1.
    save_sim_data : bool
        Whether or not to save the simulated data. This parameter only works with the ABC kernel.
        The stored data corresponds to samples from the posterior predictive distribution.
    save_log_pseudolikelihood : bool
        Whether or not to save the log pseudolikelihood values. This parameter only works with the
        ABC kernel. The stored data can be used to compute LOO or WAIC values. Computing LOO/WAIC
        values from log pseudolikelihood values is experimental.
    model: Model (optional if in ``with`` context).
    random_seed: int
        random seed
    parallel: bool
        Distribute computations across cores if the number of cores is larger than 1.
        Defaults to False.
    cores : int
        The number of chains to run in parallel. If ``None``, set to the number of CPUs in the
        system.
    chains : int
        The number of chains to sample. Running independent chains is important for some
        convergence statistics. If ``None`` (default), then set to either ``cores`` or 2, whichever
        is larger.

    Notes
    -----
    SMC works by moving through successive stages. At each stage the inverse temperature
    :math:`\beta` is increased a little bit (starting from 0 up to 1). When :math:`\beta = 0`
    we have the prior distribution and when :math:`\beta = 1` we have the posterior distribution.
    So in more general terms we are always computing samples from a tempered posterior that we can
    write as:

    .. math::

        p(\theta \mid y)_{\beta} = p(y \mid \theta)^{\beta} p(\theta)

    A summary of the algorithm is:

     1. Initialize :math:`\beta` at zero and stage at zero.
     2. Generate N samples :math:`S_{\beta}` from the prior (because when :math:`\beta = 0` the
        tempered posterior is the prior).
     3. Increase :math:`\beta` in order to make the effective sample size equal some predefined
        value (we use :math:`Nt`, where :math:`t` is 0.5 by default).
     4. Compute a set of N importance weights W. The weights are computed as the ratio of the
        likelihoods of a sample at stage i+1 and stage i.
     5. Obtain :math:`S_{w}` by re-sampling according to W.
     6. Use W to compute the mean and covariance for the proposal distribution, a MVNormal.
     7. For stages other than 0 use the acceptance rate from the previous stage to estimate
        `n_steps`.
     8. Run N independent Metropolis-Hastings (IMH) chains (each one of length `n_steps`),
        starting each one from a different sample in :math:`S_{w}`. The samples are IMH because
        the proposal mean is the mean of the previous posterior stage, not the current point in
        parameter space.
     9. Repeat from step 3 until :math:`\beta \ge 1`.
     10. The final result is a collection of N samples from the posterior.


    References
    ----------
    .. [Minson2013] Minson, S. E. and Simons, M. and Beck, J. L., (2013),
        Bayesian inversion for finite fault earthquake source models I- Theory and algorithm.
        Geophysical Journal International, 2013, 194(3), pp.1701-1726,
        `link <https://gji.oxfordjournals.org/content/194/3/1701.full>`__

    .. [Ching2007] Ching, J. and Chen, Y. (2007).
        Transitional Markov Chain Monte Carlo Method for Bayesian Model Updating, Model Class
        Selection, and Model Averaging. J. Eng. Mech., 10.1061/(ASCE)0733-9399(2007)133:7(816),
        816-832. `link <http://ascelibrary.org/doi/abs/10.1061/%28ASCE%290733-9399
        %282007%29133:7%28816%29>`__
    """
    _log = logging.getLogger("pymc3")
    _log.info("Initializing SMC sampler...")

    model = modelcontext(model)
    if model.name:
        raise NotImplementedError(
            "The SMC implementation currently does not support named models. "
            "See https://github.com/pymc-devs/pymc3/pull/4365.")
    if cores is None:
        cores = _cpu_count()

    if chains is None:
        chains = max(2, cores)
    elif chains == 1:
        cores = 1

    _log.info(f"Sampling {chains} chain{'s' if chains > 1 else ''} "
              f"in {cores} job{'s' if cores > 1 else ''}")

    if random_seed == -1:
        random_seed = None
    if chains == 1 and isinstance(random_seed, int):
        random_seed = [random_seed]
    if random_seed is None or isinstance(random_seed, int):
        if random_seed is not None:
            np.random.seed(random_seed)
        random_seed = [np.random.randint(2**30) for _ in range(chains)]
    if not isinstance(random_seed, Iterable):
        raise TypeError(
            "Invalid value for `random_seed`. Must be tuple, list or int")

    if kernel.lower() == "abc":
        if len(model.observed_RVs) != 1:
            warnings.warn(
                "SMC-ABC only works properly with models with one observed variable"
            )
        if model.potentials:
            _log.info("Potentials will be added to the prior term")

    params = (
        draws,
        kernel,
        n_steps,
        start,
        tune_steps,
        p_acc_rate,
        threshold,
        save_sim_data,
        save_log_pseudolikelihood,
        model,
    )

    t1 = time.time()
    if parallel and chains > 1:
        loggers = [_log] + [None] * (chains - 1)
        pool = mp.Pool(cores)
        results = pool.starmap(sample_smc_int,
                               [(*params, random_seed[i], i, loggers[i])
                                for i in range(chains)])

        pool.close()
        pool.join()
    else:
        results = []
        for i in range(chains):
            results.append(sample_smc_int(*params, random_seed[i], i, _log))

    (
        traces,
        sim_data,
        log_marginal_likelihoods,
        log_pseudolikelihood,
        betas,
        accept_ratios,
        nsteps,
    ) = zip(*results)
    trace = MultiTrace(traces)
    trace.report._n_draws = draws
    trace.report._n_tune = 0
    trace.report.log_marginal_likelihood = np.array(log_marginal_likelihoods)
    trace.report.log_pseudolikelihood = log_pseudolikelihood
    trace.report.betas = betas
    trace.report.accept_ratios = accept_ratios
    trace.report.nsteps = nsteps
    trace.report._t_sampling = time.time() - t1

    if save_sim_data:
        return trace, {
            modelcontext(model).observed_RVs[0].name: np.array(sim_data)
        }
    else:
        return trace
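
Step 4 of the Notes computes importance weights as the likelihood ratio between successive temperatures; for the tempered posterior above this is W_i proportional to p(y|theta_i)^(beta_new - beta_old). A log-space NumPy sketch of that step:

import numpy as np

def smc_importance_weights(loglike, beta_new, beta_old):
    """Normalized weights for moving from temperature beta_old to beta_new."""
    logw = (beta_new - beta_old) * loglike     # log of the tempered likelihood ratio
    logw -= np.logaddexp.reduce(logw)          # normalize in log space
    return np.exp(logw)

# Hypothetical log-likelihoods for 4 particles:
w = smc_importance_weights(np.array([-1.0, -2.0, -0.5, -3.0]), 0.3, 0.1)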
Code example #10
def sample_nf_smc(
    draws=2000,
    start=None,
    threshold=0.5,
    frac_validate=0.1,
    iteration=5,
    alpha=(0, 0),
    k_trunc=0.25,
    pareto=False,
    epsilon=1e-3,
    local_thresh=3,
    local_step_size=0.1,
    local_grad=True,
    nf_local_iter=0,
    max_line_search=2,
    verbose=False,
    n_component=None,
    interp_nbin=None,
    KDE=True,
    bw_factor=0.5,
    edge_bins=None,
    ndata_wT=None,
    MSWD_max_iter=None,
    NBfirstlayer=True,
    logit=False,
    Whiten=False,
    batchsize=None,
    nocuda=False,
    patch=False,
    shape=[28, 28, 1],
    model=None,
    random_seed=-1,
    parallel=False,
    chains=None,
    cores=None,
):
    r"""
    Normalizing flow (SINF) based sequential Monte Carlo sampling.

    Parameters
    ----------
    draws: int
        The number of samples to draw from the posterior (i.e. last stage). This is also the
        number of independent chains. Defaults to 2000.
    start: dict, or array of dict
        Starting point in parameter space. It should be a list of dict with length `chains`.
        When None (default) the starting point is sampled from the prior distribution.
    threshold: float
        Determines the change of beta from stage to stage, i.e. indirectly the number of stages.
        The higher the value of ``threshold``, the higher the number of stages. Defaults to 0.5.
        It should be between 0 and 1.
    model: Model (optional if in ``with`` context).
    random_seed: int
        random seed
    parallel: bool
        Distribute computations across cores if the number of cores is larger than 1.
        Defaults to False.
    cores : int
        The number of chains to run in parallel. If ``None``, set to the number of CPUs in the
        system.
    chains : int
        The number of chains to sample. Running independent chains is important for some
        convergence statistics. If ``None`` (default), then set to either ``cores`` or 2, whichever
        is larger.

    Notes
    -----
    SMC works by moving through successive stages. At each stage the inverse temperature
    :math:`\beta` is increased a little bit (starting from 0 up to 1). When :math:`\beta = 0`
    we have the prior distribution and when :math:`\beta = 1` we have the posterior distribution.
    So in more general terms we are always computing samples from a tempered posterior that we can
    write as:

    .. math::

        p(\theta \mid y)_{\beta} = p(y \mid \theta)^{\beta} p(\theta)

    A summary of the algorithm is:

     1. Initialize :math:`\beta` at zero and stage at zero.
     2. Generate N samples :math:`S_{\beta}` from the prior (because when :math:`\beta = 0` the
        tempered posterior is the prior).
     3. Increase :math:`\beta` in order to make the effective sample size equal some predefined
        value (we use :math:`Nt`, where :math:`t` is 0.5 by default).
     4. Compute a set of N importance weights W. The weights are computed as the ratio of the
        likelihoods of a sample at stage i+1 and stage i.
     5. Obtain :math:`S_{w}` by re-sampling according to W.
     6. Use W to compute the mean and covariance for the proposal distribution, a MVNormal.
     7. For stages other than 0 use the acceptance rate from the previous stage to estimate
        `n_steps`.
     8. Run N independent Metropolis-Hastings (IMH) chains (each one of length `n_steps`),
        starting each one from a different sample in :math:`S_{w}`. The samples are IMH because
        the proposal mean is the mean of the previous posterior stage, not the current point in
        parameter space.
     9. Repeat from step 3 until :math:`\beta \ge 1`.
     10. The final result is a collection of N samples from the posterior.


    References
    ----------
    .. [Minson2013] Minson, S. E. and Simons, M. and Beck, J. L., (2013),
        Bayesian inversion for finite fault earthquake source models I- Theory and algorithm.
        Geophysical Journal International, 2013, 194(3), pp.1701-1726,
        `link <https://gji.oxfordjournals.org/content/194/3/1701.full>`__

    .. [Ching2007] Ching, J. and Chen, Y. (2007).
        Transitional Markov Chain Monte Carlo Method for Bayesian Model Updating, Model Class
        Selection, and Model Averaging. J. Eng. Mech., 10.1061/(ASCE)0733-9399(2007)133:7(816),
        816-832. `link <http://ascelibrary.org/doi/abs/10.1061/%28ASCE%290733-9399
        %282007%29133:7%28816%29>`__
    """
    _log = logging.getLogger("pymc3")
    _log.info("Initializing SMC+SINF sampler...")

    model = modelcontext(model)
    if model.name:
        raise NotImplementedError(
            "The SMC implementation currently does not support named models. "
            "See https://github.com/pymc-devs/pymc3/pull/4365.")
    if cores is None:
        cores = _cpu_count()

    if chains is None:
        chains = max(2, cores)
    elif chains == 1:
        cores = 1

    _log.info(f"Sampling {chains} chain{'s' if chains > 1 else ''} "
              f"in {cores} job{'s' if cores > 1 else ''}")

    if random_seed == -1:
        random_seed = None
    if chains == 1 and isinstance(random_seed, int):
        random_seed = [random_seed]
    if random_seed is None or isinstance(random_seed, int):
        if random_seed is not None:
            np.random.seed(random_seed)
        random_seed = [np.random.randint(2**30) for _ in range(chains)]
    if not isinstance(random_seed, Iterable):
        raise TypeError(
            "Invalid value for `random_seed`. Must be tuple, list or int")

    params = (
        draws,
        start,
        threshold,
        frac_validate,
        iteration,
        alpha,
        k_trunc,
        pareto,
        epsilon,
        local_thresh,
        local_step_size,
        local_grad,
        nf_local_iter,
        max_line_search,
        verbose,
        n_component,
        interp_nbin,
        KDE,
        bw_factor,
        edge_bins,
        ndata_wT,
        MSWD_max_iter,
        NBfirstlayer,
        logit,
        Whiten,
        batchsize,
        nocuda,
        patch,
        shape,
        model,
    )

    t1 = time.time()
    if parallel and chains > 1:
        loggers = [_log] + [None] * (chains - 1)
        pool = mp.Pool(cores)
        results = pool.starmap(sample_nf_smc_int,
                               [(*params, random_seed[i], i, loggers[i])
                                for i in range(chains)])

        pool.close()
        pool.join()
    else:
        results = []
        for i in range(chains):
            results.append(sample_nf_smc_int(*params, random_seed[i], i, _log))

    (
        traces,
        log_marginal_likelihood,
        q_samples,
        q_log_weights,
        betas,
    ) = zip(*results)
    trace = MultiTrace(traces)
    trace.report._n_draws = draws
    trace.report.log_marginal_likelihood = log_marginal_likelihood
    trace.report.q_samples = q_samples
    trace.report.q_log_weights = q_log_weights
    trace.report.betas = betas
    trace.report._t_sampling = time.time() - t1

    return trace
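
A hypothetical invocation of this fork's sampler, assuming ``sample_nf_smc`` is importable alongside pymc3; the argument names are taken from the signature above:

import pymc3 as pm

with pm.Model():
    pm.Normal('mu', 0.0, 1.0)
    trace = sample_nf_smc(draws=500, chains=2, random_seed=42, parallel=False)

print(trace.report.betas)                    # per-chain temperature schedules
print(trace.report.log_marginal_likelihood)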