def __init__(self, vars, nObs, T, N, observed_jumps, model=None):
        # DES Temp:
        self.logp = []
        self.nObs = nObs
        self.T = T
        self.N = N
        self.zeroIndices = np.roll(self.T.cumsum(), 1)
        self.zeroIndices[0] = 0
        # self.max_obs = max_obs

        model = modelcontext(model)
        vars = inputvars(vars)
        shared = make_shared_replacements(vars, model)

        super(ForwardS, self).__init__(vars, shared)

        self.observed_jumps = observed_jumps
        step_sizes = np.sort(np.unique(observed_jumps))
        self.step_sizes = step_sizes = step_sizes[step_sizes > 0]

        pi = stick_breaking.backward(self.shared["pi_stickbreaking"])
        lower = model.free_RVs[1].distribution.dist.lower
        upper = model.free_RVs[1].distribution.dist.upper
        Q = rate_matrix_one_way(lower, upper).backward(self.shared["Q_ratematrixoneway"])
        B0 = logodds.backward(self.shared["B0_logodds"])
        B = logodds.backward(self.shared["B_logodds"])
        X = self.shared["X"]

        # At this point the parameters are still symbolic, so we create a
        # get_params function to actually evaluate them.
        self.get_params = evaluate_symbolic_shared(pi, Q, B0, B, X)
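
evaluate_symbolic_shared is not shown on this page; a minimal sketch consistent with how it is called here (an assumption, not the repository's exact code) would compile the symbolic expressions into a single zero-argument Theano function:

    import theano

    def evaluate_symbolic_shared(*exprs):
        # Compile one function that evaluates every symbolic parameter at
        # once; get_params() then returns their current values as a list.
        return theano.function([], list(exprs))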
Example #2
    def setup_kernel(self):
        """Set up the likelihood logp function based on the chosen kernel."""
        shared = make_shared_replacements(self.variables, self.model)

        if self.kernel == "abc":
            factors = [var.logpt for var in self.model.free_RVs]
            factors += [tt.sum(factor) for factor in self.model.potentials]
            self.prior_logp_func = logp_forw([tt.sum(factors)], self.variables, shared)
            simulator = self.model.observed_RVs[0]
            distance = simulator.distribution.distance
            sum_stat = simulator.distribution.sum_stat
            self.likelihood_logp_func = PseudoLikelihood(
                simulator.distribution.epsilon,
                simulator.observations,
                simulator.distribution.function,
                [v.name for v in simulator.distribution.params],
                self.model,
                self.var_info,
                self.variables,
                distance,
                sum_stat,
                self.draws,
                self.save_sim_data,
                self.save_log_pseudolikelihood,
            )
        elif self.kernel == "metropolis":
            self.prior_logp_func = logp_forw([self.model.varlogpt], self.variables, shared)
            self.likelihood_logp_func = logp_forw([self.model.datalogpt], self.variables, shared)
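
As a usage sketch for the "abc" branch (assuming the pymc3 3.11 Simulator API; the model and data here are illustrative, not from this page):

    import numpy as np
    import pymc3 as pm

    data = np.random.normal(loc=0.0, scale=1.0, size=1000)

    def normal_sim(a, b):
        # Forward simulator: draws synthetic data for candidate parameters.
        return np.random.normal(a, b, 1000)

    with pm.Model():
        a = pm.Normal("a", mu=0, sigma=5)
        b = pm.HalfNormal("b", sigma=1)
        pm.Simulator("s", normal_sim, params=(a, b), sum_stat="sort",
                     epsilon=1, observed=data)
        trace = pm.sample_smc(kernel="ABC")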
Example #3
    def setup_logp(self):
        """Set up the prior and likelihood logp functions, and derivatives."""
        shared = make_shared_replacements(self.variables, self.model)

        self.prior_logp_func = logp_forw([self.model.varlogpt], self.variables,
                                         shared)
        self.prior_dlogp_func = logp_forw(
            [gradient(self.model.varlogpt, self.variables)], self.variables,
            shared)
        self.likelihood_logp_func = logp_forw([self.model.datalogpt],
                                              self.variables, shared)
        self.posterior_logp_func = logp_forw([self.model.logpt],
                                             self.variables, shared)
        self.posterior_dlogp_func = logp_forw(
            [gradient(self.model.logpt, self.variables)], self.variables,
            shared)
        self.posterior_hessian_func = logp_forw(
            [hessian(self.model.logpt, self.variables)], self.variables,
            shared)
        self.posterior_logp_nojac = logp_forw([self.model.logp_nojact],
                                              self.variables, shared)
        self.posterior_dlogp_nojac = logp_forw(
            [gradient(self.model.logp_nojact, self.variables)], self.variables,
            shared)
        self.posterior_hessian_nojac = logp_forw(
            [hessian(self.model.logp_nojact, self.variables)], self.variables,
            shared)
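
Every snippet here leans on the logp_forw helper. The pymc3 3.x implementation is roughly the following (a sketch reconstructed from the source; treat the exact body as an assumption):

    import theano
    from pymc3.theanof import join_nonshared_inputs

    def logp_forw(out_vars, vars, shared):
        # Join the free variables into one flat input vector and compile a
        # function mapping that vector to the requested log-prob outputs.
        out_list, inarray0 = join_nonshared_inputs(out_vars, vars, shared)
        f = theano.function([inarray0], out_list)
        f.trust_input = True
        return f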
Example #4
    def __init__(self,
                 vars=None,
                 num_particles=10,
                 max_stages=5000,
                 chunk="auto",
                 model=None):
        _log.warning("The BART model is experimental. Use with caution.")
        model = modelcontext(model)
        vars = inputvars(vars)
        self.bart = vars[0].distribution

        self.tune = True
        self.idx = 0
        self.iter = 0
        self.sum_trees = []
        self.chunk = chunk

        if chunk == "auto":
            self.chunk = max(1, int(self.bart.m * 0.1))
        self.bart.chunk = self.chunk
        self.num_particles = num_particles
        self.log_num_particles = np.log(num_particles)
        self.indices = list(range(1, num_particles))
        self.max_stages = max_stages
        self.old_trees_particles_list = []
        for i in range(self.bart.m):
            p = ParticleTree(self.bart.trees[i],
                             self.bart.prior_prob_leaf_node)
            self.old_trees_particles_list.append(p)

        shared = make_shared_replacements(vars, model)
        self.likelihood_logp = logp([model.datalogpt], vars, shared)
        super().__init__(vars, shared)
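
A usage sketch for the experimental BART variable this sampler targets (pymc3 3.11 API; X and Y are placeholder data):

    import numpy as np
    import pymc3 as pm

    X = np.random.uniform(0, 10, size=(100, 1))
    Y = np.sin(X[:, 0]) + np.random.normal(0, 0.1, size=100)

    with pm.Model():
        mu = pm.BART("mu", X, Y, m=50)       # sum of m regression trees
        sigma = pm.HalfNormal("sigma", 1.0)
        pm.Normal("y", mu=mu, sigma=sigma, observed=Y)
        trace = pm.sample()                  # PGBART is assigned to mu automatically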
Example #5
    def __init__(self,
                 vars=None,
                 covariance=None,
                 scaling=1.,
                 n_chains=100,
                 tune=True,
                 tune_interval=100,
                 model=None,
                 check_bound=True,
                 likelihood_name='like',
                 proposal_dist=MvNPd,
                 coef_variation=1.,
                 **kwargs):

        model = pm.modelcontext(model)

        if vars is None:
            vars = model.vars
        vars = pm.inputvars(vars)

        if covariance is None:
            self.covariance = np.eye(sum(v.dsize for v in vars))
        else:
            self.covariance = covariance
        self.scaling = np.atleast_1d(scaling)
        self.tune = tune
        self.check_bound = check_bound  # keep the flag; check_bnd below holds the compiled check
        self.tune_interval = tune_interval
        self.steps_until_tune = tune_interval

        self.proposal_dist = proposal_dist(self.covariance)
        self.proposal_samples_array = self.proposal_dist(n_chains)

        self.stage_sample = 0
        self.accepted = 0

        self.beta = 0
        self.stage = 0
        self.coef_variation = coef_variation
        self.n_chains = n_chains
        self.likelihoods = []
        self.likelihood_name = likelihood_name
        self.discrete = np.concatenate(
            [[v.dtype in pm.discrete_types] * (v.dsize or 1) for v in vars])
        self.any_discrete = self.discrete.any()
        self.all_discrete = self.discrete.all()

        # create initial population
        self.population = []
        self.array_population = np.zeros(n_chains)
        for i in range(self.n_chains):
            dummy = pm.Point({v.name: v.random() for v in vars}, model=model)
            self.population.append(dummy)

        shared = make_shared_replacements(vars, model)
        self.logp_forw = logp_forw(model.logpt, vars, shared)
        self.check_bnd = logp_forw(model.varlogpt, vars, shared)
        self.delta_logp = pm.metropolis.delta_logp(model.logpt, vars, shared)

        super(ATMCMC, self).__init__(vars, shared)
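
pm.metropolis.delta_logp compiles the log-posterior difference between a proposed flat point and the current one. Its pymc3 3.x implementation is approximately (a sketch, reconstructed from memory):

    import theano
    from pymc3.theanof import join_nonshared_inputs, CallableTensor

    def delta_logp(logp, vars, shared):
        [logp0], inarray0 = join_nonshared_inputs([logp], vars, shared)
        tensor_type = inarray0.type
        inarray1 = tensor_type("inarray1")       # the proposed point
        logp1 = CallableTensor(logp0)(inarray1)  # same graph evaluated at inarray1
        f = theano.function([inarray1, inarray0], logp1 - logp0)
        f.trust_input = True
        return f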
Example #6
    def __init__(self, vars=None, scaling=None, step_scale=0.25, is_cov=False,
                 model=None, blocked=True, use_single_leapfrog=False,
                 potential=None, integrator="leapfrog", **theano_kwargs):
        """Superclass to implement Hamiltonian/hybrid monte carlo

        Parameters
        ----------
        vars : list of theano variables
        scaling : array_like, ndim = {1,2}
            Scaling for momentum distribution. 1d arrays are interpreted as the
            matrix diagonal.
        step_scale : float, default=0.25
            Size of steps to take, automatically scaled down by 1/n**(1/4)
        is_cov : bool, default=False
            Treat scaling as a covariance matrix/vector if True, else treat it as a
            precision matrix/vector
        model : pymc3 Model instance, default=Context model
        blocked : bool, default=True
        use_single_leapfrog : bool, default=False
            If True, leapfrog integration takes a single step at a time.
        potential : Potential, optional
            An object that represents the Hamiltonian with `velocity`, `energy`,
            and `random` methods.
        **theano_kwargs: passed to theano functions
        """
        model = modelcontext(model)

        if vars is None:
            vars = model.cont_vars
        vars = inputvars(vars)

        if scaling is None and potential is None:
            scaling = model.test_point

        if isinstance(scaling, dict):
            scaling = guess_scaling(Point(scaling, model=model), model=model, vars=vars)

        if scaling is not None and potential is not None:
            raise ValueError("Can not specify both potential and scaling.")

        self.step_size = step_scale / (model.ndim ** 0.25)
        if potential is not None:
            self.potential = potential
        else:
            self.potential = quad_potential(scaling, is_cov, as_cov=False)

        shared = make_shared_replacements(vars, model)
        if theano_kwargs is None:
            theano_kwargs = {}

        self.H, self.compute_energy, self.compute_velocity, self.leapfrog, self.dlogp = get_theano_hamiltonian_functions(
            vars, shared, model.logpt, self.potential, use_single_leapfrog, integrator, **theano_kwargs)

        super(BaseHMC, self).__init__(vars, shared, blocked=blocked)
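
The step-size rule above is easy to check numerically; for a 16-dimensional model with the default step_scale:

    step_scale = 0.25
    ndim = 16
    step_size = step_scale / ndim ** 0.25  # 16 ** 0.25 == 2.0, so step_size == 0.125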
Example #7
    def __init__(self, vars, N, T, K, D, Dd, O, nObs, model=None):
        # DES Temp:
        self.logp = []
        self.N = N
        self.T = T
        self.K = K
        self.D = D
        self.Dd = Dd
        self.O = O
        self.nObs = nObs
        #self.max_obs = max_obs
        self.zeroIndices = np.roll(self.T.cumsum(),1)
        self.zeroIndices[0] = 0

        #self.pos_O_idx = np.zeros((D,max_obs,N), dtype=np.bool_)
        #for n in xrange(N):
        #    for t in xrange(self.T[n]):
        #        self.pos_O_idx[:,t,n] = np.in1d(np.arange(self.D), self.O[:,t,n])

        #self.OO = np.zeros((self.nObs,self.Dd),dtype=np.int)
        #self.OO = np.zeros((self.Dd,self.N,self.max_obs),dtype=np.int)
        self.negMask = np.zeros((self.nObs, D), dtype=int)
        #self.negMask = np.zeros((self.N,self.max_obs,D),dtype=np.int)
        for n in range(self.N):
            n0 = self.zeroIndices[n]
            for t in range(self.T[n]):
            #for t in range(self.max_obs):
                #self.OO[n0+t,:] = self.O[n0+t,:]
                self.negMask[n0+t, :] = 1 - np.in1d(np.arange(self.D), self.O[n0+t, :]).astype(int)
        self.posMask = (self.O != -1).astype(int)

        #self.betaMask = np.zeros((max_obs,N,2))
        #for n in range(self.N):
        #    self.betaMask[:(T[n]-1),n,:] = 1

        model = modelcontext(model)
        vars = inputvars(vars)
        shared = make_shared_replacements(vars, model)

        super(ForwardX, self).__init__(vars, shared)

        S = self.shared['S']
        B0 = logodds.backward(self.shared['B0_logodds'])
        B = logodds.backward(self.shared['B_logodds'])
       
        Z = model.vars[6].distribution.transform_used.backward(self.shared['Z_anchoredbeta'])
        #Z = anchoredbeta.backward(self.shared['Z_anchoredbeta'])
        #Z = logodds.backward(self.shared['Z_logodds'])
        L = logodds.backward(self.shared['L_logodds'])

        # At this point the parameters are still symbolic, so we create a
        # get_params function to actually evaluate them.
        self.get_params = evaluate_symbolic_shared(S, B0, B, Z, L)
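
The masking loop above appears to mark, per observation, which of the D codes were not observed (O uses -1 as padding); a standalone illustration of the np.in1d pattern, with made-up values:

    import numpy as np

    D = 4
    O_row = np.array([0, 2, -1])             # observed codes, -1 = padding
    present = np.in1d(np.arange(D), O_row)   # [True, False, True, False]
    neg_mask = 1 - present.astype(int)       # [0, 1, 0, 1] -> codes absent here
    pos_mask = (O_row != -1).astype(int)     # [1, 1, 0]    -> real entries in O_row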
Example #8
    def __init__(self, vars=None, scaling=None, step_scale=0.25, is_cov=False,
                 model=None, blocked=True, use_single_leapfrog=False, **theano_kwargs):
        """Superclass to implement Hamiltonian/hybrid monte carlo

        Parameters
        ----------
        vars : list of theano variables
        scaling : array_like, ndim = {1,2}
            Scaling for momentum distribution. 1d arrays are interpreted as the
            matrix diagonal.
        step_scale : float, default=0.25
            Size of steps to take, automatically scaled down by 1/n**(1/4)
        is_cov : bool, default=False
            Treat scaling as a covariance matrix/vector if True, else treat it as a
            precision matrix/vector
        model : pymc3 Model instance, default=Context model
        blocked : bool, default=True
        use_single_leapfrog : bool, default=False
            If True, leapfrog integration takes a single step at a time.
        **theano_kwargs: passed to theano functions
        """
        model = modelcontext(model)

        if vars is None:
            vars = model.cont_vars
        vars = inputvars(vars)

        if scaling is None:
            scaling = model.test_point

        if isinstance(scaling, dict):
            scaling = guess_scaling(Point(scaling, model=model), model=model, vars=vars)

        n = scaling.shape[0]
        self.step_size = step_scale / (n ** 0.25)
        self.potential = quad_potential(scaling, is_cov, as_cov=False)

        shared = make_shared_replacements(vars, model)
        if theano_kwargs is None:
            theano_kwargs = {}

        self.H, self.compute_energy, self.leapfrog, self._vars = get_theano_hamiltonian_functions(
            vars, shared, model.logpt, self.potential, use_single_leapfrog, **theano_kwargs)

        super(BaseHMC, self).__init__(vars, shared, blocked=blocked)
Example #9
    def setup_logp(self):
        """Set up the likelihood logp function based on the chosen kernel."""
        shared = make_shared_replacements(self.variables, self.model)

        self.prior_logp_func = logp_forw([self.model.varlogpt], self.variables,
                                         shared)
        self.likelihood_logp_func = logp_forw([self.model.datalogpt],
                                              self.variables, shared)
        self.posterior_logp_func = logp_forw([self.model.logpt],
                                             self.variables, shared)
        self.posterior_dlogp_func = logp_forw(
            [gradient(self.model.logpt, self.variables)], self.variables,
            shared)
        self.prior_dlogp_func = logp_forw(
            [gradient(self.model.varlogpt, self.variables)], self.variables,
            shared)
        self.likelihood_dlogp_func = logp_forw(
            [gradient(self.model.datalogpt, self.variables)], self.variables,
            shared)
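
The gradient helper used above comes from pymc3.theanof; it is roughly the following sketch (flattened Theano gradients joined into one vector; the exact body is an assumption):

    import theano.tensor as tt

    def gradient(f, vars):
        # Flat gradient of the scalar f with respect to each variable,
        # concatenated into a single 1-d vector.
        return tt.concatenate([tt.grad(f, v).flatten() for v in vars], axis=0)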
Example #10
    def __init__(self, vars=None, S=None, proposal_dist=NormalProposal, scaling=1.,
                 tune=True, tune_interval=1000, model=None, recipe=None, recipescale=0.001, **kwargs):

        model = modelcontext(model)

        if vars is None:
            vars = model.vars
        vars = inputvars(vars)

        if S is None:
            S = np.ones(sum(v.dsize for v in vars))
        self.proposal_dist = proposal_dist(S)
        self.scaling = np.atleast_1d(scaling)
        self.tune = tune
        self.tune_interval = tune_interval
        self.steps_until_tune = tune_interval
        self.recipe_vals = np.zeros(tune_interval)
        self.accepted = 0

        # Determine type of variables
        self.discrete = np.array([v.dtype in discrete_types for v in vars])
        self.any_discrete = self.discrete.any()
        self.all_discrete = self.discrete.all()

        shared = make_shared_replacements(vars, model)
        self.delta_logp = delta_logp(model.logpt, vars, shared)
        
        self.vars = vars
        self.shared = shared
        
        if recipe is None:
            # recipe defaults to None but is required below; fail early.
            raise ValueError("MetropolisExt requires a recipe with a `names` attribute")
        varlist = []
        tempdict = {}
        for k, v in self.shared.items():
            tempdict[k.name] = v
        for name in recipe.names:
            varlist.append(tempdict.get(name, None))
        self.varlist = varlist
        self.recipe = recipe
        self.recipescale = recipescale
        
        super(MetropolisExt, self).__init__(vars, shared)
Example #11
    def setup_kernel(self):
        """
        Set up the likelihood logp function based on the chosen kernel
        """
        shared = make_shared_replacements(self.variables, self.model)
        self.prior_logp = logp_forw([self.model.varlogpt], self.variables, shared)

        if self.kernel.lower() == "abc":
            warnings.warn(EXPERIMENTAL_WARNING)
            if len(self.model.observed_RVs) != 1:
                warnings.warn("SMC-ABC only works properly with models with one observed variable")
            simulator = self.model.observed_RVs[0]
            self.likelihood_logp = PseudoLikelihood(
                self.epsilon,
                simulator.observations,
                simulator.distribution.function,
                self.model,
                self.var_info,
                self.variables,
                self.dist_func,
                self.sum_stat,
            )
        elif self.kernel.lower() == "metropolis":
            self.likelihood_logp = logp_forw([self.model.datalogpt], self.variables, shared)
Example #12
    def __init__(self,
                 vars=None,
                 out_vars=None,
                 covariance=None,
                 scale=1.,
                 n_chains=100,
                 tune=True,
                 tune_interval=100,
                 model=None,
                 check_bound=True,
                 likelihood_name='like',
                 proposal_name='MultivariateNormal',
                 coef_variation=1.,
                 **kwargs):

        model = modelcontext(model)

        if vars is None:
            vars = model.vars

        vars = inputvars(vars)

        if out_vars is None:
            out_vars = model.unobserved_RVs

        out_varnames = [out_var.name for out_var in out_vars]

        self.scaling = np.atleast_1d(scale)

        if covariance is None and proposal_name == 'MultivariateNormal':
            self.covariance = np.eye(sum(v.dsize for v in vars))
            scale = self.covariance
        elif covariance is not None:
            # Honor a user-supplied covariance rather than silently ignoring it.
            self.covariance = covariance
            scale = covariance

        self.tune = tune
        self.check_bnd = check_bound
        self.tune_interval = tune_interval
        self.steps_until_tune = tune_interval

        self.proposal_name = proposal_name
        self.proposal_dist = choose_proposal(self.proposal_name, scale=scale)

        self.proposal_samples_array = self.proposal_dist(n_chains)

        self.stage_sample = 0
        self.accepted = 0

        self.beta = 0
        self.stage = 0
        self.chain_index = 0
        self.resampling_indexes = np.arange(n_chains)

        self.coef_variation = coef_variation
        self.n_chains = n_chains
        self.likelihoods = np.zeros(n_chains)

        self.likelihood_name = likelihood_name
        self._llk_index = out_varnames.index(likelihood_name)
        self.discrete = np.concatenate(
            [[v.dtype in discrete_types] * (v.dsize or 1) for v in vars])
        self.any_discrete = self.discrete.any()
        self.all_discrete = self.discrete.all()

        # create initial population
        self.population = []
        self.array_population = np.zeros(n_chains)
        for i in range(self.n_chains):
            dummy = pm.Point({v.name: v.random() for v in vars}, model=model)
            self.population.append(dummy)

        self.population[0] = model.test_point

        self.chain_previous_lpoint = copy.deepcopy(self.population)

        shared = make_shared_replacements(vars, model)
        self.logp_forw = logp_forw(out_vars, vars, shared)
        self.check_bnd = logp_forw([model.varlogpt], vars, shared)

        super(ATMCMC, self).__init__(vars, out_vars, shared)
Example #13
    def __init__(self,
                 vars=None,
                 batch_size=None,
                 total_size=None,
                 step_size=1.0,
                 model=None,
                 random_seed=None,
                 minibatches=None,
                 minibatch_tensors=None,
                 **kwargs):
        warnings.warn(EXPERIMENTAL_WARNING)

        model = modelcontext(model)

        if vars is None:
            vars = model.vars

        vars = inputvars(vars)

        self.model = model
        self.vars = vars
        self.batch_size = batch_size
        self.total_size = total_size
        # Both values feed expected_iter below; `or` would let one stay None.
        _value_error(
            total_size is not None and batch_size is not None,
            "total_size and batch_size of training data have to be specified",
        )
        self.expected_iter = int(total_size / batch_size)

        # set random stream
        self.random = None
        if random_seed is None:
            self.random = tt_rng()
        else:
            self.random = tt_rng(random_seed)

        self.step_size = step_size

        shared = make_shared_replacements(vars, model)

        self.updates = OrderedDict()
        self.q_size = int(sum(v.dsize for v in self.vars))

        flat_view = model.flatten(vars)
        self.inarray = [flat_view.input]

        self.dlog_prior = prior_dlogp(vars, model, flat_view)
        self.dlogp_elemwise = elemwise_dlogL(vars, model, flat_view)

        if minibatch_tensors is not None:
            _check_minibatches(minibatch_tensors, minibatches)
            self.minibatches = minibatches

            # Replace input shared variables with tensors
            def is_shared(t):
                return isinstance(t, theano.compile.sharedvalue.SharedVariable)

            tensors = [(t.type() if is_shared(t) else t)
                       for t in minibatch_tensors]
            updates = OrderedDict({
                t: t_
                for t, t_ in zip(minibatch_tensors, tensors) if is_shared(t)
            })
            self.minibatch_tensors = tensors
            self.inarray += self.minibatch_tensors
            self.updates.update(updates)

        self._initialize_values()
        super().__init__(vars, shared)
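
The is_shared swap above replaces Theano shared variables with fresh symbolic tensors of the same type so they can be fed as explicit inputs; a standalone illustration:

    import numpy as np
    import theano

    x_shared = theano.shared(np.zeros(3), name="x")
    x_tensor = x_shared.type()  # new symbolic variable, same dtype/broadcast pattern
    # x_tensor can now stand in for x_shared as a regular function input.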
Example #14
    def __init__(self,
                 vars=None,
                 scaling=None,
                 step_scale=0.25,
                 is_cov=False,
                 model=None,
                 blocked=True,
                 use_single_leapfrog=False,
                 potential=None,
                 integrator="leapfrog",
                 **theano_kwargs):
        """Superclass to implement Hamiltonian/hybrid monte carlo

        Parameters
        ----------
        vars : list of theano variables
        scaling : array_like, ndim = {1,2}
            Scaling for momentum distribution. 1d arrays are interpreted as the
            matrix diagonal.
        step_scale : float, default=0.25
            Size of steps to take, automatically scaled down by 1/n**(1/4)
        is_cov : bool, default=False
            Treat scaling as a covariance matrix/vector if True, else treat it as a
            precision matrix/vector
        model : pymc3 Model instance, default=Context model
        blocked : bool, default=True
        use_single_leapfrog : bool, default=False
            If True, leapfrog integration takes a single step at a time.
        potential : Potential, optional
            An object that represents the Hamiltonian with `velocity`, `energy`,
            and `random` methods.
        **theano_kwargs: passed to theano functions
        """
        model = modelcontext(model)

        if vars is None:
            vars = model.cont_vars
        vars = inputvars(vars)

        if scaling is None and potential is None:
            size = sum(np.prod(var.dshape, dtype=int) for var in vars)
            mean = floatX(np.zeros(size))
            var = floatX(np.ones(size))
            potential = QuadPotentialDiagAdapt(size, mean, var, 10)

        if isinstance(scaling, dict):
            point = Point(scaling, model=model)
            scaling = guess_scaling(point, model=model, vars=vars)

        if scaling is not None and potential is not None:
            raise ValueError("Can not specify both potential and scaling.")

        self.step_size = step_scale / (model.ndim**0.25)
        if potential is not None:
            self.potential = potential
        else:
            self.potential = quad_potential(scaling, is_cov)

        shared = make_shared_replacements(vars, model)
        if theano_kwargs is None:
            theano_kwargs = {}

        self.H, self.compute_energy, self.compute_velocity, self.leapfrog, self.dlogp = get_theano_hamiltonian_functions(
            vars, shared, model.logpt, self.potential, use_single_leapfrog,
            integrator, **theano_kwargs)

        super(BaseHMC, self).__init__(vars, shared, blocked=blocked)
Example #15
    def __init__(self, vars=None, out_vars=None, covariance=None, scale=1.,
                 n_chains=100, tune=True, tune_interval=100, model=None,
                 check_bound=True, likelihood_name='like', backend='csv',
                 proposal_name='MultivariateNormal', **kwargs):

        model = modelcontext(model)

        if vars is None:
            vars = model.vars

        vars = inputvars(vars)

        if out_vars is None:
            out_vars = model.unobserved_RVs

        out_varnames = [out_var.name for out_var in out_vars]

        self.scaling = utility.scalar2floatX(num.atleast_1d(scale))

        self.tune = tune
        self.check_bound = check_bound
        self.tune_interval = tune_interval
        self.steps_until_tune = tune_interval

        self.stage_sample = 0
        self.cumulative_samples = 0
        self.accepted = 0

        self.beta = 1.
        self.stage = 0
        self.chain_index = 0

        # needed to use the same parallel implementation function as for SMC
        self.resampling_indexes = num.arange(n_chains)
        self.n_chains = n_chains

        self.likelihood_name = likelihood_name
        self._llk_index = out_varnames.index(likelihood_name)
        self.backend = backend
        self.discrete = num.concatenate(
            [[v.dtype in discrete_types] * (v.dsize or 1) for v in vars])
        self.any_discrete = self.discrete.any()
        self.all_discrete = self.discrete.all()

        # create initial population
        self.population = []
        self.array_population = num.zeros(n_chains)
        logger.info('Creating initial population for {}'
                    ' chains ...'.format(self.n_chains))
        for i in range(self.n_chains):
            self.population.append(
                Point({v.name: v.random() for v in vars}, model=model))

        self.population[0] = model.test_point

        shared = make_shared_replacements(vars, model)
        self.logp_forw = logp_forw(out_vars, vars, shared)
        self.check_bnd = logp_forw([model.varlogpt], vars, shared)

        super(Metropolis, self).__init__(vars, out_vars, shared)

        # init proposal
        if covariance is None and proposal_name in multivariate_proposals:
            t0 = time()
            self.covariance = init_proposal_covariance(
                bij=self.bij, vars=vars, model=model, pop_size=1000)
            t1 = time()
            logger.info('Time for proposal covariance init: %f' % (t1 - t0))
            scale = self.covariance
        elif covariance is None:
            scale = num.ones(sum(v.dsize for v in vars))
        else:
            scale = covariance

        self.proposal_name = proposal_name
        self.proposal_dist = choose_proposal(
            self.proposal_name, scale=scale)
        self.proposal_samples_array = self.proposal_dist(n_chains)

        self.chain_previous_lpoint = [[]] * self.n_chains
        self._tps = None
Example #16
    def setup_logp(self):
        """Set up the prior and likelihood logp functions."""
        shared = make_shared_replacements(self.variables, self.model)

        self.prior_logp_func = logp_forw([self.model.varlogpt], self.variables, shared)
        self.likelihood_logp_func = logp_forw([self.model.datalogpt], self.variables, shared)
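
The prior/likelihood split exists because SMC anneals between the two targets. Downstream (an assumption consistent with pymc3's SMC, not code from this page) the tempered log-density is:

    def tempered_logp(prior_logp, likelihood_logp, beta):
        # beta = 0 targets the prior, beta = 1 the full posterior;
        # SMC raises beta in stages between the two.
        return prior_logp + beta * likelihood_logp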