def model(self, max_time_step):
        #print("in model")
        pyro.module("agentmodel", self)

        observation = reset_env()
        add = True
        # states = []
        # actions = []
        total_reward = torch.tensor(0.)
        for t in range(MAXTIME):
            prob = 0.5
            action = pyro.sample("action_{}".format(t), dist.Bernoulli(prob))
            action = round(action.item())
            #action = self.sample_action(observation, name="action_%d" %t)
            observation, reward, done, _ = env.step(action)

            if done and add:
                add = False

            if add:
                total_reward += reward * 10

            if done:
                break

        global episode
        episode += 1

        if total_reward < max_time_step * 0.5:
            total_reward = 0.01  # eliminate some “bad” simulations

        pyro.factor("Episode_{}".format(episode), total_reward * alpha)
Example #2
    def pyro_model(self, input, beta=1.0, name_prefix=""):
        # Inducing values p(u)
        with pyro.poutine.scale(scale=beta):
            prior_distribution = self.variational_strategy.prior_distribution
            prior_distribution = prior_distribution.to_event(
                len(prior_distribution.batch_shape))
            u_samples = pyro.sample(name_prefix + ".u", prior_distribution)

        # Include term for GPyTorch priors
        log_prior = torch.tensor(0.0,
                                 dtype=u_samples.dtype,
                                 device=u_samples.device)
        for _, prior, closure, _ in self.named_priors():
            log_prior.add_(prior.log_prob(closure()).sum())
        pyro.factor(name_prefix + ".log_prior", log_prior)

        # Include factor for added loss terms
        added_loss = torch.tensor(0.0,
                                  dtype=u_samples.dtype,
                                  device=u_samples.device)
        for added_loss_term in self.added_loss_terms():
            added_loss.add_(added_loss_term.loss())
        pyro.factor(name_prefix + ".added_loss", added_loss)

        # Draw samples from p(f)
        function_dist = self(input, prior=True)
        function_dist = pyro.distributions.Normal(
            loc=function_dist.mean, scale=function_dist.stddev).to_event(
                len(function_dist.event_shape) - 1)
        return function_dist.mask(False)
Example #3
    def model(self):
        #print("in model")
        pyro.module("agentmodel", self)

        observation = reset_env()
        add = True
        total_reward = torch.tensor(MAXTIME).float()
        for t in range(MAXTIME):
            prob = torch.tensor([1 / 3, 1 / 3, 1 / 3])
            action = pyro.sample("action_{}".format(t), dist.Categorical(prob))
            action = round(action.item())
            observation, reward, done, info = env.step(action)
            #total_reward += reward

            if done and t < MAXTIME - 1:
                #total_reward += 50
                break

        total_reward = np.abs(observation[0] - (-target_position)) * alpha
        total_reward -= t * 0.1

        global episode
        episode += 1

        pyro.factor("Episode_{}".format(episode), total_reward)
Example #4
def model(trajectory_sampler, actionSampler, imm_timestamp):
    total_reward = 0
    #observation = env.reset()
    env.state = init_state
    observation = init_state
    trajectory = torch.from_numpy(np.random.randint(-1, 0, total_timestamp))
    for t in range(imm_timestamp):
        #env.render()
        action = int(
            pyro.sample("action{}".format(observation), dist.Bernoulli(0.5)))
        if (trajectory_sampler):
            sample_traj = int(trajectory_sampler.sample()[t])
            if sample_traj > -1:
                action = sample_traj

        observation, reward, done, info = env.step(action)

        total_reward += reward
        trajectory[t] = action
        if done:
            print("exit at", t, end=" ")
            break
    global episode
    if total_reward < imm_timestamp * 0.96:
        total_reward = 0  # eliminate some “bad” simulations
    else:
        total_reward = total_reward / imm_timestamp
    print("reward", total_reward)
    pyro.factor("Episode_{}".format(episode), total_reward * alpha)
    episode += 1
    return trajectory
Example #5
    def guide(self, x, y=None):
        pyro.module("scanvi", self)
        with pyro.plate("batch",
                        len(x)), poutine.scale(scale=self.scale_factor):
            z2_loc, z2_scale, l_loc, l_scale = self.z2l_encoder(x)
            pyro.sample("l", dist.LogNormal(l_loc, l_scale).to_event(1))
            z2 = pyro.sample("z2", dist.Normal(z2_loc, z2_scale).to_event(1))

            y_logits = self.classifier(z2)
            y_dist = dist.OneHotCategorical(logits=y_logits)
            if y is None:
                # x is unlabeled so sample y using q(y|z2)
                y = pyro.sample("y", y_dist)
            else:
                # x is labeled so add a classification loss term
                # (this way q(y|z2) learns from both labeled and unlabeled data)
                classification_loss = y_dist.log_prob(y)
                # Note that the negative sign appears because we're adding this term in the guide
                # and the guide log_prob appears in the ELBO as -log q
                pyro.factor(
                    "classification_loss",
                    -self.alpha * classification_loss,
                    has_rsample=False,
                )

            z1_loc, z1_scale = self.z1_encoder(z2, y)
            pyro.sample("z1", dist.Normal(z1_loc, z1_scale).to_event(1))
Example #6
 def init(self, state, N):
     state["λ"] = sample("λ", Gamma(1., 1./1.))
     state["μ"] = sample("μ", Gamma(1., 1./0.5))
     f = (N-1)*log(tensor(2))
     for n in range(2, N+1):
         f -= log(tensor(n))
     factor("factor_orient_labeled", f)
Example #7
 def model():
     x = pyro.sample("x", dist.Normal(0, 1))
     pyro.sample("y", dist.Normal(x, 1), obs=torch.tensor(0.0))
     called.add("model-always")
     if poutine.get_mask() is not False:
         called.add("model-sometimes")
         pyro.factor("f", x + 1)
Example #8
    def model(self, max_time_step):
        pyro.module("agentmodel", self)
        self.states = []
        observation = reset_env()
        total_reward = torch.tensor(0.)
        for t in range(max_time_step):
            observation = torch.from_numpy(observation).float()
            state = observation

            prob = self.target_policy(observation)
            action = pyro.sample("action_{}".format(t), dist.Bernoulli(prob))
            action = round(action.item())
            observation, reward, done, _ = env.step(action)

            if done:
                reward = -10

            total_reward += reward
            self.states.append(state)

            if done:
                break

        global episode
        episode += 1

        if total_reward < 20:
            total_reward = 0  # eliminate some “bad” simulations

        pyro.factor("Episode_{}".format(episode), total_reward * alpha)
Example #9
    def model(self,*args, **kwargs):

        I, N = self._data['data'].shape

        batch = self._params['batch_size'] if self._params['batch_size'] else N

        weights = pyro.sample('mixture_weights', dist.Dirichlet((1 / self._params['K']) * torch.ones(self._params['K'])))
        cat_vector = torch.tensor(np.arange(self._params['hidden_dim']) + 1, dtype = torch.float)

        with pyro.plate('segments', I):
            segment_factor = pyro.sample('segment_factor', dist.Gamma(self._params['theta_scale'], self._params['theta_rate']))
            with pyro.plate('components', self._params['K']):
                cc = pyro.sample("CNV_probabilities", dist.Dirichlet(self.create_dirichlet_init_values()))

        with pyro.plate('data', N, batch):

            # p(x | z_i) = Poisson(marg(cc * theta * segment_factor))

            segment_fact_cat = torch.matmul(segment_factor.reshape([I, 1]), cat_vector.reshape([1, self._params['hidden_dim']]))
            segment_fact_marg = segment_fact_cat * cc
            segment_fact_marg = torch.sum(segment_fact_marg, dim=-1)

            # p(z_i | D, X) = lk(z_i) * p(z_i | X) / sum_z_i(lk(z_i) * p(z_i | X))
            # log(p(z_i | D, X)) = log(lk(z_i)) + log(p(z_i | X)) - log_sum_exp(log(lk(z_i)) + log(p(z_i | X)))

            pyro.factor("lk", self.likelihood(segment_fact_marg, weights, self._params['theta']))
Example #10
    def model(self):
        pyro.module("agentmodel", self)

        observation = reset_env()
        add = True
        total_reward = torch.tensor(MAXTIME).float()
        solve = False
        for t in range(MAXTIME):
            prob = torch.tensor([1/3, 1/3, 1/3])
            action = pyro.sample("action_{}".format(t), dist.Categorical(prob))
            action = round(action.item())
            observation, reward, done, info = env.step(action)
                
            if done:
                if (t < MAXTIME - 1):
                    solve = True
                break

        if solve:
            total_reward -= t
        else:
            total_reward = np.abs(observation[0] - (-target_position))

        global episode
        episode += 1
        
        pyro.factor("Episode_{}".format(episode), total_reward * alpha)
Example #11
 def model(log_factor):
     pyro.sample("z1", dist.Normal(0.0, 1.0))
     pyro.factor("f1", log_factor)
     pyro.sample("z2", dist.Normal(torch.zeros(2), torch.ones(2)).to_event(1))
     with pyro.plate("plate", 3):
         pyro.factor("f2", log_factor)
         pyro.sample("z3", dist.Normal(torch.zeros(3), torch.ones(3)))
Example #12
def trajectory_model_mdp(env, *, agent_model, factor_G=False):
    """trajectory_model_mdp

    A probabilistic program for MDP environment trajectories.  The sampled return
    can be used to affect the trace likelihood.

    :param env: OpenAI Gym environment
    :param agent_model: agent's probabilistic program
    :param factor_G: boolean; if True then apply $\\alpha G$ likelihood factor
    """
    env = deepcopy(env)

    # running return and discount factor
    return_, discount = 0.0, 1.0

    # with keep_state=True only the time-step used to name sites is being reset
    state = env.reset(keep_state=True)
    for t in itt.count():
        action = agent_model(f'A_{t}', env, state)
        state, reward, done, _ = env.step(action)

        # running return and discount factor
        return_ += discount * reward
        discount *= args.gamma

        if done:
            break

    pyro.sample('G', Delta(return_))

    if factor_G:
        pyro.factor('factor_G', args.alpha * return_)

    return return_
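The agent_model argument above can be any probabilistic program called as agent_model(f'A_{t}', env, state) that returns an action; here is a minimal hypothetical sketch (a uniform-random agent over a discrete action space) matching that call signature:

import torch
import pyro
import pyro.distributions as dist

def uniform_agent_model(name, env, state):
    # uniform prior over the discrete action space; the caller supplies the site name "A_{t}"
    action_probs = torch.ones(env.action_space.n)
    return pyro.sample(name, dist.Categorical(action_probs))

# e.g. trajectory_model_mdp(env, agent_model=uniform_agent_model, factor_G=True)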
Example #13
def trajectory_model_frozenlake(env, *, agent_model, factor_G=False):
    """trajectory_model_frozenlake

    A probabilistic program for the frozenlake environment trajectories.  The
    sampled return can be used to affect the trace likelihood.

    :param env: OpenAI Gym FrozenLake environment
    :param agent_model: agent's probabilistic program
    :param factor_G: boolean; if True then apply $\\alpha G$ likelihood factor
    """
    env = deepcopy(env)

    # running return and discount factor
    return_, discount = 0.0, 1.0
    for t in itt.count():
        action = agent_model(f'A_{t}', env, env.s)
        _, reward, done, _ = env.step(action.item())

        # running return and discount factor
        return_ += discount * reward
        discount *= args.gamma

        if done:
            break

    pyro.sample('G', Delta(torch.as_tensor(return_)))

    if factor_G:
        pyro.factor('factor_G', args.alpha * return_)

    return return_
Example #14
    def __exit__(self, *args):
        _coerce = COERCIONS.pop()
        assert _coerce is self._coerce
        super().__exit__(*args)

        if any(site["type"] == "sample" for site in self.trace.nodes.values()):
            name, log_prob, _, _ = self._get_log_prob()
            pyro.factor(name, log_prob.data)
Example #15
 def action_model(self, state):
     # draw a random action
     action = dist.Categorical(torch.tensor((0.5, 0.5))).sample()
     # calculate expected utility for the action
     expected_u = self.expected_utility(state, action)
     # add factor to the action
     pyro.factor("state_%saction_%d" % (state, action),
                 self.alpha * expected_u)
     return action
Example #16
 def init(self, state, N):
     state["λ_α"] = full((state._num_particles, ), 1.)
     state["λ_β"] = full((state._num_particles, ), 1. / 1.)
     state["μ_α"] = full((state._num_particles, ), 1.)
     state["μ_β"] = full((state._num_particles, ), 1. / 0.5)
     f = (N - 1) * log(tensor(2))
     for n in range(2, N + 1):
         f -= log(tensor(n))
     factor("factor_orient_labeled", f)
Example #17
    def _sample_aux_values(self, *, temperature: float) -> Dict[str, torch.Tensor]:
        funsor = _import_funsor()

        # Convert torch to funsor.
        particle_plates = frozenset(get_plates())
        plate_to_dim = self._funsor_plate_to_dim.copy()
        plate_to_dim.update({f.name: f.dim for f in particle_plates})
        factors = {}
        for d, inputs in self._funsor_factor_inputs.items():
            batch_shape = torch.Size(
                p.size for p in sorted(self._plates[d], key=lambda p: p.dim)
            )
            white_vec = deep_getattr(self.white_vecs, d)
            prec_sqrt = deep_getattr(self.prec_sqrts, d)
            factors[d] = funsor.gaussian.Gaussian(
                white_vec=white_vec.reshape(batch_shape + white_vec.shape[-1:]),
                prec_sqrt=prec_sqrt.reshape(batch_shape + prec_sqrt.shape[-2:]),
                inputs=inputs,
            )

        # Perform Gaussian tensor variable elimination.
        if temperature == 1:
            samples, log_prob = _try_possibly_intractable(
                funsor.recipes.forward_filter_backward_rsample,
                factors=factors,
                eliminate=self._funsor_eliminate,
                plates=frozenset(plate_to_dim),
                sample_inputs={f.name: funsor.Bint[f.size] for f in particle_plates},
            )

        else:
            samples, log_prob = _try_possibly_intractable(
                funsor.recipes.forward_filter_backward_precondition,
                factors=factors,
                eliminate=self._funsor_eliminate,
                plates=frozenset(plate_to_dim),
            )

            # Substitute noise.
            sample_shape = torch.Size(f.size for f in particle_plates)
            noise = torch.randn(sample_shape + log_prob.inputs["aux"].shape)
            noise.mul_(temperature)
            aux = funsor.Tensor(noise)[tuple(f.name for f in particle_plates)]
            with funsor.interpretations.memoize():
                samples = {k: v(aux=aux) for k, v in samples.items()}
                log_prob = log_prob(aux=aux)

        # Convert funsor to torch.
        if am_i_wrapped() and poutine.get_mask() is not False:
            log_prob = funsor.to_data(log_prob, name_to_dim=plate_to_dim)
            pyro.factor(f"_{self._pyro_name}_latent", log_prob, has_rsample=True)
        samples = {
            k: funsor.to_data(v, name_to_dim=plate_to_dim) for k, v in samples.items()
        }
        return samples
Example #18
def world_prior(num_objs, meaning_fn):
    prev_factor = torch.tensor(0.)
    world = []
    for i in range(num_objs):
        world.append(Obj("obj_{}".format(i)))
        new_factor = heuristic(meaning_fn(world))
        pyro.factor("factor_{}".format(i), new_factor - prev_factor)
        prev_factor = new_factor

    pyro.factor("factor_{}".format(num_objs), prev_factor * -1)
    return tuple(world)
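A small runnable check (the heuristic values below are hypothetical stand-ins for heuristic(meaning_fn(world))) of the bookkeeping above: each step factors in new_factor - prev_factor and the last factor subtracts the final value, so the factors telescope to zero on a complete world and only reweight partially built worlds during incremental inference.

import torch

def telescoped_factor_sum(heuristic_values):
    # heuristic_values[i] stands in for heuristic(meaning_fn(world[:i + 1]))
    prev_factor = torch.tensor(0.)
    total = torch.tensor(0.)
    for new_factor in heuristic_values:
        total += new_factor - prev_factor  # factor_{i}
        prev_factor = new_factor
    total += prev_factor * -1              # factor_{num_objs}
    return total

print(telescoped_factor_sum(torch.tensor([0.5, 1.5, 0.75])))  # tensor(0.)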
Example #19
 def step(self, state, branch, ρ=1.0):
     Δ = branch["t_beg"] - branch["t_end"]
     if branch['parent_id'] is None and Δ < 1e-5:
         return
     count_hs = sample(f"count_hs_{branch['id']}", Poisson(state["λ"] * Δ))
     f = vec_survives(branch["t_end"], branch["t_beg"], count_hs.numpy(), state["λ"].numpy(), state["μ"].numpy(), ρ)
     factor(f"factor_hs_{branch['id']}", f)
     sample(f"num_ex_{branch['id']}", Poisson(state["μ"] * Δ), obs=tensor(0))
     if branch["has_children"]:
         sample(f"spec_{branch['id']}", Exponential(state["λ"]), obs=tensor(1e-40))
     else:
         sample(f"obs_{branch['id']}", Bernoulli(ρ), obs=tensor(1.))
Example #20
def vectorized_model(args, data):
    # Sample global parameters.
    rate_s, prob_i, rho = global_model(args.population)

    # Sample reparameterizing variables.
    S_aux = pyro.sample(
        "S_aux",
        dist.Uniform(-0.5, args.population + 0.5).mask(False).expand(
            data.shape).to_event(1),
    )
    I_aux = pyro.sample(
        "I_aux",
        dist.Uniform(-0.5, args.population + 0.5).mask(False).expand(
            data.shape).to_event(1),
    )

    # Manually enumerate.
    S_curr, S_logp = quantize_enumerate(S_aux, min=0, max=args.population)
    I_curr, I_logp = quantize_enumerate(I_aux, min=0, max=args.population)
    # Truncate final value from the right then pad initial value onto the left.
    S_prev = torch.nn.functional.pad(S_curr[:-1], (0, 0, 1, 0),
                                     value=args.population - 1)
    I_prev = torch.nn.functional.pad(I_curr[:-1], (0, 0, 1, 0), value=1)
    # Reshape to support broadcasting, similar to EnumMessenger.
    T = len(data)
    Q = 4
    S_prev = S_prev.reshape(T, Q, 1, 1, 1)
    I_prev = I_prev.reshape(T, 1, Q, 1, 1)
    S_curr = S_curr.reshape(T, 1, 1, Q, 1)
    S_logp = S_logp.reshape(T, 1, 1, Q, 1)
    I_curr = I_curr.reshape(T, 1, 1, 1, Q)
    I_logp = I_logp.reshape(T, 1, 1, 1, Q)
    data = data.reshape(T, 1, 1, 1, 1)

    # Reverse the S2I,I2R computation.
    S2I = S_prev - S_curr
    I2R = I_prev - I_curr + S2I

    # Compute probability factors.
    S2I_logp = dist.ExtendedBinomial(S_prev,
                                     -(rate_s * I_prev).expm1()).log_prob(S2I)
    I2R_logp = dist.ExtendedBinomial(I_prev, prob_i).log_prob(I2R)
    obs_logp = dist.ExtendedBinomial(S2I, rho).log_prob(data)

    # Manually perform variable elimination.
    logp = S_logp + (I_logp + obs_logp) + S2I_logp + I2R_logp
    logp = logp.reshape(-1, Q * Q, Q * Q)
    logp = pyro.distributions.hmm._sequential_logmatmulexp(logp)
    logp = logp.reshape(-1).logsumexp(0)
    logp = logp - math.log(4)  # Account for S,I initial distributions.
    warn_if_nan(logp)
    pyro.factor("obs", logp)
Example #21
    def sample_guide(self):
        pyro.sample(
            f"{self._pyro_name}.weight",
            dist.Laplace(self.weight_loc, self.weight_scale).to_event(1),
        )
        bias = pyro.sample(
            f"{self._pyro_name}.bias",
            dist.Normal(self.bias_loc, self.bias_scale).to_event(1),
        )

        pyro.factor(
            f"{self._pyro_name}.monotonic_bias",
            -self.alpha * torch.clamp(bias[:-1] - bias[1:], 0).sum(),
        )
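A small numeric illustration (values hypothetical) of the monotonic_bias factor above: torch.clamp(bias[:-1] - bias[1:], 0) is nonzero only where the sampled bias vector decreases, so only decreasing steps contribute to the penalty that is scaled by alpha and factored in at the site.

import torch

bias = torch.tensor([0.10, 0.30, 0.20, 0.50])
penalty = torch.clamp(bias[:-1] - bias[1:], 0).sum()
print(penalty)  # tensor(0.1000): only the 0.30 -> 0.20 drop contributes
# the guide above then registers pyro.factor(..., -alpha * penalty) at the monotonic_bias site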
Example #22
    def model(self):

        # Sample Poissonian parameters and compute expected contribution

        mu_poiss = torch.zeros(torch.sum(~self.mask), dtype=torch.float64)

        for i_temp in torch.arange(self.n_poiss):
            norms_poiss = pyro.sample(self.labels_poiss[i_temp],
                                      self.poiss_priors[i_temp])

            if self.poiss_log_priors[i_temp]:
                norms_poiss = 10**norms_poiss.clone()
            mu_poiss += norms_poiss * self.poiss_temps[i_temp]

        # Sample non-Poissonian parameters

        thetas = []

        for i_ps in torch.arange(self.n_ps):
            theta_temp = [
                pyro.sample(
                    self.labels_ps_params[i_np_param] + "_" +
                    self.labels_ps[i_ps], self.ps_priors[i_ps][i_np_param])
                for i_np_param in torch.arange(self.n_ps_params)
            ]

            for i_p in torch.arange(len(theta_temp)):
                if self.ps_log_priors[i_ps][i_p]:
                    theta_temp[i_p] = 10**theta_temp[i_p]

            thetas.append(theta_temp)

        # Mark each pixel as conditionally independent
        with pyro.plate("data_plate",
                        len(self.data),
                        dim=-1,
                        subsample_size=self.subsample_size) as idx:

            # Use either the non-Poissonian (if there is at least one NP model) or Poissonian likelihood
            if self.n_ps != 0:
                log_likelihood = log_like_np(mu_poiss[idx], thetas,
                                             self.ps_temps[:, idx],
                                             self.data[idx], self.f_ary,
                                             self.df_rho_div_f_ary)
                pyro.factor("obs", log_likelihood)
            else:
                pyro.sample("obs",
                            dist.Poisson(mu_poiss[idx]),
                            obs=self.data[idx])
Example #23
 def model(self, time_step, actions, states, reward, rewards, epoch=False):
     '''
     This model takes the data of a whole trajectory from simulation and replay.
     '''
     pyro.module("agentmodel", self)
     global episode
     episode += 1
     for i in range(len(states)):
         prob = self.target_policy(states[i])
         action = pyro.sample("action_%d" % i, dist.Bernoulli(prob))
         a = 1 if int(actions[i]) == int(action) else -1
         pyro.factor("Episode_{}_{}".format(episode, i),
                     rewards[i] * alpha * a)
     pyro.factor("Episode_{}".format(episode), reward *
                 alpha)  # factor by the total reward of the trajectory
Example #24
    def model(self):
        self.set_mode("model")

        # W = (inv(Luu) @ Kuf).T
        # Qff = Kfu @ inv(Kuu) @ Kuf = W @ W.T
        # Formulas for each approximation method are
        # DTC:  y_cov = Qff + noise,                   trace_term = 0
        # FITC: y_cov = Qff + diag(Kff - Qff) + noise, trace_term = 0
        # VFE:  y_cov = Qff + noise,                   trace_term = tr(Kff-Qff) / noise
        # y_cov = W @ W.T + D
        # trace_term is added into log_prob

        N = self.X.size(0)
        M = self.Xu.size(0)
        Kuu = self.kernel(self.Xu).contiguous()
        Kuu.view(-1)[::M + 1] += self.jitter  # add jitter to the diagonal
        Luu = Kuu.cholesky()
        Kuf = self.kernel(self.Xu, self.X)
        W = Kuf.triangular_solve(Luu, upper=False)[0].t()

        D = self.noise.expand(N)
        if self.approx == "FITC" or self.approx == "VFE":
            Kffdiag = self.kernel(self.X, diag=True)
            Qffdiag = W.pow(2).sum(dim=-1)
            if self.approx == "FITC":
                D = D + Kffdiag - Qffdiag
            else:  # approx = "VFE"
                trace_term = (Kffdiag - Qffdiag).sum() / self.noise
                trace_term = trace_term.clamp(min=0)

        zero_loc = self.X.new_zeros(N)
        f_loc = zero_loc + self.mean_function(self.X)
        if self.y is None:
            f_var = D + W.pow(2).sum(dim=-1)
            return f_loc, f_var
        else:
            if self.approx == "VFE":
                pyro.factor(self._pyro_get_fullname("trace_term"),
                            -trace_term / 2.)

            return pyro.sample(
                self._pyro_get_fullname("y"),
                dist.LowRankMultivariateNormal(f_loc, W, D).expand_by(
                    self.y.shape[:-1]).to_event(self.y.dim() - 1),
                obs=self.y)
Example #25
def softmax_agent_model(env):
    """softmax_agent_model

    Softmax agent model; performs inference to estimate $Q^\pi(s, a)$, then
    uses pyro.factor to modify the trace log-likelihood.

    :param env: OpenAI Gym environment
    """
    policy_probs = torch.ones(env.state_space.n, env.action_space.n)
    policy_vector = pyro.sample('policy_vector', Categorical(policy_probs))

    inference = Importance(trajectory_model, num_samples=args.num_samples)
    posterior = inference.run(env, lambda state: policy_vector[state])
    Q = EmpiricalMarginal(posterior, 'G').mean

    pyro.factor('factor_Q', args.alpha * Q)

    return policy_vector
Example #26
    def model(self, state, timeLeft, cu_utility=0):
        if (not timeLeft) or (cu_utility > 0): # cu_utility > 0 implies the destination has been reached
            pyro.factor("state_{}_{}".format(state, str(uuid.uuid1())), self.alpha * cu_utility)
            if timeLeft:
                ignore = torch.tensor([-1, -1]) # invalid state tensor, used to keep dimensions consistent
                return ignore.repeat(timeLeft, 1)
            else: # no time left
                return torch.Tensor()
        # sample an action
        possibleActions = self.get_actions(state)
        num_choices = len(possibleActions)
        action_index = dist.Categorical(torch.tensor([1 / num_choices for _ in range(num_choices)])).sample()
        action = possibleActions[action_index]

        next_state = self.transition(state, action)
        utility = self.utility(state)
        cu_utility += utility
        return torch.cat((state.unsqueeze(0), self.model(next_state, timeLeft - 1, cu_utility)))
Example #27
 def step(self, state, branch, ρ=1.0):
     Δ = branch["t_beg"] - branch["t_end"]
     if branch['parent_id'] is None and Δ == 0:
         return
     count_hs = sample(f"count_hs_{branch['id']}", Poisson(state["λ"] * Δ))
     f = zeros(state._num_particles)
     for n in range(state._num_particles):
         for i in range(int(count_hs[n])):
             t = Uniform(branch["t_end"], branch["t_beg"]).sample()
             if self.survives(t, state["λ"][n], state["μ"][n], ρ):
                 f[n] = -float('inf')
                 break
             f[n] += log(tensor(2))
     factor(f"factor_hs_{branch['id']}", f)
     sample(f"num_ex_{branch['id']}", Poisson(state["μ"] * Δ), obs=tensor(0))
     if branch["has_children"]:
         sample(f"spec_{branch['id']}", Exponential(state["λ"]), obs=tensor(1e-40))
     else:
         sample(f"obs_{branch['id']}", Bernoulli(ρ), obs=tensor(1.))
Example #28
def softmax_agent_model(t, env, *, trajectory_model):
    """softmax_agent_model

    Softmax agent model; performs inference to estimate $Q^\pi(s, a)$, then
    uses pyro.factor to modify the trace log-likelihood.

    :param t: time-step
    :param env: OpenAI Gym environment
    :param trajectory_model: trajectory probabilistic program
    """
    action_probs = torch.ones(env.action_space.n)
    action = pyro.sample(f'A_{t}', Categorical(action_probs))

    inference = Importance(trajectory_model, num_samples=args.num_samples)
    posterior = inference.run(t, env, action)
    Q = EmpiricalMarginal(posterior, f'G_{t}').mean

    pyro.factor(f'softmax_{t}', args.alpha * Q)

    return action
Example #29
    def _pyro_barrier(self, msg):
        # Get log_prob and record factor.
        name, log_prob, log_joint, sampled_vars = self._get_log_prob()
        self._block = True
        pyro.factor(name, log_prob.data)
        self._block = False

        # Sample
        if sampled_vars:
            samples = log_joint.sample(sampled_vars)
            deltas = _extract_deltas(samples)
            samples = {name: point.data for name, (point, _) in deltas.terms}
        else:
            samples = {}

        # Update value.
        assert len(msg["args"]) == 1
        value = msg["args"][0]
        value = _substitute(value, samples)
        msg["value"] = value
Example #30
 def step(self, state, branch, ρ=1.0):
     Δ = branch["t_beg"] - branch["t_end"]
     if branch['parent_id'] is None and Δ < 1e-5:
         return
     count_hs = sample(f"count_hs_{branch['id']}",
                       GammaPoisson(state["λ_α"], state["λ_β"] / Δ))
     state["λ_α"] += count_hs
     state["λ_β"] += Δ
     f = vec_survives(branch["t_end"], branch["t_beg"], count_hs.numpy(),
                      state["λ_α"].numpy(), state["λ_β"].numpy(),
                      state["μ_α"].numpy(), state["μ_β"].numpy(), ρ)
     factor(f"factor_hs_{branch['id']}", f)
     sample(f"num_ex_{branch['id']}",
            GammaPoisson(state["μ_α"], state["μ_β"] / Δ),
            obs=tensor(0))
     state["μ_β"] += Δ
     if branch["has_children"]:
         factor(f"spec_{branch['id']}",
                log(state["λ_α"]) - log(state["λ_β"]))
         state["λ_α"] += 1
     else:
         sample(f"obs_{branch['id']}", Bernoulli(ρ), obs=tensor(1.))