Example #1
# The snippets below assume the usual aliases: torch, numpy as np, pyro, and
# pyro.distributions as dist (pyd in a few examples).
def model(home_id, away_id, score1_obs=None, score2_obs=None):
    # priors
    alpha = pyro.sample("alpha", dist.Normal(0.0, 1.0))
    sd_att = pyro.sample(
        "sd_att",
        # FoldedTransform does not exist in Pyro; FoldedDistribution yields the
        # intended half-Student-t prior.
        dist.FoldedDistribution(dist.StudentT(3.0, 0.0, 2.5)),
    )
    sd_def = pyro.sample(
        "sd_def",
        dist.FoldedDistribution(dist.StudentT(3.0, 0.0, 2.5)),
    )

    home = pyro.sample("home", dist.Normal(0.0, 1.0))  # home advantage

    nt = len(np.unique(home_id))

    # team-specific model parameters
    with pyro.plate("plate_teams", nt):
        attack = pyro.sample("attack", dist.Normal(0, sd_att))
        defend = pyro.sample("defend", dist.Normal(0, sd_def))

    # likelihood
    theta1 = torch.exp(alpha + home + attack[home_id] - defend[away_id])
    theta2 = torch.exp(alpha + attack[away_id] - defend[home_id])

    with pyro.plate("data", len(home_id)):
        pyro.sample("s1", dist.Poisson(theta1), obs=score1_obs)
        pyro.sample("s2", dist.Poisson(theta2), obs=score2_obs)
Example #2
    def gen_publications(self):
        multiplier = pyro.sample(self.namegen("pub_multiplier"),
                                 pyd.Poisson(15)).item()
        return pyro.sample(
            self.namegen("number_publication"),
            pyd.Poisson((multiplier + 1) * self.iq / 100.0),
        ).item()
Example #3
    def model(self, home_team, away_team):

        sigma_a = pyro.sample("sigma_a", dist.HalfNormal(1.0))
        sigma_b = pyro.sample("sigma_b", dist.HalfNormal(1.0))
        mu_b = pyro.sample("mu_b", dist.Normal(0.0, 1.0))
        rho_raw = pyro.sample("rho_raw", dist.Beta(2, 2))
        rho = pyro.deterministic("rho", 2.0 * rho_raw - 1.0)

        log_gamma = pyro.sample("log_gamma", dist.Normal(0, 1))

        with pyro.plate("teams", self.n_teams):
            abilities = pyro.sample(
                "abilities",
                dist.MultivariateNormal(
                    torch.tensor([0.0, mu_b]),
                    covariance_matrix=torch.tensor(
                        [
                            [sigma_a ** 2.0, rho * sigma_a * sigma_b],
                            [rho * sigma_a * sigma_b, sigma_b ** 2.0],
                        ]
                    ),
                ),
            )

        log_a = abilities[:, 0]
        log_b = abilities[:, 1]
        home_inds = torch.tensor([self.team_to_index[team] for team in home_team])
        away_inds = torch.tensor([self.team_to_index[team] for team in away_team])
        home_rate = torch.exp(log_a[home_inds] + log_b[away_inds] + log_gamma)
        away_rate = torch.exp(log_a[away_inds] + log_b[home_inds])

        pyro.sample("home_goals", dist.Poisson(home_rate))
        pyro.sample("away_goals", dist.Poisson(away_rate))
Example #4
    def setUp(self):
        # torch.autograd.Variable is deprecated; plain tensors behave the same
        self.lam = torch.tensor([2.0, 4.5, 3.0, 5.1])
        self.dim = 4
        self.batch_lam = torch.tensor([[2.0, 4.5, 3.0, 5.1],
                                       [6.0, 3.2, 1.0, 4.0]])
        self.test_data = torch.tensor([0.0, 1.0, 2.0, 4.0])
        self.batch_test_data = torch.tensor([[0.0, 1.0, 2.0, 4.0],
                                             [4.0, 1.0, 2.0, 4.0]])
        self.g = dist.Poisson(self.lam)
        self.batch_g = dist.Poisson(self.batch_lam)
Example #5
def test_gamma_poisson(hyperpriors):
    def model(data):
        with pyro.plate("latent_dim", data.shape[1]):
            alpha = (
                pyro.sample("alpha", dist.HalfCauchy(1.0))
                if hyperpriors
                else torch.tensor([1.0, 1.0])
            )
            beta = (
                pyro.sample("beta", dist.HalfCauchy(1.0))
                if hyperpriors
                else torch.tensor([1.0, 1.0])
            )
            gamma_poisson = GammaPoissonPair()
            rate = pyro.sample("rate", gamma_poisson.latent(alpha, beta))
            with pyro.plate("data", data.shape[0]):
                pyro.sample("obs", gamma_poisson.conditional(rate), obs=data)

    true_rate = torch.tensor([3.0, 10.0])
    num_samples = 100
    data = dist.Poisson(rate=true_rate).sample(sample_shape=torch.Size((100,)))
    hmc_kernel = NUTS(
        collapse_conjugate(model), jit_compile=True, ignore_jit_warnings=True
    )
    mcmc = MCMC(hmc_kernel, num_samples=num_samples, warmup_steps=50)
    mcmc.run(data)
    samples = mcmc.get_samples()
    posterior = posterior_replay(model, samples, data, num_samples=num_samples)
    assert_equal(posterior["rate"].mean(0), true_rate, prec=0.3)
Example #6
    def model(self, data, demand):
        coef = {}

        for s in self.features['station']['names']:
            coef[s] = pyro.sample(s, dist.Normal(0, 1))

        for h in self.features['hour']['names']:
            for d in self.features['daytype']['names']:
                name = h + '_' + d
                coef[name] = pyro.sample(name, dist.Normal(0, 1))

        log_lmbda = 0
        for i in range(len(self.features['station']['names'])):
            name = self.features['station']['names'][i]
            index = self.features['station']['index'][i]
            log_lmbda += coef[name] * data[:, index]

        for h in range(len(self.features['hour']['names'])):
            for d in range(len(self.features['daytype']['names'])):
                h_name = self.features['hour']['names'][h]
                h_index = self.features['hour']['index'][h]
                d_name = self.features['daytype']['names'][d]
                d_index = self.features['daytype']['index'][d]
                log_lmbda += coef[h_name + '_' + d_name] * \
                    data[:, h_index] * data[:, d_index]

        lmbda = log_lmbda.exp()

        with pyro.plate("data", len(data)):
            pyro.sample("obs", dist.Poisson(lmbda), obs=demand)

            return lmbda
Example #7
    def model(self, *args, **kwargs):
        I, N = self._data['data'].shape
        # use the configured batch size if given, otherwise the full data size
        batch = self._params['batch_size'] if self._params['batch_size'] else N
        weights = pyro.sample(
            'mixture_weights',
            dist.Dirichlet(
                (1 / self._params['K']) * torch.ones(self._params['K'])))
        with pyro.plate('segments', I):
            with pyro.plate('components', self._params['K']):
                cnv_probs = pyro.sample(
                    "cnv_probs",
                    dist.Dirichlet(self._params['probs'] *
                                   torch.ones(self._params['hidden_dim'])))

        with pyro.plate("data2", N, batch):
            theta = pyro.sample(
                'norm_factor',
                dist.Gamma(self._params['theta_scale'],
                           self._params['theta_rate']))

        with pyro.plate('data', N, batch):
            assignment = pyro.sample('assignment',
                                     dist.Categorical(weights),
                                     infer={"enumerate": "parallel"})
            for i in pyro.plate('segments2', I):
                cc = pyro.sample('copy_number_{}'.format(i),
                                 dist.Categorical(
                                     Vindex(cnv_probs)[assignment, i, :]),
                                 infer={"enumerate": "parallel"})
                pyro.sample('obs_{}'.format(i),
                            dist.Poisson((cc * theta * self._data['mu'][i]) +
                                         1e-8),
                            obs=self._data['data'][i, :])
Example #8
def test_reparam_stable():
    data = dist.Poisson(torch.randn(8).exp()).sample()

    @poutine.reparam(config={
        "dz": LatentStableReparam(),
        "y": LatentStableReparam()
    })
    def model():
        stability = pyro.sample("stability", dist.Uniform(1.0, 2.0))
        trans_skew = pyro.sample("trans_skew", dist.Uniform(-1.0, 1.0))
        obs_skew = pyro.sample("obs_skew", dist.Uniform(-1.0, 1.0))
        scale = pyro.sample("scale", dist.Gamma(3, 1))

        # We use separate plates because the .cumsum() op breaks independence.
        with pyro.plate("time1", len(data)):
            dz = pyro.sample("dz", dist.Stable(stability, trans_skew))
        z = dz.cumsum(-1)
        with pyro.plate("time2", len(data)):
            y = pyro.sample("y", dist.Stable(stability, obs_skew, scale, z))
            pyro.sample("x", dist.Poisson(y.abs()), obs=data)

    guide = AutoDelta(model)
    svi = SVI(model, guide, optim.Adam({"lr": 0.01}), Trace_ELBO())
    for step in range(100):
        loss = svi.step()
        if step % 20 == 0:
            logger.info("step {} loss = {:0.4g}".format(step, loss))
Example #9
def model(data, params):
    # initialize data
    N = data["N"]
    y = data["y"]
    roach1 = data["roach1"]
    treatment = data["treatment"]
    senior = data["senior"]
    log_expo = data["log_expo"]

    # init parameters
    beta = params["beta"]

    # model block
    # Tau is the global precision
    # tau = pyro.sample('tau', dist.Gamma(concentration=0.001, rate=0.001))
    # sigma = tau.sqrt().reciprocal()

    # NOTE: Had to make a change from the Stan model here!
    sigma = pyro.sample('sigma', dist.HalfCauchy(2.5))

    with pyro.plate("data", N):
        # NOTE: the zeros_like is here since tau might be in plate
        lambdah = pyro.sample(
            'lambda', dist.Normal(loc=torch.zeros_like(sigma), scale=sigma))
        log_rate = lambdah + log_expo + beta[..., 0] + beta[
            ..., 1] * roach1 + beta[..., 2] * senior + beta[..., 3] * treatment

        # NOTE: Do we have to worry about under-/overflow here due to exp?
        pyro.sample('y', dist.Poisson(rate=log_rate.exp() + 1e-8), obs=y)
Example #10
def log_linear_mm_model(num_sites,
                        num_days,
                        num_predictors,
                        num_clusters,
                        predictors,
                        data=None):
    weight = pyro.sample("weights", dist.Dirichlet(torch.ones(num_clusters)))
    with plate("beta_components", num_predictors):
        with plate("beta_clusters", num_clusters):
            betas = pyro.sample(
                "betas",
                dist.Normal(torch.zeros(num_clusters, num_predictors), 2),
            )

    log_thetas = predictors @ betas.T
    with plate("epsilons", num_sites):
        epsilon = pyro.sample("epsilon", dist.Normal(0, 5))

    epsilon = (epsilon.unsqueeze(-1) *
               torch.ones(num_sites, num_days)).unsqueeze(-1)
    thetas = torch.exp(log_thetas + epsilon)
    with plate("sites", num_sites):
        assignments = pyro.sample("assignments", dist.Categorical(weight))
        # selection = (assignments * torch.ones(num_days,1)).T.long()
        a = (torch.arange(num_sites).expand(num_days,
                                            num_sites).permute(1, 0).long())
        b = torch.arange(num_days).expand(num_sites, num_days).long()
        select = Vindex(thetas)[a, b, assignments.unsqueeze(-1)]
        accidents = pyro.sample("accidents",
                                dist.Poisson(select).to_event(1),
                                obs=data)

    return accidents
Example #11
    def matrix_factorization_poisson(anime_matrix_train, k=k):

        m = anime_matrix_train.shape[0]
        n = anime_matrix_train.shape[1]

        u_mean = torch.zeros([m, k])
        u_sigma = torch.ones([m, k]) * sigma_u

        v_mean = torch.zeros([n, k])
        v_sigma = torch.ones([n, k]) * sigma_v

        u = pyro.sample("u", dist.Normal(
            loc=u_mean, scale=u_sigma).to_event(2))
        v = pyro.sample("v", dist.Normal(
            loc=v_mean, scale=v_sigma).to_event(2))

        expectation = torch.mm(u, v.t())
        # clamp to keep all rates positive for the Poisson likelihood
        expectation = torch.clamp(expectation, min=0.01)
        is_observed = torch.tensor(~np.isnan(anime_matrix_train))
        valid_matrix = torch.tensor(anime_matrix_train).clone()
        valid_matrix[~is_observed] = 0  # placeholder for missing entries
        # round all observed values to non-negative integers
        valid_matrix = torch.round(valid_matrix)

        with pyro.plate("user", m, dim=-2):
            with pyro.plate("anime", n, dim=-3):
                with pyro.poutine.mask(mask=is_observed):
                    pyro.sample("obs", dist.Poisson(expectation),
                                obs=valid_matrix)
Example #12
    def model(self, *args, **kwargs):
        I, N = self._data['data'].shape
        # use the configured batch size if given, otherwise the full data size
        batch = self._params['batch_size'] if self._params['batch_size'] else N
        weights = pyro.sample('mixture_weights',
                              dist.Dirichlet(torch.ones(self._params['K'])))

        with pyro.plate('components', self._params['K']):
            probs_z = pyro.sample(
                "cnv_probs",
                dist.Dirichlet(self._params['t'] *
                               torch.eye(self._params['hidden_dim']) +
                               (1 - self._params['t'])).to_event(1))

        with pyro.plate("data2", N, batch):
            theta = pyro.sample(
                'norm_factor',
                dist.Gamma(self._params['theta_scale'],
                           self._params['theta_rate']))

        with pyro.plate('data', N, batch):
            z = 0
            assignment = pyro.sample('assignment',
                                     dist.Categorical(weights),
                                     infer={"enumerate": "parallel"})
            for i in pyro.markov(range(I)):
                z = pyro.sample("z_{}".format(i),
                                dist.Categorical(
                                    Vindex(probs_z)[assignment, z]),
                                infer={"enumerate": "parallel"})
                pyro.sample('obs_{}'.format(i),
                            dist.Poisson((z * theta * self._data['mu'][i]) +
                                         1e-8),
                            obs=self._data['data'][i, :])
Example #13
    def model(self, *args, **kwargs):
        I, N = self.params['data'].shape
        weights = pyro.sample('mixture_weights',
                              dist.Dirichlet(self.params['mixture']))
        with pyro.plate('segments', I):
            mu = pyro.sample(
                'gene_basal',
                dist.Gamma(self.params['theta_scale'],
                           self.params['theta_rate']))
            with pyro.plate('components', self.params['K']):
                cc = pyro.sample(
                    'cnv_probs',
                    dist.LogNormal(np.log(self.params['cnv_mean']),
                                   self.params['cnv_var']))

        with pyro.plate('data', N, self.params['batch_size']):
            assignment = pyro.sample('assignment',
                                     dist.Categorical(weights),
                                     infer={"enumerate": "parallel"})
            theta = pyro.sample(
                'norm_factor',
                dist.Gamma(self.params['theta_scale'],
                           self.params['theta_rate']))
            for i in pyro.plate('segments2', I):
                pyro.sample(
                    'obs_{}'.format(i),
                    dist.Poisson((Vindex(cc)[assignment, i] * theta * mu[i]) +
                                 1e-8),
                    obs=self.params['data'][i, :])
Example #14
    def _true_counts_from_params(self, data: torch.Tensor,
                                 mu_est: torch.Tensor,
                                 lambda_est: torch.Tensor,
                                 alpha_est: torch.Tensor) -> torch.Tensor:
        """Calculate a single sample estimate of mu, the mean of the true count
        matrix, and lambda, the rate parameter of the Poisson background counts.

        Args:
            data: Dense tensor minibatch of cell by gene count data.
            mu_est: Dense tensor of Negative Binomial means for true counts.
            lambda_est: Dense tensor of Poisson rate params for noise counts.
            alpha_est: Dense tensor of Dirichlet concentration params that
                inform the overdispersion of the Negative Binomial.

        Returns:
            dense_counts_torch: Dense matrix of true de-noised counts.

        """

        # Estimate a reasonable low-end to begin the Poisson summation.
        n = min(100., data.max().item())  # No need to exceed the max value
        poisson_values_low = (lambda_est.detach() - n / 2).int()

        poisson_values_low = torch.clamp(torch.min(poisson_values_low,
                                                   (data - n + 1).int()),
                                         min=0).float()

        # Construct a big tensor of possible noise counts per cell per gene,
        # shape (batch_cells, n_genes, max_noise_counts)
        noise_count_tensor = torch.arange(start=0, end=n) \
            .expand([data.shape[0], data.shape[1], -1]) \
            .float().to(device=data.device)
        noise_count_tensor = noise_count_tensor + poisson_values_low.unsqueeze(
            -1)

        # Compute probabilities of each number of noise counts.
        # NOTE: some values will be outside the support (negative values for NB).
        # The resulting NaNs are ignored by torch.argmax().
        logits = (mu_est.log() - alpha_est.log()).unsqueeze(-1)
        log_prob_tensor = (
            dist.Poisson(lambda_est.unsqueeze(-1)).log_prob(noise_count_tensor)
            + dist.NegativeBinomial(
                total_count=alpha_est.unsqueeze(-1), logits=logits).log_prob(
                    data.unsqueeze(-1) - noise_count_tensor))
        log_prob_tensor = torch.where(
            noise_count_tensor <= data.unsqueeze(-1), log_prob_tensor,
            torch.ones_like(log_prob_tensor) * -np.inf)

        # Find the most probable number of noise counts per cell per gene.
        noise_count_map = torch.argmax(log_prob_tensor, dim=-1,
                                       keepdim=False).float()

        # Handle the cases where y = 0 (no cell): all counts are noise.
        noise_count_map = torch.where(mu_est == 0, data, noise_count_map)

        # Compute the number of true counts.
        dense_counts_torch = torch.clamp(data - noise_count_map, min=0.)

        return dense_counts_torch
Example #15
def model():
    alpha_p_log = pyro.param("alpha_p_log", self.alpha_p_log_0.clone())
    beta_p_log = pyro.param("beta_p_log", self.beta_p_log_0.clone())
    alpha_p, beta_p = torch.exp(alpha_p_log), torch.exp(beta_p_log)
    lambda_latent = pyro.sample("lambda_latent",
                                dist.Gamma(alpha_p, beta_p))
    pyro.sample("obs", dist.Poisson(lambda_latent), obs=self.data)
    return lambda_latent
Example #16
def model(data):

    idka = torch.tensor(4.0)  # where is my neural network?
    idkb = torch.tensor(4.0)
    # one Gamma-distributed rate per gene (the old batch_size kwarg is gone)
    genelambdas = pyro.sample(
        'genelambdas',
        dist.Gamma(idka, idkb).expand([19795]).to_event(1))
    for celltype in range(data.size(0)):  # this one's 56 right?
        with pyro.plate('observe_{}'.format(celltype), data.size(1)):
            pyro.sample('indiv_{}'.format(celltype),
                        dist.Poisson(genelambdas), obs=data[celltype])
Example #17
    def forward(self, f_loc, f_var, y=None):
        # draw f ~ Normal(f_loc, f_var): the noise must match f_loc's shape
        # and be scaled by the standard deviation, not the variance
        f_res = self.response_function(
            f_loc + torch.randn(f_loc.shape, device=f_loc.device) * f_var.sqrt())

        y_dist = dist.Poisson(f_res)
        self.y_dist = y_dist
        if y is not None:
            y_dist = y_dist.expand_by(y.shape[:-f_res.dim()]).to_event(y.dim())
        return pyro.sample("y", y_dist, obs=y)
Example #18
def base_model(num_sites, num_days, data=None):
    with plate("sites", size=num_sites, dim=-2):
        epsilon = pyro.sample("epsilon", dist.Normal(-5, 15))
        with plate("days", size=num_days, dim=-1):
            accidents = pyro.sample("accidents",
                                    dist.Poisson(torch.exp(epsilon)),
                                    obs=data)

    return accidents
Example #19
def post_model(
    p_data,
    t_data,
    s_data,
    r_data,
    y,
    p_types,
    p_stories,
    p_subreddits,
    zero_inflated,
):
    coef_scale_prior = 0.1

    num_posts, num_p_indeps = p_data.shape

    # shared prior
    gamma_loc = torch.zeros((num_p_indeps, 1), dtype=torch.float64)

    if zero_inflated:
        gamma_gate_loc = torch.zeros((num_p_indeps, 1), dtype=torch.float64)

    with pyro.plate("p_indep", num_p_indeps, dim=-2):
        gamma = pyro.sample("gamma", dist.Normal(gamma_loc, coef_scale_prior))

        if zero_inflated:
            gamma_gate = pyro.sample(
                "gamma_gate", dist.Normal(gamma_gate_loc, coef_scale_prior)
            )

    # for each post,
    # use the correct set of coefficients to run our post-level regression
    with pyro.plate("post", num_posts, dim=-1) as p:

        # indep vars for this post
        indeps = p_data[p, :]

        mu = torch.matmul(
            indeps, gamma
        ).flatten()  # ( num_posts, num_p_indeps) x (num_p_indeps, 1)

        # defining response dist
        if zero_inflated:
            gate = torch.nn.Sigmoid()(
                torch.matmul(indeps, gamma_gate).flatten()
            )  # ( num_posts, num_p_indeps) x (num_p_indeps, 1)

            response_dist = dist.ZeroInflatedPoisson(
                rate=torch.exp(mu), gate=gate
            )
        else:
            response_dist = dist.Poisson(rate=torch.exp(mu))

        # sample (obs=None falls back to the prior predictive)
        pyro.sample("obs", response_dist, obs=y[p] if y is not None else None)
Example #20
def model(data):
    alpha = 1 / torch.mean(data)
    lambda1 = pyro.sample("lambda1", dist.Exponential(alpha))
    lambda2 = pyro.sample("lambda2", dist.Exponential(alpha))
    tau = pyro.sample("tau", dist.Uniform(0, 1))
    idx = torch.arange(len(data)).float()
    lambda_ = torch.where(idx.lt(tau * len(data)), lambda1, lambda2)

    with pyro.plate("data", len(data)):
        pyro.sample("obs", dist.Poisson(lambda_), obs=data)
Example #21
    def model(self, data, ratings):

        with pyro.plate("betas", data.shape[1]):
            betas = pyro.sample("beta", dist.Normal(0, 1))

        lambda_ = torch.exp(torch.sum(betas * data, axis=1))
        with pyro.plate("ratings", data.shape[0]):
            y = pyro.sample("obs", dist.Poisson(lambda_), obs=ratings)

        return y
Example #22
def model(data, params):
    # initialize data
    N = data["N"]
    x = data["x"]
    t = data["t"]

    alpha = pyro.sample("alpha", dist.Exponential(1.0))
    beta = pyro.sample("beta", dist.Gamma(0.1, 1.0))
    with pyro.plate('data', N):
        theta = pyro.sample("theta", dist.Gamma(alpha, beta))
        x = pyro.sample("x", dist.Poisson(theta * t), obs=x)
Example #23
def test_gamma_poisson_log_prob(shape):
    gamma_conc = torch.randn(shape).exp()
    gamma_rate = torch.randn(shape).exp()
    value = torch.arange(20.)

    num_samples = 300000
    poisson_rate = dist.Gamma(gamma_conc, gamma_rate).sample((num_samples, ))
    log_probs = dist.Poisson(poisson_rate).log_prob(value)
    expected = log_probs.logsumexp(0) - math.log(num_samples)
    actual = GammaPoisson(gamma_conc, gamma_rate).log_prob(value)
    assert_close(actual, expected, rtol=0.05)
Example #24
def test_gamma_poisson(sample_shape, batch_shape):
    concentration = torch.randn(batch_shape).exp()
    rate = torch.randn(batch_shape).exp()
    nobs = 5
    obs = dist.Poisson(10.).sample((nobs,) + sample_shape + batch_shape).sum(0)

    f = dist.Gamma(concentration, rate)
    g = dist.Gamma(1 + obs, nobs)
    fg, log_normalizer = f.conjugate_update(g)

    x = fg.sample(sample_shape)
    assert_close(f.log_prob(x) + g.log_prob(x), fg.log_prob(x) + log_normalizer)
Example #26
  def work_on_projects(self):
      def productivity(rep_j,rep_u,comp_prod):
        return (sum(rep_j)+sum(rep_u) + comp_prod)

      lab_students = [agent for agent in self.model.schedule.agents if (agent.category == 'U' and agent.affliation == self.affliation)]
      rep_u = []
      u_agents = []
      for agent in lab_students:
        if not agent.booked:
          if self.own_lab_agent.capacity_s_initial >= self.own_lab_agent.capacity_s:
            rep_u.append(agent.reputation_points)
            agent.booked = True
            self.own_lab_agent.capacity_s += 1
            u_agents.append(agent)


      j_agents = [agent for agent in self.model.schedule.agents if (agent.category == 'J' and agent.affliation == self.unique_id)]
      rep_j = [agent.reputation_points for agent in j_agents]
      self.project_productivity = productivity(rep_j,rep_u,self.compute_productivity)*self.reputation_points   # This includes all the agents and compute
      d = self.difficulty_selected
      chance = self.chance_of_project_success()
      self.is_successful = pyro.sample(self.namegen("Proj_success"),pyd.Bernoulli(chance)).item()
      if self.is_successful == 1:
        print("Chance of project by",self.unique_id,"is",round(chance,5),"with difficulty",round(d,3),"with bid of",round(self.bid_value,4),"topic was",self.topic_interested,".....it was a SUCCESS")
        vision = 4
        self.landscape.reduce_novelty([self.pos_y,self.pos_x],1)
        publication_generated = pyro.sample(self.namegen("publication_gen"),pyd.Poisson(self.project_productivity)).item() + 1
        citations_generated = publication_generated*pyro.sample(self.namegen("citation_gen"),pyd.Poisson(self.landscape.matrix[self.pos_y,self.pos_x])).item()  # Significance result to citations    
        self.publications+= publication_generated
        self.citations+= citations_generated
        for students in u_agents:
          students.publications+= publication_generated
          students.citations+= citations_generated
        for juniors in j_agents:
          juniors.publications+= publication_generated
          juniors.citations+= citations_generated
      else:
        print("Chance of project by",self.unique_id,"is",round(chance,5),"with difficulty",round(d,3),"with bid of",round(self.bid_value,4),"topic was",self.topic_interested,".....it was a FAILURE")
        vision = 2
        self.landscape.reduce_novelty([self.pos_y,self.pos_x],chance)

      max_size = self.model.elsize
      for xi in range(-vision , vision + 1):
        for yi in range(-vision,vision + 1):
          if (xi**2 + yi**2 <= (vision+0.5)**2):
            self.landscape.x_arr.append((self.pos_x+xi)%max_size)
            self.landscape.y_arr.append((self.pos_y+yi)%max_size)
            self.landscape.explored[(self.pos_y+yi)%max_size,(self.pos_x+xi)%max_size] = 1

      self.landscape.visible_x = self.landscape.x_arr    
      self.landscape.visible_y = self.landscape.y_arr
Example #27
    def likelihood(self, inf_params):
        lk = torch.zeros(self._params['K'], self._data['data'].shape[1],
                         self._data['data'].shape[0])
        if self._params['K'] == 1:
            norm_f = torch.sum(inf_params["cnv_probs"] *
                               self._data['mu']) / torch.sum(self._data['mu'])
            for i in range(self._data['data'].shape[0]):
                lamb = (inf_params["cnv_probs"][0, i] * self._data['mu'][i] *
                        inf_params["norm_factor"]) / norm_f
                lk[0, :, i] = torch.log(
                    inf_params["mixture_weights"]) + dist.Poisson(
                        lamb).log_prob(self._data['data'][i, :])
            return lk

        for k in range(self._params['K']):
            norm_f = torch.sum(inf_params["cnv_probs"][k, :] *
                               self._data['mu']) / torch.sum(self._data['mu'])
            for i in range(self._data['data'].shape[0]):
                lamb = (inf_params["cnv_probs"][k, i] * self._data['mu'][i] *
                        inf_params["norm_factor"]) / norm_f
                lk[k, :,
                   i] = dist.Poisson(lamb).log_prob(self._data['data'][i, :])
        return lk
Example #28
    def model(self, home_team, away_team, gameweek):
        n_gameweeks = max(gameweek) + 1
        gamma = pyro.sample("gamma", dist.LogNormal(0, 1))
        mu_b = pyro.sample("mu_b", dist.Normal(0, 1))

        with pyro.plate("teams", self.n_teams):

            log_a0 = pyro.sample("log_a0", dist.Normal(0, 1))
            log_b0 = pyro.sample("log_b0", dist.Normal(mu_b, 1))
            sigma_rw = pyro.sample("sigma_rw", dist.HalfNormal(0.1))

            with pyro.plate("random_walk", n_gameweeks - 1):
                diffs_a = pyro.sample("diff_a", dist.Normal(0, sigma_rw))
                diffs_b = pyro.sample("diff_b", dist.Normal(0, sigma_rw))

            log_a0_t = log_a0 if log_a0.dim() == 2 else log_a0[None, :]
            diffs_a = torch.cat((log_a0_t, diffs_a), axis=0)
            log_a = torch.cumsum(diffs_a, axis=0)

            log_b0_t = log_b0 if log_b0.dim() == 2 else log_b0[None, :]
            diffs_b = torch.cat((log_b0_t, diffs_b), axis=0)
            log_b = torch.cumsum(diffs_b, axis=0)

        pyro.sample("log_a", dist.Delta(log_a), obs=log_a)
        pyro.sample("log_b", dist.Delta(log_b), obs=log_b)
        home_inds = torch.tensor(
            [self.team_to_index[team] for team in home_team])
        away_inds = torch.tensor(
            [self.team_to_index[team] for team in away_team])
        home_rate = torch.clamp(
            log_a[gameweek, home_inds] - log_b[gameweek, away_inds] + gamma,
            -7, 2)
        away_rate = torch.clamp(
            log_a[gameweek, away_inds] - log_b[gameweek, home_inds], -7, 2)

        pyro.sample("home_goals", dist.Poisson(torch.exp(home_rate)))
        pyro.sample("away_goals", dist.Poisson(torch.exp(away_rate)))
Example #29
    def model(self):

        # Sample Poissonian parameters and compute expected contribution

        mu_poiss = torch.zeros(torch.sum(~self.mask), dtype=torch.float64)

        for i_temp in torch.arange(self.n_poiss):
            norms_poiss = pyro.sample(self.labels_poiss[i_temp],
                                      self.poiss_priors[i_temp])

            if self.poiss_log_priors[i_temp]:
                norms_poiss = 10**norms_poiss.clone()
            mu_poiss += norms_poiss * self.poiss_temps[i_temp]

        # Samples non-Poissonian parameters

        thetas = []

        for i_ps in torch.arange(self.n_ps):
            theta_temp = [
                pyro.sample(
                    self.labels_ps_params[i_np_param] + "_" +
                    self.labels_ps[i_ps], self.ps_priors[i_ps][i_np_param])
                for i_np_param in torch.arange(self.n_ps_params)
            ]

            for i_p in torch.arange(len(theta_temp)):
                if self.ps_log_priors[i_ps][i_p]:
                    theta_temp[i_p] = 10**theta_temp[i_p]

            thetas.append(theta_temp)

        # Mark each pixel as conditionally independent
        with pyro.plate("data_plate",
                        len(self.data),
                        dim=-1,
                        subsample_size=self.subsample_size) as idx:

            # Use either the non-Poissonian (if there is at least one NP model) or Poissonian likelihood
            if self.n_ps != 0:
                log_likelihood = log_like_np(mu_poiss[idx], thetas,
                                             self.ps_temps[:, idx],
                                             self.data[idx], self.f_ary,
                                             self.df_rho_div_f_ary)
                pyro.factor("obs", log_likelihood)
            else:
                pyro.sample("obs",
                            dist.Poisson(mu_poiss[idx]),
                            obs=self.data[idx])
Example #30
    def forward(self, data):
        N_pop = 300

        p1 = self.ode_params1.view((-1, ))
        p2 = self.ode_params2.view((-1, ))
        p3 = self.ode_params3.view((-1, ))
        R0 = pyro.deterministic('R0', torch.zeros_like(p1))
        ode_params = torch.stack([p1, p2, p3, 1 - p3, R0], dim=1)
        SIR_sim = self._ode_op.apply(ode_params, (self._ode_model, ))

        for i in range(len(data)):
            pyro.sample("obs_{}".format(i),
                        dist.Poisson(SIR_sim[..., i, 1] * N_pop),
                        obs=data[i])
        return SIR_sim