示例#1
0
def test_shapes(parallel):
    """Verify that Predictive's sample shapes match a hand-vectorized run."""
    n_draws = 10

    def model():
        x = pyro.sample("x", dist.Normal(0, 1).expand([2]).to_event(1))
        with pyro.plate("plate", 5):
            loc, log_scale = x.unbind(-1)
            y = pyro.sample("y", dist.Normal(loc, log_scale.exp()))
        return {"x": x, "y": y}

    guide = AutoDiagonalNormal(model)

    # Reference shapes: vectorize guide and model manually with an outer plate.
    vec = pyro.plate("_vectorize", n_draws, dim=-2)
    guide_trace = poutine.trace(vec(guide)).get_trace()
    expected = poutine.replay(vec(model), guide_trace)()

    # Shapes as produced by the Predictive utility under test.
    predictive = Predictive(
        model,
        guide=guide,
        return_sites=["x", "y"],
        num_samples=n_draws,
        parallel=parallel,
    )
    actual = predictive.get_samples()

    assert set(actual) == set(expected)
    for site in ("x", "y"):
        assert actual[site].shape == expected[site].shape
示例#2
0
    def _predict(self, home_team, away_team, dates, num_samples=100, seed=42):
        """Draw posterior-predictive goal counts for the given fixtures.

        Args:
            home_team: a team name or a sequence of home-team names.
            away_team: a team name or a sequence of away-team names.
            dates: datetime-like pandas Series of match dates; converted to
                integer gameweeks relative to ``self.min_date``.
            num_samples: number of posterior-predictive samples to draw.
            seed: currently unused.  NOTE(review): the seed is accepted but
                never applied, so results are not reproducible via this
                argument — confirm intent (e.g. ``pyro.set_rng_seed(seed)``).

        Returns:
            Tuple of numpy arrays ``(home_goals, away_goals)`` with one row
            per sample.
        """
        predictive = Predictive(
            self.model,
            guide=self.guide,
            num_samples=num_samples,
            return_sites=("home_goals", "away_goals"),
        )

        # Accept a single team name as well as a sequence of names.
        home_team = [home_team] if isinstance(home_team, str) else home_team
        away_team = [away_team] if isinstance(away_team, str) else away_team

        # Register any previously unseen teams so index lookups cannot fail.
        missing_teams = set(list(home_team) + list(away_team)) - set(
            self.team_to_index.keys())

        for team in missing_teams:
            # ``default=-1`` keeps this safe even if no team has been
            # indexed yet (plain ``max`` raises ValueError on an empty dict).
            new_index = max(self.team_to_index.values(), default=-1) + 1
            self.team_to_index[team] = new_index
            self.index_to_team[new_index] = team
            self.n_teams += 1

        # Integer gameweek offset from the earliest known match date.
        gameweek = (dates - self.min_date).dt.days // 7

        predictions = predictive.get_samples(home_team, away_team, gameweek)

        return (
            predictions["home_goals"].detach().numpy(),
            predictions["away_goals"].detach().numpy(),
        )
示例#3
0
def test_posterior_predictive_svi_one_hot():
    """Fit a one-hot categorical model with AutoDelta, then check that the
    posterior-predictive mean recovers the true class probabilities."""
    pseudocounts = torch.ones(3) * 0.1
    true_probs = torch.tensor([0.15, 0.6, 0.25])
    classes = dist.OneHotCategorical(true_probs).sample((10000,))
    guide = AutoDelta(one_hot_model)
    svi = SVI(one_hot_model, guide, optim.Adam(dict(lr=0.1)), Trace_ELBO())
    for _ in range(1000):
        svi.step(pseudocounts, classes=classes)
    # Sample latents from the fitted guide, then replay them through the model.
    posterior = Predictive(guide, num_samples=10000).get_samples(pseudocounts)
    predictive = Predictive(one_hot_model, posterior)
    obs = predictive.get_samples(pseudocounts)["obs"]
    assert_close(obs.mean(dim=0), true_probs.unsqueeze(0), rtol=0.1)
示例#4
0
def test_posterior_predictive_svi_auto_delta_guide(parallel):
    """Fit a binomial model with an AutoDelta guide and check that the
    posterior-predictive mean of the observed counts is close to truth."""
    true_probs = torch.ones(5) * 0.7
    num_trials = torch.ones(5) * 1000
    num_success = dist.Binomial(num_trials, true_probs).sample()
    conditioned_model = poutine.condition(model, data={"obs": num_success})
    guide = AutoDelta(conditioned_model)
    svi = SVI(conditioned_model, guide, optim.Adam(dict(lr=1.0)), Trace_ELBO())
    for _ in range(1000):
        svi.step(num_trials)
    predictive = Predictive(
        model, guide=guide, num_samples=10000, parallel=parallel
    )
    obs = predictive.get_samples(num_trials)["obs"]
    # Expected successes per trial batch: 1000 trials * 0.7 = 700.
    assert_close(obs.mean(dim=0), torch.ones(5) * 700, rtol=0.05)
示例#5
0
def test_posterior_predictive_svi_auto_diag_normal_guide(return_trace):
    """Fit a binomial model with an AutoDiagonalNormal guide and check the
    posterior-predictive mean via either the samples dict or a vectorized trace."""
    true_probs = torch.ones(5) * 0.7
    num_trials = torch.ones(5) * 1000
    num_success = dist.Binomial(num_trials, true_probs).sample()
    conditioned_model = poutine.condition(model, data={"obs": num_success})
    guide = AutoDiagonalNormal(conditioned_model)
    svi = SVI(conditioned_model, guide, optim.Adam(dict(lr=0.1)), Trace_ELBO())
    for _ in range(1000):
        svi.step(num_trials)
    predictive = Predictive(model, guide=guide, num_samples=10000, parallel=True)
    if return_trace:
        trace = predictive.get_vectorized_trace(num_trials)
        obs = trace.nodes["obs"]["value"]
    else:
        obs = predictive.get_samples(num_trials)["obs"]
    # Expected successes per trial batch: 1000 trials * 0.7 = 700.
    assert_close(obs.mean(dim=0), torch.ones(5) * 700, rtol=0.05)
示例#6
0
# Next we compute the predictive distribution using these posterior samples.
# This too is simple: a single helper call produces the predictive draws.

# In[15]:

pred = Predictive(
    model,
    {"a": posterior_a, "b": posterior_b},
    return_sites=["y"],
)

# In[16]:

x_ = np.linspace(-2, 2, 100)
y_ = pred.get_samples(torch.tensor(x_), None)["y"]

# In[17]:

# Plot the predictive mean with a +/- 2 std band over the training data.
y_mean = y_.mean(0)
y_std = y_.std(0)
plt.figure(figsize=(10, 5))
plt.plot(x_, y_mean)
plt.fill_between(x_, y_mean - 2 * y_std, y_mean + 2 * y_std, alpha=0.3)
plt.grid()
plt.scatter(x, y)
plt.show()

# ## Variational inference

# In[18]: