Example #1
def test_empty_model_error():
    def model():
        pass

    guide = AutoDiagonalNormal(model)
    with pytest.raises(RuntimeError):
        guide()
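
For contrast, a minimal sketch (not part of the test above) of the non-error path: once the model declares at least one latent sample site, calling the guide lazily traces the model, creates its variational parameters, and returns a dict of samples keyed by site name.

import pyro
import pyro.distributions as dist
from pyro.infer.autoguide import AutoDiagonalNormal

def model():
    pyro.sample("z", dist.Normal(0., 1.))

guide = AutoDiagonalNormal(model)
samples = guide()  # e.g. {"z": tensor(...)}; no RuntimeError here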
Example #2
def generate_auto_guide(self,
                        guide_type='delta',
                        model_method_name='model_with',
                        guide_method_name='guide_with'):
    assert guide_type in self._auto_guide_types
    exposed_variables = ['epsilon', 'alpha', 'pi']
    if self.likelihood == 'normal':
        if self.terms_isotropic:
            exposed_variables += ['ppca_sigmas']
        else:
            exposed_variables += [f'ppca_sigma_{i}' for i in range(self.d)]
        if self.group_term is not None:
            if self.group_isotropic:
                exposed_variables += ['ppca_gm_sigma']
            else:
                exposed_variables += [
                    f'ppca_gm_sigma_{i}' for i in range(self.d)
                ]
    if guide_type == 'delta':
        guide = AutoDelta(
            poutine.block(getattr(self, model_method_name),
                          expose=exposed_variables))
    elif guide_type == 'diag_normal':
        guide = AutoDiagonalNormal(
            poutine.block(getattr(self, model_method_name),
                          expose=exposed_variables))
    else:
        raise ValueError(f"unsupported guide_type: {guide_type}")
    # setattr(self, guide_method_name, classmethod(guide))
    setattr(self, guide_method_name, guide)
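
The poutine.block(expose=...) pattern above is what restricts the autoguide to a subset of sites. A self-contained sketch of the same idea (the toy model below is an illustration, not the class above):

import pyro
import pyro.distributions as dist
from pyro import poutine
from pyro.infer.autoguide import AutoDelta

def toy_model():
    pyro.sample("epsilon", dist.Normal(0., 1.))
    pyro.sample("nuisance", dist.Normal(0., 1.))

# AutoDelta only builds MAP parameters for the exposed site;
# "nuisance" stays invisible to the guide.
guide = AutoDelta(poutine.block(toy_model, expose=["epsilon"]))
print(guide())  # {"epsilon": tensor(...)}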
Example #3
def get_pyro_model(return_all=False):
    regression_model = RegressionModel(p=1)
    model = model_fn(regression_model)
    guide = AutoDiagonalNormal(model)
    # guide = guide_fn(regression_model)
    optimizer = Adam({'lr': 0.05})
    svi = SVI(model, guide, optimizer, loss=Trace_ELBO(), num_samples=1000)
    if return_all:
        return svi, model, guide
    else:
        return svi
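
A hedged usage sketch for the helper above; the argument signature of the model returned by model_fn is an assumption, since the original snippet does not show it:

svi = get_pyro_model()
for step in range(1000):
    # x_data / y_data are placeholders for the regression inputs/targets
    loss = svi.step(x_data, y_data)
    if step % 100 == 0:
        print(f"step {step:4d}  elbo loss: {loss:.3f}")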
Example #4
def main(args):
    # load data
    print('loading training data...')
    dataset_directory = get_data_directory(__file__)
    dataset_path = os.path.join(dataset_directory, 'faces_training.csv')
    if not os.path.exists(dataset_path):
        os.makedirs(dataset_directory, exist_ok=True)
        wget.download('https://d2fefpcigoriu7.cloudfront.net/datasets/faces_training.csv', dataset_path)
    data = torch.tensor(np.loadtxt(dataset_path, delimiter=',')).float()

    sparse_gamma_def = SparseGammaDEF()

    # due to the special logic in the custom guide (e.g. parameter clipping), the custom guide
    # is more numerically stable and enables us to use a larger learning rate (and consequently
    # achieves better results)
    learning_rate = 0.2 if args.auto_guide else 4.5
    momentum = 0.05 if args.auto_guide else 0.1
    opt = optim.AdagradRMSProp({"eta": learning_rate, "t": momentum})

    # either use an automatically constructed guide (see pyro.contrib.autoguide for details) or our custom guide
    guide = AutoDiagonalNormal(sparse_gamma_def.model) if args.auto_guide else sparse_gamma_def.guide

    # this is the svi object we use during training; we use TraceMeanField_ELBO to
    # get analytic KL divergences
    svi = SVI(sparse_gamma_def.model, guide, opt, loss=TraceMeanField_ELBO())

    # we use svi_eval during evaluation; since we took care to write down our model in
    # a fully vectorized way, this computation can be done efficiently with large tensor ops
    svi_eval = SVI(sparse_gamma_def.model, guide, opt,
                   loss=TraceMeanField_ELBO(num_particles=args.eval_particles, vectorize_particles=True))

    guide_description = 'automatically constructed' if args.auto_guide else 'custom'
    print('\nbeginning training with %s guide...' % guide_description)

    # the training loop
    for k in range(args.num_epochs):
        loss = svi.step(data)
        if not args.auto_guide:
            # for the custom guide we clip parameters after each gradient step
            sparse_gamma_def.clip_params()

        if (k % args.eval_frequency == 0 and k > 0) or k == args.num_epochs - 1:
            loss = svi_eval.evaluate_loss(data)
            print("[epoch %04d] training elbo: %.4g" % (k, -loss))
Example #5
def auto_guide_callable(model):
    def guide_x():
        x_loc = pyro.param("x_loc", torch.tensor(1.))
        x_scale = pyro.param("x_scale",
                             torch.tensor(2.),
                             constraint=constraints.positive)
        pyro.sample("x", dist.Normal(x_loc, x_scale))

    def median_x():
        return {"x": pyro.param("x_loc", torch.tensor(1.))}

    guide = AutoGuideList(model)
    guide.add(AutoCallable(model, guide_x, median_x))
    guide.add(AutoDiagonalNormal(poutine.block(model, hide=["x"])))
    return guide
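
A usage sketch for the hybrid guide above (the toy model below is an assumption, not the original test model): the custom callable handles "x", the diagonal-normal part handles everything else, and AutoGuideList.median() merges the two.

import pyro
import pyro.distributions as dist
from pyro.infer import SVI, Trace_ELBO
from pyro.optim import Adam

def model():
    x = pyro.sample("x", dist.Normal(0., 1.))
    pyro.sample("y", dist.Normal(x, 1.))

guide = auto_guide_callable(model)
svi = SVI(model, guide, Adam({"lr": 0.01}), Trace_ELBO())
for _ in range(100):
    svi.step()
print(guide.median())  # "x" comes from median_x, "y" from the sub-guide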
Example #6
    def fit(
        self,
        df,
        max_iter=6000,
        patience=200,
        optimiser_settings={"lr": 1.0e-2},
        elbo_kwargs={"num_particles": 5},
    ):
        teams = sorted(list(set(df["home_team"]) | set(df["away_team"])))
        home_team = df["home_team"].values
        away_team = df["away_team"].values
        home_goals = torch.tensor(df["home_goals"].values, dtype=torch.float32)
        away_goals = torch.tensor(df["away_goals"].values, dtype=torch.float32)
        gameweek = ((df["date"] - df["date"].min()).dt.days // 7).values

        self.team_to_index = {team: i for i, team in enumerate(teams)}
        self.index_to_team = {
            value: key
            for key, value in self.team_to_index.items()
        }
        self.n_teams = len(teams)
        self.min_date = df["date"].min()

        conditioned_model = condition(self.model,
                                      data={
                                          "home_goals": home_goals,
                                          "away_goals": away_goals
                                      })
        guide = AutoDiagonalNormal(conditioned_model)

        optimizer = Adam(optimiser_settings)
        elbo = Trace_ELBO(**elbo_kwargs)
        svi = SVI(conditioned_model, guide, optimizer, loss=elbo)

        pyro.clear_param_store()
        fitted_svi, losses = early_stopping(svi,
                                            home_team,
                                            away_team,
                                            gameweek,
                                            max_iter=max_iter,
                                            patience=patience)

        self.guide = guide

        return losses
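
early_stopping is a project-specific helper that is not shown in the snippet; a minimal sketch of what such a patience-based loop could look like (assumed behaviour, not the original implementation):

def early_stopping(svi, *args, max_iter=6000, patience=200):
    losses, best, stale = [], float("inf"), 0
    for _ in range(max_iter):
        loss = svi.step(*args)
        losses.append(loss)
        if loss < best:
            best, stale = loss, 0
        else:
            stale += 1
        if stale >= patience:  # no improvement for `patience` steps
            break
    return svi, losses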
Example #7
def auto_guide_list_x(model):
    guide = AutoGuideList(model)
    guide.add(AutoDelta(poutine.block(model, expose=["x"])))
    guide.add(AutoDiagonalNormal(poutine.block(model, hide=["x"])))
    return guide
Example #8
                                     len(mix_params),
                                     device=x.device))
    beta_scale = pyro.param(
        'beta_resp_scale',
        torch.tril(
            1. * torch.eye(len(mix_params), len(mix_params), device=x.device)),
        constraint=constraints.lower_cholesky)
    pyro.sample(
        "beta_resp",
        dist.MultivariateNormal(beta_loc, scale_tril=beta_scale).to_event(1))


# In[11]:

guide = AutoGuideList(model)
guide.add(AutoDiagonalNormal(poutine.block(model, expose=['theta',
                                                          'L_omega'])))
guide.add(my_local_guide)  # automatically wrapped in an AutoCallable

# # Run variational inference

# In[12]:

# prepare data for running inference
train_x = torch.tensor(alt_attributes, dtype=torch.float)
train_x = train_x.cuda()
train_y = torch.tensor(true_choices, dtype=torch.int)
train_y = train_y.cuda()
alt_av_cuda = torch.from_numpy(alt_availability)
alt_av_cuda = alt_av_cuda.cuda()
alt_av_mat = alt_availability.copy()
alt_av_mat[np.where(alt_av_mat == 0)] = -1e9
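
The -1e9 substitution is a standard way to mask out unavailable alternatives: added to the logits, it drives the corresponding softmax probabilities to effectively zero. An illustrative sketch (shapes assumed):

import torch

logits = torch.tensor([1.0, 2.0, 0.5])
mask = torch.tensor([0.0, -1e9, 0.0])          # alternative 2 unavailable
probs = torch.softmax(logits + mask, dim=-1)   # probs[1] is ~0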
Example #9
    b_r = py.sample("b_r", dist.Normal(loc_b_r, scale_b_r))

    loc_b_ar = py.param("loc_b_ar", torch.randn(1) + guess)
    # a Normal scale must be positive, so constrain the parameter
    scale_b_ar = py.param("scale_b_ar",
                          torch.rand(1),
                          constraint=constraints.positive)

    b_ar = py.sample("b_ar", dist.Normal(loc_b_ar, scale_b_ar))

    sigma_dist = dist.Normal(0., 1.)
    sigma = pyro.sample("sigma", sigma_dist)


# an easier way is to call the autoguide library, AutoDiagonalNormal is the mean-field approximation

from pyro.contrib.autoguide import AutoDiagonalNormal
guide = AutoDiagonalNormal(model)

# now we learn the parameters through SVI
num_iterations = 1000
optim = pyro.optim.Adam({"lr": 0.03})
svi = py.infer.SVI(model,
                   guide,
                   optim,
                   loss=py.infer.Trace_ELBO(),
                   num_samples=1000)

data = torch.tensor(df.values, dtype=torch.float)
x_data, y_data = data[:, :-1], data[:, -1]


def train():
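
The snippet is cut off at def train(); a typical body (an assumption, not the original code) would step the SVI object defined above:

def train():
    pyro.clear_param_store()
    for j in range(num_iterations):
        loss = svi.step(x_data, y_data)
        if j % 100 == 0:
            print(f"[iteration {j + 1:04d}] loss: {loss / len(data):.4f}")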
Example #10
        if y is not None:
            logging.debug(f"y_data: {y.shape}")

        logging.debug(f"batch shape: {d_dist.batch_shape}")
        logging.debug(f"event shape: {d_dist.event_shape}")
        logging.debug(f"event dim: {d_dist.event_dim}")

        pyro.sample("obs", d_dist, obs=y)

        return prediction_mean


softplus = torch.nn.Softplus()

from pyro.contrib.autoguide import AutoDiagonalNormal
guide = AutoDiagonalNormal(pyromodel)
"""
from pyro.infer.autoguide import AutoMultivariateNormal
from pyro.infer.autoguide import init_to_mean
guide = AutoMultivariateNormal(pyromodel, init_loc_fn=init_to_mean)
"""


def save():
    save_model = input("save model > ")

    if save_model.lower().startswith('y'):
        experiment_id = input("Enter exp name, press return to use datetime> ")
        if not experiment_id:
            experiment_id = datetime.now().isoformat()
Example #11
nuts_posterior_samples = nuts.get_samples()

# In[61]:

plot_uq(nuts_posterior_samples, X, Xnew, "NUTS")

# ## ADVI

# In[81]:

# Automatically define variational distribution (a mean field guide).
pyro.clear_param_store()  # clear global parameter cache
pyro.set_rng_seed(1)  # set random seed

# Create guides.
guide = AutoDiagonalNormal(gpc)

# Create SVI object for optimization.
svi = SVI(gpc, guide, Adam({'lr': 1e-2}), TraceEnum_ELBO())

# Do gradient steps.
advi_loss = []
for step in trange(1000):
    advi_loss.append(svi.step(X, y.double()))

# Plot negative ELBO trace.
plt.plot(advi_loss)
plt.title("ADVI Negative ELBO")


# Bijector for advi samples.
Example #12

def pyro_bayesian(regression_model, y_data):
    def summary(traces, sites):
        marginal = get_marginal(traces, sites)
        site_stats = {}
        for i in range(marginal.shape[1]):
            site_name = sites[i]
            marginal_site = pd.DataFrame(marginal[:, i]).transpose()
            describe = partial(pd.Series.describe,
                               percentiles=[.05, 0.25, 0.5, 0.75, 0.95])
            site_stats[site_name] = marginal_site.apply(describe, axis=1) \
                [["mean", "std", "5%", "25%", "50%", "75%", "95%"]]
        return site_stats

    # CI testing
    assert pyro.__version__.startswith('0.3.0')
    pyro.enable_validation(True)
    pyro.set_rng_seed(1)
    pyro.enable_validation(True)

    from pyro.contrib.autoguide import AutoDiagonalNormal
    guide = AutoDiagonalNormal(model)

    optim = Adam({"lr": 0.03})
    svi = SVI(model, guide, optim, loss=Trace_ELBO(), num_samples=1000)

    train(svi, x_data, y_data, num_iterations, regression_model)

    for name, value in pyro.get_param_store().items():
        print(name, pyro.param(name))

    get_marginal = lambda traces, sites: EmpiricalMarginal(
        traces, sites)._get_samples_and_weights()[0].detach().cpu().numpy()

    posterior = svi.run(x_data, y_data, regression_model)

    # posterior predictive distribution we can get samples from
    trace_pred = TracePredictive(wrapped_model, posterior, num_samples=1000)
    post_pred = trace_pred.run(x_data, None, regression_model)
    post_summary = summary(post_pred, sites=['prediction', 'obs'])
    mu = post_summary["prediction"]
    y = post_summary["obs"]
    predictions = pd.DataFrame({
        "x0": x_data[:, 0],
        "x1": x_data[:, 1],
        "mu_mean": mu["mean"],
        "mu_perc_5": mu["5%"],
        "mu_perc_95": mu["95%"],
        "y_mean": y["mean"],
        "y_perc_5": y["5%"],
        "y_perc_95": y["95%"],
        "true_gdp": y_data,
    })
    # print("predictions=", predictions)
    """we need to prepend `module$$$` to all parameters of nn.Modules since
    # that is how they are stored in the ParamStore
    """
    weight = get_marginal(posterior,
                          ['module$$$linear.weight']).squeeze(1).squeeze(1)
    factor = get_marginal(posterior, ['module$$$factor'])

    # x0, x1, x2"-home_page, x1*x2-factor
    print("weight shape=", weight.shape)
    print("factor shape=", factor.shape)

    fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(12, 6), sharey=True)
    ax[0].hist(weight[:, 0])
    ax[1].hist(weight[:, 1])
    ax[2].hist(factor.squeeze(1))
    plt.show()
Example #13
hmc_posterior_samples = hmc.get_samples()  # get posterior samples.

# NUTS
pyro.clear_param_store()
pyro.set_rng_seed(2)
nuts = MCMC(NUTS(gpc,
                 target_accept_prob=0.8,
                 max_tree_depth=10,
                 jit_compile=True),
            num_samples=500,
            warmup_steps=500)
nuts.run(X, y.double())
nuts_posterior_samples = nuts.get_samples()

# ADVI
pyro.clear_param_store()  # clear global parameter cache
pyro.set_rng_seed(1)  # set random seed

# Automatically define variational distribution (a mean field guide).
guide = AutoDiagonalNormal(gpc)

# Create SVI object for optimization.
svi = SVI(gpc, guide, Adam({'lr': 1e-2}), TraceEnum_ELBO())

# Do 1000 gradient steps.
advi_loss = []
for step in trange(1000):
    advi_loss.append(svi.step(X, y.double()))

# NOTE: See notebook to see full example.
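
Neither ADVI run shows how to pull the fitted posterior out of the guide afterwards. Two common options, sketched under the assumption that guide, gpc, X and y are as above:

from pyro.infer import Predictive

# marginal quantiles of each latent site under the diagonal normal
print(guide.quantiles([0.05, 0.5, 0.95]))

# posterior samples drawn through the fitted guide
posterior_samples = Predictive(gpc, guide=guide, num_samples=500)(X, y.double())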
Example #14
def _guide(self, model):
    if self.mode == 'diagonal':
        return AutoDiagonalNormal(model)
    elif self.mode == 'multivariate':
        return AutoMultivariateNormal(model)
    else:
        raise ValueError(f"unsupported guide mode: {self.mode}")
Example #15
### Model definition ###


def log_reg(x_data=None, y_data=None):
    w = pyro.sample("w", Normal(torch.zeros(d), torch.ones(d)))
    w0 = pyro.sample("w0", Normal(0., 1.))

    with pyro.plate("map", N):
        x = pyro.sample("x", Normal(torch.zeros(d), 2).to_event(1), obs=x_data)
        logits = (w0 + x @ w).squeeze(-1)
        y = pyro.sample("pred", Binomial(logits=logits), obs=y_data)

    return x, y


qmodel = AutoDiagonalNormal(log_reg)


#### Sample from prior model

sampler = pyro.condition(
    log_reg, data={"w0": torch.tensor(0.), "w": torch.tensor([2., 1.])})
x_train, y_train = sampler()

#### Inference

optim = Adam({"lr": 0.1})
svi = SVI(log_reg, qmodel, optim, loss=Trace_ELBO(), num_samples=10)

num_iterations = 10000
pyro.clear_param_store()
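
The example stops before the optimisation loop; a typical continuation (an assumption, not part of the original snippet) would be:

for j in range(num_iterations):
    loss = svi.step(x_train, y_train)
    if j % 1000 == 0:
        print(f"[iteration {j:05d}] elbo loss: {loss:.2f}")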