Example #1
def inference(train_x, train_y, num_epochs=10000):
    svi = SVI(model,
              guide,
              optim.Adam({'lr': 0.001}),
              loss=Trace_ELBO(),
              num_samples=len(train_x))

    for i in range(num_epochs):
        elbo = svi.step(train_x, train_y)
        if i % 1000 == 0:
            print('Elbo loss : {}'.format(elbo))

    print('pyro\'s Param Store')
    for k, v in pyro.get_param_store().items():
        print(k, v)
Example #2
    def update_noise_svi(self, observed_steady_state, initial_noise):
        def guide(noise):
            noise_terms = list(noise.keys())
            mu_constraints = constraints.interval(-3., 3.)
            sigma_constraints = constraints.interval(.0001, 3)
            mu = {
                k: pyro.param('{}_mu'.format(k),
                              tensor(0.),
                              constraint=mu_constraints)
                for k in noise_terms
            }
            sigma = {
                k: pyro.param('{}_sigma'.format(k),
                              tensor(1.),
                              constraint=sigma_constraints)
                for k in noise_terms
            }
            for noise_term in noise_terms:
                sample(noise_term, Normal(mu[noise_term], sigma[noise_term]))

        observation_model = condition(self.noisy_model, observed_steady_state)
        pyro.clear_param_store()
        svi = SVI(model=observation_model,
                  guide=guide,
                  optim=SGD({
                      "lr": 0.001,
                      "momentum": 0.1
                  }),
                  loss=Trace_ELBO())

        losses = []
        num_steps = 1000
        samples = defaultdict(list)
        for t in range(num_steps):
            losses.append(svi.step(initial_noise))
            for noise in initial_noise.keys():
                mu = '{}_mu'.format(noise)
                sigma = '{}_sigma'.format(noise)
                samples[mu].append(pyro.param(mu).item())
                samples[sigma].append(pyro.param(sigma).item())
        means = {k: statistics.mean(v) for k, v in samples.items()}
        updated_noise = {
            'N_Raf': Normal(means['N_Raf_mu'], means['N_Raf_sigma']),
            'N_Mek': Normal(means['N_Mek_mu'], means['N_Mek_sigma']),
            'N_Erk': Normal(means['N_Erk_mu'], means['N_Erk_sigma'])
        }

        return updated_noise, losses
Example #3
    def run_inference(self):
        data = torch.tensor(parse_tdoc_file(self.observations_file_path,
                                            self.documents_length,
                                            self.words_number),
                            dtype=torch.float32).view(-1)

        if self.create_ism_data_file:
            self.ism.save_ism_data(data)
            return 0

        if self.use_ism:
            seq = format_seq_file('./mutu_data/seq.txt')
            self.initalized_motifs = self.ism.initialize_motifs(data, seq)
        else:
            self.initalized_motifs = torch.ones(self.latent_motifs_number, 1,
                                                self.words_number,
                                                self.relative_time_length)

        pyro.clear_param_store()

        adam_params = {"lr": 0.1, "betas": (0.9, 0.999)}
        optimizer = pyro.optim.Adam(adam_params)

        svi = SVI(self.model,
                  self.guide,
                  optimizer,
                  loss=Trace_ELBO(),
                  num_samples=self.n_samples)

        for _ in tqdm(range(self.n_steps)):
            svi.step(data)

        motifs_starting_times_file_path = os.path.join(
            self.work_dir, 'motifs_starting_times.npy')
        motifs_file_path = os.path.join(self.work_dir, 'motifs.npy')

        motifs_starting_times = pyro.param(
            "q_motifs_starting_times").detach().numpy()
        motifs = pyro.param("q_motifs").detach().numpy()

        np.save(file=motifs_starting_times_file_path,
                arr=motifs_starting_times)
        np.save(file=motifs_file_path, arr=motifs)

        self.dump_motifs_and_starting_times(motifs_starting_times, motifs)

        if self.plot_results:
            self.plot_motifs_and_starting_times(motifs)
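Example #4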
    def fit(self, data_loaders, num_epochs=10):
        train_loader, test_loader = data_loaders
        batch_size = train_loader.batch_size
        optimizer = Adam({'lr': 0.0009})
        elbo = Trace_ELBO()
        svi = SVI(self.model, self.guide, optimizer, elbo)

        for epoch in range(num_epochs):
            loss = 0.
            iterations = 0
            for x, y in tqdm(train_loader):
                if x.size() == (batch_size, 1, 28, 28):
                    loss += svi.step(x.reshape(batch_size, -1), y)
                    iterations += 1

            print(epoch, ' Epoch loss : ', loss / (iterations * batch_size))
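Example #5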
    def inference_svi(self, data, steps=3000, lr=0.01):
        self.inference_method = "svi"
        pyro.clear_param_store()
        self.optimizer = Adam({"lr": lr, "betas": (0.90, 0.999)})
        self.svi = SVI(self.model,
                       self.multi_norm_guide(),
                       self.optimizer,
                       loss=Trace_ELBO())
        self.history = {"losses": []}
        data = torch.tensor(data).float()
        bar = trange(steps)
        for i in bar:
            loss = self.svi.step(data)
            if i % 100 == 0:
                bar.write("Now step %d completed, loss is %.4f" % (i, loss))
            self.history["losses"].append(loss)
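Example #6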
    def __init__(self, model, guide=None, epochs=500, lr=0.05, cuda=False):

        self.model = model
        self.cuda = cuda
        if guide is not None:
            self.guide = guide
        else:
            self.guide = AutoDiagonalNormal(model)
        self.optimizer = optim.Adam({"lr": lr})

        self.svi = SVI(self.model,
                       self.guide,
                       self.optimizer,
                       loss=Trace_ELBO())

        self.epochs = epochs
Example #7
def test_pyrocov_smoke(model, Guide, backend):
    T, P, S, F = 3, 4, 5, 6
    dataset = {
        "features": torch.randn(S, F),
        "local_time": torch.randn(T, P),
        "weekly_strains": torch.randn(T, P, S).exp().round(),
    }

    guide = Guide(model, backend=backend)
    svi = SVI(model, guide, ClippedAdam({"lr": 1e-8}), Trace_ELBO())
    for step in range(2):
        with xfail_if_not_implemented():
            svi.step(dataset)
    guide(dataset)
    predictive = Predictive(model, guide=guide, num_samples=2)
    predictive(dataset)
Example #8
def test_posterior_predictive_svi_one_hot():
    pseudocounts = torch.ones(3) * 0.1
    true_probs = torch.tensor([0.15, 0.6, 0.25])
    classes = dist.OneHotCategorical(true_probs).sample((10000, ))
    guide = AutoDelta(one_hot_model)
    svi = SVI(one_hot_model, guide, optim.Adam(dict(lr=0.1)), Trace_ELBO())
    for i in range(1000):
        svi.step(pseudocounts, classes=classes)
    posterior_samples = Predictive(guide,
                                   num_samples=10000).get_samples(pseudocounts)
    posterior_predictive = Predictive(one_hot_model, posterior_samples)
    marginal_return_vals = posterior_predictive.get_samples(
        pseudocounts)["obs"]
    assert_close(marginal_return_vals.mean(dim=0),
                 true_probs.unsqueeze(0),
                 rtol=0.1)
Example #9
def test_laplace_linear_model(linear_model, one_point_design):
    pyro.set_rng_seed(42)
    pyro.clear_param_store()
    # You can use 1 final sample here because linear models have a posterior entropy that is independent of `y`
    estimated_eig = laplace_eig(linear_model,
                                one_point_design,
                                "y",
                                "w",
                                guide=laplace_guide,
                                num_steps=250,
                                final_num_samples=1,
                                optim=optim.Adam({"lr": 0.05}),
                                loss=Trace_ELBO().differentiable_loss)
    expected_eig = linear_model_ground_truth(linear_model, one_point_design,
                                             "y", "w")
    assert_equal(estimated_eig, expected_eig, prec=5e-2)
Example #10
def test_subsample_guide(auto_class, init_fn):

    # The model from tutorial/source/easyguide.ipynb
    def model(batch, subsample, full_size):
        num_time_steps = len(batch)
        result = [None] * num_time_steps
        drift = pyro.sample("drift", dist.LogNormal(-1, 0.5))
        plate = pyro.plate("data", full_size, subsample=subsample)
        assert plate.size == 50
        with plate:
            z = 0.
            for t in range(num_time_steps):
                z = pyro.sample("state_{}".format(t), dist.Normal(z, drift))
                result[t] = pyro.sample("obs_{}".format(t),
                                        dist.Bernoulli(logits=z),
                                        obs=batch[t])

        return torch.stack(result)

    def create_plates(batch, subsample, full_size):
        return pyro.plate("data", full_size, subsample=subsample)

    if auto_class == AutoGuideList:
        guide = AutoGuideList(model, create_plates=create_plates)
        guide.add(AutoDelta(poutine.block(model, expose=["drift"])))
        guide.add(AutoNormal(poutine.block(model, hide=["drift"])))
    else:
        guide = auto_class(model, create_plates=create_plates)

    full_size = 50
    batch_size = 20
    num_time_steps = 8
    pyro.set_rng_seed(123456789)
    data = model([None] * num_time_steps, torch.arange(full_size), full_size)
    assert data.shape == (num_time_steps, full_size)

    pyro.get_param_store().clear()
    pyro.set_rng_seed(123456789)
    svi = SVI(model, guide, Adam({"lr": 0.02}), Trace_ELBO())
    for epoch in range(2):
        beg = 0
        while beg < full_size:
            end = min(full_size, beg + batch_size)
            subsample = torch.arange(beg, end)
            batch = data[:, beg:end]
            beg = end
            svi.step(batch, subsample, full_size=full_size)
Example #11
    def train(
        self,
        max_epochs: int = 30000,
        batch_size: int = None,
        train_size: float = 1,
        lr: float = 0.002,
        num_particles: int = 1,
        scale_elbo: float = 1.0,
        **kwargs,
    ):
        """Train the model with useful defaults

        Parameters
        ----------
        max_epochs
            Number of passes through the dataset. If `None`, defaults to
            `np.min([round((20000 / n_cells) * 400), 400])`
        train_size
            Size of training set in the range [0.0, 1.0]. Use all data points in training because
            we need to estimate cell abundance at all locations.
        batch_size
            Minibatch size to use during training. If `None`, no minibatching occurs and all
            data is copied to device (e.g., GPU).
        lr
            Optimiser learning rate (default optimiser is :class:`~pyro.optim.ClippedAdam`).
            Specifying optimiser via plan_kwargs overrides this choice of lr.
        kwargs
            Other arguments to scvi.model.base.PyroSviTrainMixin().train() method
        """

        kwargs["max_epochs"] = max_epochs
        kwargs["batch_size"] = batch_size
        kwargs["train_size"] = train_size
        kwargs["lr"] = lr

        if "plan_kwargs" not in kwargs.keys():
            kwargs["plan_kwargs"] = dict()
        if getattr(self.module.model, "discrete_variables", None) and (len(self.module.model.discrete_variables) > 0):
            kwargs["plan_kwargs"]["loss_fn"] = TraceEnum_ELBO(num_particles=num_particles)
        else:
            kwargs["plan_kwargs"]["loss_fn"] = Trace_ELBO(num_particles=num_particles)
        if scale_elbo != 1.0:
            if scale_elbo is None:
                scale_elbo = 1.0 / (self.summary_stats["n_cells"] * self.summary_stats["n_genes"])
            kwargs["plan_kwargs"]["scale_elbo"] = scale_elbo

        super().train(**kwargs)
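A minimal usage sketch for the `train` method above (the instance name `mod` is assumed; the argument values simply restate the documented defaults and are illustrative, not a recommendation):

mod.train(
    max_epochs=30000,   # passes through the dataset
    batch_size=None,    # no minibatching: all data is copied to the device
    train_size=1,       # use every observation for training
    lr=0.002,           # ClippedAdam learning rate unless overridden via plan_kwargs
)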
Example #12
def inference(train_x, train_y, num_epochs=2000):
    svi = SVI(model,
              guide,
              optim.Adam({'lr': 0.005}),
              loss=Trace_ELBO(),
              num_samples=1000)

    for i in range(num_epochs):
        elbo = svi.step(train_x, train_y)
        if i % 200 == 0:
            print('Elbo loss : {}'.format(elbo))

    svi_posterior = svi.run(train_x, train_y)
    sites = ['w', 'b', 'sigma']
    for site, values in summary(svi_posterior, sites).items():
        print("Site: {}".format(site))
        print(values, "\n")
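Example #13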
def main(model, guide, args):
    # init
    if args.seed is not None: pyro.set_rng_seed(args.seed)
    logger = get_logger(args.log, __name__)
    logger.info(args)

    # load data
    train_loader, test_loader = setup_data_loaders(batch_size=256)

    # setup svi
    pyro.clear_param_store()
    # # WL: edited to make SCORE produce no NaNs. =====
    # # lr value:
    # # - original value in vae: 1.0e-3 --- SCORE produces NaNs at some point.
    # # - default of Adam(..)  : 1.0e-3
    # # - current value        : 1.0e-4 --- SCORE produces no NaNs.
    # learning_rate = 1.0e-4
    # # ===============================================
    opt = optim.Adam({"lr": args.learning_rate})
    elbo = Trace_ELBO()
    svi = SVI(model.main, guide.main, opt, loss=elbo)

    # # train (init)
    # loss_avg = evaluate(svi, train_loader)
    # param_state = copy.deepcopy(pyro.get_param_store().get_state())
    # elbo_l = [-loss_avg]
    # param_state_l = [param_state]

    # train
    times = [time.time()]
    logger.info(f"\nepoch\t" + "elbo\t" + "time(sec)")

    for i in range(1, args.num_epochs + 1):
        loss_avg = train_epoch(svi, train_loader)
        # elbo_l.append(-loss_avg)
        #
        # if (i+1) % param_freq == 0:
        #     param_state = copy.deepcopy(pyro.get_param_store().get_state())
        #     param_state_l.append(param_state)

        if (args.eval_frequency > 0
                and i % args.eval_frequency == 0) or (i == 1):
            times.append(time.time())
            logger.info(f"{i:06d}\t"
                        f"{-loss_avg:.4f}\t"
                        f"{times[-1]-times[-2]:.3f}")
Example #14
    def train(self, train_data, len_dataset, num_iterations):
        # Train the algorithm using variational inference with the Adam
        # optimizer, both implemented in Pyro
        optim = Adam({"lr": 0.01})
        svi = SVI(self.model, self.guide, optim, loss=Trace_ELBO())
        loss = 0
        losses = []
        for j in range(num_iterations):
            loss = 0
            for data in train_data:
                # calculate the loss and take a gradient step
                loss += svi.step(data[0].float(), data[1])
            total_epoch_loss_train = loss / len_dataset

            losses.append(total_epoch_loss_train)
            #print("Epoca: ", j, " Perda: ", total_epoch_loss_train)
        return losses
Example #15
def test_posterior_predictive_svi_auto_delta_guide(parallel):
    true_probs = torch.ones(5) * 0.7
    num_trials = torch.ones(5) * 1000
    num_success = dist.Binomial(num_trials, true_probs).sample()
    conditioned_model = poutine.condition(model, data={"obs": num_success})
    guide = AutoDelta(conditioned_model)
    svi = SVI(conditioned_model, guide, optim.Adam(dict(lr=1.0)), Trace_ELBO())
    for i in range(1000):
        svi.step(num_trials)
    posterior_predictive = Predictive(model,
                                      guide=guide,
                                      num_samples=10000,
                                      parallel=parallel)
    marginal_return_vals = posterior_predictive.get_samples(num_trials)["obs"]
    assert_close(marginal_return_vals.mean(dim=0),
                 torch.ones(5) * 700,
                 rtol=0.05)
Example #16
def get_pyro_model(return_all=True):
    nn_model = NN_Model(input_size=3, hidden_size=2, output_size=1)
    model = model_fn(nn_model)
    guide = guide_fn(nn_model)
    AdamArgs = {'lr': 3e-3}
    optimizer = torch.optim.Adam
    scheduler = pyro.optim.ExponentialLR({
        'optimizer': optimizer,
        'optim_args': AdamArgs,
        'gamma': 0.99995
    })
    svi = SVI(model, guide, scheduler, loss=Trace_ELBO(), num_samples=1000)

    if return_all:
        return svi, model, guide
    else:
        return svi
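Example #17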
def run_svi(model, guide, x, y, optimizer=pyro.optim.Adam({"lr": 0.03}),
            loss=Trace_ELBO(), num_iters=5000, verbose=True):
    pyro.clear_param_store()
    svi = SVI(model,
              guide,
              optimizer,
              loss)
    elbos = []
    params = []
    for i in range(num_iters):
        elbo = svi.step(x, y)
        if verbose and i % 500 == 0:
            logging.info("Elbo loss: {}".format(elbo))
        elbos.append(elbo)
        params.append(dict(copy(pyro.get_param_store())))

    return elbos, params
Example #18
    def fit(self, optim=Adam({'lr': 1e-3}), loss=Trace_ELBO(num_particles=1), max_iter=5000, random_instance=None):
        svi = SVI(self.model, self.guide, optim=optim, loss=loss)
        with trange(max_iter) as t:
            for i in t:
                t.set_description(f'Iteration: {i}')
                svi.step(self.data)
                loss = svi.evaluate_loss(self.data)
                with torch.no_grad():
                    postfix_kwargs = {}
                    if random_instance is not None:
                        g = pyro.param('g')
                        s = pyro.param('s')
                        postfix_kwargs.update({
                            'g': '{0}'.format((g - random_instance.g).abs().mean()),
                            's': '{0}'.format((s - random_instance.s).abs().mean())
                        })
                    t.set_postfix(loss=loss, **postfix_kwargs)
Example #19
def test_svi_smoke():
    class Model(PyroModule):
        def __init__(self):
            super().__init__()
            self.loc = nn.Parameter(torch.zeros(2))
            self.scale = PyroParam(torch.ones(2),
                                   constraint=constraints.positive)
            self.z = PyroSample(
                lambda self: dist.Normal(self.loc, self.scale).to_event(1))

        def forward(self, data):
            loc, log_scale = self.z.unbind(-1)
            with pyro.plate("data"):
                pyro.sample("obs", dist.Cauchy(loc, log_scale.exp()), obs=data)

    class Guide(PyroModule):
        def __init__(self):
            super().__init__()
            self.loc = nn.Parameter(torch.zeros(2))
            self.scale = PyroParam(torch.ones(2),
                                   constraint=constraints.positive)
            self.z = PyroSample(
                lambda self: dist.Normal(self.loc, self.scale).to_event(1))

        def forward(self, *args, **kwargs):
            return self.z

    data = torch.randn(5)
    model = Model()
    trace = poutine.trace(model).get_trace(data)
    assert "loc" in trace.nodes.keys()
    assert trace.nodes["loc"]["type"] == "param"
    assert "scale" in trace.nodes
    assert trace.nodes["scale"]["type"] == "param"

    guide = Guide()
    trace = poutine.trace(guide).get_trace(data)
    assert "loc" in trace.nodes.keys()
    assert trace.nodes["loc"]["type"] == "param"
    assert "scale" in trace.nodes
    assert trace.nodes["scale"]["type"] == "param"

    optim = Adam({"lr": 0.01})
    svi = SVI(model, guide, optim, Trace_ELBO())
    for step in range(3):
        svi.step(data)
Example #20
    def do_test_fixedness(self, fixed_parts):
        pyro.clear_param_store()

        def model():
            alpha_p_log = pyro.param("alpha_p_log", self.alpha_p_log_0.clone())
            beta_p_log = pyro.param("beta_p_log", self.beta_p_log_0.clone())
            alpha_p, beta_p = torch.exp(alpha_p_log), torch.exp(beta_p_log)
            lambda_latent = pyro.sample("lambda_latent",
                                        dist.Gamma(alpha_p, beta_p))
            pyro.sample("obs", dist.Poisson(lambda_latent), obs=self.data)
            return lambda_latent

        def guide():
            alpha_q_log = pyro.param("alpha_q_log", self.alpha_q_log_0.clone())
            beta_q_log = pyro.param("beta_q_log", self.beta_q_log_0.clone())
            alpha_q, beta_q = torch.exp(alpha_q_log), torch.exp(beta_q_log)
            pyro.sample("lambda_latent", dist.Gamma(alpha_q, beta_q))

        def per_param_args(param_name):
            if "model" in fixed_parts and "p_" in param_name:
                return {"lr": 0.0}
            if "guide" in fixed_parts and "q_" in param_name:
                return {"lr": 0.0}
            return {"lr": 0.01}

        adam = optim.Adam(per_param_args)
        svi = SVI(model, guide, adam, loss=Trace_ELBO())

        for _ in range(3):
            svi.step()

        model_unchanged = (torch.equal(
            pyro.param("alpha_p_log").data,
            self.alpha_p_log_0)) and (torch.equal(
                pyro.param("beta_p_log").data, self.beta_p_log_0))
        guide_unchanged = (torch.equal(
            pyro.param("alpha_q_log").data,
            self.alpha_q_log_0)) and (torch.equal(
                pyro.param("beta_q_log").data, self.beta_q_log_0))
        model_changed = not model_unchanged
        guide_changed = not guide_unchanged
        error = ("model" in fixed_parts
                 and model_changed) or ("guide" in fixed_parts
                                        and guide_changed)
        return not error
Example #21
    def fit(
        self,
        df,
        max_iter=6000,
        patience=200,
        optimiser_settings={"lr": 1.0e-2},
        elbo_kwargs={"num_particles": 5},
    ):
        teams = sorted(list(set(df["home_team"]) | set(df["away_team"])))
        home_team = df["home_team"].values
        away_team = df["away_team"].values
        home_goals = torch.tensor(df["home_goals"].values, dtype=torch.float32)
        away_goals = torch.tensor(df["away_goals"].values, dtype=torch.float32)
        gameweek = ((df["date"] - df["date"].min()).dt.days // 7).values

        self.team_to_index = {team: i for i, team in enumerate(teams)}
        self.index_to_team = {
            value: key
            for key, value in self.team_to_index.items()
        }
        self.n_teams = len(teams)
        self.min_date = df["date"].min()

        conditioned_model = condition(self.model,
                                      data={
                                          "home_goals": home_goals,
                                          "away_goals": away_goals
                                      })
        guide = AutoDiagonalNormal(conditioned_model)

        optimizer = Adam(optimiser_settings)
        elbo = Trace_ELBO(**elbo_kwargs)
        svi = SVI(conditioned_model, guide, optimizer, loss=elbo)

        pyro.clear_param_store()
        fitted_svi, losses = early_stopping(svi,
                                            home_team,
                                            away_team,
                                            gameweek,
                                            max_iter=max_iter,
                                            patience=patience)

        self.guide = guide

        return losses
Example #22
    def fit(self,
            data_loader,
            optim,
            num_epochs,
            callback=None,
            num_particles=1,
            closed_form_kl=True,
            device=None):
        """Optimizes the variational parameters on data from data_loader using optim for num_epochs.

        :param Iterable data_loader: iterable over batches of data, e.g. a torch.utils.data.DataLoader. Assumes that
            each element is a length-two tuple whose first element is either a single object or a list of objects,
            e.g. torch.Tensors, that are the inputs to the neural network, and whose second element is a single
            torch.Tensor, e.g. of class labels.
        :param optim: pyro optimizer to be used for constructing an SVI object, e.g. pyro.optim.Adam({"lr": 1e-3}).
        :param int num_epochs: number of passes over data_loader.
        :param callable callback: optional function to invoke after every training epoch. Receives the BNN object,
            the epoch number and the average value of the ELBO over the epoch. May return True to terminate
            optimization before num_epochs, e.g. if it finds that a validation log likelihood saturates.
        :param int num_particles: number of MC samples for estimating the ELBO.
        :param bool closed_form_kl: whether to use TraceMeanField_ELBO or Trace_ELBO, i.e. calculate KL divergence
            between approximate posterior and prior in closed form or via a Monte Carlo estimate.
        :param torch.device device: optional device to send the data to.
        """
        old_training_state = self.net.training
        self.net.train(True)

        loss = TraceMeanField_ELBO(
            num_particles) if closed_form_kl else Trace_ELBO(num_particles)
        svi = SVI(self.model, self.guide, optim, loss=loss)

        for i in range(num_epochs):
            elbo = 0.
            num_batch = 1
            for num_batch, (input_data, observation_data) in enumerate(
                    iter(data_loader), 1):
                elbo += svi.step(tuple(_to(input_data, device)),
                                 tuple(_to(observation_data, device))[0])

            # the callback can stop training by returning True
            if callback is not None and callback(self, i, elbo / num_batch):
                break

        self.net.train(old_training_state)
        return svi
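A minimal usage sketch based on the docstring above; the instance name `bnn`, the synthetic tensors, and the loader setup are assumptions for illustration, not part of the source:

import torch
import pyro
from torch.utils.data import DataLoader, TensorDataset

# Each batch unpacks to (inputs, labels), matching the documented loader format.
x = torch.randn(256, 10)                 # network inputs (assumed shape)
y = torch.randint(0, 2, (256,)).float()  # class labels
loader = DataLoader(TensorDataset(x, y), batch_size=32, shuffle=True)

# `bnn` is assumed to be an instance of the class that defines `fit` above.
svi = bnn.fit(loader,
              optim=pyro.optim.Adam({"lr": 1e-3}),
              num_epochs=5,
              closed_form_kl=True)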
Example #23
def train_model(pmf):
    def model(data):
        # sample f from the prior
        # Probabilities are generated by the pmf
        f = pyro.sample("latent_fairness", pmf)
        f2 = dist.Bernoulli(f)
        for i in range(len(data)):
            s = pyro.sample("obs_{}".format(i), f2, obs=data[i])

    def guide(data):
        alpha_q = pyro.param("alpha_q",
                             torch.tensor(15.0),
                             constraint=constraints.positive)
        beta_q = pyro.param("beta_q",
                            torch.tensor(15.0),
                            constraint=constraints.positive)
        # sample latent_fairness from the distribution Beta(alpha_q, beta_q)
        pyro.sample("latent_fairness", dist.Beta(alpha_q, beta_q))

    adam_params = {"lr": 0.0005, "betas": (0.90, 0.999)}
    optimizer = ClippedAdam(adam_params)

    svi = SVI(model, guide, optimizer, loss=Trace_ELBO())

    for step in range(n_steps):
        loss = svi.step(data)
        if step % 100 == 0:
            logging.info(".")
            logging.info("Elbo loss: {}".format(loss))

    # grab the learned variational parameters
    a_q = pyro.param("alpha_q").item()
    b_q = pyro.param("beta_q").item()

    inferred_mean = a_q / (a_q + b_q)
    # compute inferred standard deviation
    factor = b_q / (a_q * (1.0 + a_q + b_q))
    inferred_std = inferred_mean * math.sqrt(factor)
    print("\nbased on the data and our prior belief, the fairness " +
          "of the coin is %.3f +- %.3f" % (inferred_mean, inferred_std))

    beta_posterior = torch.distributions.beta.Beta(a_q, b_q)
    posterior = torch.distributions.bernoulli.Bernoulli(
        beta_posterior.sample())
    logging.info("Sampling:{}".format(posterior.sample()))
Example #24
def main(args):
    pyro.set_rng_seed(1219)
    pyro.clear_param_store()

    data = pickle.load(open('data/results.pkl', 'rb'))
    data = torch.tensor(data)
    train_set, test_set = data[:30000], data[30000:]
    train_set = torch.utils.data.TensorDataset(train_set)
    test_set = torch.utils.data.TensorDataset(test_set)
    kwargs = {'num_workers': 1, 'pin_memory': args.use_cuda}
    train_loader = torch.utils.data.DataLoader(dataset=train_set,
                                               batch_size=args.batchsize,
                                               shuffle=True,
                                               **kwargs)
    test_loader = torch.utils.data.DataLoader(dataset=test_set,
                                              batch_size=args.batchsize,
                                              shuffle=False,
                                              **kwargs)

    br = BayesianRanking(2000)
    optimizer = Adam({"lr": args.lr})
    svi = SVI(br.model, br.guide, optimizer, loss=Trace_ELBO())

    train_elbo = []
    test_elbo = []
    for epoch in range(args.epochs):
        total_epoch_loss_train = train(svi,
                                       train_loader,
                                       use_cuda=args.use_cuda)
        train_elbo.append(-total_epoch_loss_train)
        print("[epoch %03d]  average training loss: %.4f" %
              (epoch, total_epoch_loss_train))

        if epoch % args.test_freq == 0:
            # report test diagnostics
            total_epoch_loss_test = evaluate(svi,
                                             test_loader,
                                             use_cuda=args.use_cuda)
            test_elbo.append(-total_epoch_loss_test)
            print("[epoch %03d] average test loss: %.4f" %
                  (epoch, total_epoch_loss_test))

    mu = pickle.load(open('./data/mu_gt.pkl', 'rb'))
    pred_mu_q = pyro.param('mu_q').detach().numpy().squeeze()
    print(ss.spearmanr(mu, pred_mu_q))
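Example #25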
    def load(self, path):
        """Load model from path

        Args:
            path (string): Path to saved model
        """
        model_path = path + '_model'
        opt_path = path + '_opt'
        guide_path = path + '_guide'
        self.net = torch.load(model_path)

        pyro.get_param_store().load(path + '_params')

        self.optim = Adam({"lr": self.learning_rate})
        self.optim.load(opt_path)
        self.guide = AutoDiagonalNormal(self.model)
        self.guide = torch.load(guide_path)
        self.svi = SVI(self.net, self.guide, self.optim, loss=Trace_ELBO())
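A short usage sketch for `load`; the instance name and checkpoint prefix are hypothetical:

# Assumes a matching save routine wrote '<prefix>_model', '<prefix>_opt',
# '<prefix>_guide' and '<prefix>_params'.
regressor.load('checkpoints/bnn')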
Example #26
def test_posterior_predictive_svi_manual_guide(parallel):
    true_probs = torch.ones(5) * 0.7
    num_trials = torch.ones(5) * 1000
    num_success = dist.Binomial(num_trials, true_probs).sample()
    conditioned_model = poutine.condition(model, data={"obs": num_success})
    elbo = Trace_ELBO(num_particles=100, vectorize_particles=True)
    svi = SVI(conditioned_model, beta_guide, optim.Adam(dict(lr=1.0)), elbo)
    for i in range(1000):
        svi.step(num_trials)
    posterior_predictive = Predictive(
        model,
        guide=beta_guide,
        num_samples=10000,
        parallel=parallel,
        return_sites=["_RETURN"],
    )
    marginal_return_vals = posterior_predictive(num_trials)["_RETURN"]
    assert_close(marginal_return_vals.mean(dim=0), torch.ones(5) * 700, rtol=0.05)
Example #27
def train(model):
    pyro.clear_param_store()

    model(x)

    guide = AutoDiagonalNormal(model)
    guide(x)

    adam = optim.Adam({"lr": 0.1})
    svi = SVI(model, guide, adam, loss=Trace_ELBO())

    for _ in range(1000):
        svi.step(x)

    import matplotlib.pyplot as plt
    med = guide.median()['trans_score.weight'].detach()
    plt.imshow(med)
    return med
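Example #28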
def test_posterior_predictive_svi_auto_diag_normal_guide():
    true_probs = torch.ones(5) * 0.7
    num_trials = torch.ones(5) * 1000
    num_success = dist.Binomial(num_trials, true_probs).sample()
    conditioned_model = poutine.condition(model, data={"obs": num_success})
    opt = optim.Adam(dict(lr=0.1))
    loss = Trace_ELBO()
    guide = AutoDiagonalNormal(conditioned_model)
    svi_run = SVI(conditioned_model,
                  guide,
                  opt,
                  loss,
                  num_steps=1000,
                  num_samples=100).run(num_trials)
    posterior_predictive = TracePredictive(model, svi_run,
                                           num_samples=10000).run(num_trials)
    marginal_return_vals = posterior_predictive.marginal().empirical["_RETURN"]
    assert_close(marginal_return_vals.mean, torch.ones(5) * 700, rtol=0.05)
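Example #29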
def test_posterior_predictive_svi_one_hot():
    pseudocounts = torch.ones(3) * 0.1
    true_probs = torch.tensor([0.15, 0.6, 0.25])
    classes = dist.OneHotCategorical(true_probs).sample((10000, ))
    opt = optim.Adam(dict(lr=0.1))
    loss = Trace_ELBO()
    guide = AutoDelta(one_hot_model)
    svi_run = SVI(one_hot_model,
                  guide,
                  opt,
                  loss,
                  num_steps=1000,
                  num_samples=1000).run(pseudocounts, classes=classes)
    posterior_predictive = TracePredictive(one_hot_model,
                                           svi_run,
                                           num_samples=10000).run(pseudocounts)
    marginal_return_vals = posterior_predictive.marginal().empirical["_RETURN"]
    assert_close(marginal_return_vals.mean, true_probs.unsqueeze(0), rtol=0.1)
Example #30
    def _train_svi(self, train_loader, epochs, lr, savedir, device):
        print("\n == fullBNN SVI training ==")

        optimizer = pyro.optim.Adam({"lr":lr})
        elbo = Trace_ELBO()
        svi = SVI(self.model, self.guide, optimizer, loss=elbo)

        loss_list = []
        accuracy_list = []

        start = time.time()
        for epoch in range(epochs):
            loss = 0.0
            correct_predictions = 0.0

            for x_batch, y_batch in train_loader:

                x_batch = x_batch.to(device)
                y_batch = y_batch.to(device)
                loss += svi.step(x_data=x_batch, y_data=y_batch.argmax(dim=-1))

                outputs = self.forward(x_batch, training=True, avg_posterior=False).to(device)
                predictions = outputs.argmax(-1)
                labels = y_batch.argmax(-1)
                correct_predictions += (predictions == labels).sum().item()
            
            if DEBUG:
                print("\n", pyro.get_param_store()["model.0.weight_loc"][0][:5])
                print("\n",predictions[:10],"\n", labels[:10])

            total_loss = loss / len(train_loader.dataset)
            accuracy = 100 * correct_predictions / len(train_loader.dataset)

            print(f"\n[Epoch {epoch + 1}]\t loss: {total_loss:.2f} \t accuracy: {accuracy:.2f}", 
                  end="\t")

            loss_list.append(loss)
            accuracy_list.append(accuracy)

        execution_time(start=start, end=time.time())
        self.save(savedir)

        plot_loss_accuracy(dict={'loss':loss_list, 'accuracy':accuracy_list},
                           path=os.path.join(savedir, self.name+"_training.png"))