Code Example #1
    def _build_svi(self, loss=None):
        def per_param_callable(module_name, param_name):
            params = {
                'eps': 1e-5,
                'amsgrad': self.hparams.use_amsgrad,
                'weight_decay': self.hparams.l2
            }
            if module_name in ('intensity_flow_components',
                               'thickness_flow_components'):
                params['lr'] = self.hparams.pgm_lr
            else:
                params['lr'] = self.hparams.lr
            return params

        if loss is None:
            loss = self.svi_loss

        if self.hparams.use_cf_guide:

            def guide(*args, **kwargs):
                return self.pyro_model.counterfactual_guide(
                    *args,
                    **kwargs,
                    counterfactual_type=self.hparams.cf_elbo_type)

            self.svi = SVI(self.pyro_model.svi_model, guide,
                           Adam(per_param_callable), loss)
        else:
            self.svi = SVI(self.pyro_model.svi_model,
                           self.pyro_model.svi_guide, Adam(per_param_callable),
                           loss)
        self.svi.loss_class = loss
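Note: `pyro.optim.Adam` accepts a callable in place of a fixed argument dict, which is what drives the per-parameter learning rates above. A minimal, self-contained sketch of the same mechanism (toy model and guide of our own, not from this project); recent Pyro versions pass only the parameter name to the callable, while the two-argument `(module_name, param_name)` form above comes from an older release:

import torch
import pyro
import pyro.distributions as dist
from pyro.infer import SVI, Trace_ELBO
from pyro.optim import Adam

def model(data):
    loc = pyro.param("loc", torch.tensor(0.0))
    pyro.sample("obs", dist.Normal(loc, 1.0), obs=data)

def guide(data):
    pass

def per_param_callable(param_name):
    # Smaller learning rate for selected parameters, defaults elsewhere.
    if "special" in param_name:
        return {"lr": 1e-4, "eps": 1e-5}
    return {"lr": 1e-2, "eps": 1e-5}

svi = SVI(model, guide, Adam(per_param_callable), Trace_ELBO())
svi.step(torch.tensor(0.5))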
Code Example #2
    def fit_GP(self):
        ### train every GPs (state transition) and GPo (observation) model,
        ### caching the variables needed for later filtering

        self.GPs_losses = []
        self.GPo_losses = []
        # TODO CACHE DIFFERENT STUFF IF USE SPARSE GP
        pyro.clear_param_store()
        num_steps = 2500
        for (i, GPs) in enumerate(self.state_transition_model_list):
            losses = GPs.optimize(optimizer=Adam({"lr": 0.005}), num_steps=num_steps)
            self.GPs_losses.append(losses)
            print("training for state transition model {} is done!".format(i))



        for (i, GPo) in enumerate(self.observation_model_list):
            losses = GPo.optimize(optimizer=Adam({"lr": 0.005}), num_steps=num_steps)
            self.GPo_losses.append(losses)
            print("training for observation model {} is done!".format(i))

        self.cache_variable()

        # save the model
        self.save_model()
        return self.GPs_losses, self.GPo_losses
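If the models in these lists are built on `pyro.contrib.gp`, the custom `optimize` method corresponds to the library's own training helper. A minimal sketch with made-up data, assuming Pyro 1.x's `gp.util.train`:

import torch
import pyro.contrib.gp as gp

X = torch.linspace(0.0, 1.0, 20)   # toy inputs
y = torch.sin(6.0 * X)             # toy targets
kernel = gp.kernels.RBF(input_dim=1)
gpr = gp.models.GPRegression(X, y, kernel)
losses = gp.util.train(gpr, num_steps=2500)  # returns the per-step losses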
Code Example #3
    def _build_svi(self, loss=None):
        def per_param_callable(module_name, param_name):
            params = {
                'eps': 1e-5,
                'amsgrad': self.hparams.use_amsgrad,
                'weight_decay': self.hparams.l2
            }
            if 'flow_components' in module_name or 'sex_logits' in param_name:
                params['lr'] = self.hparams.pgm_lr
            else:
                params['lr'] = self.hparams.lr

            print(
                f'building opt for {module_name} - {param_name} with p: {params}'
            )
            return params

        if loss is None:
            loss = self.svi_loss

        if self.hparams.use_cf_guide:

            def guide(*args, **kwargs):
                return self.pyro_model.counterfactual_guide(
                    *args,
                    **kwargs,
                    counterfactual_type=self.hparams.cf_elbo_type)

            self.svi = SVI(self.pyro_model.svi_model, guide,
                           Adam(per_param_callable), loss)
        else:
            self.svi = SVI(self.pyro_model.svi_model,
                           self.pyro_model.svi_guide, Adam(per_param_callable),
                           loss)
        self.svi.loss_class = loss
Code Example #4
File: test_jit.py Project: zippeurfou/pyro
def test_beta_bernoulli(Elbo, vectorized):
    pyro.clear_param_store()
    data = torch.tensor([1.0] * 6 + [0.0] * 4)

    def model1(data):
        alpha0 = torch.tensor(10.0)
        beta0 = torch.tensor(10.0)
        f = pyro.sample("latent_fairness", dist.Beta(alpha0, beta0))
        for i in pyro.irange("irange", len(data)):
            pyro.sample("obs_{}".format(i), dist.Bernoulli(f), obs=data[i])

    def model2(data):
        alpha0 = torch.tensor(10.0)
        beta0 = torch.tensor(10.0)
        f = pyro.sample("latent_fairness", dist.Beta(alpha0, beta0))
        pyro.sample("obs",
                    dist.Bernoulli(f).expand_by(data.shape).independent(1),
                    obs=data)

    model = model2 if vectorized else model1

    def guide(data):
        alpha_q = pyro.param("alpha_q",
                             torch.tensor(15.0),
                             constraint=constraints.positive)
        beta_q = pyro.param("beta_q",
                            torch.tensor(15.0),
                            constraint=constraints.positive)
        pyro.sample("latent_fairness", dist.Beta(alpha_q, beta_q))

    elbo = Elbo(num_particles=7, strict_enumeration_warning=False)
    optim = Adam({"lr": 0.0005, "betas": (0.90, 0.999)})
    svi = SVI(model, guide, optim, elbo)
    for step in range(40):
        svi.step(data)
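`model1` observes each data point separately with `pyro.irange`, while `model2` vectorizes the Bernoulli likelihood; both calls come from an older Pyro release. A sketch of the same vectorized model in current Pyro (assuming Pyro >= 1.0):

import torch
import pyro
import pyro.distributions as dist

def model_vectorized(data):
    f = pyro.sample("latent_fairness", dist.Beta(10.0, 10.0))
    with pyro.plate("data", len(data)):
        pyro.sample("obs", dist.Bernoulli(f), obs=data)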
Code Example #5
File: test_em.py Project: youngshingjun/pyro
def test_svi_multi():
    args = make_args()
    args.assignment_grad = True
    detections = generate_data(args)

    pyro.clear_param_store()
    pyro.param('noise_scale',
               torch.tensor(args.init_noise_scale),
               constraint=constraints.positive)
    pyro.param('objects_loc', torch.randn(args.max_num_objects, 1))

    # Learn object_loc via Newton and noise_scale via Adam.
    elbo = TraceEnum_ELBO(max_plate_nesting=2)
    adam = Adam({'lr': 0.1})
    newton = Newton(trust_radii={'objects_loc': 1.0})
    optim = MixedMultiOptimizer([(['noise_scale'], adam),
                                 (['objects_loc'], newton)])
    for svi_step in range(50):
        with poutine.trace(param_only=True) as param_capture:
            loss = elbo.differentiable_loss(model, guide, detections, args)
        params = {
            name: pyro.param(name).unconstrained()
            for name in param_capture.trace.nodes.keys()
        }
        optim.step(loss, params)
        logger.debug(
            'step {: >2d}, loss = {:0.6f}, noise_scale = {:0.6f}'.format(
                svi_step, loss.item(),
                pyro.param('noise_scale').item()))
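The `differentiable_loss` plus `optim.step(loss, params)` pattern is what lets `MixedMultiOptimizer` route different parameters to different optimizers. A stripped-down sketch of the same pattern with a single plain torch optimizer (toy model of our own):

import torch
import pyro
import pyro.distributions as dist
from pyro.infer import Trace_ELBO

def model():
    loc = pyro.param("loc", torch.tensor(0.0))
    pyro.sample("x", dist.Normal(loc, 1.0), obs=torch.tensor(0.5))

def guide():
    pass

pyro.clear_param_store()
elbo = Trace_ELBO()
loss = elbo.differentiable_loss(model, guide)  # a tensor, not a float
opt = torch.optim.Adam([pyro.param("loc").unconstrained()], lr=0.1)
opt.zero_grad()
loss.backward()
opt.step()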
Code Example #6
File: test_memory.py Project: zippeurfou/pyro
def test_svi():
    n = 11
    data = torch.zeros(n)

    def model(data):
        loc = pyro.param('loc', torch.zeros(n, requires_grad=True))
        scale = pyro.param('log_scale', torch.zeros(n,
                                                    requires_grad=True)).exp()
        pyro.sample('obs', dist.Normal(loc, scale).independent(1), obs=data)

    def guide(data):
        pass

    optim = Adam({'lr': 1e-3})
    inference = SVI(model, guide, optim, Trace_ELBO())

    counts = []
    gc.collect()
    gc.collect()
    expected = count_objects_of_type(Trace)
    for _ in range(10):
        inference.step(data)
        counts.append(count_objects_of_type(Trace))

    assert set(counts) == set([expected]), counts
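`count_objects_of_type` is a helper defined in the test module; a plausible stand-in (our assumption, not necessarily the project's exact code) counts live objects via the garbage collector:

import gc

def count_objects_of_type(type_):
    # Count live objects of the given type that the GC is tracking.
    return sum(1 for obj in gc.get_objects() if isinstance(obj, type_))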
Code Example #7
def test_subsample_model(auto_class):

    def model(x, y=None, batch_size=None):
        loc = pyro.param("loc", lambda: torch.tensor(0.))
        scale = pyro.param("scale", lambda: torch.tensor(1.),
                           constraint=constraints.positive)
        with pyro.plate("batch", len(x), subsample_size=batch_size):
            batch_x = pyro.subsample(x, event_dim=0)
            batch_y = pyro.subsample(y, event_dim=0) if y is not None else None
            mean = loc + scale * batch_x
            sigma = pyro.sample("sigma", dist.LogNormal(0., 1.))
            return pyro.sample("obs", dist.Normal(mean, sigma), obs=batch_y)

    guide = auto_class(model)

    full_size = 50
    batch_size = 20
    pyro.set_rng_seed(123456789)
    x = torch.randn(full_size)
    with torch.no_grad():
        y = model(x)
    assert y.shape == x.shape

    pyro.get_param_store().clear()
    pyro.set_rng_seed(123456789)
    svi = SVI(model, guide, Adam({"lr": 0.02}), Trace_ELBO())
    for step in range(5):
        svi.step(x, y, batch_size=batch_size)
Code Example #8
def test_median(auto_class, Elbo):

    def model():
        pyro.sample("x", dist.Normal(0.0, 1.0))
        pyro.sample("y", dist.LogNormal(0.0, 1.0))
        pyro.sample("z", dist.Beta(2.0, 2.0))

    guide = auto_class(model)
    optim = Adam({'lr': 0.05, 'betas': (0.8, 0.99)})
    elbo = Elbo(strict_enumeration_warning=False,
                num_particles=100, vectorize_particles=True)
    infer = SVI(model, guide, optim, elbo)
    for _ in range(100):
        infer.step()

    if auto_class is AutoLaplaceApproximation:
        guide = guide.laplace_approximation()

    median = guide.median()
    assert_equal(median["x"], torch.tensor(0.0), prec=0.1)
    if auto_class is AutoDelta:
        assert_equal(median["y"], torch.tensor(-1.0).exp(), prec=0.1)
    else:
        assert_equal(median["y"], torch.tensor(1.0), prec=0.1)
    assert_equal(median["z"], torch.tensor(0.5), prec=0.1)
Code Example #9
def training():
    optim = Adam({"lr": 0.5})
    svi = SVI(model_2, guide_2, optim, loss=Trace_ELBO())
    num_samples = 50
    num_iterations = 50

    def predict(x):
        # Average predictions over models sampled from the guide posterior,
        # then take the most probable class.
        sampled_models = [guide_2(None, None) for _ in range(num_samples)]
        yhats = [model(x).data for model in sampled_models]
        mean = torch.mean(torch.stack(yhats), 0)
        return np.argmax(mean.numpy(), axis=1)

    acc = []
    for j in range(num_iterations):
        loss = svi.step(x_train, y_train)
        predicted = torch.tensor(predict(x_test))
        total = y_test.size(0)
        correct = (predicted == y_test).sum().item()
        acc.append(100 * correct / total)
        print(j, acc[j], loss)
        if acc[j] >= 85:
            break

    return acc
Code Example #10
File: test_jit.py Project: pyro-ppl/pyro
def test_dirichlet_bernoulli(Elbo, vectorized):
    pyro.clear_param_store()
    data = torch.tensor([1.0] * 6 + [0.0] * 4)

    def model1(data):
        concentration0 = constant([10.0, 10.0])
        f = pyro.sample("latent_fairness", dist.Dirichlet(concentration0))[1]
        for i in pyro.plate("plate", len(data)):
            pyro.sample("obs_{}".format(i), dist.Bernoulli(f), obs=data[i])

    def model2(data):
        concentration0 = constant([10.0, 10.0])
        f = pyro.sample("latent_fairness", dist.Dirichlet(concentration0))[1]
        pyro.sample(
            "obs", dist.Bernoulli(f).expand_by(data.shape).to_event(1), obs=data
        )

    model = model2 if vectorized else model1

    def guide(data):
        concentration_q = pyro.param(
            "concentration_q", constant([15.0, 15.0]), constraint=constraints.positive
        )
        pyro.sample("latent_fairness", dist.Dirichlet(concentration_q))

    elbo = Elbo(
        num_particles=7, strict_enumeration_warning=False, ignore_jit_warnings=True
    )
    optim = Adam({"lr": 0.0005, "betas": (0.90, 0.999)})
    svi = SVI(model, guide, optim, elbo)
    for step in range(40):
        svi.step(data)
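`constant` is a small helper used in Pyro's JIT tests; a reasonable stand-in (our assumption) is a thin wrapper around `torch.tensor`:

import torch

def constant(*args, **kwargs):
    # Stand-in for the test helper used above.
    return torch.tensor(*args, **kwargs)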
Code Example #11
File: test_module.py Project: youngshingjun/pyro
def test_sample():

    class Model(nn.Linear, PyroModule):
        def __init__(self, in_features, out_features):
            super().__init__(in_features, out_features)
            self.weight = PyroSample(
                lambda self: dist.Normal(0, 1)
                                 .expand([self.out_features,
                                          self.in_features])
                                 .to_event(2))

    class Guide(nn.Linear, PyroModule):
        def __init__(self, in_features, out_features):
            super().__init__(in_features, out_features)
            self.loc = PyroParam(torch.zeros_like(self.weight))
            self.scale = PyroParam(torch.ones_like(self.weight),
                                   constraint=constraints.positive)
            self.weight = PyroSample(
                lambda self: dist.Normal(self.loc, self.scale)
                                 .to_event(2))

    data = torch.randn(8)
    model = Model(8, 2)
    guide = Guide(8, 2)

    optim = Adam({"lr": 0.01})
    svi = SVI(model, guide, optim, Trace_ELBO())
    for step in range(3):
        svi.step(data)
Code Example #12
File: tree_data.py Project: zhouyonglong/pyro
def main(args):
    optim = Adam({"lr": 0.1})
    inference = SVI(model, guide, optim, loss="ELBO")

    # Data is an arbitrary json-like structure with tensors at leaves.
    one = ng_ones(1)
    data = {
        "foo": one,
        "bar": [0 * one, 1 * one, 2 * one],
        "baz": {
            "noun": {
                "concrete": 4 * one,
                "abstract": 6 * one,
            },
            "verb": 2 * one,
        },
    }

    print('Step\tLoss')
    for step in range(args.num_epochs):
        loss = inference.step(data)
        if step % 100 == 0:
            print('{}\t{:0.5g}'.format(step, loss))

    print('Parameters:')
    for name in sorted(pyro.get_param_store().get_all_param_names()):
        print('{} = {}'.format(name, pyro.param(name).data.cpu().numpy()))
Code Example #13
def svi_test():
    rain_prob_prior = torch.tensor(.3)
    my_sprinkler_prob_prior = torch.tensor(.6)
    neighbor_sprinkler_prob_prior = torch.tensor(.2)
    conditioned_lawn = pyro.condition(lawn,
                                      data={
                                          "my_lawn": torch.tensor([1.]),
                                          "neighbor_lawn": torch.tensor([0.])
                                      })
    # guide = AutoGuide(lawn)
    # set up the optimizer
    adam_params = {"lr": 0.005, "betas": (0.90, 0.999)}
    optimizer = Adam(adam_params)

    # setup the inference algorithm
    svi = SVI(conditioned_lawn, lawn_guide, optimizer, loss=Trace_ELBO())

    n_steps = 1000
    # do gradient steps
    for step in range(n_steps):
        svi.step(rain_prob_prior, my_sprinkler_prob_prior,
                 neighbor_sprinkler_prob_prior)
        if step % 100 == 0:
            print("step: ", step)
            for p in [
                    'rain_prob', 'my_sprinkler_prob', 'neighbor_sprinkler_prob'
            ]:
                print(p, ": ", pyro.param(p).item())
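`pyro.condition` fixes named sample sites to the supplied values, so `conditioned_lawn` behaves like `lawn` with `my_lawn` and `neighbor_lawn` clamped to the observations. A minimal illustration on a hypothetical one-site model:

import torch
import pyro
import pyro.distributions as dist

def flip():
    return pyro.sample("coin", dist.Bernoulli(0.5))

conditioned_flip = pyro.condition(flip, data={"coin": torch.tensor(1.0)})
print(conditioned_flip())  # always 1.0: the "coin" site is now observed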
Code Example #14
 def train(self,
           num_steps,
           lr=1e-2,
           restart=True,
           autoguide=None,
           use_tqdm=True):
     if restart:
         pyro.clear_param_store()
         if autoguide is None:
             autoguide = AutoMultivariateNormal
         else:
             autoguide = getattr(pyro.infer.autoguide, autoguide)
         self.guide = autoguide(self, init_loc_fn=init_to_mean)
     svi = SVI(self,
               guide=self.guide,
               optim=Adam({"lr": lr}),
               loss=Trace_ELBO())
     loss = []
     if use_tqdm:
         iterator = tqdm.notebook.tnrange(num_steps)
     else:
         iterator = range(num_steps)
     for _ in iterator:
         loss.append(svi.step())
     return loss
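Hypothetical usage of the method above (`obj` stands in for an instance of the class): the `autoguide` string is resolved against `pyro.infer.autoguide` via `getattr`, so any guide class there can be selected by name.

losses = obj.train(num_steps=2000, autoguide="AutoNormal")  # uses pyro.infer.autoguide.AutoNormal
losses = obj.train(num_steps=500)                           # defaults to AutoMultivariateNormal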
Code Example #15
    def fit(self,
            train_loader,
            test_loader,
            num_epochs=30,
            test_frequency=5,
            stop_at_loss=100.):
        # setup optimizer
        optimizer = Adam({'lr': 1.0e-3})
        # loss function
        elbo = JitTrace_ELBO()
        # setup svi
        svi = SVI(self.model, self.guide, optimizer, loss=elbo)
        for epoch in range(num_epochs):
            epoch_loss = torch.tensor([
                svi.step(x, y) for x, y in tqdm(train_loader)
            ]).mean().item() / train_loader.batch_size
            print(f'Epoch {epoch + 1} : Loss {epoch_loss}')

            if epoch and epoch % test_frequency == 0:
                # report test diagnostics; evaluate_loss avoids taking
                # gradient steps on the test data (svi.step would train on it)
                test_loss = torch.tensor([
                    svi.evaluate_loss(x, y) for x, y in tqdm(test_loader)
                ]).mean().item() / test_loader.batch_size
                print(f'*Test Loss* {test_loss}')
                # save to disk
                self.save()
                if int(test_loss) <= int(stop_at_loss):
                    return self
Code Example #16
File: svb.py Project: meobet/vne
    def fit(self, dataset, batch_size, num_epochs=1, verbose=0):
        self.train(True)
        svi = SVI(self.pyro_model,
                  self.pyro_guide,
                  Adam({"lr": self.lr}),
                  loss="ELBO")

        fit_loss = []
        for epoch in range(num_epochs):
            epoch_loss = []
            timer = time.time()

            # Iterate over data.
            for x, y in dataset.batches(batch_size):
                inputs = to_binary(torch.from_numpy(x).long(),
                                   (x.shape[0], self.input_dim),
                                   use_cuda=self.use_cuda)
                inputs = variable(self, inputs)
                loss = svi.step(inputs)

                # statistics
                if verbose > 1:
                    print("Batch", len(epoch_loss), "loss:", loss,
                          "average time:",
                          (time.time() - timer) / float(len(epoch_loss) + 1))
                epoch_loss.append(loss)
            if verbose > 0:
                print("loss =", np.mean(epoch_loss, axis=0), "time =",
                      time.time() - timer)
            fit_loss.append(np.mean(epoch_loss))
        return fit_loss
Code Example #17
File: tree_data.py Project: youisbaby/pyro
def main(args):
    pyro.set_rng_seed(0)
    pyro.enable_validation(__debug__)

    optim = Adam({"lr": 0.1})
    inference = SVI(model, guide, optim, loss=Trace_ELBO())

    # Data is an arbitrary json-like structure with tensors at leaves.
    one = torch.tensor(1.0)
    data = {
        "foo": one,
        "bar": [0 * one, 1 * one, 2 * one],
        "baz": {
            "noun": {
                "concrete": 4 * one,
                "abstract": 6 * one,
            },
            "verb": 2 * one,
        },
    }

    print('Step\tLoss')
    loss = 0.0
    for step in range(args.num_epochs):
        loss += inference.step(data)
        if step and step % 10 == 0:
            print('{}\t{:0.5g}'.format(step, loss))
            loss = 0.0

    print('Parameters:')
    for name, value in sorted(pyro.get_param_store().items()):
        print('{} = {}'.format(name, value.detach().cpu().numpy()))
Code Example #18
    def init_opt(self):
        if self.guide is None:
            self.build_guide()
        adam_params = {"lr": 0.01, "betas": (0.90, 0.999)}
        optimizer = Adam(adam_params)

        self.svi = SVI(self.model, self.guide, optimizer, loss=Trace_ELBO())
Code Example #19
File: linear_reg_m1.py Project: bahammel/bayes
def main(args):
    pyro.clear_param_store()
    data = build_linear_dataset(N, p)
    if args.cuda:
        # make tensors and modules CUDA
        data = data.cuda()
        softplus.cuda()
        regression_model.cuda()

    # perform inference
    optim = Adam({"lr": 0.05})
    elbo = JitTrace_ELBO() if args.jit else Trace_ELBO()
    svi = SVI(model, guide, optim, loss=elbo)
    for j in range(args.num_epochs):
        if args.batch_size == N:
            # use the entire data set
            epoch_loss = svi.step(data)
        else:
            # mini batch
            epoch_loss = 0.0
            perm = torch.randperm(N) if not args.cuda else torch.randperm(
                N).cuda()
            # shuffle data
            data = data[perm]
            # get indices of each batch
            all_batches = get_batch_indices(N, args.batch_size)
            for ix, batch_start in enumerate(all_batches[:-1]):
                batch_end = all_batches[ix + 1]
                batch_data = data[batch_start:batch_end]
                epoch_loss += svi.step(batch_data)
        if j % 100 == 0:
            print("epoch avg loss {}".format(epoch_loss / float(N)))
Code Example #20
def main(args):
    # pyro.enable_validation(True)
    
    logging.info('Generating data')
    pyro.set_rng_seed(0)
    # We can generate synthetic data directly by calling the model.
    true_topic_weights, true_topic_words, data = model_original(args=args)

    # We'll train using SVI.
    logging.info('-' * 40)
    logging.info('Training on {} documents'.format(args.num_docs))
    # wy: currently don't do enumeration.
    # # Elbo = JitTraceEnum_ELBO if args.jit else TraceEnum_ELBO
    # Elbo = TraceEnum_ELBO
    Elbo = TraceGraph_ELBO
    elbo = Elbo(max_plate_nesting=2)
    optim = Adam({'lr': args.learning_rate})
    svi = SVI(model, guide, optim, elbo)
    logging.info('Step\tLoss')
    for step in range(args.num_steps):
        loss = svi.step(data, args=args, batch_size=args.batch_size)
        if step % 10 == 0:
            logging.info('{: >5d}\t{}'.format(step, loss))
    loss = elbo.loss(model, guide, data, args=args)
    logging.info('final loss = {}'.format(loss))
Code Example #21
    def train(self, dataset, model, logger=print):
        m = model.model()
        m.to(self.device)
        if self.config['train']['load_model']:
            model.load()
        loss_fn = pyro.infer.Trace_ELBO(max_plate_nesting=1).differentiable_loss
        
        epochs = self.config['train']['epochs']
        total_step = len(dataset) // self.config['train']['batch_size']
        dataloader = DataLoader(dataset, batch_size=self.config['train']['batch_size'],
                                shuffle=True, num_workers=4)

        optim = Adam({"lr": self.config['train']['learn_rate']})
        svi = SVI(m.model, m.guide, optim, loss=loss_fn, num_samples=1000)
        n_iter = 0
        for epoch in range(epochs):
            logger('epoch: ', epoch)
            for i, (X, y) in tqdm(enumerate(dataloader)):
                X = X.view(-1, len(self.config['dataset']['features']))
                X = X.to(self.device)
                y = y.to(self.device)

                loss = svi.step(X, y)
                if (i+1) % 2 == 0:
                    logger('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                               .format(epoch+1, epochs, i+1, total_step, loss/total_step))
                    if self.writer: self.writer.add_scalar('data/loss', loss, n_iter)  # svi.step() returns a plain float, not a tensor
                    n_iter += 1
            if epoch % self.config['train']['save_every_epoch'] == 0:
                model.save()
        model.save()
        if self.writer: self.writer.close()
Code Example #22
File: test_svgd.py Project: zeta1999/pyro
def test_shapes(shape, stein_kernel):
    pyro.clear_param_store()
    shape1, shape2 = (5, ) + shape, shape + (6, )

    mean_init1 = torch.arange(
        _product(shape1)).double().reshape(shape1) / 100.0
    mean_init2 = torch.arange(_product(shape2)).double().reshape(shape2)

    def model():
        pyro.sample("z1",
                    dist.LogNormal(mean_init1, 1.0e-8).to_event(len(shape1)))
        pyro.sample("scalar", dist.Normal(0.0, 1.0))
        pyro.sample("z2",
                    dist.Normal(mean_init2, 1.0e-8).to_event(len(shape2)))

    num_particles = 7
    svgd = SVGD(model, stein_kernel(), Adam({"lr": 0.0}), num_particles, 0)

    for step in range(2):
        svgd.step()

    particles = svgd.get_named_particles()
    assert particles['z1'].shape == (num_particles, ) + shape1
    assert particles['z2'].shape == (num_particles, ) + shape2

    for particle in range(num_particles):
        assert_equal(particles['z1'][particle, ...],
                     mean_init1.exp(),
                     prec=1.0e-6)
        assert_equal(particles['z2'][particle, ...], mean_init2, prec=1.0e-6)
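For reference, a minimal end-to-end SVGD run with a nonzero learning rate (toy model of our own; `RBFSteinKernel` is Pyro's built-in kernel):

import pyro
import pyro.distributions as dist
from pyro.infer.svgd import SVGD, RBFSteinKernel
from pyro.optim import Adam

def model():
    pyro.sample("z", dist.Normal(0.0, 1.0))

pyro.clear_param_store()
svgd = SVGD(model, RBFSteinKernel(), Adam({"lr": 0.01}),
            num_particles=50, max_plate_nesting=0)
for _ in range(100):
    svgd.step()
z_particles = svgd.get_named_particles()["z"]  # shape (50,)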
Code Example #23
def test_linear_regression_smoke(auto_class, Elbo):
    N, D = 10, 3

    class RandomLinear(nn.Linear, PyroModule):
        def __init__(self, in_features, out_features):
            super().__init__(in_features, out_features)
            self.weight = PyroSample(dist.Normal(0., 1.).expand([out_features, in_features]).to_event(2))
            self.bias = PyroSample(dist.Normal(0., 10.).expand([out_features]).to_event(1))

    class LinearRegression(PyroModule):
        def __init__(self):
            super().__init__()
            self.linear = RandomLinear(D, 1)

        def forward(self, x, y=None):
            mean = self.linear(x).squeeze(-1)
            sigma = pyro.sample("sigma", dist.LogNormal(0., 1.))
            with pyro.plate('plate', N):
                return pyro.sample('obs', dist.Normal(mean, sigma), obs=y)

    x, y = torch.randn(N, D), torch.randn(N)
    model = LinearRegression()
    guide = auto_class(model)
    infer = SVI(model, guide, Adam({'lr': 0.005}), Elbo(strict_enumeration_warning=False))
    infer.step(x, y)
Code Example #24
def train(x_data, y_data, dataset_total_length, use_cuda, n_iterations):
    batch_size = 512
    optim = Adam({"lr": 0.005})

    train_ds = TensorDataset(x_data, y_data)
    train_dl = DataLoader(train_ds, batch_size=batch_size, shuffle=False)

    kl_factor = train_dl.batch_size / len(train_dl.dataset)

    svi = SVI(model, guide, optim, loss=Trace_ELBO(), num_samples=1000)
    pyro.clear_param_store()
    for j in range(n_iterations):
        start = time.time()
        loss = 0
        for x, y in train_dl:

            if use_cuda:
                # Tensor.cuda() is not in-place; rebind the names
                x = x.cuda()
                y = y.cuda()

            # calculate the loss and take a gradient step
            loss += svi.step(dataset_total_length, x, y, kl_factor)
        elapsed_time = time.time() - start
        if j % 1 == 0:
            print("[iteration %04d] loss: %.4f took: %ds" % (j + 1, loss, elapsed_time))

    return svi
Code Example #25
def test_subsample_guide_2(auto_class, independent):

    # Simplified from Model2 in tutorial/source/forecasting_iii.ipynb
    def model(data):
        size, size = data.shape
        origin_plate = pyro.plate("origin", size, dim=-2)
        destin_plate = pyro.plate("destin", size, dim=-1)
        with origin_plate, destin_plate:
            batch = pyro.subsample(data, event_dim=0)
            assert batch.size(0) == batch.size(1), batch.shape
            pyro.sample("obs", dist.Normal(0, 1), obs=batch)

    def create_plates(data):
        size, size = data.shape
        origin_plate = pyro.plate("origin", size, subsample_size=5, dim=-2)
        if independent:
            destin_plate = pyro.plate("destin", size, subsample_size=5, dim=-1)
        else:
            with origin_plate as subsample:
                pass
            destin_plate = pyro.plate("destin", size, subsample=subsample, dim=-1)
        return origin_plate, destin_plate

    guide = auto_class(model, create_plates=create_plates)
    svi = SVI(model, guide, Adam({"lr": 0.01}), Trace_ELBO())

    data = torch.randn(10, 10)
    for step in range(2):
        svi.step(data)
Code Example #26
File: VI.py Project: watabe951/Blog
    def VI(self, x_data, y_data, num_samples=1000, num_iterations=30000):
        self.guide = AutoDiagonalNormal(self.model)
        optim = Adam({"lr": 1e-3})
        loss = Trace_ELBO()
        svi = SVI(self.model, self.guide, optim=optim, loss=loss)

        # train
        pyro.clear_param_store()
        for j in range(num_iterations):
            loss = svi.step(x_data, y_data)
            print("[iteration %05d] loss: %.4f" % (j + 1, loss / len(x_data)))

        # Draw num_samples samples from the posterior via the fitted guide.
        samples = {}
        for i in range(num_samples):
            sample = self.guide()  # sampling
            for name, value in sample.items():
                if name not in samples:
                    samples[name] = value.unsqueeze(0)
                else:
                    samples[name] = torch.cat(
                        [samples[name], value.unsqueeze(0)], dim=0)
        self.posterior_samples = samples
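The manual sampling loop above can be replaced by `pyro.infer.Predictive` in Pyro >= 1.0; a sketch under that assumption, inside the same method:

from pyro.infer import Predictive

# Stacks num_samples draws of each latent site from the fitted guide.
predictive = Predictive(self.model, guide=self.guide, num_samples=num_samples)
self.posterior_samples = predictive(x_data, y_data)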
Code Example #27
File: test_em.py Project: youngshingjun/pyro
def test_em_nested_in_svi(assignment_grad):
    args = make_args()
    args.assignment_grad = assignment_grad
    detections = generate_data(args)

    pyro.clear_param_store()
    pyro.param('noise_scale',
               torch.tensor(args.init_noise_scale),
               constraint=constraints.positive)
    pyro.param('objects_loc', torch.randn(args.max_num_objects, 1))

    # Learn object_loc via EM and noise_scale via SVI.
    optim = Adam({'lr': 0.1})
    elbo = TraceEnum_ELBO(max_plate_nesting=2)
    newton = Newton(trust_radii={'objects_loc': 1.0})
    svi = SVI(poutine.block(model, hide=['objects_loc']),
              poutine.block(guide, hide=['objects_loc']), optim, elbo)
    for svi_step in range(50):
        for em_step in range(2):
            objects_loc = pyro.param('objects_loc').detach_().requires_grad_()
            assert pyro.param('objects_loc').grad_fn is None
            loss = elbo.differentiable_loss(model, guide, detections,
                                            args)  # E-step
            updated = newton.get_step(loss,
                                      {'objects_loc': objects_loc})  # M-step
            assert updated['objects_loc'].grad_fn is not None
            pyro.get_param_store()['objects_loc'] = updated['objects_loc']
            assert pyro.param('objects_loc').grad_fn is not None
        loss = svi.step(detections, args)
        logger.debug(
            'step {: >2d}, loss = {:0.6f}, noise_scale = {:0.6f}'.format(
                svi_step, loss,
                pyro.param('noise_scale').item()))
Code Example #28
def assert_ok(model, guide, **kwargs):
    """
    Assert that inference works without warnings or errors.
    """
    pyro.clear_param_store()
    inference = SVI(model, guide, Adam({"lr": 1e-6}), "ELBO", **kwargs)
    inference.step()
Code Example #29
 def fit(self,
         optim=Adam({'lr': 1e-3}),
         loss=Trace_ELBO(num_particles=1),
         max_iter=5000,
         random_instance=None):
     """
     :param optim: 优化算法
     :param loss: 损失函数
     :param max_iter: 最大迭代次数
     :param random_instance: 随机数据生成实例
     """
     svi = SVI(self.model, self.guide, optim=optim, loss=loss)
     with trange(max_iter) as t:
         for i in t:
              t.set_description(f'iteration: {i}')
             svi.step(self.data)
             loss = svi.evaluate_loss(self.data)
             if isinstance(optim, PyroLRScheduler):
                 optim.step()
             with torch.no_grad():
                 postfix_kwargs = {}
                 if random_instance is not None:
                     g = pyro.param('g')
                     s = pyro.param('s')
                     postfix_kwargs.update({
                         'g':
                         '{0}'.format((g - random_instance.g).abs().mean()),
                         's':
                         '{0}'.format((s - random_instance.s).abs().mean())
                     })
                 t.set_postfix(loss=loss, **postfix_kwargs)
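The `isinstance(optim, PyroLRScheduler)` branch calls `optim.step()` once per iteration so that learning-rate schedules advance. A sketch of constructing such a scheduler, following Pyro's documented pattern:

import torch
from pyro.optim import ExponentialLR

scheduler = ExponentialLR({
    'optimizer': torch.optim.Adam,   # underlying torch optimizer class
    'optim_args': {'lr': 1e-2},
    'gamma': 0.99,                   # lr decay factor per scheduler step
})
# fit(optim=scheduler) would then decay the learning rate each iteration.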
Code Example #30
    def train(self,
              epochs: int = 3000,
              lr: float = 0.005,
              window: int = 1000,
              log: bool = True):
        pyro.get_param_store().clear()
        adam_params = {"lr": lr, "betas": (0.95, 0.999)}
        optimizer = Adam(adam_params)
        # setup the inference algorithm
        svi = SVI(self.sample_model,
                  self.sample_guide,
                  optimizer,
                  loss=Trace_ELBO())

        n_steps = epochs
        # do gradient steps
        if log:
            pbar = tqdm(range(n_steps))
        else:
            pbar = range(n_steps)
        elbos = []
        for step in pbar:
            elbo = svi.step(self.x_adj, self.preds[self.mapping[0]])
            elbos.append(elbo)
            avgs = self.ma(elbos, window)
            if step >= window:
                disp = avgs[-1]
            else:
                disp = elbo
            if log:
                pbar.set_description("Loss -> %.4f" % disp)
        return avgs
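`self.ma` is the class's moving-average helper; a hypothetical implementation consistent with how it is used above (trailing window average over the ELBO history):

def ma(self, values, window):
    # Trailing moving average; shorter prefixes are averaged as-is.
    out = []
    for i in range(len(values)):
        chunk = values[max(0, i - window + 1):i + 1]
        out.append(sum(chunk) / len(chunk))
    return out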