Example 1
def gen_seq_x_and_obs(self, num_obs):
    # Sample the initial latent state from the Gaussian prior.
    mu = torch_randn2d(1, self.dim) * self.prior_sigma + self.prior_mu
    for i in range(num_obs):
        # Linear-Gaussian transition: x_t = A x_{t-1} + transition noise.
        mu = torch.mm(mu, self.t_aa.t()) + torch_randn2d(1, self.dim) * self.t_sigma
        # Linear-Gaussian likelihood: y_t = B x_t + observation noise.
        ob = torch.mm(mu, self.l_bb.t()) + torch_randn2d(1, self.dim) * self.l_sigma
        yield mu.detach(), ob.detach()
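
Because the method is a generator, a caller collects the yielded (latent state, observation) pairs one step at a time. A minimal usage sketch, where `model` is a hypothetical instance of the class that defines gen_seq_x_and_obs:

latents, observations = [], []
for x, ob in model.gen_seq_x_and_obs(num_obs=100):  # `model` is hypothetical
    latents.append(x)
    observations.append(ob)
latents = torch.cat(latents, dim=0)            # (100, dim) latent trajectory
observations = torch.cat(observations, dim=0)  # (100, dim) observation sequence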
Example 2
def gen_batch_obs(self, num_obs):
    mu = self.latent_mu
    # Number of samples drawn from the first mixture component.
    num_pos = np.random.binomial(num_obs, self.p)
    num_neg = num_obs - num_pos

    # Sample num_obs candidates from each component; only subsets are kept below.
    pos_obs = torch_randn2d(num_obs, 1) * self.l_sigma + mu[:, 0].view(-1, 1)
    neg_obs = torch_randn2d(num_obs, 1) * self.l_sigma + (mu[:, 0] + mu[:, 1]).view(-1, 1)

    perms = torch.randperm(num_obs)
    pos_indices = perms[:num_pos]
    neg_indices = perms[num_pos:]

    # Keep num_pos positives and num_neg negatives, then shuffle their order.
    obs = torch.cat([pos_obs[pos_indices, :], neg_obs[neg_indices, :]], dim=0)
    obs = obs[perms, :]
    return obs
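
A note on the design: both mixture components are oversampled to the full batch size so tensor shapes stay fixed, and the single permutation `perms` both selects which candidates survive and randomizes the order of the returned batch; the count of samples from the first component still follows Binomial(num_obs, p).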
Example 3
def _reset(self, mu_given=None):
    if mu_given is not None:
        # Reset to a user-supplied latent mean.
        self.latent_mu = torch.Tensor(
            np.array(mu_given, dtype=np.float32).reshape(1, self.dim)).to(DEVICE)
    else:
        # Otherwise resample the latent mean from the Gaussian prior.
        self.latent_mu = torch_randn2d(1, self.dim) * self.prior_sigma + self.prior_mu
Example 4
def eval_flow(flow, mvn_dist, val_db):
    flow.eval()
    val_gen = val_db.data_gen(batch_size=1,
                              phase='val',
                              auto_reset=False,
                              shuffle=False)
    ent = 0.0
    for n_s in tqdm(range(cmd_args.num_vals)):
        hist_obs = []
        # Start each sequence with particles drawn from the prior, along with
        # their log densities under it.
        particles = mvn_dist.get_samples(cmd_args.num_particles)
        densities = mvn_dist.get_log_pdf(particles)
        for t, ob in enumerate(val_gen):
            # One flow step: transport the particles given the new observation.
            particles, densities = flow(particles, densities,
                                        prior_samples=particles,
                                        ob_m=ob)
            hist_obs.append(ob)
            with torch.no_grad():
                # True posterior given all observations seen so far.
                pos_mu, pos_sigma = val_db.get_true_posterior(torch.cat(hist_obs, dim=0))
                q_mu = torch.mean(particles, dim=0, keepdim=True)
                q_std = torch.std(particles, dim=0, keepdim=True)
                if n_s + 1 == cmd_args.num_vals:
                    print('step:', t)
                    print('true posterior:', pos_mu.cpu().data.numpy(), pos_sigma.cpu().data.numpy())
                    print('estimated:', q_mu.cpu().data.numpy(), q_std.cpu().data.numpy())

                # Cross entropy of a KDE fit to the particles, scored on Monte
                # Carlo samples drawn from the true posterior.
                p_particles = torch_randn2d(cmd_args.num_mc_samples, val_db.dim) * pos_sigma + pos_mu
                kde = KDE(particles)
                cur_ent = -torch.mean(kde.log_pdf(p_particles)).item()
                if n_s + 1 == cmd_args.num_vals:
                    print('cross entropy:', cur_ent)
                ent += cur_ent
            if t + 1 == cmd_args.train_samples:
                break
    print('avg ent over %d seqs: %.4f' % (cmd_args.num_vals, ent / cmd_args.num_vals))
    flow.train()
    return ent
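
The reported value is a Monte Carlo estimate of the cross entropy between the true (Gaussian) posterior and a kernel density estimate fit to the flow's particles; lower is better, with the minimum attained when the two match. Note that the final print divides the accumulated per-step sum by the number of sequences only, so it is an average per sequence rather than per step.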
Example 5
def transition_sample(self, x):
    # Apply the linear transition A x and add Gaussian noise with scale t_sigma.
    num_samples = x.shape[0]
    return torch.mm(x, self.t_aa.t()) + torch_randn2d(num_samples, self.dim) * self.t_sigma
Example 6
def gen_batch_obs(self, num_obs):
    # Draw num_obs observations from a Gaussian centered at the current latent mean.
    obs = torch_randn2d(num_obs, self.dim) * self.l_sigma + self.latent_mu
    return obs
Example 7
import pickle
import random

import numpy as np
import torch

from pfbayes.common.cmd_args import cmd_args
from pfbayes.common.distributions import torch_randn2d


# Seed every RNG in play so the generated model is reproducible.
random.seed(cmd_args.seed)
np.random.seed(cmd_args.seed)
torch.manual_seed(cmd_args.seed)

# Random transition and likelihood matrices for a linear dynamical system.
transition_a = torch_randn2d(cmd_args.gauss_dim, cmd_args.gauss_dim)
likelihood_b = torch_randn2d(cmd_args.gauss_dim, cmd_args.gauss_dim)

filename = 'lds_model10.pkl'
with open(filename, 'wb') as f:
    pickle.dump((transition_a, likelihood_b), f)
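
Unpickling returns the two tensors in the order they were dumped. A minimal loading sketch, assuming the same file is on disk:

import pickle

with open('lds_model10.pkl', 'rb') as f:
    transition_a, likelihood_b = pickle.load(f)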