Example #1
 def model():
     mu_latent = pyro.sample("mu_latent", dist.normal, self.mu0,
                             torch.pow(self.lam0, -0.5))
     for i, x in enumerate(self.data):
         pyro.observe("obs_%d" % i, dist.normal, x, mu_latent,
                      torch.pow(self.lam, -0.5))
     return mu_latent
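These snippets all use the legacy Pyro 0.x interface (pyro.observe, distributions passed positionally to pyro.sample). For reference, a model like Example #1 would be paired with a guide and trained roughly as in the sketch below; the variational parameter names, the Adam settings, and the loss="ELBO" string are assumptions in the style of that era's API, not part of the scraped example.

import torch
from torch.autograd import Variable
import pyro
import pyro.distributions as dist
from pyro.infer import SVI
from pyro.optim import Adam

def guide():
    # mean-field variational posterior q(mu_latent) = Normal(mu_q, exp(log_sig_q))
    mu_q = pyro.param("mu_q", Variable(torch.zeros(1), requires_grad=True))
    log_sig_q = pyro.param("log_sig_q", Variable(torch.zeros(1), requires_grad=True))
    pyro.sample("mu_latent", dist.normal, mu_q, torch.exp(log_sig_q))

svi = SVI(model, guide, Adam({"lr": 0.001}), loss="ELBO")
for step in range(1000):
    svi.step()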
Example #2
    def forward(self, prefix, alpha, temperature=0.8, condition=False, generate=0):
        # condition on the given prefix, then sample `generate` more characters;
        # note: `alpha` scales the logits here and `temperature` is unused
        assert len(prefix) > 0

        hidden = self.rnn.init_hidden()
        out = self.char_tensor(prefix[0])
        generated_chars = prefix[0]

        for i in range(1, len(prefix) + generate):
            out, hidden = self.rnn(out, hidden)
            ps = softmax(out.mul(alpha.expand(out.size())))
            dist = Categorical(ps, one_hot=False)
            name = 'char_{0}'.format(i)
            if i < len(prefix):
                # Use character provided in prefix
                char = prefix[i]
                if condition:
                    char_index = self.all_chars.index(char)
                    observe(name, dist, Tensor([char_index]))
            else:
                # Sample a character
                char_index = sample(name, dist).data[0][0]  # FIXME
                char = self.all_chars[char_index]
            generated_chars += char
            out = self.char_tensor(char)

        return generated_chars
Example #3
    def model(self, data):
        # Gamma-Poisson matrix factorization: per-row rates eps and per-column
        # rates eta scale latent factors theta (rows) and beta (columns)
        a_tilda = Variable(torch.ones(self.data_size[0]))
        b_tilda = Variable(torch.ones(self.data_size[0]))

        eps = pyro.sample('eps', dist.gamma, a_tilda, a_tilda / b_tilda)
        eps = torch.cat([eps] * self.K, 1)

        a = Variable(torch.ones(self.data_size[0], self.K))
        theta = pyro.sample('theta', dist.gamma, a, eps)

        c_tilda = Variable(torch.ones(self.data_size[1]))
        d_tilda = Variable(torch.ones(self.data_size[1]))

        eta = pyro.sample('eta', dist.gamma, c_tilda, c_tilda / d_tilda)
        eta = torch.cat([eta] * self.K, 1)

        c = Variable(torch.ones(self.data_size[1], self.K))
        beta = pyro.sample('beta', dist.gamma, c, eta)

        zeta = pyro.sample('zeta', dist.poisson,
                           torch.matmul(theta, torch.t(beta)))

        for i in range(self.data_size[0]):
            for j in range(self.data_size[1]):
                if data[i, j] == 0:
                    continue
                pyro.observe("obs_{}{}".format(i, j), dist.poisson, data[i, j],
                             zeta[i, j])
Example #4
 def model():
     mu = pyro.sample(
         "mu", Normal(Variable(torch.zeros(1)),
                      Variable(torch.ones(1))))
     xd = Normal(mu, Variable(torch.ones(1)), batch_size=50)
     pyro.observe("xs", xd, self.data)
     return mu
Example #5
 def model():
     mu_latent = pyro.sample(
         "mu_latent", DiagNormal(self.mu0, torch.pow(self.tau0, -0.5)))
     x_dist = LogNormal(mu_latent, torch.pow(self.tau, -0.5))
     pyro.observe("obs0", x_dist, self.data[0])
     pyro.observe("obs1", x_dist, self.data[1])
     return mu_latent
Example #6
 def model():
     lambda_latent = pyro.sample("lambda_latent",
                                 Gamma(self.alpha0, self.beta0))
     x_dist = Exponential(lambda_latent)
     pyro.observe("obs0", x_dist, self.data[0])
     pyro.observe("obs1", x_dist, self.data[1])
     return lambda_latent
Example #7
File: vae_z_c.py Project: zaxtax/pyro
def model_given_c(data, cll):
    decoder_c = pyro.module("decoder_c", pt_decode_c)
    decoder_z = pyro.module("decoder_z", pt_decode_z)
    z_mu, z_sigma = decoder_c.forward(cll)
    z = pyro.sample("latent_z", DiagNormal(z_mu, z_sigma))
    img_mu = decoder_z.forward(z)
    pyro.observe("obs", Bernoulli(img_mu), data.view(-1, 784))
Example #8
 def model(self, data):
     decoder = pyro.module('decoder', self.vae_decoder)
     z_mean, z_std = ng_zeros([data.size(0),
                               20]), ng_ones([data.size(0), 20])
     z = pyro.sample('latent', Normal(z_mean, z_std))
     img = decoder.forward(z)
     pyro.observe('obs', Bernoulli(img), data.view(-1, 784))
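A matching guide for the model above would mirror its single sample site; a minimal sketch, assuming a self.vae_encoder module that maps an image to a mean/std pair:

 def guide(self, data):
     encoder = pyro.module('encoder', self.vae_encoder)  # assumed encoder network
     z_mean, z_std = encoder.forward(data.view(-1, 784))
     pyro.sample('latent', Normal(z_mean, z_std))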
Example #9
    def model(self):
        # register PyTorch module `decoder` with Pyro
        pyro.module("decoder", self.decoder)
        # Setup hyperparameters for prior p(z)
        z_mu = ng_zeros([self.n_samples, self.n_latent])
        z_sigma = ng_ones([self.n_samples, self.n_latent])
        # sample from prior
        z = pyro.sample("latent", dist.normal, z_mu, z_sigma)
        # decode the latent code z
        z_adj = self.decoder(z)

        # Subsampling
        if self.subsampling:
            with pyro.iarange("data",
                              self.n_subsample,
                              subsample=self.sample()) as ind:
                pyro.observe('obs', dist.bernoulli,
                             self.adj_labels.view(1, -1)[0][ind],
                             z_adj.view(1, -1)[0][ind])
        # Reweighting
        else:
            with pyro.iarange("data"):
                pyro.observe('obs',
                             weighted_bernoulli,
                             self.adj_labels.view(1, -1),
                             z_adj.view(1, -1),
                             weight=self.pos_weight)
Example #10
 def model():
     mu_latent = pyro.sample("mu_latent", dist.normal,
                             self.mu0, torch.pow(self.tau0, -0.5))
     sigma = torch.pow(self.tau, -0.5)
     pyro.observe("obs0", dist.lognormal, self.data[0], mu_latent, sigma)
     pyro.observe("obs1", dist.lognormal, self.data[1], mu_latent, sigma)
     return mu_latent
Example #11
File: vae_z_c.py Project: zaxtax/pyro
def guide_given_c(data, cll):
    encoder_x = pyro.module("encoder_x", pt_encode_x)
    encoder_z = pyro.module("encoder_z", pt_encode_z)

    z_mu, z_sigma = encoder_x.forward(data)
    z = pyro.sample("latent_z", DiagNormal(z_mu, z_sigma))
    alpha_cat = encoder_z.forward(z)
    pyro.observe("latent_class", Categorical(alpha_cat), cll)
Example #12
def model_xz(data, foo):
    decoder_xz = pyro.module("decoder_xz", pt_decode_xz)
    z_mu, z_sigma = Variable(torch.zeros([data.size(0), 20])), Variable(
        torch.ones([data.size(0), 20]))
    z = pyro.sample("latent", DiagNormal(z_mu, z_sigma))
    img_mu = decoder_xz.forward(z)
    pyro.observe("obs", Bernoulli(img_mu), data.view(-1, 784))
    return z
Example #13
 def model():
     mu_latent = pyro.sample("mu_latent", dist.normal, self.mu0,
                             torch.pow(self.tau0, -0.5))
     bijector = AffineExp(torch.pow(self.tau, -0.5), mu_latent)
     x_dist = TransformedDistribution(dist.normal, bijector)
     pyro.observe("obs0", x_dist, self.data[0], ng_zeros(1), ng_ones(1))
     pyro.observe("obs1", x_dist, self.data[1], ng_zeros(1), ng_ones(1))
     return mu_latent
Example #14
def local_model(i, datum):
    beta = Variable(torch.ones(1, 10)) * 0.1
    cll = pyro.sample("class_of_datum_" + str(i), Categorical(beta))
    mean_param = Variable(torch.zeros(1, 784), requires_grad=True)
    # do MLE for class means
    mu = pyro.param("mean_of_class_" + str(cll[0]), mean_param)
    pyro.observe("obs_" + str(i), Bernoulli(mu), datum)
    return cll
Example #15
 def obs_inner(i, _i, _x):
     for k in range(n_superfluous_top):
         pyro.sample("z_%d_%d" % (i, k),
                     dist.Normal(ng_zeros(4 - i, 1), ng_ones(4 - i, 1), reparameterized=False))
     pyro.observe("obs_%d" % i, dist.normal, _x, mu_latent, torch.pow(self.lam, -0.5))
     for k in range(n_superfluous_top, n_superfluous_top + n_superfluous_bottom):
         pyro.sample("z_%d_%d" % (i, k),
                     dist.Normal(ng_zeros(4 - i, 1), ng_ones(4 - i, 1), reparameterized=False))
Example #16
 def model():
     mu_latent = pyro.sample(
             "mu_latent",
             dist.Normal(self.mu0, torch.pow(self.lam0, -0.5), reparameterized=reparameterized))
     for i, x in enumerate(self.data):
         pyro.observe("obs_%d" % i, dist.normal, x, mu_latent,
                      torch.pow(self.lam, -0.5))
     return mu_latent
Example #17
 def model():
     mu_latent = pyro.sample("mu_latent", dist.normal,
                             self.mu0, torch.pow(self.tau0, -0.5))
     bijector = AffineExp(torch.pow(self.tau, -0.5), mu_latent)
     x_dist = TransformedDistribution(dist.normal, bijector)
     pyro.observe("obs0", x_dist, self.data[0], ng_zeros(1), ng_ones(1))
     pyro.observe("obs1", x_dist, self.data[1], ng_zeros(1), ng_ones(1))
     return mu_latent
Example #18
File: svb.py Project: meobet/vne
 def pyro_model(self, x):
     pyro.module("output_embedding", self.output_embedding)
     mu = variable(self, torch.zeros(x.size(0), self.num_latent_factors))
     sigma = variable(self, torch.ones(x.size(0), self.num_latent_factors))
     z = pyro.sample("latent", dist.normal, mu, sigma)
     out_prob = (self.sigmoid(self.decode(z)) + fudge) * (1 - 2 * fudge)
     pyro.observe("obs", dist.bernoulli, x, out_prob)
     return z
Example #19
def guide_observed2(data, cll):
    encoder_c = pyro.module("encoder_c", pt_encode_c)
    alpha = encoder_c.forward(data)
    pyro.observe("latent_class", Categorical(alpha), cll)

    encoder = pyro.module("encoder_o", pt_encode_o)
    z_mu, z_sigma = encoder.forward(data, cll)
    z = pyro.sample("latent_z", DiagNormal(z_mu, z_sigma))
Example #20
 def sub_model(datum):
     mu_latent = Variable(torch.ones(nr_samples, dim_z)) * 0.5
     sigma_latent = Variable(torch.ones(mu_latent.size()))
     z = pyro.sample("embedding_of_datum_" + str(i),
                     DiagNormal(mu_latent, sigma_latent))
     mean_beta = z.mm(weight)
     beta = sigmoid(mean_beta)
     pyro.observe("obs_" + str(i), Bernoulli(beta), datum)
Example #21
    def model(self, input_variable, target_variable, step):
        # register PyTorch module `decoder` with Pyro
        pyro.module("decoder_dense", self.decoder_dense)
        pyro.module("decoder_rnn", self.decoder_rnn)

        # setup hyperparameters for prior p(z)
        # the type_as ensures we get CUDA Tensors if x is on gpu
        z_mu = ng_zeros([self.num_layers, self.z_dim], type_as=target_variable.data)
        z_sigma = ng_ones([self.num_layers, self.z_dim], type_as=target_variable.data)

        # sample from prior
        # (value will be sampled by guide when computing the ELBO)
        z = pyro.sample("latent", dist.normal, z_mu, z_sigma)

        # init vars
        target_length = target_variable.shape[0]

        decoder_input = self.dataset.to_onehot([[self.dataset.SOS_index]])
        decoder_input = decoder_input.cuda() if USE_CUDA else decoder_input

        decoder_outputs = np.ones((target_length))
        decoder_hidden = self.decoder_dense(z)

        # # Teacher forcing
        for di in range(target_length):
            decoder_output, decoder_hidden = self.decoder_rnn(
                decoder_input, decoder_hidden)
            decoder_input = target_variable[di]

            if self.use_cuda:
                decoder_outputs[di] = np.argmax(decoder_output.cpu().data.numpy())
            else:
                decoder_outputs[di] = np.argmax(decoder_output.data.numpy())

            pyro.observe("obs_{}".format(di), dist.bernoulli, target_variable[di], decoder_output[0])

        # ----------------------------------------------------------------
        # prepare offer
        if self.use_cuda:
            offer = np.argmax(input_variable.cpu().data.numpy(), axis=1).astype(int)
        else:
            offer = np.argmax(input_variable.data.numpy(), axis=1).astype(int)

        # prepare answer
        if self.use_cuda:
            answer = np.argmax(target_variable.cpu().data.numpy(), axis=1).astype(int)
        else:
            answer = np.argmax(target_variable.data.numpy(), axis=1).astype(int)

        # prepare rnn
        rnn_response = list(map(int, decoder_outputs))
        
        # print output
        if step % 10 == 0:
            print("---------------------------")
            print("Offer: ", self.dataset.to_phrase(offer))
            print("Answer:", self.dataset.to_phrase(answer))
            print("RNN:", self.dataset.to_phrase(rnn_response))
Example #22
File: vae_z_c.py Project: zaxtax/pyro
def model_latent(data):
    decoder_c = pyro.module("decoder_c", pt_decode_c)
    decoder_z = pyro.module("decoder_z", pt_decode_z)
    alpha = Variable(torch.ones([data.size(0), 10])) / 10.
    cll = pyro.sample('latent_class', Categorical(alpha))
    z_mu, z_sigma = decoder_c.forward(cll)
    z = pyro.sample("latent_z", DiagNormal(z_mu, z_sigma))
    img_mu = decoder_z.forward(z)
    pyro.observe("obs", Bernoulli(img_mu), data.view(-1, 784))
Example #23
 def model():
     latent1 = pyro.sample(
         "latent1",
         Normal(Variable(torch.zeros(2)), Variable(torch.ones(2))))
     latent2 = pyro.sample("latent2",
                           Normal(latent1, 5 * Variable(torch.ones(2))))
     x_dist = Normal(latent2, Variable(torch.ones(2)))
     pyro.observe("obs", x_dist, Variable(torch.ones(2)))
     return latent1
Example #24
 def model():
     latent1 = pyro.sample("latent1",
                           Normal(Variable(torch.zeros(2)),
                                  Variable(torch.ones(2))))
     latent2 = pyro.sample("latent2",
                           Normal(latent1,
                                  5 * Variable(torch.ones(2))))
     x_dist = Normal(latent2, Variable(torch.ones(2)))
     pyro.observe("obs", x_dist, Variable(torch.ones(2)))
     return latent1
Example #25
def local_model(i, datum):
    beta = Variable(torch.ones(1)) * 0.5
    c = pyro.sample("class_of_datum_" + str(i), Bernoulli(beta))
    mean_param = Variable(torch.zeros(784), requires_grad=True)
    # do MLE for class means
    m = pyro.param("mean_of_class_" + str(c[0]), mean_param)

    sigma = Variable(torch.ones(m.size()))
    pyro.observe("obs_" + str(i), DiagNormal(m, sigma), datum)
    return c
Example #26
 def model():
     mu_latent = pyro.sample(
         "mu_latent", DiagNormal(self.mu0, torch.pow(self.tau0, -0.5)))
     unit_normal = dist.DiagNormal(Variable(torch.zeros(1, 1)),
                                   Variable(torch.ones(1, 1)))
     bijector = AffineExp(torch.pow(self.tau, -0.5), mu_latent)
     x_dist = TransformedDistribution(unit_normal, bijector)
     # x_dist = LogNormal(mu_latent, torch.pow(self.tau,-0.5))
     pyro.observe("obs0", x_dist, self.data[0])
     pyro.observe("obs1", x_dist, self.data[1])
     return mu_latent
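The commented-out line above hints at the intended equivalence: AffineExp(sigma, mu) is assumed to map a unit-normal draw x to exp(sigma * x + mu), i.e. the transformed distribution is LogNormal(mu, sigma). A quick standalone check of that identity:

import torch

sigma, mu = 0.5, 1.0
x = torch.randn(100000)
y = torch.exp(sigma * x + mu)          # assumed AffineExp(sigma, mu) behavior
print(y.log().mean(), y.log().std())   # approximately mu and sigma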
Example #27
 def model():
     mu_latent_prime = pyro.sample(
             "mu_latent_prime",
             dist.Normal(self.mu0, torch.pow(self.lam0, -0.5), reparameterized=repa1))
     mu_latent = pyro.sample(
             "mu_latent",
             dist.Normal(mu_latent_prime, torch.pow(self.lam0, -0.5), reparameterized=repa2))
     for i, x in enumerate(self.data):
         pyro.observe("obs_%d" % i, dist.normal, x, mu_latent,
                      torch.pow(self.lam, -0.5))
     return mu_latent
Example #28
 def model():
     alpha_p_log = pyro.param(
         "alpha_p_log", Variable(
             self.alpha_p_log_0.clone(), requires_grad=True), tags="model")
     beta_p_log = pyro.param(
         "beta_p_log", Variable(
             self.beta_p_log_0.clone(), requires_grad=True), tags="model")
     alpha_p, beta_p = torch.exp(alpha_p_log), torch.exp(beta_p_log)
     lambda_latent = pyro.sample("lambda_latent", dist.gamma, alpha_p, beta_p)
     pyro.observe("obs", dist.poisson, self.data, lambda_latent)
     return lambda_latent
Example #29
def gmm_batch_model(data):
    p = pyro.param("p", Variable(torch.Tensor([0.3]), requires_grad=True))
    p = torch.cat([p, 1 - p])
    sigma = pyro.param("sigma", Variable(torch.Tensor([1.0]), requires_grad=True))
    mus = Variable(torch.Tensor([-1, 1]))
    with pyro.iarange("data", len(data)) as batch:
        n = len(batch)
        z = pyro.sample("z", dist.Categorical(p.unsqueeze(0).expand(n, 2)))
        assert z.size() == (n, 2)
        mu = torch.mv(z, mus)
        pyro.observe("x", dist.Normal(mu, sigma.expand(n)), data[batch])
Example #30
def gmm_model(data, verbose=False):
    p = pyro.param("p", Variable(torch.Tensor([0.3]), requires_grad=True))
    sigma = pyro.param("sigma", Variable(torch.Tensor([1.0]), requires_grad=True))
    mus = Variable(torch.Tensor([-1, 1]))
    for i in pyro.irange("data", len(data)):
        z = pyro.sample("z_{}".format(i), dist.Bernoulli(p))
        assert z.size() == (1,)
        z = z.long().data[0]
        if verbose:
            print("M{} z_{} = {}".format("  " * i, i, z))
        pyro.observe("x_{}".format(i), dist.Normal(mus[z], sigma), data[i])
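A guide for this model must sample the same discrete sites z_i; a minimal mean-field sketch (the per-datum parameter names are assumptions):

def gmm_guide(data):
    for i in pyro.irange("data", len(data)):
        p_i = pyro.param("p_{}".format(i),
                         Variable(torch.Tensor([0.5]), requires_grad=True))
        pyro.sample("z_{}".format(i), dist.Bernoulli(p_i))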
Example #31
def gmm_batch_model(data):
    p = pyro.param("p", Variable(torch.Tensor([0.3]), requires_grad=True))
    p = torch.cat([p, 1 - p])
    sigma = pyro.param("sigma",
                       Variable(torch.Tensor([1.0]), requires_grad=True))
    mus = Variable(torch.Tensor([-1, 1]))
    with pyro.iarange("data", len(data)) as batch:
        n = len(batch)
        z = pyro.sample("z", dist.Categorical(p.unsqueeze(0).expand(n, 2)))
        assert z.size() == (n, 2)
        mu = torch.mv(z, mus)
        pyro.observe("x", dist.Normal(mu, sigma.expand(n)), data[batch])
Example #32
def gmm_model(data, verbose=False):
    p = pyro.param("p", Variable(torch.Tensor([0.3]), requires_grad=True))
    sigma = pyro.param("sigma",
                       Variable(torch.Tensor([1.0]), requires_grad=True))
    mus = Variable(torch.Tensor([-1, 1]))
    for i in pyro.irange("data", len(data)):
        z = pyro.sample("z_{}".format(i), dist.Bernoulli(p))
        assert z.size() == (1, )
        z = z.long().data[0]
        if verbose:
            print("M{} z_{} = {}".format("  " * i, i, z))
        pyro.observe("x_{}".format(i), dist.Normal(mus[z], sigma), data[i])
Example #33
        def model(*args, **kwargs):
            next_mean = self.mu0
            for k in range(1, self.N + 1):
                latent_dist = dist.Normal(next_mean, torch.pow(self.lambdas[k - 1], -0.5))
                mu_latent = pyro.sample("mu_latent_%d" % k, latent_dist)
                next_mean = mu_latent

            mu_N = next_mean
            for i, x in enumerate(self.data):
                pyro.observe("obs_%d" % i, dist.normal, x, mu_N,
                             torch.pow(self.lambdas[self.N], -0.5))
            return mu_N
Example #34
 def model():
     alpha_p_log = pyro.param(
         "alpha_p_log", Variable(self.alpha_p_log_0,
                                 requires_grad=True))
     beta_p_log = pyro.param(
         "beta_p_log", Variable(self.beta_p_log_0, requires_grad=True))
     alpha_p, beta_p = torch.exp(alpha_p_log), torch.exp(beta_p_log)
     lambda_latent = pyro.sample("lambda_latent",
                                 Gamma(alpha_p, beta_p))
     x_dist = Poisson(lambda_latent)
     pyro.observe("obs", x_dist, self.data)
     return lambda_latent
Example #35
        def model(*args, **kwargs):
            next_mean = self.mu0
            for k in range(1, self.N + 1):
                latent_dist = dist.Normal(next_mean, torch.pow(self.lambdas[k - 1], -0.5))
                mu_latent = pyro.sample("mu_latent_%d" % k, latent_dist)
                next_mean = mu_latent

            mu_N = next_mean
            for i, x in enumerate(self.data):
                pyro.observe("obs_%d" % i, dist.normal, x, mu_N,
                             torch.pow(self.lambdas[self.N], -0.5))
            return mu_N
Example #36
 def model(self, x):
     # register PyTorch module `decoder` with Pyro
     pyro.module("decoder", self.decoder)
     # setup hyperparameters for prior p(z)
     # the type_as ensures we get cuda Tensors if x is on gpu
     z_mu = ng_zeros([x.size(0), self.z_dim], type_as=x.data)
     z_sigma = ng_ones([x.size(0), self.z_dim], type_as=x.data)
     # sample from prior (value will be sampled by guide when computing the ELBO)
     z = pyro.sample("latent", dist.normal, z_mu, z_sigma)
     # decode the latent code z
     mu_img = self.decoder.forward(z)
     # score against actual images
     pyro.observe("obs", dist.bernoulli, x.view(-1, 784), mu_img)
Example #37
File: vae.py Project: Magica-Chen/pyro
 def model(self, x):
     # register PyTorch module `decoder` with Pyro
     pyro.module("decoder", self.decoder)
     # setup hyperparameters for prior p(z)
     # the type_as ensures we get cuda Tensors if x is on gpu
     z_mu = ng_zeros([x.size(0), self.z_dim], type_as=x.data)
     z_sigma = ng_ones([x.size(0), self.z_dim], type_as=x.data)
     # sample from prior (value will be sampled by guide when computing the ELBO)
     z = pyro.sample("latent", dist.normal, z_mu, z_sigma)
     # decode the latent code z
     mu_img = self.decoder.forward(z)
     # score against actual images
     pyro.observe("obs", dist.bernoulli, x.view(-1, 784), mu_img)
Example #38
 def model():
     p_latent = pyro.sample("p_latent", dist.beta, self.alpha0, self.beta0)
     pyro.map_data("aaa",
                   self.data, lambda i, x: pyro.observe(
                       "obs_{}".format(i), dist.bernoulli, x, p_latent),
                   batch_size=self.batch_size)
     return p_latent
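pyro.map_data both subsamples the data and rescales the log-probability accordingly; in the same legacy API it is roughly equivalent to the explicit irange form below (a sketch, assuming irange accepts a subsample_size argument):

 def model():
     p_latent = pyro.sample("p_latent", dist.beta, self.alpha0, self.beta0)
     for i in pyro.irange("aaa", len(self.data), subsample_size=self.batch_size):
         pyro.observe("obs_{}".format(i), dist.bernoulli, self.data[i], p_latent)
     return p_latent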
Example #39
    def model_classify(self, xs, ys=None):
        """
        this model is used to add an auxiliary (supervised) loss as described in the
        NIPS 2014 paper by Kingma et al titled
        "Semi-Supervised Learning with Deep Generative Models"
        """
        # register all pytorch (sub)modules with pyro
        pyro.module("ss_vae", self)

        # inform Pyro that the variables in the batch of xs, ys are conditionally independent
        with pyro.iarange("independent"):
            # this here is the extra Term to yield an auxiliary loss that we do gradient descend on
            # similar to the NIPS 14 paper (Kingma et al).
            if ys is not None:
                alpha = self.encoder_y.forward(xs)
                pyro.observe("y_aux", dist.categorical, ys, alpha, log_pdf_mask=self.aux_loss_multiplier)
Example #40
 def model():
     mu_latent = pyro.sample("mu_latent", dist.normal,
                             self.mu0, torch.pow(self.lam0, -0.5))
     pyro.map_data("aaa", self.data, lambda i,
                   x: pyro.observe(
                       "obs_%d" % i, dist.normal,
                       x, mu_latent, torch.pow(self.lam, -0.5)),
                   batch_size=self.batch_size)
     return mu_latent
Example #41
def model(data):
    # Create unit normal priors over the parameters
    mu = Variable(torch.zeros(p, 1)).type_as(data)
    sigma = Variable(torch.ones(p, 1)).type_as(data)
    bias_mu = Variable(torch.zeros(1)).type_as(data)
    bias_sigma = Variable(torch.ones(1)).type_as(data)
    w_prior, b_prior = Normal(mu, sigma), Normal(bias_mu, bias_sigma)
    priors = {'linear.weight': w_prior, 'linear.bias': b_prior}
    # lift module parameters to random variables sampled from the priors
    lifted_module = pyro.random_module("module", regression_model, priors)
    # sample a regressor (which also samples w and b)
    lifted_reg_model = lifted_module()

    with pyro.iarange("map", N, subsample=data):
        x_data = data[:, :-1]
        y_data = data[:, -1]
        # run the regressor forward conditioned on inputs
        prediction_mean = lifted_reg_model(x_data).squeeze()
        pyro.observe("obs", Normal(prediction_mean, Variable(torch.ones(data.size(0))).type_as(data)), y_data.squeeze())
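regression_model itself is not shown on this page; judging from the prior keys 'linear.weight' and 'linear.bias', it is assumed to be a one-layer module along these lines (p being the number of input features):

import torch.nn as nn

class RegressionModel(nn.Module):
    def __init__(self, p):
        super(RegressionModel, self).__init__()
        self.linear = nn.Linear(p, 1)  # parameters appear as 'linear.weight' / 'linear.bias'

    def forward(self, x):
        return self.linear(x)

regression_model = RegressionModel(p)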
Example #42
File: dmm.py Project: Magica-Chen/pyro
    def model(self, mini_batch, mini_batch_reversed, mini_batch_mask,
              mini_batch_seq_lengths, annealing_factor=1.0):

        # this is the number of time steps we need to process in the mini-batch
        T_max = mini_batch.size(1)

        # register all PyTorch (sub)modules with pyro
        # this needs to happen in both the model and guide
        pyro.module("dmm", self)

        # set z_prev = z_0 to setup the recursive conditioning in p(z_t | z_{t-1})
        z_prev = self.z_0.expand(mini_batch.size(0), self.z_0.size(0))

        # sample the latents z and observed x's one time step at a time
        for t in range(1, T_max + 1):
            # the next three lines of code sample z_t ~ p(z_t | z_{t-1})
            # note that (both here and elsewhere) log_pdf_mask takes care of both
            # (i)  KL annealing; and
            # (ii) raggedness in the observed data (i.e. different sequences
            #      in the mini-batch have different lengths)

            # first compute the parameters of the diagonal gaussian distribution p(z_t | z_{t-1})
            z_mu, z_sigma = self.trans(z_prev)
            # then sample z_t according to dist.Normal(z_mu, z_sigma)
            z_t = pyro.sample("z_%d" % t,
                              dist.normal,
                              z_mu,
                              z_sigma,
                              log_pdf_mask=annealing_factor * mini_batch_mask[:, t - 1:t])

            # compute the probabilities that parameterize the bernoulli likelihood
            emission_probs_t = self.emitter(z_t)
            # the next statement instructs pyro to observe x_t according to the
            # bernoulli distribution p(x_t|z_t)
            pyro.observe("obs_x_%d" % t, dist.bernoulli, mini_batch[:, t - 1, :],
                         emission_probs_t,
                         log_pdf_mask=mini_batch_mask[:, t - 1:t])
            # the latent sampled at this time step will be conditioned upon
            # in the next time step so keep track of it
            z_prev = z_t
Example #43
        def model(*args, **kwargs):
            top_latent_dist = dist.Normal(self.mu0, torch.pow(self.lambdas[0], -0.5))
            previous_names = ["mu_latent_1"]
            top_latent = pyro.sample(previous_names[0], top_latent_dist)
            previous_latents_and_names = list(zip([top_latent], previous_names))

            # for sampling model variables in different sequential orders
            def permute(x, n):
                if model_permutation:
                    return [x[self.model_permutations[n - 1][i]] for i in range(len(x))]
                return x

            def unpermute(x, n):
                if model_permutation:
                    return [x[self.model_unpermutations[n - 1][i]] for i in range(len(x))]
                return x

            for n in range(2, self.N + 1):
                new_latents_and_names = []
                for prev_latent, prev_name in permute(previous_latents_and_names, n - 1):
                    latent_dist = dist.Normal(prev_latent, torch.pow(self.lambdas[n - 1], -0.5))
                    couple = []
                    for LR in ['L', 'R']:
                        new_name = prev_name + LR
                        mu_latent_LR = pyro.sample(new_name, latent_dist)
                        couple.append([mu_latent_LR, new_name])
                    new_latents_and_names.append(couple)
                _previous_latents_and_names = unpermute(new_latents_and_names, n - 1)
                previous_latents_and_names = []
                for x in _previous_latents_and_names:
                    previous_latents_and_names.append(x[0])
                    previous_latents_and_names.append(x[1])

            for i, data_i in enumerate(self.data):
                for k, x in enumerate(data_i):
                    pyro.observe("obs_%s_%d" % (previous_latents_and_names[i][1], k),
                                 dist.normal, x, previous_latents_and_names[i][0],
                                 torch.pow(self.lambdas[-1], -0.5))
            return top_latent
Example #44
    def model(self, xs, ys=None):
        """
        The model corresponds to the following generative process:
        p(z) = normal(0,I)              # handwriting style (latent)
        p(y|x) = categorical(I/10.)     # which digit (semi-supervised)
        p(x|y,z) = bernoulli(mu(y,z))   # an image
        mu is given by a neural network  `decoder`

        :param xs: a batch of scaled vectors of pixels from an image
        :param ys: (optional) a batch of the class labels i.e.
                   the digit corresponding to the image(s)
        :return: None
        """
        # register this pytorch module and all of its sub-modules with pyro
        pyro.module("ss_vae", self)

        batch_size = xs.size(0)
        with pyro.iarange("independent"):

            # sample the handwriting style from the constant prior distribution
            prior_mu = Variable(torch.zeros([batch_size, self.z_dim]))
            prior_sigma = Variable(torch.ones([batch_size, self.z_dim]))
            zs = pyro.sample("z", dist.normal, prior_mu, prior_sigma)

            # if the label y (which digit to write) is supervised, sample from the
            # constant prior, otherwise, observe the value (i.e. score it against the constant prior)
            alpha_prior = Variable(torch.ones([batch_size, self.output_size]) / (1.0 * self.output_size))
            if ys is None:
                ys = pyro.sample("y", dist.categorical, alpha_prior)
            else:
                pyro.observe("y", dist.categorical, ys, alpha_prior)

            # finally, score the image (x) using the handwriting style (z) and
            # the class label y (which digit to write) against the
            # parametrized distribution p(x|y,z) = bernoulli(decoder(y,z))
            # where `decoder` is a neural network
            mu = self.decoder.forward([zs, ys])
            pyro.observe("x", dist.bernoulli, xs, mu)
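The corresponding guide mirrors the model's two sample sites, classifying the digit when no label is given; a sketch following the usual SS-VAE pattern (encoder_y matches Example #39, while encoder_z and its input convention are assumptions):

    def guide(self, xs, ys=None):
        with pyro.iarange("independent"):
            # if no label was supplied, sample it from the classifier q(y|x)
            if ys is None:
                alpha = self.encoder_y.forward(xs)
                ys = pyro.sample("y", dist.categorical, alpha)
            # sample the handwriting style from q(z|x,y)
            mu, sigma = self.encoder_z.forward([xs, ys])
            pyro.sample("z", dist.normal, mu, sigma)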
Example #45
        def model():
            p_latent = pyro.param("p1", Variable(torch.Tensor([[0.7], [0.3]])))
            p_obs = pyro.param("p2", Variable(torch.Tensor([[0.9], [0.1]])))

            latents = [Variable(torch.ones(1, 1))]
            observes = []
            for t in range(self.model_steps):

                latents.append(
                    pyro.sample("latent_{}".format(str(t)),
                                Bernoulli(torch.index_select(p_latent, 0, latents[-1].view(-1).long()))))

                observes.append(
                    pyro.observe("observe_{}".format(str(t)),
                                 Bernoulli(torch.index_select(p_obs, 0, latents[-1].view(-1).long())),
                                 self.data[t]))
            return torch.sum(torch.cat(latents))
Example #46
        def model():
            ps = pyro.param("ps", Variable(torch.Tensor([[0.8], [0.3]])))
            mu = pyro.param("mu", Variable(torch.Tensor([[-0.1], [0.9]])))
            sigma = Variable(torch.ones(1, 1))

            latents = [Variable(torch.ones(1))]
            observes = []
            for t in range(3):

                latents.append(
                    pyro.sample("latent_{}".format(str(t)),
                                Bernoulli(ps[latents[-1][0].long().data])))

                observes.append(
                    pyro.observe("observe_{}".format(str(t)),
                                 Normal(mu[latents[-1][0].long().data], sigma),
                                 pyro.ones(1)))
            return latents
Example #47
 def model():
     p_latent = pyro.sample("p_latent", dist.beta, self.alpha0, self.beta0)
     for i, x in enumerate(self.data):
         pyro.observe("obs_{}".format(i), dist.bernoulli, x,
                      torch.pow(torch.pow(p_latent, 2.0), 0.5))
     return p_latent
Example #48
 def model():
     mu = pyro.sample("mu", Normal(Variable(torch.zeros(1)),
                                   Variable(torch.ones(1))))
     xd = Normal(mu, Variable(torch.ones(1)), batch_size=50)
     pyro.observe("xs", xd, self.data)
     return mu
Example #49
 def model():
     lambda_latent = pyro.sample("lambda_latent", dist.gamma, self.alpha0, self.beta0)
     for i, x in enumerate(self.data):
         pyro.observe("obs_{}".format(i), dist.poisson, x, lambda_latent)
     return lambda_latent
Example #50
 def model():
     lambda_latent = pyro.sample("lambda_latent", dist.gamma, self.alpha0, self.beta0)
     pyro.observe("obs0", dist.exponential, self.data[0], lambda_latent)
     pyro.observe("obs1", dist.exponential, self.data[1], lambda_latent)
     return lambda_latent
Example #51
File: air.py Project: Magica-Chen/pyro
    def model_step(self, t, n, prev, batch, z_pres_prior_p=default_z_pres_prior_p):

        # Sample presence indicators.
        if not self.fudge_z_pres:
            z_pres_dist = Bernoulli(z_pres_prior_p(t) * prev.z_pres)
        else:
            z_pres_dist = Uniform(self.ng_zeros(n, 1), self.ng_ones(n, 1))
        z_pres = pyro.sample('z_pres_{}'.format(t), z_pres_dist)

        # If zero is sampled for a data point, then no more objects
        # will be added to its output image. We can't
        # straight-forwardly avoid generating further objects, so
        # instead we zero out the log_pdf of future choices.
        sample_mask = z_pres if self.use_masking else None

        # Sample attention window position.
        z_where = pyro.sample('z_where_{}'.format(t),
                              dist.normal,
                              self.z_where_mu_prior,
                              self.z_where_sigma_prior,
                              batch_size=n,
                              log_pdf_mask=sample_mask)

        # Sample latent code for contents of the attention window.
        z_what = pyro.sample('z_what_{}'.format(t),
                             dist.normal,
                             self.ng_zeros([self.z_what_size]),
                             self.ng_ones([self.z_what_size]),
                             batch_size=n,
                             log_pdf_mask=sample_mask)

        # Map latent code to pixel space.
        y_att = self.decode(z_what)

        # Position/scale attention window within larger image.
        y = window_to_image(z_where, self.window_size, self.x_size, y_att)

        # Combine the image generated at this step with the image so far.
        # (Note that there's no notion of occlusion here. Overlapping
        # objects can create pixel intensities > 1.)
        x = prev.x + (y * z_pres.view(-1, 1, 1))

        if batch is not None:
            # Add observations.

            # Observations are made as soon as we are done generating
            # objects for a data point. This ensures that future
            # discrete choices are not included in the ELBO. i.e. The
            # corresponding log(q/p) in the objectives will be zero
            # since we mask out all future choices and make no further
            # observations for data points that are complete.

            if not self.use_masking:
                observe_mask = 1.0
            elif t == (self.num_steps - 1):
                observe_mask = prev.z_pres
            else:
                observe_mask = prev.z_pres - z_pres

            if self.use_masking or t == (self.num_steps - 1):
                pyro.observe("obs_{}".format(t),
                             dist.normal,
                             batch.view(n, -1),
                             x.view(n, -1),
                             self.ng_ones(x.view(n, -1).size()) * 0.3,
                             log_pdf_mask=observe_mask)

        return ModelState(x=x, z_pres=z_pres, z_where=z_where)
Example #52
 def obs_inner(i, _i, _x):
     pyro.observe("obs_%d_%d" % (i, _i), dist.normal, _x, mu_latent,
                  torch.pow(self.lam, -0.5))
Example #53
 def model(self, data):
     decoder = pyro.module('decoder', self.vae_decoder)
     z_mean, z_std = ng_zeros([data.size(0), 20]), ng_ones([data.size(0), 20])
     z = pyro.sample('latent', Normal(z_mean, z_std))
     img = decoder.forward(z)
     pyro.observe('obs', Bernoulli(img), data.view(-1, 784))
Example #54
 def model():
     prior_dist = Normal(self.mu0, torch.pow(self.lam0, -0.5))
     mu_latent = pyro.sample("mu_latent", prior_dist)
     x_dist = Normal(mu_latent, torch.pow(self.lam, -0.5))
     pyro.observe("obs", x_dist, self.data)
     return mu_latent
Example #55
 def model(subsample_size):
     with pyro.iarange("data", len(data), subsample_size) as ind:
         x = data[ind]
         z = pyro.sample("z", dist.Normal(ng_zeros(len(x)), ng_ones(len(x)),
                                          reparameterized=reparameterized))
         pyro.observe("x", dist.Normal(z, ng_ones(len(x)), reparameterized=reparameterized), x)
Example #56
 def model_obs_dup():
     # note: the observe reuses the sample site name "mu_q" (duplicate-site case)
     pyro.sample("mu_q", dist.normal, ng_zeros(1), ng_ones(1))
     pyro.observe("mu_q", dist.normal, ng_zeros(1), ng_ones(1), ng_zeros(1))
Example #57
 def model():
     lambda_latent = pyro.sample("lambda_latent", dist.gamma, self.alpha0, self.beta0)
     pyro.map_data("aaa",
                   self.data, lambda i, x: pyro.observe(
                       "obs_{}".format(i), dist.poisson, x, lambda_latent), batch_size=3)
     return lambda_latent