def main(data=None, args=None, batch_size=None):
    data = torch.reshape(data, [64, 1000])

    # Globals.
    with pyro.plate("topics", 8):
        # shape = [8] + []
        topic_weights = pyro.sample("topic_weights", Gamma(1. / 8, 1.))
        # shape = [8] + [1024]
        topic_words = pyro.sample("topic_words",
                                  Dirichlet(torch.ones(1024) / 1024))

    # Locals.
    with pyro.plate("documents", 1000) as ind:
        # shape = [64, 32]
        data = data[:, ind]
        # shape = [32] + [8]
        doc_topics = pyro.sample("doc_topics", Dirichlet(topic_weights))

        with pyro.plate("words", 64):
            # The word_topics variable is marginalized out during inference,
            # achieved by specifying infer={"enumerate": "parallel"} and using
            # TraceEnum_ELBO for inference. Thus we can ignore this variable in
            # the guide.
            # shape = [64, 32] + []
            word_topics = pyro.sample("word_topics",
                                      Categorical(doc_topics),
                                      infer={"enumerate": "parallel"})
            # shape = [64, 32] + []
            data = pyro.sample("doc_words",
                               Categorical(topic_words[word_topics]),
                               obs=data)
Example #2
def test_categorical_gradient_with_logits(init_tensor_type):
    p = Variable(init_tensor_type([-float('inf'), 0]), requires_grad=True)
    categorical = Categorical(logits=p)
    log_pdf = categorical.batch_log_pdf(Variable(init_tensor_type([0, 1])))
    log_pdf.sum().backward()
    assert_equal(log_pdf.data[0], 0)
    assert_equal(p.grad.data[0], 0)
def generate_model(data=None, args=None, batch_size=None):
    # Globals.
    with pyro.plate("topics", 8):
        topic_weights = pyro.sample("topic_weights", Gamma(1. / 8, 1.))
        topic_words = pyro.sample("topic_words",
                                  Dirichlet(torch.ones(1024) / 1024))

    # Locals.
    with pyro.plate("documents", 1000) as ind:
        if data is not None:
            data = data[:, ind]
        doc_topics = pyro.sample("doc_topics", Dirichlet(topic_weights))
        with pyro.plate("words", 64):
            # The word_topics variable is marginalized out during inference,
            # achieved by specifying infer={"enumerate": "parallel"} and using
            # TraceEnum_ELBO for inference. Thus we can ignore this variable in
            # the guide.
            word_topics = pyro.sample("word_topics",
                                      Categorical(doc_topics),
                                      infer={"enumerate": "parallel"})
            data = pyro.sample("doc_words",
                               Categorical(topic_words[word_topics]),
                               obs=data)

    return topic_weights, topic_words, data
def softmax_like(env, *, trajectory_model, agent_model, log=False):
    """softmax_like

    :param env: OpenAI Gym environment
    :param trajectory_model: trajectory probabilistic program
    :param agent_model: agent's probabilistic program
    :param log: boolean; if True, print log info
    """

    Qs = torch.as_tensor([
        infer_Q(
            env,
            action,
            trajectory_model=trajectory_model,
            agent_model=agent_model,
            log=log,
        ) for action in range(env.action_space.n)
    ])
    action_logits = args.alpha * Qs
    action_dist = Categorical(logits=action_logits)

    if log:
        print('policy:')
        print(
            tabulate(
                [action_dist.probs.tolist()],
                headers=env.actions,
                tablefmt='fancy_grid',
            ))

    return action_dist.sample()
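# --- Added sketch (not from the original example): the policy above is a
# softmax/Boltzmann distribution, p(a) proportional to exp(alpha * Q(a)).
# The Q-values and alpha values below are made-up numbers for illustration.
import torch
from torch.distributions import Categorical as TorchCategorical

Qs_demo = torch.tensor([1.0, 0.5, 0.5, 0.0])
for alpha in (1.0, 10.0):
    probs = TorchCategorical(logits=alpha * Qs_demo).probs
    print(alpha, probs)
# alpha=1.0  -> roughly [0.39, 0.24, 0.24, 0.14]  (mildly prefers the best action)
# alpha=10.0 -> roughly [0.99, 0.007, 0.007, 0.00] (nearly greedy)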
    def sample_TRG_sentence(self, src_lengths):
        num_words = self.model.trg_embed.weight.size(0)
        prob = 1. / num_words
        distr = Categorical(
            probs=torch.tensor([prob for _ in range(num_words)]))
        trgs = [[self.sos_index] for _ in src_lengths]
        for i, s in enumerate(src_lengths):
            for _ in range(s):
                trgs[i] = trgs[i] + [distr.sample().item()]

        return src_lengths.new_tensor(trgs).long()
Example #7
def model(data=None, args=None, batch_size=None):
    if debug: print("model:")
    data = torch.reshape(data, [64, 1000])

    # Globals.
    with pyro.plate("topics", 8):
        # shape = [8] + []
        topic_weights = pyro.sample("topic_weights", Gamma(1. / 8, 1.))
        # shape = [8] + [1024]
        topic_words = pyro.sample("topic_words",
                                  Dirichlet(torch.ones(1024) / 1024))
        if debug:
            print("topic_weights\t: shape={}, sum={}".format(
                topic_weights.shape, torch.sum(topic_weights)))
            print("topic_words\t: shape={}".format(topic_words.shape))

    # Locals.
    # with pyro.plate("documents", 1000) as ind:
    with pyro.plate("documents", 1000, 32, dim=-1) as ind:
        # if data is not None:
        #     data = data[:, ind]
        # shape = [64, 32]
        data = data[:, ind]
        # shape = [32] + [8]
        doc_topics = pyro.sample("doc_topics", Dirichlet(topic_weights))
        if debug:
            print("data\t\t: shape={}".format(data.shape))
            print("doc_topics\t: shape={}, [0].sum={}".format(
                doc_topics.shape, torch.sum(doc_topics[0])))

        # with pyro.plate("words", 64):
        with pyro.plate("words", 64, dim=-2):
            # The word_topics variable is marginalized out during inference,
            # achieved by specifying infer={"enumerate": "parallel"} and using
            # TraceEnum_ELBO for inference. Thus we can ignore this variable in
            # the guide.
            # shape = [64, 32] + []
            word_topics =\
                pyro.sample("word_topics", Categorical(doc_topics),
                            infer={"enumerate": "parallel"})
            # pyro.sample("word_topics", Categorical(doc_topics))
            # shape = [64, 32] + []
            data =\
                pyro.sample("doc_words", Categorical(topic_words[word_topics]),
                            obs=data)
            if debug:
                print("word_topics\t: shape={}".format(word_topics.shape))
                print("data\t\t: shape={}".format(data.shape))

    return topic_weights, topic_words, data
    def reset(self, keep_state=False):  # pylint: disable=arguments-differ
        self.__time = 0

        if keep_state:
            state_probs = one_hot(self.state, self.state_space.n).float()
        else:
            state_probs = self.start

        self.confounder = pyro.sample('U', Categorical(self.U))
        self.state = pyro.sample(f'S_{self.__time}', Categorical(state_probs))
        self.done = torch.tensor(0)
        self.action_prev = None
        self.reward_prev = None

        return self.state, self.confounder
Example #9
    def forward(self, prefix, alpha, temperature=0.8, condition=False, generate=0):
        assert len(prefix) > 0

        hidden = self.rnn.init_hidden()
        out = self.char_tensor(prefix[0])
        generated_chars = prefix[0]

        for i in range(1, len(prefix) + generate):
            out, hidden = self.rnn(out, hidden)
            ps = softmax(out.mul(alpha.expand(out.size())))
            dist = Categorical(ps, one_hot=False)
            name = 'char_{0}'.format(i)
            if i < len(prefix):
                # Use character provided in prefix
                char = prefix[i]
                if condition:
                    char_index = self.all_chars.index(char)
                    observe(name, dist, Tensor([char_index]))
            else:
                # Sample a character
                char_index = sample(name, dist).data[0][0]  # FIXME
                char = self.all_chars[char_index]
            generated_chars += char
            out = self.char_tensor(char)

        return generated_chars
Example #10
    def model(self, x_data: torch.Tensor, y_data: torch.Tensor):
        fc1w_prior = Normal(
            loc=torch.zeros_like(self.fc1.weight),
            scale=torch.ones_like(self.fc1.weight),
        )
        fc1b_prior = Normal(loc=torch.zeros_like(self.fc1.bias),
                            scale=torch.ones_like(self.fc1.bias))

        outw_prior = Normal(
            loc=torch.zeros_like(self.out.weight),
            scale=torch.ones_like(self.out.weight),
        )
        outb_prior = Normal(loc=torch.zeros_like(self.out.bias),
                            scale=torch.ones_like(self.out.bias))

        priors = {
            "fc1.weight": fc1w_prior,
            "fc1.bias": fc1b_prior,
            "out.weight": outw_prior,
            "out.bias": outb_prior,
        }

        # lift module parameters to random variables sampled from the priors
        lifted_module = pyro.random_module("module", self, priors)
        # sample a regressor (which also samples w and b)
        lifted_reg_model = lifted_module()

        lhat = self.log_softmax(lifted_reg_model(x_data))

        pyro.sample("obs", Categorical(logits=lhat), obs=y_data)
Example #11
    def posterior(self, potentials: T) -> 'Mixture':
        post_components, post_lognorm = product(potentials,
                                                self.components,
                                                expand=True)
        post_logits = self.mixing.logits + post_lognorm
        post_mixing = Categorical(logits=post_logits)
        return Mixture(post_mixing, post_components)
Example #12
    def posterior(
            self,
            potentials: MultivariateNormal) -> 'MultivariateNormalMixture':
        means = potentials.mean.unsqueeze(1)  # (N, 1, D)
        precs = potentials.precision_matrix.unsqueeze(1)  # (N, 1, D, D)
        covs = potentials.covariance_matrix.unsqueeze(1)  # (N, 1, D, D)

        prior_means = self.components.mean.unsqueeze(0)  # (1, K, D)
        prior_precs = self.components.precision_matrix.unsqueeze(
            0)  # (1, K, D, D)
        prior_covs = self.components.covariance_matrix.unsqueeze(
            0)  # (1, K, D, D)

        post_precs = precs + prior_precs
        post_means = posdef_solve(
            precs @ means[..., None] + prior_precs @ prior_means[..., None],
            post_precs)[0].squeeze(-1)
        post_components = MultivariateNormal(post_means,
                                             precision_matrix=post_precs)

        post_lognorm = MultivariateNormal(prior_means,
                                          covs + prior_covs).log_prob(means)
        post_logits = self.mixing.logits + post_lognorm

        return MultivariateNormalMixture(Categorical(logits=post_logits),
                                         post_components)
Example #13
    def model(self, features, target):
        def normal_prior(x):
            return Normal(torch.zeros_like(x),
                          torch.ones_like(x)).to_event(x.dim())

        self.priors = {}

        for i in range(len(self.net.hidden_sizes)):
            self.priors['h' + str(i) + '.weight'] = normal_prior(
                getattr(self.net, 'h' + str(i)).weight)
            self.priors['h' + str(i) + '.bias'] = normal_prior(
                getattr(self.net, 'h' + str(i)).bias)

        self.priors['out' + '.weight'] = normal_prior(self.net.out.weight)
        self.priors['out' + '.bias'] = normal_prior(self.net.out.bias)

        # lift module parameters to random variables sampled from the priors
        lifted_module = pyro.random_module("module", self.net, self.priors)
        # sample a regressor (which also samples w and b)
        model_sample = lifted_module()
        # print(model_sample)

        with pyro.plate("data", len(target)):

            # yhat = self.log_softmax(model_sample(features))
            # target is not one-hot encoded
            # pyro.sample("obs",
            #             Categorical(logits=yhat), obs=target)

            yhat = self.softmax(model_sample(features))

            # target is not one-hot encoded
            pyro.sample("obs", Categorical(probs=yhat), obs=target)
            return yhat
Example #14
    def model(self,
              src,
              trg,
              src_mask,
              trg_mask,
              src_lengths,
              trg_lengths,
              y_trg,
              kl=1.0):
        pyro.module('VNMT', self)
        encoder_hidden, encoder_final = self.encoder_hidden_x, self.encoder_final
        X = self.X_avg

        if self.posterior is not None:
            #regular VNMT
            z_mean, z_sig = self.prior(X)
        else:
            #match our...own parameters, should just mean KL(...) = 0 every time
            mu_post, sig_post = self.get_batch_params(ret_posterior=True)
            z_mean, z_sig = mu_post, sig_post

        self.prior_params = {'mu': z_mean, 'sig': z_sig}
        with pyro.plate('data'):
            #TODO FYI: technically, the correct scaling is 1./ size_of_data
            with poutine.scale(scale=self.get_model_kl_const(scale=kl)):
                #TODO probably...a good idea to test this with flows also on prior...you know, so it's correct?
                use_flows = True
                dist = self.getDistribution(z_mean,
                                            z_sig,
                                            use_cached_flows=True,
                                            extra_cond=use_flows,
                                            cond_input=None)
                z = pyro.sample('z', dist)
            #TODO, need to add the latent z as input to decoder

            z = z if self.projection is None else self.projection(z)

            inputs = self.getWordEmbeddingsWithWordDropout(
                self.trg_embed, trg, trg_mask)
            #key thing is HERE, i am directly calling our decoder
            _, _, pre_output = self.decoder(inputs,
                                            encoder_hidden,
                                            encoder_final,
                                            src_mask,
                                            trg_mask,
                                            additional_input=z)
            logits = self.generator(pre_output)
            obs = y_trg.contiguous().view(-1)
            mask = trg_mask.contiguous().view(-1)
            try:
                mask = mask.bool()
            except AttributeError as e:
                #do nothing, is just a versioning issue
                _ = 0
            #My assumption is this will usually just sum the loss so we need to average it ourselves
            with poutine.scale(scale=self.get_reconstruction_const(scale=kl)):
                pyro.sample('preds',
                            Categorical(logits=logits.contiguous().view(
                                -1, logits.size(-1))).mask(mask),
                            obs=obs)
Example #15
def guide_t0(data):
    # T-1 alpha params for beta sampling
    kappa = pyro.param('kappa',
                       lambda: Uniform(0, 2).sample([T - 1]),
                       constraint=constraints.positive)

    # concentration params for q_theta #[T,C]
    tau = pyro.param('tau',
                     lambda: MultivariateNormal(0.5 * torch.ones(C), 0.25 *
                                                torch.eye(C)).sample([T]),
                     constraint=constraints.unit_interval)

    # N params for categorical dist; topic weights; symmetric prior
    phi = pyro.param('phi',
                     lambda: Dirichlet(1 / T * torch.ones(T)).sample([N]),
                     constraint=constraints.simplex)

    with pyro.plate("beta_plate", T - 1):
        q_beta = pyro.sample("beta", Beta(torch.ones(T - 1), kappa))

    # sample probs for multinomial distributions
    with pyro.plate("theta_plate", T):
        # outputs multinomial probabilities for each topic
        q_theta = pyro.sample("theta", Dirichlet(tau))

    with pyro.plate("data", N):
        z = pyro.sample("z", Categorical(phi))
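# --- Added sketch (not from the original guide): the T-1 Beta draws above are
# the stick-breaking fractions of a truncated Dirichlet process; the companion
# model typically turns them into T mixture weights with a helper like this.
import torch
import torch.nn.functional as F

def mix_weights(beta):
    # beta: (..., T-1) stick fractions -> (..., T) mixture weights
    beta1m_cumprod = (1 - beta).cumprod(-1)
    return F.pad(beta, (0, 1), value=1) * F.pad(beta1m_cumprod, (1, 0), value=1)

# Example: beta = [0.5, 0.5, 0.5] -> weights [0.5, 0.25, 0.125, 0.125]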
Example #16
    def model(self,
              src,
              trg,
              src_mask,
              trg_mask,
              src_lengths,
              trg_lengths,
              y_trg,
              kl=1.0):
        pyro.module('VanillaNMT', self)
        self.encoder_hidden_x, self.encoder_final = self.encoder(
            self.src_embed(src), src_mask, src_lengths)
        encoder_hidden, encoder_final = self.encoder_hidden_x, self.encoder_final

        with pyro.plate('data'):
            #for consistency, although word dropout supposedly makes less sense without latent variables
            inputs = self.getWordEmbeddingsWithWordDropout(
                self.trg_embed, trg, trg_mask)
            #key thing is HERE, i am directly calling our decoder
            _, _, pre_output = self.decoder(inputs, encoder_hidden,
                                            encoder_final, src_mask, trg_mask)
            logits = self.generator(pre_output)
            obs = y_trg.contiguous().view(-1)
            mask = trg_mask.contiguous().view(-1)
            try:
                mask = mask.bool()
            except AttributeError as e:
                #do nothing, is just a versioning issue
                _ = 0
            #My assumption is this will usually just sum the loss so we need to average it ourselves
            with poutine.scale(scale=self.get_reconstruction_const(scale=kl)):
                pyro.sample('preds',
                            Categorical(logits=logits.contiguous().view(
                                -1, logits.size(-1))).mask(mask),
                            obs=obs)
def model_4(x_data, y_data):

  conv1w_prior = Normal(loc=torch.zeros_like(net.conv1.weight), scale=torch.ones_like(net.conv1.weight))
  conv1b_prior = Normal(loc=torch.zeros_like(net.conv1.bias), scale=torch.ones_like(net.conv1.bias))

  conv2w_prior = Normal(loc=torch.zeros_like(net.conv2.weight), scale=torch.ones_like(net.conv2.weight))
  conv2b_prior = Normal(loc=torch.zeros_like(net.conv2.bias), scale=torch.ones_like(net.conv2.bias))

  conv3w_prior = Normal(loc=torch.zeros_like(net.conv3.weight), scale=torch.ones_like(net.conv3.weight))
  conv3b_prior = Normal(loc=torch.zeros_like(net.conv3.bias), scale=torch.ones_like(net.conv3.bias))

  conv4w_prior = Normal(loc=torch.zeros_like(net.conv4.weight), scale=torch.ones_like(net.conv4.weight))
  conv4b_prior = Normal(loc=torch.zeros_like(net.conv4.bias), scale=torch.ones_like(net.conv4.bias))

  fc1w_prior = Normal(loc=torch.zeros_like(net.fc1.weight), scale=torch.ones_like(net.fc1.weight))
  fc1b_prior = Normal(loc=torch.zeros_like(net.fc1.bias), scale=torch.ones_like(net.fc1.bias))

  fc2w_prior = Normal(loc=torch.zeros_like(net.fc2.weight), scale=torch.ones_like(net.fc2.weight))
  fc2b_prior = Normal(loc=torch.zeros_like(net.fc2.bias), scale=torch.ones_like(net.fc2.bias))

  priors = {'conv1.weight': conv1w_prior, 'conv1.bias': conv1b_prior,
            'conv2.weight': conv2w_prior, 'conv2.bias': conv2b_prior,
            'conv3.weight': conv3w_prior, 'conv3.bias': conv3b_prior,
            'conv4.weight': conv4w_prior, 'conv4.bias': conv4b_prior,
            'fc1.weight': fc1w_prior, 'fc1.bias': fc1b_prior,
            'fc2.weight': fc2w_prior, 'fc2.bias': fc2b_prior}

  lifted_module = pyro.random_module("module", net, priors)

  lifted_reg_model = lifted_module()

  lhat = log_softmax(lifted_reg_model(x_data), dim=1)

  pyro.sample("obs", Categorical(logits=lhat), obs=y_data)
def model(data):
    x_data = data[0]
    y_data = data[1]
    '''
    mu, sigma = Variable(torch.zeros(10, p)), Variable(10 * torch.ones(10, p))
    bias_mu, bias_sigma = Variable(torch.zeros(10)), Variable(10 * torch.ones(10))

    w_prior, b_prior = Normal(mu, sigma), Normal(bias_mu, bias_sigma)
    priors = {'linear.weight': w_prior, 'linear.bias': b_prior}
    '''

    w_prior1, b_prior1 = Normal(mu1, sigma1), Normal(bias_mu1, bias_sigma1)
    w_prior2, b_prior2 = Normal(mu2, sigma2), Normal(bias_mu2, bias_sigma2)
    w_prior3, b_prior3 = Normal(mu3, sigma3), Normal(bias_mu3, bias_sigma3)

    priors = {
        'linear.weight': w_prior1,
        'linear.bias': b_prior1,
        'linear2.weight': w_prior2,
        'linear2.bias': b_prior2,
        'linear3.weight': w_prior3,
        'linear3.bias': b_prior3
    }
    lifted_module = pyro.random_module("module", bnn, priors)
    lifted_bnn_model = lifted_module()

    # run regressor forward conditioned on data
    prediction = lifted_bnn_model(x_data).squeeze()
    pyro.sample("obs", Categorical(ps=prediction), obs=y_data)
    def languageModelOptimization(self, z, z_hid, src, src_lengths,
                                  src_input_mask, kl):
        src = src.clone()  #pretty sure that's a bug anyways...
        #need to redo src side as batch doesn't handle it
        #TODO...probably should be handled in rebatch
        src_indx = src[:, :-1]
        src_trgs = src[:, 1:]
        self.src_tok_count = (src_trgs != self.pad_index).data.sum().item()
        src_output_mask = (src_trgs != self.pad_index
                           )  #similar to what is done in Batch class for trg
        z_x = self.resize_z(z_hid, self.num_layers)

        inputs = self.getWordEmbeddingsWithWordDropout(self.src_embed,
                                                       src_indx,
                                                       src_output_mask)
        _, _, pre_output = self.lang_model(inputs,
                                           src_input_mask,
                                           src_output_mask,
                                           hidden=z_x,
                                           z=z)
        logits = self.lm_generator(pre_output)
        logits = logits.contiguous().view(-1, logits.size(-1))
        obs = src_trgs.contiguous().view(-1)
        mask = src_output_mask.contiguous().view(-1)
        try:
            mask = mask.bool()
        except AttributeError as e:
            #do nothing, is a versioning thing to suppress a warning
            _ = 0

        with poutine.scale(scale=self.get_reconstruction_const(scale=kl)):
            pyro.sample('lm_preds',
                        Categorical(logits=logits).mask(mask),
                        obs=obs)
Example #20
def circle(site, env, state):
    state, confounder = state

    policy = torch.tensor([
        [
            [0.0, 0.0, 0.0, 1.0],
            [0.0, 0.0, 0.0, 1.0],
            [0.0, 1.0, 0.0, 0.0],
            [1.0, 0.0, 0.0, 0.0],
            [1.0, 1.0, 1.0, 1.0],
            [0.0, 1.0, 0.0, 0.0],
            [1.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 1.0, 0.0],
            [0.0, 0.0, 1.0, 0.0],
        ],
        [
            [0.0, 1.0, 0.0, 0.0],
            [0.0, 0.0, 1.0, 0.0],
            [0.0, 0.0, 1.0, 0.0],
            [0.0, 1.0, 0.0, 0.0],
            [1.0, 1.0, 1.0, 1.0],
            [1.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 1.0],
            [0.0, 0.0, 0.0, 1.0],
            [1.0, 0.0, 0.0, 0.0],
        ],
    ])

    action_probs = policy[confounder, state]
    return pyro.sample(site, Categorical(action_probs))
    def translationModelOptimization(self, z, z_hid, src, src_mask,
                                     src_lengths, trg, trg_mask, trg_lengths,
                                     y_trg, kl):
        #self.num_layers*2 because encoder is bidirectional
        z_hid = self.resize_z(z_hid, self.num_layers * 2)

        encoder_hidden, encoder_final = self.encoder(self.src_embed(src),
                                                     src_mask,
                                                     src_lengths,
                                                     hidden=z_hid)
        inputs = self.getWordEmbeddingsWithWordDropout(self.trg_embed, trg,
                                                       trg_mask)
        #key thing is HERE, i am directly calling our decoder
        _, _, pre_output = self.decoder(inputs,
                                        encoder_hidden,
                                        encoder_final,
                                        src_mask,
                                        trg_mask,
                                        additional_input=z)
        logits = self.generator(pre_output)
        logits = logits.contiguous().view(-1, logits.size(-1))
        obs = y_trg.contiguous().view(-1)
        mask = trg_mask.contiguous().view(-1)
        try:
            mask = mask.bool()
        except AttributeError as e:
            #do nothing, means it's an older pytorch version
            _ = 0
        #My assumption is this will usually just sum the loss so we need to average it ourselves
        with poutine.scale(scale=self.get_reconstruction_const(scale=kl)):
            pyro.sample('preds',
                        Categorical(logits=logits).mask(mask),
                        obs=obs)
def probabilistic_model(inputs, labels):
    '''
    pyro.random_module() converts weights and biases into random variables
    whose prior probability distributions are given by dense_weight_prior and
    dense_bias_prior (normal distributions); this "overloads" the parameters
    of the random module with samples from the prior.
    '''
    resnet = ResNet18()
    dense_weight_prior = Normal(loc=torch.zeros_like(resnet.dense.weight),
                                scale=torch.ones_like(resnet.dense.weight))
    dense_bias_prior = Normal(loc=torch.zeros_like(resnet.dense.bias),
                              scale=torch.ones_like(resnet.dense.bias))

    priors = {
        'dense.weight': dense_weight_prior,
        'dense.bias': dense_bias_prior
    }

    lifted_module = pyro.random_module("module", resnet, priors)

    # This samples a neural network (which also samples weights and biases)
    # we wrap the nn model with random_module and sample and instance
    # of the nn
    sampled_nn_model = lifted_module()

    # runs the sampled nn on the input data
    lhat = F.log_softmax(sampled_nn_model(inputs), dim=1)

    # this shows the output of the network will be categorical
    pyro.sample("obs", Categorical(logits=lhat), obs=labels)
Example #23
def model(x_data, y_data):
    fc1w_prior = Normal(loc=torch.zeros_like(net.fc1.weight),
                        scale=torch.ones_like(net.fc1.weight))
    fc1b_prior = Normal(loc=torch.zeros_like(net.fc1.bias),
                        scale=torch.ones_like(net.fc1.bias))

    outw_prior = Normal(loc=torch.zeros_like(net.out.weight),
                        scale=torch.ones_like(net.out.weight))
    outb_prior = Normal(loc=torch.zeros_like(net.out.bias),
                        scale=torch.ones_like(net.out.bias))

    priors = {
        'fc1.weight': fc1w_prior,
        'fc1.bias': fc1b_prior,
        'out.weight': outw_prior,
        'out.bias': outb_prior
    }
    # lift module parameters to random variables sampled from the priors
    lifted_module = pyro.random_module("module", net, priors)
    # sample a regressor (which also samples w and b)
    lifted_reg_model = lifted_module()

    lhat = lifted_reg_model(x_data)

    pyro.sample("obs", Categorical(logits=lhat), obs=y_data)
Example #24
def test_replay_enumerate_poutine(depth, first_available_dim):
    num_particles = 2
    y_dist = Categorical(torch.tensor([0.5, 0.25, 0.25]))

    def guide():
        pyro.sample("y", y_dist, infer={"enumerate": "parallel"})

    guide = poutine.enum(guide, first_available_dim=first_available_dim - depth)
    guide = poutine.trace(guide)
    guide_trace = guide.get_trace()

    def model():
        pyro.sample("x", Bernoulli(0.5))
        for i in range(depth):
            pyro.sample("a_{}".format(i), Bernoulli(0.5), infer={"enumerate": "parallel"})
        pyro.sample("y", y_dist, infer={"enumerate": "parallel"})
        for i in range(depth):
            pyro.sample("b_{}".format(i), Bernoulli(0.5), infer={"enumerate": "parallel"})

    model = poutine.enum(model, first_available_dim=first_available_dim)
    model = poutine.replay(model, trace=guide_trace)
    model = poutine.trace(model)

    for i in range(num_particles):
        tr = model.get_trace()
        assert tr.nodes["y"]["value"] is guide_trace.nodes["y"]["value"]
        tr.compute_log_prob()
        log_prob = sum(site["log_prob"] for name, site in tr.iter_stochastic_nodes())
        actual_shape = log_prob.shape
        expected_shape = (2,) * depth + (3,) + (2,) * depth + (1,) * (-1 - first_available_dim)
        assert actual_shape == expected_shape, 'error on iteration {}'.format(i)
Example #25
def model_sample(cll=None):
    # wrap params for use in model -- required
    # decoder = pyro.module("decoder", pt_decode)

    # sample from prior
    z_mu, z_sigma = Variable(torch.zeros(
        [1, 20])), Variable(torch.ones([1, 20]))

    # sample
    z = pyro.sample("latent", DiagNormal(z_mu, z_sigma))

    alpha = Variable(torch.ones([1, 10]) / 10.)

    if cll is None:
        bb()
        cll = pyro.sample('class', Categorical(alpha))
        print('sampling class')

    # decode into size of imgx1 for mu
    img_mu = pt_decode.forward(z, cll)
    # bb()
    # img=Bernoulli(img_mu).sample()
    # score against actual images
    img = pyro.sample("sample", Bernoulli(img_mu))
    # return img
    return img, img_mu
Example #26
def guide_latent2(data):
    encoder_c = pyro.module("encoder_c", pt_encode_c)
    alpha = encoder_c.forward(data)
    cll = pyro.sample("latent_class", Categorical(alpha))

    encoder = pyro.module("encoder_o", pt_encode_o)
    z_mu, z_sigma = encoder.forward(data, cll)
    z = pyro.sample("latent_z", DiagNormal(z_mu, z_sigma))
    def step(self, action):
        assert self.__time >= 0
        assert 0 <= self.state < self.state_space.n

        if not 0 <= action < self.action_space.n:
            raise ValueError(
                f'Action should be an integer in {{0, ..., {self.action_space.n - 1}}}'
            )

        if self.done is None or self.__time is None:
            raise InternalStateError(
                'The environment must be reset before being used')

        if self.done:
            raise InternalStateError(
                'The previous episode has ended and the environment must reset'
            )

        self.__time += 1

        state_next_dist = Categorical(self.T[self.state, action])
        state_next = pyro.sample(f'S_{self.__time}', state_next_dist)

        reward_dist = Delta(self.R[self.confounder, self.state, action,
                                   state_next])
        reward = pyro.sample(f'R_{self.__time}', reward_dist)

        if self.episodic:
            done = self.D[self.state, action]
        else:
            done = torch.tensor(False)

        done_probs = one_hot(done.long(), 2).float()
        done_dist = Categorical(done_probs)
        done = pyro.sample(f'D_{self.__time}', done_dist)

        info = {
            'T': self.T[self.state, action],
            'R': self.R[self.confounder, self.state, action],
        }

        self.state = state_next
        self.action_prev = action
        self.reward_prev = reward

        return state_next, reward, done, info
Example #28
def guide_latent(data, cll):
    encoder_x = pyro.module("encoder_x", pt_encode_x)
    encoder_z = pyro.module("encoder_z", pt_encode_z)

    z_mu, z_sigma = encoder_x.forward(data)
    z = pyro.sample("latent_z", DiagNormal(z_mu, z_sigma))
    alpha_cat = encoder_z.forward(z)
    pyro.sample("latent_class", Categorical(alpha_cat))
Example #29
def local_model(i, datum):
    beta = Variable(torch.ones(1, 10)) * 0.1
    cll = pyro.sample("class_of_datum_" + str(i), Categorical(beta))
    mean_param = Variable(torch.zeros(1, 784), requires_grad=True)
    # do MLE for class means
    mu = pyro.param("mean_of_class_" + str(cll[0]), mean_param)
    pyro.observe("obs_" + str(i), Bernoulli(mu), datum)
    return cll
Example #30
def model_latent(data):
    decoder_c = pyro.module("decoder_c", pt_decode_c)
    decoder_z = pyro.module("decoder_z", pt_decode_z)
    alpha = Variable(torch.ones([data.size(0), 10])) / 10.
    cll = pyro.sample('latent_class', Categorical(alpha))
    z_mu, z_sigma = decoder_c.forward(cll)
    z = pyro.sample("latent_z", DiagNormal(z_mu, z_sigma))
    img_mu = decoder_z.forward(z)
    pyro.observe("obs", Bernoulli(img_mu), data.view(-1, 784))
Example #31
    def model(self, x, y):
        priors = {
            name: make_normal_prior(p)
            for name, p in self.net.named_parameters()
        }
        lifted_module = pyro.random_module("module", self.net, priors)
        lifted_reg_model = lifted_module()
        lhat = F.log_softmax(lifted_reg_model(x), dim=1)
        pyro.sample("y", Categorical(logits=lhat), obs=y)