def guide(): p = pyro.param("p", torch.tensor(0.5, requires_grad=True)) outer_irange = pyro.irange("irange_0", 3, subsample_size) inner_irange = pyro.irange("irange_1", 3, subsample_size) for j in inner_irange: for i in outer_irange: pyro.sample("x_{}_{}".format(i, j), dist.Bernoulli(p))
def model(means, stds): a_irange = pyro.irange("a", len(means), mean_batch_size) b_irange = pyro.irange("b", len(stds), std_batch_size) return [[ pyro.sample("x_{}{}".format(i, j), dist.Normal(means[i], stds[j])) for j in b_irange ] for i in a_irange]
def model():
    p = torch.tensor(0.5)
    outer_irange = pyro.irange("irange_0", 3, subsample_size)
    inner_irange = pyro.irange("irange_1", 3, subsample_size)
    for i in outer_irange:
        for j in inner_irange:
            pyro.sample("x_{}_{}".format(i, j), dist.Bernoulli(p))

def model(): with pyro.iarange("particles", num_particles): pyro.sample("x", dist.Bernoulli(p).expand_by([num_particles])) inner_irange = pyro.irange("inner", outer_dim) for i in pyro.irange("outer", inner_dim): pyro.sample("y_{}".format(i), dist.Bernoulli(p).expand_by([num_particles])) for j in inner_irange: pyro.sample("z_{}_{}".format(i, j), dist.Bernoulli(p).expand_by([num_particles]))
def model(batch_size_outer=2, batch_size_inner=2):
    data = [[torch.ones(1)] * 2] * 2
    loc_latent = pyro.sample("loc_latent",
                             dist.Normal(torch.zeros(1), torch.ones(1)))
    for i in pyro.irange("irange_outer", 2, batch_size_outer):
        # the inner irange needs a unique name per outer iteration
        for j in pyro.irange("irange_inner_%d" % i, 2, batch_size_inner):
            pyro.sample("z_%d_%d" % (i, j),
                        dist.Normal(loc_latent + data[i][j], torch.ones(1)))

def nested_irange_model(subsample_size):
    mu = Variable(torch.zeros(20))
    sigma = Variable(torch.ones(20))
    result = []
    for i in pyro.irange("outer", 20, subsample_size):
        result.append([])
        for j in pyro.irange("inner", 20, 5):
            pyro.sample("x_{}_{}".format(i, j), dist.normal,
                        mu[i] + mu[j], sigma[i] + sigma[j])
            result[-1].append(j)
    return result

def model(self, data):
    '''The generative distribution'''
    pyro.module("decoder", self.decoder)
    # sample all the priors simultaneously
    with pyro.iarange("score_sample", len(self.vocab)):
        z = pyro.sample("latent_scores", dist.Dirichlet(self.alpha_prior))
    datasets = data.source.unique()
    # loop through the datasets
    for i in pyro.irange("data_loop", len(datasets)):
        dataset = datasets[i]
        subset = data.loc[data.source == dataset]
        sent = torch.tensor(subset.sent.values.tolist(), dtype=torch.float)
        if len(sent.shape) == 1:
            sent = sent.unsqueeze(-1)
        z_word = z[subset.word_id.values]
        rho = self.decoder.forward(z_word, dataset)
        if dataset in ['mpqa', 'huliu', 'general_inquirer']:
            pyro.sample(f"obs_{dataset}", dist.Bernoulli(rho), obs=sent)
        if dataset == 'vader':
            if self.vader_multinomial:
                pyro.sample(f"obs_{dataset}",
                            dist.Multinomial(probs=rho, total_count=10),
                            obs=sent)
            else:
                n = rho.size(0)
                batch = n // 20
                for j in pyro.irange("vader_chunks", 20):
                    pyro.sample(f"obs_{dataset}_{j}",
                                dist.Categorical(rho[j * batch:(j + 1) * batch, :]),
                                obs=sent + 4.)
        if dataset == 'senticnet':
            loc, scale = rho
            pyro.sample(f"obs_{dataset}", dist.Normal(loc, scale), obs=sent)
        if dataset == 'sentiwordnet':
            loc, scale = rho
            pyro.sample(f"obs_{dataset}",
                        dist.MultivariateNormal(loc, scale), obs=sent)

def nested_irange_model(subsample_size):
    loc = torch.zeros(20)
    scale = torch.ones(20)
    result = []
    # create the inner irange once, outside the loop, so it can be reused
    inner_irange = pyro.irange("inner", 20, 5)
    for i in pyro.irange("outer", 20, subsample_size):
        result.append([])
        for j in inner_irange:
            pyro.sample("x_{}_{}".format(i, j),
                        dist.Normal(loc[i] + loc[j], scale[i] + scale[j]))
            result[-1].append(j)
    return result

def model(): p = Variable(torch.Tensor([0.5])) for i in pyro.irange("irange0", 2): pyro.sample("x_%d" % i, dist.bernoulli, p) if i == 0: for j in pyro.irange("irange1", 2): with pyro.iarange("iarange1", 10, 5) as ind: pyro.sample("y_%d" % j, dist.bernoulli, p, batch_size=len(ind)) elif i == 1: with pyro.iarange("iarange1", 10, 5) as ind: pyro.sample("z", dist.bernoulli, p, batch_size=len(ind))
def guide(): p = pyro.param("p", Variable(torch.Tensor([0.5]), requires_grad=True)) for i in pyro.irange("irange0", 2): pyro.sample("x_%d" % i, dist.bernoulli, p) if i == 0: for j in pyro.irange("irange1", 2): with pyro.iarange("iarange1", 10, 5) as ind: pyro.sample("y_%d" % j, dist.bernoulli, p, batch_size=len(ind)) elif i == 1: with pyro.iarange("iarange1", 10, 5) as ind: pyro.sample("z", dist.bernoulli, p, batch_size=len(ind))
def model(): pyro.sample("w", dist.Bernoulli(0.5), infer={'enumerate': 'parallel'}) inner_iarange = pyro.iarange("iarange", 10, 5) for i in pyro.irange("irange1", 2): with inner_iarange: pyro.sample("x_{}".format(i), dist.Bernoulli(0.5).expand_by([5]), infer={'enumerate': enumerate_}) for i in pyro.irange("irange2", 2): pyro.sample("y_{}".format(i), dist.Bernoulli(0.5))
def guide(): q = pyro.param("q") with pyro.iarange("particles", num_particles): pyro.sample("x", dist.Bernoulli(q).expand_by([num_particles]), infer={"enumerate": enumerate1}) inner_irange = pyro.irange("inner", inner_dim) for i in pyro.irange("outer", outer_dim): pyro.sample("y_{}".format(i), dist.Bernoulli(q).expand_by([num_particles]), infer={"enumerate": enumerate2}) for j in inner_irange: pyro.sample("z_{}_{}".format(i, j), dist.Bernoulli(q).expand_by([num_particles]), infer={"enumerate": enumerate3})
def guide(): p = pyro.param("p", torch.tensor(0.5, requires_grad=True)) inner_iarange = pyro.iarange("iarange1", 10, 5) for i in pyro.irange("irange0", 2): pyro.sample("x_%d" % i, dist.Bernoulli(p)) if i == 0: for j in pyro.irange("irange1", 2): with inner_iarange as ind: pyro.sample("y_%d" % j, dist.Bernoulli(p).expand_by([len(ind)])) elif i == 1: with inner_iarange as ind: pyro.sample("z", dist.Bernoulli(p).expand_by([len(ind)]))
def model(): p = torch.tensor(0.5) inner_iarange = pyro.iarange("iarange1", 10, 5) for i in pyro.irange("irange0", 2): pyro.sample("x_%d" % i, dist.Bernoulli(p)) if i == 0: for j in pyro.irange("irange1", 2): with inner_iarange as ind: pyro.sample("y_%d" % j, dist.Bernoulli(p).expand_by([len(ind)])) elif i == 1: with inner_iarange as ind: pyro.sample("z", dist.Bernoulli(p).expand_by([len(ind)]))
def model(): loc_latent = pyro.sample( "loc_latent", fakes.NonreparameterizedNormal(self.loc0, torch.pow(self.lam0, -0.5)).independent(1)) for i in pyro.irange("outer", self.n_outer): for j in pyro.irange("inner_%d" % i, self.n_inner): pyro.sample("obs_%d_%d" % (i, j), dist.Normal(loc_latent, torch.pow(self.lam, -0.5)).independent(1), obs=self.data[i][j])
def diamond_guide(dim):
    p0 = torch.tensor(math.exp(-0.70), requires_grad=True)
    p1 = torch.tensor(math.exp(-0.43), requires_grad=True)
    pyro.sample("a1", dist.Bernoulli(p0))
    for i in pyro.irange("irange", dim):
        pyro.sample("b{}".format(i), dist.Bernoulli(p1))
    pyro.sample("c1", dist.Bernoulli(p0))

def model(): loc_latent = pyro.sample( "loc_latent", fakes.NonreparameterizedNormal(self.loc0, torch.pow(self.lam0, -0.5)).independent(1)) for i in pyro.irange("outer", 3): x_i = self.data_as_list[i] with pyro.iarange("inner_%d" % i, x_i.size(0)): for k in range(n_superfluous_top): z_i_k = pyro.sample( "z_%d_%d" % (i, k), fakes.NonreparameterizedNormal(0, 1).expand_by( [4 - i])) assert z_i_k.shape == (4 - i, ) obs_i = pyro.sample("obs_%d" % i, dist.Normal( loc_latent, torch.pow(self.lam, -0.5)).independent(1), obs=x_i) assert obs_i.shape == (4 - i, 2) for k in range(n_superfluous_top, n_superfluous_top + n_superfluous_bottom): z_i_k = pyro.sample( "z_%d_%d" % (i, k), fakes.NonreparameterizedNormal(0, 1).expand_by( [4 - i])) assert z_i_k.shape == (4 - i, )
def guide():
    loc_q = pyro.param(
        "loc_q",
        torch.tensor(self.analytic_loc_n.expand(2) + 0.094,
                     requires_grad=True))
    log_sig_q = pyro.param(
        "log_sig_q",
        torch.tensor(self.analytic_log_sig_n.expand(2) - 0.07,
                     requires_grad=True))
    sig_q = torch.exp(log_sig_q)
    trivial_baseline = pyro.module("loc_baseline", pt_loc_baseline)
    baseline_value = trivial_baseline(torch.ones(1)).squeeze()
    loc_latent = pyro.sample(
        "loc_latent",
        fakes.NonreparameterizedNormal(loc_q, sig_q).independent(1),
        infer=dict(baseline=dict(baseline_value=baseline_value)))
    for i in pyro.irange("outer", 3):
        with pyro.iarange("inner_%d" % i, 4 - i):
            for k in range(n_superfluous_top + n_superfluous_bottom):
                z_baseline = pyro.module(
                    "z_baseline_%d_%d" % (i, k),
                    pt_superfluous_baselines[3 * k + i])
                baseline_value = z_baseline(loc_latent.detach())
                mean_i = pyro.param(
                    "mean_%d_%d" % (i, k),
                    torch.tensor(0.5 * torch.ones(4 - i), requires_grad=True))
                z_i_k = pyro.sample(
                    "z_%d_%d" % (i, k),
                    fakes.NonreparameterizedNormal(mean_i, 1),
                    infer=dict(baseline=dict(baseline_value=baseline_value)))
                assert z_i_k.shape == (4 - i,)

def guide(self, data):
    '''The variational distribution'''
    pyro.module("encoder", self.encoder)
    # these betas are learned in training
    self.betas = torch.zeros((len(self.vocab), self.latent_dim)) + self.smoothing
    # encode the sentiment scores
    datasets = data.source.unique()
    for i in pyro.irange("data_loop", len(datasets)):
        dataset = datasets[i]
        subset = data.loc[data.source == dataset]
        sent = torch.tensor(subset.sent.values.tolist(), dtype=torch.float)
        if len(sent.shape) == 1:
            sent = sent.unsqueeze(-1)
        # sum the omegas
        self.betas[subset.word_id.values] += self.encoder.forward(sent, dataset)
    with pyro.iarange("score_sample", len(self.vocab)):
        pyro.sample("latent_scores", dist.Dirichlet(self.betas))

def guide(): q = pyro.param("q") pyro.sample("x", dist.Bernoulli(q), infer={"enumerate": enumerate1}) for i in pyro.irange("irange", irange_dim): pyro.sample("y_{}".format(i), dist.Bernoulli(q), infer={"enumerate": enumerate2})
def model(corpus):
    global counter
    dhWeights = pyro.sample("dhWeights", dhWeights_Prior)
    distanceWeights = pyro.sample("distanceWeights", distanceWeights_Prior)
    for q in pyro.irange("data_loop", corpus.length(),
                         subsample_size=5, use_cuda=False):
        point = corpus.getSentence(q)
        current = [point]
        counter += 1
        printHere = (counter % 100 == 0)
        batchOrderedLogits = list(zip(*[
            orderSentence(x, dhLogits, y % batchSize == 0 and printHere,
                          dhWeights, distanceWeights)
            for y, x in enumerate(current)]))
        batchOrdered = batchOrderedLogits[0]
        lengths = list(map(len, current))
        maxLength = lengths[int(0.8 * batchSize)]
        assert batchSize == 1
        if printHere:
            print("BACKWARD 3 " + __file__ + " " + language + " " +
                  str(myID) + " " + str(counter))
        logitCorr = batchOrdered[0][-1]["relevant_logprob_sum"]
        pyro.sample("result_Correct_{}".format(q),
                    Bernoulli(logits=logitCorr),
                    obs=Variable(torch.FloatTensor([1.0])))

def guide(): p = pyro.param("p", torch.tensor(0.5, requires_grad=True)) inner_iarange = pyro.iarange("iarange", 3, 2) for i in pyro.irange("irange", 3, 2): with inner_iarange as ind: pyro.sample("x_{}".format(i), dist.Bernoulli(p).expand_by([len(ind)]))
def gmm_guide(data, verbose=False): for i in pyro.irange("data", len(data)): p = pyro.param("p_{}".format(i), torch.tensor(0.6, requires_grad=True)) z = pyro.sample("z_{}".format(i), dist.Bernoulli(p)) z = z.long() if verbose: logger.debug("G{} z_{} = {}".format(" " * i, i, z.cpu().numpy()))
def model(): p = torch.tensor(0.5) inner_iarange = pyro.iarange("iarange", 3, 2) for i in pyro.irange("irange", 3, 2): with inner_iarange as ind: pyro.sample("x_{}".format(i), dist.Bernoulli(p).expand_by([len(ind)]))
def model(policies):
    policies = pyro.param("policies", policies)
    for i in range(5):
        n = rounds[str(i)]
        # irange and sample sites need names that are unique per iteration
        for j in pyro.irange('data_{}'.format(i), 8, subsample_size=n):
            action = move(policies[i][j], i)
            pyro.sample('obs_{}_{}'.format(i, j),
                        dist.Bernoulli(policies[i, j]), obs=action)

def irange_model(subsample_size):
    loc = torch.zeros(20)
    scale = torch.ones(20)
    result = []
    for i in pyro.irange('irange', 20, subsample_size):
        pyro.sample("x_{}".format(i), dist.Normal(loc[i], scale[i]))
        result.append(i)
    return result

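# A minimal usage sketch (an assumption, not from the original source): since
# pyro.irange draws a random subsample of indices, calling irange_model(5)
# should visit and return exactly 5 of the 20 indices.
indices = irange_model(5)
assert len(indices) == 5 and all(0 <= i < 20 for i in indices)
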
def irange_model(subsample_size):
    mu = Variable(torch.zeros(20))
    sigma = Variable(torch.ones(20))
    result = []
    for i in pyro.irange('irange', 20, subsample_size):
        pyro.sample("x_{}".format(i), dist.normal, mu[i], sigma[i])
        result.append(i)
    return result

def guide(): p = pyro.param("p", Variable(torch.Tensor([0.5]), requires_grad=True)) for i in pyro.irange("irange", 10, 5): with pyro.iarange("iarange", 10, 5) as ind: pyro.sample("x_{}".format(i), dist.bernoulli, p, batch_size=len(ind))
def guide(): q = pyro.param("q") with pyro.iarange("particles", num_particles): pyro.sample("x", dist.Bernoulli(q).expand_by([num_particles]), infer={"enumerate": enumerate1}) for i in pyro.irange("irange", irange_dim): pyro.sample("y_{}".format(i), dist.Bernoulli(q).expand_by([num_particles]), infer={"enumerate": enumerate2})
def gmm_guide(data, verbose=False): for i in pyro.irange("data", len(data)): p = pyro.param("p_{}".format(i), Variable(torch.Tensor([0.6]), requires_grad=True)) z = pyro.sample("z_{}".format(i), dist.Bernoulli(p)) assert z.size() == (1,) z = z.long().data[0] if verbose: print("G{} z_{} = {}".format(" " * i, i, z))
def model(): p = Variable(torch.Tensor([0.5])) for i in pyro.irange("irange", 10, 5): with pyro.iarange("iarange", 10, 5) as ind: pyro.sample("x_{}".format(i), dist.bernoulli, p, batch_size=len(ind))
def diamond_model(dim):
    p0 = torch.tensor(math.exp(-0.20), requires_grad=True)
    p1 = torch.tensor(math.exp(-0.33), requires_grad=True)
    pyro.sample("a1", dist.Bernoulli(p0))
    pyro.sample("c1", dist.Bernoulli(p1))
    # range over dim so the b-sites line up with diamond_guide
    for i in pyro.irange("irange", dim):
        b_i = pyro.sample("b{}".format(i), dist.Bernoulli(p0 * p1))
        assert b_i.shape == ()
    pyro.sample("obs", dist.Bernoulli(p0), obs=torch.tensor(1.0))

def gmm_model(data, verbose=False): p = pyro.param("p", torch.tensor(0.3, requires_grad=True)) scale = pyro.param("scale", torch.tensor(1.0, requires_grad=True)) mus = torch.tensor([-1.0, 1.0]) for i in pyro.irange("data", len(data)): z = pyro.sample("z_{}".format(i), dist.Bernoulli(p)) z = z.long() if verbose: logger.debug("M{} z_{} = {}".format(" " * i, i, z.cpu().numpy())) pyro.sample("x_{}".format(i), dist.Normal(mus[z], scale), obs=data[i])
def gmm_model(data, verbose=False): p = pyro.param("p", Variable(torch.Tensor([0.3]), requires_grad=True)) sigma = pyro.param("sigma", Variable(torch.Tensor([1.0]), requires_grad=True)) mus = Variable(torch.Tensor([-1, 1])) for i in pyro.irange("data", len(data)): z = pyro.sample("z_{}".format(i), dist.Bernoulli(p)) assert z.size() == (1,) z = z.long().data[0] if verbose: print("M{} z_{} = {}".format(" " * i, i, z)) pyro.observe("x_{}".format(i), dist.Normal(mus[z], sigma), data[i])
def nested_model_guide(include_obs=True, dim1=11, dim2=7):
    p0 = torch.tensor(math.exp(-0.40 - include_obs * 0.2), requires_grad=True)
    p1 = torch.tensor(math.exp(-0.33 - include_obs * 0.1), requires_grad=True)
    pyro.sample("a1", dist.Bernoulli(p0 * p1))
    for i in pyro.irange("irange", dim1):
        pyro.sample("b{}".format(i), dist.Bernoulli(p0))
        with pyro.iarange("iarange_{}".format(i), dim2 + i) as ind:
            c_i = pyro.sample("c{}".format(i),
                              dist.Bernoulli(p1).expand_by([len(ind)]))
            assert c_i.shape == (dim2 + i,)
            if include_obs:
                obs_i = pyro.sample("obs{}".format(i), dist.Bernoulli(c_i),
                                    obs=torch.ones(c_i.size()))
                assert obs_i.shape == (dim2 + i,)

def model(): x = pyro.sample("x", dist.Normal(0, 1)) assert x.shape == () for i in pyro.irange("irange", 3): y = pyro.sample("y_{}".format(i), dist.Normal(0, 1).expand_by([2, 1 + i, 2]).independent(3)) assert y.shape == (2, 1 + i, 2) z = pyro.sample("z", dist.Normal(0, 1).expand_by([2]).independent(1)) assert z.shape == (2,) pyro.sample("obs", dist.Bernoulli(0.1), obs=torch.tensor(0))
def guide(): loc_q = pyro.param("loc_q", torch.tensor( analytic_loc_n.data + torch.tensor([-0.18, 0.23]), requires_grad=True)) log_sig_q = pyro.param("log_sig_q", torch.tensor( analytic_log_sig_n.data - torch.tensor([-0.18, 0.23]), requires_grad=True)) sig_q = torch.exp(log_sig_q) pyro.sample("loc_latent", dist.Normal(loc_q, sig_q).independent(1)) if map_type == "irange" or map_type is None: for i in pyro.irange("aaa", len(data), batch_size): pass elif map_type == "iarange": # dummy iarange to do subsampling for observe with pyro.iarange("aaa", len(data), batch_size): pass else: pass
def model(): loc_latent = pyro.sample("loc_latent", dist.Normal(loc0, torch.pow(lam0, -0.5)).independent(1)) if map_type == "irange": for i in pyro.irange("aaa", len(data), batch_size): pyro.sample("obs_%d" % i, dist.Normal(loc_latent, torch.pow(lam, -0.5)) .independent(1), obs=data[i]), elif map_type == "iarange": with pyro.iarange("aaa", len(data), batch_size) as ind: pyro.sample("obs", dist.Normal(loc_latent, torch.pow(lam, -0.5)) .independent(1), obs=data[ind]), else: for i, x in enumerate(data): pyro.sample('obs_%d' % i, dist.Normal(loc_latent, torch.pow(lam, -0.5)) .independent(1), obs=x) return loc_latent
def model(): with pyro.iarange("particles", num_particles): pyro.sample("x", dist.Bernoulli(p).expand_by([num_particles])) for i in pyro.irange("irange", irange_dim): pyro.sample("y_{}".format(i), dist.Bernoulli(p).expand_by([num_particles]))
def irange_cuda_model(subsample_size):
    loc = torch.zeros(20).cuda()
    scale = torch.ones(20).cuda()
    for i in pyro.irange("data", 20, subsample_size, use_cuda=True):
        pyro.sample("x_{}".format(i), dist.Normal(loc[i], scale[i]))

def model(means, stds): a_irange = pyro.irange("a", len(means), mean_batch_size) b_irange = pyro.irange("b", len(stds), std_batch_size) return [[pyro.sample("x_{}{}".format(i, j), dist.Normal(means[i], stds[j])) for j in b_irange] for i in a_irange]
def guide(): p = pyro.param("p", torch.tensor(0.5, requires_grad=True)) for i in pyro.irange("irange", 10, subsample_size): pass pyro.sample("x", dist.Bernoulli(p))
def irange_custom_model(subsample):
    result = []
    for i in pyro.irange('irange', 20, subsample=subsample):
        result.append(i)
    return result

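# A small usage sketch (assumed, not from the original source): pyro.irange
# also accepts an explicit `subsample` argument, in which case it iterates
# over exactly the indices provided rather than drawing a random subset.
fixed = irange_custom_model(subsample=torch.tensor([1, 3, 5]))
assert len(fixed) == 3  # only the provided indices are visited
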
def guide(): p = pyro.param("p", torch.tensor(0.5, requires_grad=True)) for i in pyro.irange("irange", 10, 5): pyro.sample("x_{}".format(i), dist.Bernoulli(p))
def model(batch_size_outer=2, batch_size_inner=2): data = [[torch.ones(1)] * 2] * 2 loc_latent = pyro.sample("loc_latent", dist.Normal(torch.zeros(1), torch.ones(1))) for i in pyro.irange("irange_outer", 2, batch_size_outer): for j in pyro.irange("irange_inner_%d" % i, 2, batch_size_inner): pyro.sample("z_%d_%d" % (i, j), dist.Normal(loc_latent + data[i][j], torch.ones(1)))
def model():
    p = torch.tensor(0.5)
    for i in pyro.irange("irange", 10, 5):
        pyro.sample("x_{}".format(i), dist.Bernoulli(p))

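# A minimal sketch (an assumption, not from the original source): tracing the
# model above with poutine shows that the irange visits only 5 of the 10
# possible "x_{i}" sites on any given run.
import pyro.poutine as poutine

trace = poutine.trace(model).get_trace()
subsampled = [name for name in trace.nodes if name.startswith("x_")]
assert len(subsampled) == 5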