import pytest
import torch
import torch.nn as nn
from torch.distributions import constraints

import pyro
import pyro.distributions as dist
from pyro.nn import PyroModule, PyroSample


def normal_normal_model(data):
    # Normal prior on the latent y, Normal likelihood on the observed data.
    x = torch.tensor([0.0])
    y = pyro.sample('y', dist.Normal(x, torch.ones(data.shape)))
    pyro.sample('obs', dist.Normal(y, torch.tensor([1.0])), obs=data)
    return y
def guide(data):
    # Variational parameters: an unconstrained location and a log-scale that is
    # exponentiated to keep the scale positive.
    guide_loc = pyro.param("guide_loc", torch.tensor(0.))
    guide_scale = pyro.param("guide_scale_log", torch.tensor(0.)).exp()
    pyro.sample("loc", dist.Normal(guide_loc, guide_scale))
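# Sketch (not from the original snippets): how a model/guide pair like the ones
# above is usually trained with stochastic variational inference. A guide's
# sample-site names must match the model's latent sites; `model`, `guide`, and
# `data` below stand in for whichever matching pair is used.
from pyro.infer import SVI, Trace_ELBO
from pyro.optim import Adam


def train(model, guide, data, num_steps=1000, lr=1e-2):
    pyro.clear_param_store()
    svi = SVI(model, guide, Adam({"lr": lr}), loss=Trace_ELBO())
    for step in range(num_steps):
        svi.step(data)  # one ELBO gradient step on the model/guide pair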
def __init__(self, in_features, out_features):
    # __init__ of a PyroModule subclass: a linear layer whose weight and bias
    # are given Normal priors via PyroSample.
    super().__init__()
    self.linear = PyroModule[nn.Linear](in_features, out_features)
    self.linear.weight = PyroSample(
        dist.Normal(0., 1.).expand([out_features, in_features]).to_event(2))
    self.linear.bias = PyroSample(
        dist.Normal(0., 10.).expand([out_features]).to_event(1))
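# Sketch (an assumption about the surrounding class, not from the original): the
# forward() that typically accompanies an __init__ like the one above in a
# Bayesian-regression PyroModule, with a latent observation noise `sigma` and the
# targets observed inside a data plate.
def forward(self, x, y=None):
    sigma = pyro.sample("sigma", dist.Uniform(0., 10.))
    mean = self.linear(x).squeeze(-1)
    with pyro.plate("data", x.shape[0]):
        pyro.sample("obs", dist.Normal(mean, sigma), obs=y)
    return mean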
def model(): pyro.sample("x", dist.Normal(-0.2, 1.2)) pyro.sample("y", dist.Normal(0.2, 0.7))
def scale(guess):
    weight = pyro.sample("weight", dist.Normal(guess, 1.0))
    return pyro.sample("measurement", dist.Normal(weight, 0.75))
def test_observe_warn():
    # Observing a value outside of an inference context should raise a RuntimeWarning.
    with pytest.warns(RuntimeWarning):
        pyro.sample("x", dist.Normal(0, 1), obs=torch.tensor(0.))
def model(): with pyro.plate("J", size, subsample_size=subsample_size): pyro.sample("x", dist.Normal(0, 1))
def model(num_particles):
    # `data` is assumed to be defined in the enclosing scope, and the parameter
    # "p" to have been initialized in the param store.
    p = pyro.param("p")
    with pyro.plate("num_particles", num_particles, dim=-2):
        z = pyro.sample("z", dist.Bernoulli(p))
        with pyro.plate("data", 3):
            pyro.sample("x", dist.Normal(z, 1.), obs=data)
def model(): x = pyro.sample("x", dist.Categorical(torch.ones(3))) with pyro.plate("data", len(data)): pyro.sample("obs", dist.Normal(x.float(), 1), obs=data)
def model(data, params):
    # initialize data
    N = data["N"]
    n_age = data["n_age"]
    n_eth = data["n_eth"]
    age = data["age"].long() - 1
    eth = data["eth"].long() - 1
    x = data["x"]
    y = data["y"]

    # init parameters
    sigma_a1 = params["sigma_a1"]
    sigma_a2 = params["sigma_a2"]
    sigma_b1 = params["sigma_b1"]
    sigma_b2 = params["sigma_b2"]
    sigma_c = params["sigma_c"]
    sigma_d = params["sigma_d"]
    sigma_y = params["sigma_y"]

    mu_a1 = pyro.sample('mu_a1', dist.Normal(0., 1.))
    mu_a2 = pyro.sample('mu_a2', dist.Normal(0., 1.))
    mu_b1 = pyro.sample('mu_b1', dist.Normal(0., 1.))
    mu_b2 = pyro.sample('mu_b2', dist.Normal(0., 1.))
    mu_c = pyro.sample('mu_c', dist.Normal(0., 1.))
    mu_d = pyro.sample('mu_d', dist.Normal(0., 1.))

    plate_a = pyro.plate("as", n_eth, dim=-2)
    plate_b = pyro.plate("bs", n_age, dim=-1)
    with plate_a:
        a1 = pyro.sample('a1', dist.Normal(10 * mu_a1, sigma_a1))
        a2 = pyro.sample('a2', dist.Normal(10 * mu_a2, sigma_a2))
    with plate_b:
        b1 = pyro.sample('b1', dist.Normal(10 * mu_b1, sigma_b1))
        b2 = pyro.sample('b2', dist.Normal(0.1 * mu_b2, sigma_b2))
    with plate_a, plate_b:
        c = pyro.sample('c', dist.Normal(10. * mu_c, sigma_c))
        d = pyro.sample('d', dist.Normal(0.1 * mu_d, sigma_d))

    with pyro.plate("data", N):
        y_hat = (a1[..., eth, :].squeeze(-1) + a2[..., eth, :].squeeze(-1) * x
                 + b1[..., age].squeeze() + b2[..., age].squeeze() * x
                 + c[..., eth, age] + d[..., eth, age] * x)
        # A hack to make dimensions broadcast correctly when there is an IW plate
        if len(a1.size()) > 2:
            y_hat = y_hat.unsqueeze(-2)
        pyro.sample('y', dist.Normal(y_hat, sigma_y), obs=y)
def model(subsample_size):
    # Updated from the legacy Pyro 0.x API (iarange, observe, ng_zeros/ng_ones);
    # the modern Normal is reparameterized by default, so the flag is dropped.
    # `data` is assumed to be defined in the enclosing scope.
    with pyro.plate("data", len(data), subsample_size) as ind:
        x = data[ind]
        z = pyro.sample("z", dist.Normal(torch.zeros(len(x)), torch.ones(len(x))))
        pyro.sample("x", dist.Normal(z, torch.ones(len(x))), obs=x)
def model(data):
    mu = pyro.sample('mu', dist.Normal(0., 1.))
    sigma = pyro.sample('sigma', dist.HalfCauchy(5.))
    with pyro.plate('observe_data'):
        pyro.sample('obs', dist.Normal(mu, sigma), obs=data)
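# Sketch (not from the original snippets): a model like the one above is often
# fit with Hamiltonian Monte Carlo via NUTS; the names and step counts below are
# illustrative only.
from pyro.infer import MCMC, NUTS


def run_mcmc(model, data, num_samples=500, warmup_steps=200):
    kernel = NUTS(model)
    mcmc = MCMC(kernel, num_samples=num_samples, warmup_steps=warmup_steps)
    mcmc.run(data)
    return mcmc.get_samples()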
def linear(xes, yes):
    slope = pyro.sample("slope", dist.Normal(5, 10))
    intercept = pyro.sample("intercept", dist.Normal(0, 10))
    var = pyro.sample("var", dist.InverseGamma(3, 0.1))
    # The original returned early without using intercept, var, or yes; the
    # likelihood below completes the apparent intent (an assumption).
    pyro.sample("y", dist.Normal(slope * xes + intercept, var.sqrt()), obs=yes)
    return slope
def forward(self, observations={"y1": 0, "y2": 0}):
    # forward() of an amortized guide module: a small linear network maps the
    # summed observations to the posterior mean of the latent "x".
    pyro.module("guide", self)
    summed_obs = observations["y1"] + observations["y2"]
    mean = self.linear(summed_obs.view(1, 1))[0, 0]
    pyro.sample("x", dist.Normal(mean, self.std))
def model(x, y):
    a = pyro.sample('a', dist.Normal(0., 5.))
    b = pyro.sample('b', dist.Normal(0., 5.))
    y = pyro.sample('y', dist.Normal(a * x + b, 1.), obs=y)
    return y
def model():
    # `shape`, `dim`, `loc`, and `scale` are assumed to be defined in the enclosing scope.
    with pyro.plate_stack("plates", shape[:dim]):
        with pyro.plate("particles", 10000):
            pyro.sample("x", dist.Normal(loc, scale).expand(shape).to_event(-dim))
def test_sample_ok():
    x = pyro.sample("x", dist.Normal(0, 1))
    assert isinstance(x, torch.Tensor)
    assert x.shape == ()
def get_posterior(self, *args, **kwargs):
    """
    Returns a diagonal Normal posterior distribution.
    """
    return dist.Normal(self.loc, self.scale).to_event(1)
def model_with_param():
    x = pyro.param("x", torch.tensor(1.))
    pyro.sample("y", dist.Normal(x, 1))
def perfect_guide(measurement=9.5):
    # The original referenced `guess` before the lambda bound it; restructured so
    # the returned guide takes `guess` and computes the exact posterior from it.
    # `true_mean_std` is assumed to return the conjugate posterior mean and std.
    def guide(guess):
        mean, std = true_mean_std(guess, measurement)
        return pyro.sample("weight", dist.Normal(mean, std))
    return guide
def model(data): loc = pyro.param("loc", constant(0.0)) scale = pyro.param("scale", constant(1.0), constraint=constraints.positive) pyro.sample("x", dist.Normal(loc, scale).expand_by(data.shape).to_event(1), obs=data)
def perfect_intervention_guide(guess):
    return pyro.sample("weight", dist.Normal(guess, 1))
def init_vector(name, dims=None):
    return pyro.sample(
        name, dist.Normal(torch.zeros(dims), 0.2 * torch.ones(dims)).to_event(1))
def scale_parametrized_guide(guess):
    # `guess` is a required argument but does not need to be used here;
    # e.g. a = pyro.param("a", torch.tensor(guess)) would also work.
    a = pyro.param("a", torch.tensor(1.))
    b = pyro.param("b", torch.tensor(1.), constraint=constraints.positive)
    return pyro.sample("weight", dist.Normal(a, b))
def model(data): loc = pyro.sample("loc", dist.Normal(0., 1.)) with pyro.plate("data", len(data), dim=-1): pyro.sample("obs", dist.Normal(loc, 1.), obs=data)
def scale(guess): weight = pyro.sample("weight", dist.Normal(guess, 1.)) measure = pyro.sample("measure", dist.Normal(weight, 0.75)) return guess, weight, measure
def model(data):
    # `named` is assumed to be Pyro's named-object helper (pyro.contrib.autoname.named,
    # pyro.contrib.named in older releases); `model_recurse` is assumed to be defined elsewhere.
    latent = named.Object("latent")
    latent.z.sample_(dist.Normal(0.0, 1.0))
    model_recurse(data, latent)
def scale_obs(guess, measurement=9.5):  # equivalent to conditioned_scale above
    weight = pyro.sample("weight", dist.Normal(guess, 1.))
    # here we condition on measurement == 9.5
    measure = pyro.sample("measure", dist.Normal(weight, 0.75), obs=measurement)
    return guess, weight, measure
def linear(a: Yaml, b: Yaml, x: Yaml):
    pyro.sample("y", dist.Normal(a + b * x, 1.0))
def model(prior_mean, observations={"x1": 0, "x2": 0}):
    x = pyro.sample("z", dist.Normal(prior_mean, torch.tensor(5**0.5)))
    y1 = pyro.sample("x1", dist.Normal(x, torch.tensor(2**0.5)), obs=observations["x1"])
    y2 = pyro.sample("x2", dist.Normal(x, torch.tensor(2**0.5)), obs=observations["x2"])
    return x