def test_guide_list(auto_class):
    def model():
        pyro.sample("x", dist.Normal(0., 1.))
        pyro.sample("y", dist.MultivariateNormal(torch.zeros(5), torch.eye(5, 5)))

    guide = AutoGuideList(model)
    guide.add(auto_class(poutine.block(model, expose=["x"]), prefix="auto_x"))
    guide.add(auto_class(poutine.block(model, expose=["y"]), prefix="auto_y"))
    guide()


def test_guide_list(auto_class):
    def model():
        pyro.sample("x", dist.Normal(0., 1.).expand([2]))
        pyro.sample("y", dist.MultivariateNormal(torch.zeros(5), torch.eye(5, 5)))

    guide = AutoGuideList(model)
    guide.add(auto_class(poutine.block(model, expose=["x"]), prefix="auto_x"))
    guide.add(auto_class(poutine.block(model, expose=["y"]), prefix="auto_y"))
    guide()


def test_discrete_parallel(continuous_class):
    K = 2
    data = torch.tensor([0., 1., 10., 11., 12.])

    def model(data):
        weights = pyro.sample('weights', dist.Dirichlet(0.5 * torch.ones(K)))
        locs = pyro.sample('locs', dist.Normal(0, 10).expand_by([K]).independent(1))
        scale = pyro.sample('scale', dist.LogNormal(0, 1))
        with pyro.iarange('data', len(data)):
            weights = weights.expand(torch.Size((len(data),)) + weights.shape)
            assignment = pyro.sample('assignment', dist.Categorical(weights))
            pyro.sample('obs', dist.Normal(locs[assignment], scale), obs=data)

    guide = AutoGuideList(model)
    guide.add(continuous_class(poutine.block(model, hide=["assignment"])))
    guide.add(AutoDiscreteParallel(poutine.block(model, expose=["assignment"])))

    elbo = TraceEnum_ELBO(max_iarange_nesting=1)
    loss = elbo.loss_and_grads(model, guide, data)
    assert np.isfinite(loss), loss


def test_callable(auto_class):
    def model():
        pyro.sample("x", dist.Normal(0., 1.))
        pyro.sample("y", dist.MultivariateNormal(torch.zeros(5), torch.eye(5, 5)))

    def guide_x():
        x_loc = pyro.param("x_loc", torch.tensor(0.))
        pyro.sample("x", dist.Delta(x_loc))

    guide = AutoGuideList(model)
    guide.add(guide_x)
    guide.add(auto_class(poutine.block(model, expose=["y"]), prefix="auto_y"))
    values = guide()
    assert set(values) == set(["y"])


def auto_guide_callable(model):
    def guide_x():
        x_loc = pyro.param("x_loc", torch.tensor(1.))
        x_scale = pyro.param("x_scale", torch.tensor(2.), constraint=constraints.positive)
        pyro.sample("x", dist.Normal(x_loc, x_scale))

    def median_x():
        return {"x": pyro.param("x_loc", torch.tensor(1.))}

    guide = AutoGuideList(model)
    guide.add(AutoCallable(model, guide_x, median_x))
    guide.add(AutoDiagonalNormal(poutine.block(model, hide=["x"])))
    return guide


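# A minimal usage sketch, not part of the original test file: toy_model and the
# calls below are assumptions added for illustration, using the same imports as
# the fragments above (pyro, dist, poutine). It shows why median_x is passed to
# AutoCallable: the callback becomes that part's median(), so the composed
# AutoGuideList can report medians for both the hand-written sub-guide ("x")
# and the AutoDiagonalNormal sub-guide ("y").
def toy_model():
    pyro.sample("x", dist.Normal(0., 1.))
    pyro.sample("y", dist.Normal(0., 1.))


toy_guide = auto_guide_callable(toy_model)
toy_guide()                   # instantiates the parameters of both sub-guides
print(toy_guide.median())     # expected to contain entries for both "x" and "y"

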
def test_discrete_parallel(continuous_class):
    K = 2
    data = torch.tensor([0., 1., 10., 11., 12.])

    def model(data):
        weights = pyro.sample('weights', dist.Dirichlet(0.5 * torch.ones(K)))
        locs = pyro.sample('locs', dist.Normal(0, 10).expand_by([K]).to_event(1))
        scale = pyro.sample('scale', dist.LogNormal(0, 1))
        with pyro.plate('data', len(data)):
            weights = weights.expand(torch.Size((len(data),)) + weights.shape)
            assignment = pyro.sample('assignment', dist.Categorical(weights))
            pyro.sample('obs', dist.Normal(locs[assignment], scale), obs=data)

    guide = AutoGuideList(model)
    guide.add(continuous_class(poutine.block(model, hide=["assignment"])))
    guide.add(AutoDiscreteParallel(poutine.block(model, expose=["assignment"])))

    elbo = TraceEnum_ELBO(max_plate_nesting=1)
    loss = elbo.loss_and_grads(model, guide, data)
    assert np.isfinite(loss), loss


def auto_guide_list_x(model):
    guide = AutoGuideList(model)
    guide.add(AutoDelta(poutine.block(model, expose=["x"])))
    guide.add(AutoDiagonalNormal(poutine.block(model, hide=["x"])))
    return guide


    beta_loc = pyro.param(
        'beta_resp_loc', torch.randn(num_resp, len(mix_params), device=x.device))
    beta_scale = pyro.param(
        'beta_resp_scale',
        torch.tril(1. * torch.eye(len(mix_params), len(mix_params), device=x.device)),
        constraint=constraints.lower_cholesky)
    pyro.sample(
        "beta_resp",
        dist.MultivariateNormal(beta_loc, scale_tril=beta_scale).to_event(1))


# In[11]:

guide = AutoGuideList(model)
guide.add(AutoDiagonalNormal(poutine.block(model, expose=['theta', 'L_omega'])))
guide.add(my_local_guide)  # automatically wrapped in an AutoCallable


# # Run variational inference

# In[12]:

# prepare data for running inference
train_x = torch.tensor(alt_attributes, dtype=torch.float)
train_x = train_x.cuda()
train_y = torch.tensor(true_choices, dtype=torch.int)
train_y = train_y.cuda()
alt_av_cuda = torch.from_numpy(alt_availability)
alt_av_cuda = alt_av_cuda.cuda()
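
# The excerpt ends after the data is moved to the GPU, so the cell below is a
# hedged sketch (not from the original notebook) of the SVI loop that the
# "Run variational inference" section sets up. The learning rate, number of
# steps, and the argument order passed to svi.step() are assumptions, as is
# the model signature model(x, y, availability).
from pyro.infer import SVI, Trace_ELBO
from pyro.optim import Adam

svi = SVI(model, guide, Adam({"lr": 0.01}), loss=Trace_ELBO())

for step in range(1000):  # number of steps is an assumption
    loss = svi.step(train_x, train_y, alt_av_cuda)  # argument order is an assumption
    if step % 100 == 0:
        print("step {:>4d}  ELBO loss: {:.2f}".format(step, loss))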