    num_samples = 5
    model.fit_mcmc(
        warmup_steps=1, num_samples=num_samples, max_tree_depth=2, **options
    )

    # Predict and forecast.
    samples = model.predict(forecast=forecast)
    assert samples["S"].shape == (num_samples, duration + forecast)
    assert samples["I"].shape == (num_samples, duration + forecast)
    assert samples["beta"].shape == (num_samples, duration + forecast)


@pytest.mark.parametrize("duration", [4, 12])
@pytest.mark.parametrize("forecast", [7])
@pytest.mark.parametrize(
    "options",
    [
        xfail_param({}, reason="Delta is incompatible with relaxed inference"),
        {"num_quant_bins": 2},
        {"num_quant_bins": 2, "haar": False},
        {"num_quant_bins": 2, "haar_full_mass": 0},
        {"num_quant_bins": 4},
    ],
    ids=str,
)
def test_sparse_smoke(duration, forecast, options):
    population = 100
    recovery_time = 7.0

    # Generate data.
    data = [None] * duration
    mask = torch.arange(duration) % 4 == 3
    model = SparseSIRModel(population, recovery_time, data, mask)
    expected_grads = {'loc': np.array([0.5, -2.0]), 'scale': np.array([2.0])}
    for name in sorted(params):
        logger.info('expected {} = {}'.format(name, expected_grads[name]))
        logger.info('actual {} = {}'.format(name, actual_grads[name]))
    assert_equal(actual_grads, expected_grads, prec=precision)


@pytest.mark.parametrize("reparameterized", [True, False], ids=["reparam", "nonreparam"])
@pytest.mark.parametrize("subsample", [False, True], ids=["full", "subsample"])
@pytest.mark.parametrize("Elbo", [
    Trace_ELBO,
    TraceGraph_ELBO,
    TraceEnum_ELBO,
    TraceMeanField_ELBO,
    xfail_param(JitTrace_ELBO,
                reason="in broadcast_all: RuntimeError: expected int at position 0, but got: Tensor"),
    xfail_param(JitTraceGraph_ELBO,
                reason="in broadcast_all: RuntimeError: expected int at position 0, but got: Tensor"),
    xfail_param(JitTraceEnum_ELBO,
                reason="in broadcast_all: RuntimeError: expected int at position 0, but got: Tensor"),
    xfail_param(JitTraceMeanField_ELBO,
                reason="in broadcast_all: RuntimeError: expected int at position 0, but got: Tensor"),
])
def test_subsample_gradient_sequential(Elbo, reparameterized, subsample):
    pyro.clear_param_store()
    data = torch.tensor([-0.5, 2.0])
    subsample_size = 1 if subsample else len(data)
    num_particles = 5000
    precision = 0.333
    Normal = dist.Normal if reparameterized else fakes.NonreparameterizedNormal
    'contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=3 --jit',
    'contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=4 --jit',
    'contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=5 --jit',
    'contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=6 --jit',
    'contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=6 --jit --raftery-parameterization',
]

HOROVOD_EXAMPLES = [
    'svi_horovod.py --num-epochs=2 --size=400',
    pytest.param('svi_horovod.py --num-epochs=2 --size=400 --cuda',
                 marks=[requires_cuda]),
]

FUNSOR_EXAMPLES = [
    xfail_param(
        'contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=0 --funsor',
        reason="unreproducible recursion error on travis?"),
    'contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=1 --funsor',
    'contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=2 --funsor',
    'contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=3 --funsor',
    'contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=4 --funsor',
    'contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=5 --funsor',
    'contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=6 --funsor',
    'contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=6 --raftery-parameterization --funsor',
    'contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=6 --jit --funsor',
    'contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=6 --jit --raftery-parameterization --funsor',
    xfail_param(
        'contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=0 --tmc --tmc-num-samples=2 --funsor',
        reason="unreproducible recursion error on travis?"),
    'contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=1 --tmc --tmc-num-samples=2 --funsor',
    'contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=2 --tmc --tmc-num-samples=2 --funsor',
    y = pyro.sample('y', dist.Normal(0, 3))
    with pyro.plate('D', dim):
        pyro.sample('x', dist.Normal(0, torch.exp(y / 2)))


def dirichlet_categorical(data):
    concentration = torch.tensor([1.0, 1.0, 1.0])
    p_latent = pyro.sample('p', dist.Dirichlet(concentration))
    with pyro.plate('N', data.shape[0]):
        pyro.sample('obs', dist.Categorical(p_latent), obs=data)
    return p_latent


@pytest.mark.parametrize('jit', [
    False,
    xfail_param(True, reason="https://github.com/pyro-ppl/pyro/issues/2292"),
])
def test_neals_funnel_smoke(jit):
    dim = 10
    guide = AutoIAFNormal(neals_funnel)
    svi = SVI(neals_funnel, guide, optim.Adam({"lr": 1e-10}), Trace_ELBO())
    for _ in range(1000):
        svi.step(dim)

    neutra = NeuTraReparam(guide.requires_grad_(False))
    model = neutra.reparam(neals_funnel)
    nuts = NUTS(model, jit_compile=jit)
    mcmc = MCMC(nuts, num_samples=50, warmup_steps=50)
    mcmc.run(dim)
    samples = mcmc.get_samples()
pyro.sample("x", d) with context1, context2: pyro.sample("y", d) with context1, context3: pyro.sample("z", d) model_trace = poutine.trace(model).get_trace() print_debug_info(model_trace) trace_prob_evaluator = Eval(model_trace, True, 3) # all discrete sites enumerated out. assert_equal(trace_prob_evaluator.log_prob(model_trace), torch.tensor(0.)) @pytest.mark.parametrize("Eval", [ TraceTreeEvaluator, xfail_param(TraceEinsumEvaluator, reason="TODO: Debug this failure case.") ]) def test_enumeration_in_tree(Eval): @poutine.enum(first_available_dim=-5) @config_enumerate @poutine.condition( data={ "sample1": torch.tensor(0.), "sample2": torch.tensor(1.), "sample3": torch.tensor(2.) }) def model(): outer = pyro.plate("outer", 2, dim=-1) inner1 = pyro.plate("inner1", 2, dim=-3) inner2 = pyro.plate("inner2", 3, dim=-2) inner3 = pyro.plate("inner3", 2, dim=-4)
import pytest
import six.moves.cPickle as pickle
import torch

import pyro.distributions as dist
from pyro.distributions.torch_distribution import TorchDistributionMixin
from tests.common import xfail_param

# Collect distributions.
BLACKLIST = [
    dist.TorchDistribution,
    dist.ExponentialFamily,
    dist.OMTMultivariateNormal,
]
XFAIL = {
    dist.Gumbel: xfail_param(dist.Gumbel, reason='cannot pickle weakref'),
}
DISTRIBUTIONS = [d for d in dist.__dict__.values()
                 if isinstance(d, type)
                 if issubclass(d, TorchDistributionMixin)
                 if d not in BLACKLIST]
DISTRIBUTIONS.sort(key=lambda d: d.__name__)
DISTRIBUTIONS = [XFAIL.get(d, d) for d in DISTRIBUTIONS]

# Provide default args if Dist(1, 1, ..., 1) is known to fail.
ARGS = {
    dist.AVFMultivariateNormal: [torch.zeros(3), torch.eye(3), torch.rand(2, 4, 3)],
    dist.Bernoulli: [0.5],
    dist.Binomial: [2, 0.5],
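# For reference: `xfail_param`, imported from tests.common above, marks a
# single parametrize case as an expected failure. A minimal sketch of such a
# helper (hypothetical name `_xfail_param_sketch`; the real helper may differ),
# assuming it simply forwards its keyword arguments to pytest.mark.xfail:
def _xfail_param_sketch(*args, **kwargs):
    # Wrap the case values in pytest.param with an xfail mark; kwargs such as
    # reason=... or run=False pass straight through to pytest.mark.xfail.
    return pytest.param(*args, marks=[pytest.mark.xfail(**kwargs)])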
        scale = pyro.param("scale", torch.tensor(1.0))
        with pyro.plate("plate_outer", data.size(-1), dim=-1):
            pyro.sample("x", dist.Normal(loc, scale))

    with pyro_backend(backend):
        Elbo = infer.JitTrace_ELBO if jit else infer.Trace_ELBO
        elbo = Elbo(ignore_jit_warnings=True)
        assert_ok(model, guide, elbo)


@pytest.mark.parametrize("jit", [False, True], ids=["py", "jit"])
@pytest.mark.parametrize(
    "backend",
    [
        "pyro",
        xfail_param("minipyro", reason="not implemented"),
    ],
)
def test_local_param_ok(backend, jit):
    data = torch.randn(10)

    def model():
        locs = pyro.param("locs", torch.tensor([-1.0, 0.0, 1.0]))
        with pyro.plate("plate", len(data), dim=-1):
            x = pyro.sample("x", dist.Categorical(torch.ones(3) / 3))
            pyro.sample("obs", dist.Normal(locs[x], 1.0), obs=data)

    def guide():
        with pyro.plate("plate", len(data), dim=-1):
            p = pyro.param("p", torch.ones(len(data), 3) / 3, event_dim=1)
            pyro.sample("x", dist.Categorical(p))
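# Note on the guide above: `event_dim=1` declares the trailing dimension of
# parameter "p" to be an event dimension, so the enclosing plate treats the
# leading len(data) dimension as the batch dimension it may subsample. That is
# what makes "p" a local, per-datapoint variational parameter.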
    actual_grad = q.unconstrained().grad / outer_particles

    assert_equal(actual_loss, expected_loss, prec=0.3, msg="".join([
        "\nexpected loss = {}".format(expected_loss),
        "\n actual loss = {}".format(actual_loss),
    ]))
    assert_equal(actual_grad, expected_grad, prec=0.5, msg="".join([
        "\nexpected grad = {}".format(expected_grad.detach().cpu().numpy()),
        "\n actual grad = {}".format(actual_grad.detach().cpu().numpy()),
    ]))


@pytest.mark.parametrize('vectorized', [False, True])
@pytest.mark.parametrize('Elbo', [
    TraceEnum_ELBO,
    xfail_param(JitTraceEnum_ELBO,
                reason="jit RuntimeError: Unsupported op descriptor: stack-2-dim_i"),
])
def test_beta_bernoulli(Elbo, vectorized):
    pyro.clear_param_store()
    data = torch.tensor([1.0] * 6 + [0.0] * 4)

    def model1(data):
        alpha0 = torch.tensor(10.0)
        beta0 = torch.tensor(10.0)
        f = pyro.sample("latent_fairness", dist.Beta(alpha0, beta0))
        for i in pyro.irange("irange", len(data)):
            pyro.sample("obs_{}".format(i), dist.Bernoulli(f), obs=data[i])

    def model2(data):
        alpha0 = torch.tensor(10.0)
        beta0 = torch.tensor(10.0)
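# `pyro.irange` in model1 above is the legacy spelling of a sequential plate.
# A hedged sketch of the equivalent loop in current Pyro, where `pyro.plate`
# is itself iterable when used sequentially (the name `_model1_modern_sketch`
# is hypothetical, for illustration only):
def _model1_modern_sketch(data):
    f = pyro.sample("latent_fairness", dist.Beta(torch.tensor(10.0),
                                                 torch.tensor(10.0)))
    for i in pyro.plate("data", len(data)):
        # One conditionally independent Bernoulli observation per datapoint.
        pyro.sample("obs_{}".format(i), dist.Bernoulli(f), obs=data[i])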
    'rsa/schelling.py --num-samples=10',
    'rsa/schelling_false.py --num-samples=10',
    'rsa/semantic_parsing.py --num-samples=10',
    'sparse_gamma_def.py --num-epochs=2 --eval-particles=2 --eval-frequency=1',
    'sparse_gamma_def.py --num-epochs=2 --eval-particles=2 --eval-frequency=1 --auto-guide',
    'vae/ss_vae_M2.py --num-epochs=1',
    'vae/ss_vae_M2.py --num-epochs=1 --aux-loss',
    'vae/ss_vae_M2.py --num-epochs=1 --enum-discrete=parallel',
    'vae/ss_vae_M2.py --num-epochs=1 --enum-discrete=sequential',
    'vae/vae.py --num-epochs=1',
    'vae/vae_comparison.py --num-epochs=1',
]

CUDA_EXAMPLES = [
    'air/main.py --num-steps=1 --cuda',
    xfail_param('baseball.py --num-samples=200 --warmup-steps=100 --num-chains=2 --cuda',
                reason="https://github.com/pytorch/pytorch/issues/10375"),
    'bayesian_regression.py --num-epochs=1 --cuda',
    'contrib/gp/sv-dkl.py --epochs=1 --num-inducing=4 --cuda',
    'dmm/dmm.py --num-epochs=1 --cuda',
    'dmm/dmm.py --num-epochs=1 --num-iafs=1 --cuda',
    'hmm.py --num-steps=1 --truncate=10 --model=0 --cuda',
    'hmm.py --num-steps=1 --truncate=10 --model=1 --cuda',
    'hmm.py --num-steps=1 --truncate=10 --model=2 --cuda',
    'hmm.py --num-steps=1 --truncate=10 --model=3 --cuda',
    'hmm.py --num-steps=1 --truncate=10 --model=4 --cuda',
    'hmm.py --num-steps=1 --truncate=10 --model=5 --cuda',
    'vae/vae.py --num-epochs=1 --cuda',
    'vae/ss_vae_M2.py --num-epochs=1 --cuda',
    'vae/ss_vae_M2.py --num-epochs=1 --aux-loss --cuda',
    'vae/ss_vae_M2.py --num-epochs=1 --enum-discrete=parallel --cuda',
    'vae/ss_vae_M2.py --num-epochs=1 --enum-discrete=sequential --cuda',
    def model():
        p = torch.tensor(0.5, requires_grad=True)
        with pyro.plate("plate_outer", 5, dim=plate_dims[0]):
            pyro.sample("x", dist.Bernoulli(p))
            with pyro.plate("plate_inner_1", 6, dim=plate_dims[1]):
                pyro.sample("y", dist.Bernoulli(p))
                with pyro.plate("plate_inner_2", 7, dim=plate_dims[2]):
                    pyro.sample("z", dist.Bernoulli(p))
                    with pyro.plate("plate_inner_3", 8, dim=plate_dims[3]):
                        pyro.sample("q", dist.Bernoulli(p))

    assert_ok(model, max_plate_nesting=4)


@pytest.mark.parametrize("tmc_strategy",
                         [None, xfail_param("diagonal", reason="strategy not implemented yet")])
@pytest.mark.parametrize("subsampling", [False, True])
@pytest.mark.parametrize("reuse_plate", [False, True])
def test_enum_recycling_plate(subsampling, reuse_plate, tmc_strategy):

    @infer.config_enumerate(default="parallel", tmc=tmc_strategy,
                            num_samples=2 if tmc_strategy else None)
    def model():
        p = pyro.param("p", torch.ones(3, 3))
        q = pyro.param("q", torch.tensor([0.5, 0.5]))
        plate_x = pyro.plate("plate_x", 4,
                             subsample_size=3 if subsampling else None, dim=-1)
        plate_y = pyro.plate("plate_y", 5,
                             subsample_size=3 if subsampling else None, dim=-1)
        plate_z = pyro.plate("plate_z", 6,
                             subsample_size=3 if subsampling else None, dim=-2)

        a = pyro.sample("a", dist.Bernoulli(q[0])).long()
        w = 0
        for i in pyro.markov(range(4)):
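# `pyro.markov` in the model above declares that enumerated variables interact
# only across adjacent loop iterations, so the enumeration machinery can
# recycle a small set of tensor dimensions instead of allocating one per step.
# A minimal sketch of that pattern (hypothetical standalone model; the names
# `_markov_chain_sketch` and "p_sketch" are illustrative):
def _markov_chain_sketch():
    p = pyro.param("p_sketch", torch.ones(3, 3))
    x = 0
    for t in pyro.markov(range(10)):
        # x_t depends only on x_{t-1}, so two enumeration dims can alternate.
        x = pyro.sample("x_{}".format(t), dist.Categorical(p[x]),
                        infer={"enumerate": "parallel"})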
"contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=3 --jit", "contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=4 --jit", "contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=5 --jit", "contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=6 --jit", "contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=6 --jit --raftery-parameterization ", ] HOROVOD_EXAMPLES = [ "svi_horovod.py --num-epochs=2 --size=400", pytest.param("svi_horovod.py --num-epochs=2 --size=400 --cuda", marks=[requires_cuda]), ] FUNSOR_EXAMPLES = [ xfail_param( "contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=0 --funsor", reason="unreproducible recursion error on travis?", ), "contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=1 --funsor", "contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=2 --funsor", "contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=3 --funsor", "contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=4 --funsor", xfail_param( "contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=5 --funsor", reason="https://github.com/pyro-ppl/pyro/issues/3046", run=False, ), "contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=6 --funsor", "contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=6 --raftery-parameterization --funsor", "contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=6 --jit --funsor", "contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=6 --jit --raftery-parameterization --funsor", xfail_param(
    assert_equal(actual_grad, expected_grad, prec=0.5, msg="".join([
        "\nexpected grad = {}".format(
            expected_grad.detach().cpu().numpy()),
        "\n actual grad = {}".format(
            actual_grad.detach().cpu().numpy()),
    ]))


@pytest.mark.parametrize('vectorized', [False, True])
@pytest.mark.parametrize('Elbo', [
    TraceEnum_ELBO,
    xfail_param(
        JitTraceEnum_ELBO,
        reason="jit RuntimeError: Unsupported op descriptor: stack-2-dim_i"),
])
def test_beta_bernoulli(Elbo, vectorized):
    pyro.clear_param_store()
    data = torch.tensor([1.0] * 6 + [0.0] * 4)

    def model1(data):
        alpha0 = torch.tensor(10.0)
        beta0 = torch.tensor(10.0)
        f = pyro.sample("latent_fairness", dist.Beta(alpha0, beta0))
        for i in pyro.irange("irange", len(data)):
            pyro.sample("obs_{}".format(i), dist.Bernoulli(f), obs=data[i])

    def model2(data):
        alpha0 = torch.tensor(10.0)
                    for name, param in params.items()}
    expected_grads = {'loc': np.array([0.5, -2.0]), 'scale': np.array([2.0])}
    for name in sorted(params):
        logger.info('expected {} = {}'.format(name, expected_grads[name]))
        logger.info('actual {} = {}'.format(name, actual_grads[name]))
    assert_equal(actual_grads, expected_grads, prec=precision)


@pytest.mark.parametrize("reparameterized", [True, False], ids=["reparam", "nonreparam"])
@pytest.mark.parametrize("subsample", [False, True], ids=["full", "subsample"])
@pytest.mark.parametrize("Elbo", [
    Trace_ELBO,
    TraceGraph_ELBO,
    TraceEnum_ELBO,
    xfail_param(JitTrace_ELBO,
                reason="jit RuntimeError: Unsupported op descriptor: index-2"),
    xfail_param(JitTraceGraph_ELBO,
                reason="jit RuntimeError: Unsupported op descriptor: index-2"),
    xfail_param(JitTraceEnum_ELBO,
                reason="jit RuntimeError: Unsupported op descriptor: index-2"),
])
def test_subsample_gradient_sequential(Elbo, reparameterized, subsample):
    pyro.clear_param_store()
    data = torch.tensor([-0.5, 2.0])
    subsample_size = 1 if subsample else len(data)
    num_particles = 5000
    precision = 0.333
    Normal = dist.Normal if reparameterized else fakes.NonreparameterizedNormal

    def model():
        with pyro.iarange("data", len(data), subsample_size) as ind:
with pyro.plate("plate_outer", 5, dim=plate_dims[0]): pyro.sample("x", dist.Bernoulli(p)) with pyro.plate("plate_inner_1", 6, dim=plate_dims[1]): pyro.sample("y", dist.Bernoulli(p)) with pyro.plate("plate_inner_2", 7, dim=plate_dims[2]): pyro.sample("z", dist.Bernoulli(p)) with pyro.plate("plate_inner_3", 8, dim=plate_dims[3]): pyro.sample("q", dist.Bernoulli(p)) assert_ok(model, max_plate_nesting=4) @pytest.mark.parametrize( "tmc_strategy", [None, xfail_param("diagonal", reason="strategy not implemented yet")]) @pytest.mark.parametrize("subsampling", [False, True]) @pytest.mark.parametrize("reuse_plate", [False, True]) def test_enum_recycling_plate(subsampling, reuse_plate, tmc_strategy): @infer.config_enumerate(default="parallel", tmc=tmc_strategy, num_samples=2 if tmc_strategy else None) def model(): p = pyro.param("p", torch.ones(3, 3)) q = pyro.param("q", torch.tensor([0.5, 0.5])) plate_x = pyro.plate("plate_x", 4, subsample_size=3 if subsampling else None, dim=-1) plate_y = pyro.plate("plate_y", 5,
        adam = optim.Adam({"lr": .0002, "betas": (0.97, 0.999)})
        svi = SVI(model, guide, adam, loss)

        for k in range(n_steps):
            svi.step()

        assert_equal(pyro.param("alpha_q"), self.alpha_n, prec=0.2,
                     msg='{} vs {}'.format(pyro.param("alpha_q").detach().cpu().numpy(),
                                           self.alpha_n.detach().cpu().numpy()))
        assert_equal(pyro.param("beta_q"), self.beta_n, prec=0.15,
                     msg='{} vs {}'.format(pyro.param("beta_q").detach().cpu().numpy(),
                                           self.beta_n.detach().cpu().numpy()))


@pytest.mark.stage("integration", "integration_batch_1")
@pytest.mark.parametrize('elbo_impl', [
    xfail_param(JitTrace_ELBO, reason="incorrect gradients", run=False),
    xfail_param(JitTraceGraph_ELBO, reason="incorrect gradients", run=False),
    xfail_param(JitTraceEnum_ELBO, reason="incorrect gradients", run=False),
    Trace_ELBO,
    TraceGraph_ELBO,
    TraceEnum_ELBO,
    RenyiELBO,
])
@pytest.mark.parametrize('gamma_dist,n_steps', [
    (dist.Gamma, 5000),
    (fakes.NonreparameterizedGamma, 10000),
    (ShapeAugmentedGamma, 5000),
], ids=['reparam', 'nonreparam', 'rsvi'])
def test_exponential_gamma(gamma_dist, n_steps, elbo_impl):
    pyro.clear_param_store()
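# Note: `run=False` in the xfail params above tells pytest.mark.xfail not to
# execute the test at all, rather than run it and expect failure. That is the
# right choice when a known-bad configuration would waste time or hang the
# test session.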
    'neutra.py -n 10 --num-warmup 10 --num-samples 10',
    'rsa/generics.py --num-samples=10',
    'rsa/hyperbole.py --price=10000',
    'rsa/schelling.py --num-samples=10',
    'rsa/schelling_false.py --num-samples=10',
    'rsa/semantic_parsing.py --num-samples=10',
    'sir_hmc.py -t=2 -w=2 -n=4 -d=2 -m=1 --enum',
    'sir_hmc.py -t=2 -w=2 -n=4 -d=2 -p=10000 --sequential',
    'sir_hmc.py -t=2 -w=2 -n=4 -d=100 -p=10000 -f 2',
    'smcfilter.py --num-timesteps=3 --num-particles=10',
    'sparse_gamma_def.py --num-epochs=2 --eval-particles=2 --eval-frequency=1 --guide custom',
    'sparse_gamma_def.py --num-epochs=2 --eval-particles=2 --eval-frequency=1 --guide auto',
    'sparse_gamma_def.py --num-epochs=2 --eval-particles=2 --eval-frequency=1 --guide easy',
    'toy_mixture_model_discrete_enumeration.py --num-steps=1',
    xfail_param(
        'sparse_regression.py --num-steps=2 --num-data=50 --num-dimensions 20',
        reason='https://github.com/pyro-ppl/pyro/issues/2082'),
    'vae/ss_vae_M2.py --num-epochs=1',
    'vae/ss_vae_M2.py --num-epochs=1 --aux-loss',
    'vae/ss_vae_M2.py --num-epochs=1 --enum-discrete=parallel',
    'vae/ss_vae_M2.py --num-epochs=1 --enum-discrete=sequential',
    'vae/vae.py --num-epochs=1',
    'vae/vae_comparison.py --num-epochs=1',
]

CUDA_EXAMPLES = [
    'air/main.py --num-steps=1 --cuda',
    'baseball.py --num-samples=200 --warmup-steps=100 --num-chains=2 --cuda',
    'contrib/cevae/synthetic.py --num-epochs=1 --cuda',
    'contrib/epidemiology/sir.py --nojit -t=2 -w=2 -n=4 -d=20 -p=1000 -f 2 --cuda',
    'contrib/epidemiology/sir.py --nojit -t=2 -w=2 -n=4 -d=20 -p=1000 -f 2 -nb=16 --cuda',
    'rsa/hyperbole.py --price=10000',
    'rsa/schelling.py --num-samples=10',
    'rsa/schelling_false.py --num-samples=10',
    'rsa/semantic_parsing.py --num-samples=10',
    'scanvi/scanvi.py --num-epochs 1 --dataset mock',
    'sir_hmc.py -t=2 -w=2 -n=4 -d=2 -m=1 --enum',
    'sir_hmc.py -t=2 -w=2 -n=4 -d=2 -p=10000 --sequential',
    'sir_hmc.py -t=2 -w=2 -n=4 -d=100 -p=10000 -f 2',
    'smcfilter.py --num-timesteps=3 --num-particles=10',
    'sparse_gamma_def.py --num-epochs=2 --eval-particles=2 --eval-frequency=1 --guide custom',
    'sparse_gamma_def.py --num-epochs=2 --eval-particles=2 --eval-frequency=1 --guide auto',
    'sparse_gamma_def.py --num-epochs=2 --eval-particles=2 --eval-frequency=1 --guide easy',
    'svi_horovod.py --num-epochs=2 --size=400 --no-horovod',
    'toy_mixture_model_discrete_enumeration.py --num-steps=1',
    xfail_param(
        'sparse_regression.py --num-steps=2 --num-data=50 --num-dimensions 20',
        reason='https://github.com/pyro-ppl/pyro/issues/2082'),
    'vae/ss_vae_M2.py --num-epochs=1',
    'vae/ss_vae_M2.py --num-epochs=1 --aux-loss',
    'vae/ss_vae_M2.py --num-epochs=1 --enum-discrete=parallel',
    'vae/ss_vae_M2.py --num-epochs=1 --enum-discrete=sequential',
    'vae/vae.py --num-epochs=1',
    'vae/vae_comparison.py --num-epochs=1',
    'cvae/main.py --num-quadrant-inputs=1 --num-epochs=1',
    'contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=0',
    'contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=1',
    'contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=2',
    'contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=3',
    'contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=4',
    'contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=5',
    'contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=6',