Example #1
def main():
    data = torch.load('data.pt').transpose(-1, -2)
    data = data[0]
    data = data[:, None]
    pyro.set_rng_seed(1)
    pyro.clear_param_store()
    covariates = torch.zeros(len(data), 0)
    forecaster = Forecaster(MM(),
                            data[:700],
                            covariates[:700],
                            learning_rate=0.1,
                            num_steps=400)
    for name, value in forecaster.guide.median().items():
        if value.numel() == 1:
            print("{} = {:0.4g}".format(name, value.item()))

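    # Passing the 700-step training slice together with the full-length
    # covariates makes the forecaster sample the remaining held-out steps.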
    samples = forecaster(data[:700], covariates, num_samples=100)
    p10, p50, p90 = quantile(samples, (0.1, 0.5, 0.9)).squeeze(-1)
    eval_crps(samples, data[700:])
    plt.figure(figsize=(9, 3))
    plt.fill_between(torch.arange(700, 1404), p10, p90, color="red", alpha=0.3)
    plt.plot(torch.arange(700, 1404), p50, 'r-', label='forecast')
    plt.plot(torch.arange(700, 1404), data[700:1404], 'k-', label='truth')
    plt.xlim(700, 1404)
    plt.legend(loc="best")
    plt.show()
Example #2
def main(args):
    pyro.set_rng_seed(0)
    pyro.enable_validation()

    optim = Adam({"lr": 0.1})
    inference = SVI(model, guide, optim, loss=Trace_ELBO())

    # Data is an arbitrary json-like structure with tensors at leaves.
    one = torch.tensor(1.0)
    data = {
        "foo": one,
        "bar": [0 * one, 1 * one, 2 * one],
        "baz": {
            "noun": {
                "concrete": 4 * one,
                "abstract": 6 * one,
            },
            "verb": 2 * one,
        },
    }

    print('Step\tLoss')
    loss = 0.0
    for step in range(args.num_epochs):
        loss += inference.step(data)
        if step and step % 10 == 0:
            print('{}\t{:0.5g}'.format(step, loss))
            loss = 0.0

    print('Parameters:')
    for name in sorted(pyro.get_param_store().get_all_param_names()):
        print('{} = {}'.format(name, pyro.param(name).detach().cpu().numpy()))
Example #3
def main(args):
    pyro.set_rng_seed(args.seed)

    model = SimpleHarmonicModel(args.process_noise, args.measurement_noise)
    guide = SimpleHarmonicModel_Guide(model)

    smc = SMCFilter(model,
                    guide,
                    num_particles=args.num_particles,
                    max_plate_nesting=0)

    logging.info("Generating data")
    zs, ys = generate_data(args)

    logging.info("Filtering")

    smc.init(initial=torch.tensor([1., 0.]))
    for y in ys[1:]:
        smc.step(y)

    logging.info("At final time step:")
    z = smc.get_empirical()["z"]
    logging.info("truth: {}".format(zs[-1]))
    logging.info("mean: {}".format(z.mean))
    logging.info("std: {}".format(z.variance**0.5))
Example #4
def test_dense_smoke():
    num_objects = 4
    num_detections = 2
    pyro.set_rng_seed(0)
    exists_logits = torch.zeros(num_objects)
    assign_logits = logit(
        torch.tensor([
            [0.5, 0.5, 0.0, 0.0],
            [0.0, 0.5, 0.5, 0.5],
        ]))
    assert assign_logits.shape == (num_detections, num_objects)

    solver = MarginalAssignment(exists_logits, assign_logits, bp_iters=5)

    assert solver.exists_dist.batch_shape == (num_objects, )
    assert solver.exists_dist.event_shape == ()
    assert solver.assign_dist.batch_shape == (num_detections, )
    assert solver.assign_dist.event_shape == ()
    assert solver.assign_dist.probs.shape[-1] == num_objects + 1  # true + spurious

    # test dense matches sparse
    edges, assign_logits = dense_to_sparse(assign_logits)
    other = MarginalAssignmentSparse(num_objects,
                                     num_detections,
                                     edges,
                                     exists_logits,
                                     assign_logits,
                                     bp_iters=5)
    assert_equal(other.exists_dist.probs, solver.exists_dist.probs, prec=1e-3)
    assert_equal(other.assign_dist.probs, solver.assign_dist.probs, prec=1e-3)
Example #5
    def fit(self, model_name, model_param_names, data_input, init_values=None):
        # verbose is passed through from orbit.models.base_estimator
        verbose = self.verbose
        message = self.message
        learning_rate = self.learning_rate
        learning_rate_total_decay = self.learning_rate_total_decay
        num_sample = self.num_sample
        seed = self.seed
        num_steps = self.num_steps

        pyro.set_rng_seed(seed)
        Model = get_pyro_model(model_name)  # abstract
        model = Model(data_input)  # concrete

        # Perform stochastic variational inference using an auto guide.
        pyro.clear_param_store()
        guide = AutoLowRankMultivariateNormal(model)
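        # ClippedAdam's "lrd" is a per-step multiplier, so raising the total
        # decay to the power 1/num_steps makes the learning rate shrink by
        # exactly learning_rate_total_decay over the whole run.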
        optim = ClippedAdam({
            "lr": learning_rate,
            "lrd": learning_rate_total_decay**(1 / num_steps)
        })
        elbo = Trace_ELBO(num_particles=self.num_particles,
                          vectorize_particles=True)
        svi = SVI(model, guide, optim, elbo)

        for step in range(num_steps):
            loss = svi.step()
            if verbose and step % message == 0:
                scale_rms = guide._loc_scale()[1].detach().pow(2).mean().sqrt().item()
                print("step {: >4d} loss = {:0.5g}, scale = {:0.5g}".format(
                    step, loss, scale_rms))

        # Extract samples.
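        # The guide is wrapped in a particle plate and traced once; the model
        # is then replayed against that trace so its deterministic sites are
        # computed from the same posterior draws.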
        vectorize = pyro.plate("samples",
                               num_sample,
                               dim=-1 - model.max_plate_nesting)
        with pyro.poutine.trace() as tr:
            samples = vectorize(guide)()
        with pyro.poutine.replay(trace=tr.trace):
            samples.update(vectorize(model)())

        # Convert from torch.Tensors to numpy.ndarrays.
        extract = {
            name: value.detach().squeeze().numpy()
            for name, value in samples.items()
        }

        # make sure that model param names are a subset of stan extract keys
        invalid_model_param = set(model_param_names) - set(extract.keys())
        if invalid_model_param:
            raise EstimatorException(
                "Pyro model definition does not contain required parameters")

        # `stan.optimizing` automatically returns all defined parameters
        # filter out unnecessary keys
        extract = {param: extract[param] for param in model_param_names}

        return extract
Example #6
def test_posterior_finite_space_model(finite_space_model, one_point_design, true_eig):
    pyro.set_rng_seed(42)
    pyro.clear_param_store()
    # Pre-train (large learning rate)
    posterior_eig(
        finite_space_model,
        one_point_design,
        "y",
        "theta",
        num_samples=10,
        num_steps=250,
        guide=posterior_guide,
        optim=optim.Adam({"lr": 0.1}),
    )
    # Finesse (small learning rate)
    estimated_eig = posterior_eig(
        finite_space_model,
        one_point_design,
        "y",
        "theta",
        num_samples=10,
        num_steps=250,
        guide=posterior_guide,
        optim=optim.Adam({"lr": 0.01}),
        final_num_samples=1000,
    )
    assert_equal(estimated_eig, true_eig, prec=1e-2)
Example #7
def test_dv_finite_space_model(finite_space_model, one_point_design, true_eig):
    pyro.set_rng_seed(42)
    pyro.clear_param_store()
    donsker_varadhan_eig(
        finite_space_model,
        one_point_design,
        "y",
        "theta",
        num_samples=100,
        num_steps=250,
        T=dv_critic,
        optim=optim.Adam({"lr": 0.1}),
    )
    estimated_eig = donsker_varadhan_eig(
        finite_space_model,
        one_point_design,
        "y",
        "theta",
        num_samples=100,
        num_steps=250,
        T=dv_critic,
        optim=optim.Adam({"lr": 0.01}),
        final_num_samples=2000,
    )
    assert_equal(estimated_eig, true_eig, prec=1e-2)
Example #8
def test_sparse_smoke():
    num_objects = 4
    num_detections = 2
    pyro.set_rng_seed(0)
    exists_logits = torch.zeros(num_objects)
    edges = exists_logits.new_tensor([
        [0, 0, 1, 0, 1, 0],
        [0, 1, 1, 2, 2, 3],
    ], dtype=torch.long)
    assign_logits = logit(torch.tensor([0.99, 0.8, 0.2, 0.2, 0.8, 0.9]))
    assert assign_logits.shape == edges.shape[1:]

    solver = MarginalAssignmentSparse(num_objects,
                                      num_detections,
                                      edges,
                                      exists_logits,
                                      assign_logits,
                                      bp_iters=5)

    assert solver.exists_dist.batch_shape == (num_objects, )
    assert solver.exists_dist.event_shape == ()
    assert solver.assign_dist.batch_shape == (num_detections, )
    assert solver.assign_dist.event_shape == ()
    assert solver.assign_dist.probs.shape[-1] == num_objects + 1  # true + spurious

    # test dense matches sparse
    assign_logits = sparse_to_dense(num_objects, num_detections, edges,
                                    assign_logits)
    other = MarginalAssignment(exists_logits, assign_logits, bp_iters=5)
    assert_equal(other.exists_dist.probs, solver.exists_dist.probs, prec=1e-3)
    assert_equal(other.assign_dist.probs, solver.assign_dist.probs, prec=1e-3)
Example #9
def run_pyro_model(*, posterior, backend, mode, config):
    """
    Compile and run the model.
    Returns the fitted MCMC object.
    """
    model = posterior.model
    data = posterior.data.values()
    stanfile = model.code_file_path("stan")
    build_dir = f"_build_{backend}_{mode}"
    if backend == "numpyro":
        numpyro_model = NumPyroModel(stanfile, recompile=False, build_dir=build_dir)
        mcmc = numpyro_model.mcmc(
            samples=config.iterations,
            warmups=config.warmups,
            chains=config.chains,
            thin=config.thin,
        )
        mcmc.run(jax.random.PRNGKey(config.seed), data)
        return mcmc
    elif backend == "pyro":
        pyro.set_rng_seed(config.seed)
        pyro_model = PyroModel(stanfile, recompile=False, build_dir=build_dir)
        mcmc = pyro_model.mcmc(
            samples=config.iterations,
            warmups=config.warmups,
            chains=config.chains,
            thin=config.thin,
            mp_context="spawn"
        )
        mcmc.run(data)
        return mcmc
    else:
        raise ValueError("Invalid backend (should be one of pyro, numpyro, or stan)")
Example #10
def test_subsample_model(auto_class):
    def model(x, y=None, batch_size=None):
        loc = pyro.param("loc", lambda: torch.tensor(0.))
        scale = pyro.param("scale",
                           lambda: torch.tensor(1.),
                           constraint=constraints.positive)
        with pyro.plate("batch", len(x), subsample_size=batch_size):
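            # pyro.subsample slices its argument with the indices drawn by the
            # enclosing plate, keeping data and model subsampling in sync.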
            batch_x = pyro.subsample(x, event_dim=0)
            batch_y = pyro.subsample(y, event_dim=0) if y is not None else None
            mean = loc + scale * batch_x
            sigma = pyro.sample("sigma", dist.LogNormal(0., 1.))
            return pyro.sample("obs", dist.Normal(mean, sigma), obs=batch_y)

    guide = auto_class(model)

    full_size = 50
    batch_size = 20
    pyro.set_rng_seed(123456789)
    x = torch.randn(full_size)
    with torch.no_grad():
        y = model(x)
    assert y.shape == x.shape

    pyro.get_param_store().clear()
    pyro.set_rng_seed(123456789)
    svi = SVI(model, guide, Adam({"lr": 0.02}), Trace_ELBO())
    for step in range(5):
        svi.step(x, y, batch_size=batch_size)
Example #11
def test_to_pyro_module_():

    pyro.set_rng_seed(123)
    actual = nn.Sequential(
        nn.Linear(28 * 28, 200),
        nn.Sigmoid(),
        nn.Linear(200, 200),
        nn.Sigmoid(),
        nn.Linear(200, 10),
    )
    to_pyro_module_(actual)
    pyro.clear_param_store()

    pyro.set_rng_seed(123)
    expected = PyroModule[nn.Sequential](
        PyroModule[nn.Linear](28 * 28, 200),
        PyroModule[nn.Sigmoid](),
        PyroModule[nn.Linear](200, 200),
        PyroModule[nn.Sigmoid](),
        PyroModule[nn.Linear](200, 10),
    )
    pyro.clear_param_store()

    def assert_identical(a, e):
        assert type(a) is type(e)
        if isinstance(a, dict):
            assert set(a) == set(e)
            for key in a:
                assert_identical(a[key], e[key])
        elif isinstance(a, nn.Module):
            assert_identical(a.__dict__, e.__dict__)
        elif isinstance(a, (str, int, float, torch.Tensor)):
            assert_equal(a, e)

    assert_identical(actual, expected)

    # check output
    data = torch.randn(28 * 28)
    actual_out = actual(data)
    pyro.clear_param_store()
    expected_out = expected(data)
    assert_equal(actual_out, expected_out)

    # check randomization
    def randomize(model):
        for m in model.modules():
            for name, value in list(m.named_parameters(recurse=False)):
                setattr(
                    m,
                    name,
                    PyroSample(
                        prior=dist.Normal(0, 1)
                        .expand(value.shape)
                        .to_event(value.dim())
                    ),
                )

    randomize(actual)
    randomize(expected)
    assert_identical(actual, expected)
Example #12
def main(model, guide, args):
    # init
    if args.seed is not None:
        pyro.set_rng_seed(args.seed)
    logger = get_logger(args.log, __name__)
    logger.info(args)

    # setup svi
    opt = pyro.optim.Adam({'lr': args.learning_rate})
    csis = pyro.infer.CSIS(model.main,
                           guide.main,
                           opt,
                           num_inference_samples=args.num_infer_samples)

    # train
    times = [time.time()]
    logger.info("\nstep\t" + "E_p(x,y)[log q(x,y)]\t" + "time(sec)")

    for i in range(1, args.num_steps + 1):
        loss = csis.step()

        if (args.eval_frequency > 0
                and i % args.eval_frequency == 0) or (i == 1):
            times.append(time.time())
            logger.info(f"{i:06d}\t"
                        f"{-loss:.4f}  \t"
                        f"{times[-1]-times[-2]:.3f}")
Example #13
    def run(self, *args, **kwargs):
        pyro.set_rng_seed(self.rng_seed)
        torch.set_default_tensor_type(self.default_tensor_type)
        # XXX we clone CUDA tensor args to resolve the issue "Invalid device pointer"
        # at https://github.com/pytorch/pytorch/issues/10375
        args = [
            arg.clone().detach() if (torch.is_tensor(arg) and arg.is_cuda) else arg
            for arg in args
        ]
        logger = logging.getLogger("pyro.infer.mcmc")
        logger_id = "CHAIN:{}".format(self.chain_id)
        log_queue = self.log_queue
        logger = initialize_logger(logger, logger_id, None, log_queue)
        logging_hook = _add_logging_hook(logger, None, self.hook)

        try:
            for sample in _gen_samples(self.kernel, self.warmup_steps,
                                       self.num_samples, logging_hook, None,
                                       *args, **kwargs):
                self.result_queue.put_nowait((self.chain_id, sample))
                self.event.wait()
                self.event.clear()
            self.result_queue.put_nowait((self.chain_id, None))
        except Exception as e:
            logger.exception(e)
            self.result_queue.put_nowait((self.chain_id, e))
Example #14
def main(args):
    pyro.set_rng_seed(0)
    pyro.enable_validation(__debug__)

    optim = Adam({"lr": 0.1})
    inference = SVI(model, guide, optim, loss=Trace_ELBO())

    # Data is an arbitrary json-like structure with tensors at leaves.
    one = torch.tensor(1.0)
    data = {
        "foo": one,
        "bar": [0 * one, 1 * one, 2 * one],
        "baz": {
            "noun": {
                "concrete": 4 * one,
                "abstract": 6 * one,
            },
            "verb": 2 * one,
        },
    }

    print('Step\tLoss')
    loss = 0.0
    for step in range(args.num_epochs):
        loss += inference.step(data)
        if step and step % 10 == 0:
            print('{}\t{:0.5g}'.format(step, loss))
            loss = 0.0

    print('Parameters:')
    for name, value in sorted(pyro.get_param_store().items()):
        print('{} = {}'.format(name, value.detach().cpu().numpy()))
Example #15
def main(args):
    # pyro.enable_validation(True)

    logging.info('Generating data')
    pyro.set_rng_seed(0)
    # We can generate synthetic data directly by calling the model.
    true_topic_weights, true_topic_words, data = model_original(args=args)

    # We'll train using SVI.
    logging.info('-' * 40)
    logging.info('Training on {} documents'.format(args.num_docs))
    # NOTE: enumeration is currently disabled, so TraceGraph_ELBO is used
    # instead of (Jit)TraceEnum_ELBO.
    Elbo = TraceGraph_ELBO
    elbo = Elbo(max_plate_nesting=2)
    optim = Adam({'lr': args.learning_rate})
    svi = SVI(model, guide, optim, elbo)
    logging.info('Step\tLoss')
    for step in range(args.num_steps):
        loss = svi.step(data, args=args, batch_size=args.batch_size)
        if step % 10 == 0:
            logging.info('{: >5d}\t{}'.format(step, loss))
    loss = elbo.loss(model, guide, data, args=args)
    logging.info('final loss = {}'.format(loss))
Example #16
def main(args):
    logging.info('Generating data')
    pyro.set_rng_seed(0)
    pyro.clear_param_store()
    pyro.enable_validation(__debug__)

    # We can generate synthetic data directly by calling the model.
    true_topic_weights, true_topic_words, data = model(args=args)

    # We'll train using SVI.
    logging.info('-' * 40)
    logging.info('Training on {} documents'.format(args.num_docs))
    predictor = make_predictor(args)
    guide = functools.partial(parametrized_guide, predictor)
    Elbo = JitTraceEnum_ELBO if args.jit else TraceEnum_ELBO
    elbo = Elbo(max_plate_nesting=2)
    optim = ClippedAdam({'lr': args.learning_rate})
    svi = SVI(model, guide, optim, elbo)
    logging.info('Step\tLoss')
    for step in range(args.num_steps):
        loss = svi.step(data, args=args, batch_size=args.batch_size)
        if step % 10 == 0:
            logging.info('{: >5d}\t{}'.format(step, loss))
    loss = elbo.loss(model, guide, data, args=args)
    logging.info('final loss = {}'.format(loss))
Example #17
def test_marginal_likelihood_linear_model(linear_model, one_point_design):
    pyro.set_rng_seed(42)
    pyro.clear_param_store()
    # Pre-train (large learning rate)
    marginal_likelihood_eig(linear_model,
                            one_point_design,
                            "y",
                            "w",
                            num_samples=10,
                            num_steps=250,
                            marginal_guide=marginal_guide,
                            cond_guide=likelihood_guide,
                            optim=optim.Adam({"lr": 0.1}))
    # Finesse (small learning rate)
    estimated_eig = marginal_likelihood_eig(linear_model,
                                            one_point_design,
                                            "y",
                                            "w",
                                            num_samples=10,
                                            num_steps=250,
                                            marginal_guide=marginal_guide,
                                            cond_guide=likelihood_guide,
                                            optim=optim.Adam({"lr": 0.01}),
                                            final_num_samples=500)
    expected_eig = linear_model_ground_truth(linear_model, one_point_design,
                                             "y", "w")
    assert_equal(estimated_eig, expected_eig, prec=5e-2)
Example #18
def test_vnmc_linear_model(linear_model, one_point_design):
    pyro.set_rng_seed(42)
    pyro.clear_param_store()
    # Pre-train (large learning rate)
    vnmc_eig(linear_model,
             one_point_design,
             "y",
             "w",
             num_samples=[9, 3],
             num_steps=250,
             guide=posterior_guide,
             optim=optim.Adam({"lr": 0.1}))
    # Finesse (small learning rate)
    estimated_eig = vnmc_eig(linear_model,
                             one_point_design,
                             "y",
                             "w",
                             num_samples=[9, 3],
                             num_steps=250,
                             guide=posterior_guide,
                             optim=optim.Adam({"lr": 0.01}),
                             final_num_samples=[500, 100])
    expected_eig = linear_model_ground_truth(linear_model, one_point_design,
                                             "y", "w")
    assert_equal(estimated_eig, expected_eig, prec=5e-2)
Example #19
def poisson_gamma_model(reparameterized, Elbo):
    pyro.set_rng_seed(0)
    alpha0 = torch.tensor(1.0)
    beta0 = torch.tensor(1.0)
    data = torch.tensor([1.0, 2.0, 3.0])
    n_data = len(data)
    data_sum = data.sum(0)
    alpha_n = alpha0 + data_sum  # posterior alpha
    beta_n = beta0 + torch.tensor(float(n_data))  # posterior beta
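    # Conjugacy: a Gamma(alpha0, beta0) prior with a Poisson likelihood gives
    # the exact posterior Gamma(alpha0 + sum(data), beta0 + n),
    # i.e. Gamma(1 + 6, 1 + 3) = Gamma(7, 4) for this data.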
    log_alpha_n = torch.log(alpha_n)
    log_beta_n = torch.log(beta_n)

    pyro.clear_param_store()
    Gamma = dist.Gamma if reparameterized else fakes.NonreparameterizedGamma

    def model():
        lambda_latent = pyro.sample("lambda_latent", Gamma(alpha0, beta0))
        with pyro.plate("data", n_data):
            pyro.sample("obs", dist.Poisson(lambda_latent), obs=data)
        return lambda_latent

    def guide():
        alpha_q_log = pyro.param("alpha_q_log", log_alpha_n + 0.17)
        beta_q_log = pyro.param("beta_q_log", log_beta_n - 0.143)
        alpha_q, beta_q = torch.exp(alpha_q_log), torch.exp(beta_q_log)
        pyro.sample("lambda_latent", Gamma(alpha_q, beta_q))

    adam = optim.Adam({"lr": .0002, "betas": (0.97, 0.999)})
    svi = SVI(model, guide, adam, loss=Elbo())
    for k in range(3000):
        svi.step()
Example #20
def main(args):
    logging.info(f"CUDA enabled: {torch.cuda.is_available()}")
    logging.info('Generating data')
    pyro.set_rng_seed(0)
    pyro.clear_param_store()
    pyro.enable_validation(__debug__)

    # We can generate synthetic data directly by calling the model.
    data = model(args=args)

    doc_word_data = data["doc_word_data"]
    category_data = data["category_data"]

    # We'll train using SVI.
    logging.info('-' * 40)
    logging.info('Training on {} documents'.format(args.num_docs))
    Elbo = JitTraceEnum_ELBO if args.jit else TraceEnum_ELBO
    elbo = Elbo(max_plate_nesting=2)
    optim = ClippedAdam({'lr': args.learning_rate})
    svi = SVI(model, parametrized_guide, optim, elbo)
    logging.info('Step\tLoss')
    for step in tqdm(range(args.num_steps)):
        loss = svi.step(doc_word_data=doc_word_data,
                        category_data=category_data,
                        args=args,
                        batch_size=args.batch_size)
        if step % 10 == 0:
            logging.info('{: >5d}\t{}'.format(step, loss))
    loss = elbo.loss(model,
                     parametrized_guide,
                     doc_word_data=doc_word_data,
                     category_data=category_data,
                     args=args)
    logging.info('final loss = {}'.format(loss))
    print("debug string")
Example #21
def vsgp_multiclass(num_steps, whiten):
    # adapted from http://gpflow.readthedocs.io/en/latest/notebooks/multiclass.html
    pyro.set_rng_seed(0)
    X = torch.rand(100, 1)
    K = (-0.5 * (X - X.t()).pow(2) / 0.01).exp() + torch.eye(100) * 1e-6
    f = K.cholesky().matmul(torch.randn(100, 3))
    y = f.argmax(dim=-1)

    kernel = gp.kernels.Sum(
        gp.kernels.Matern32(1),
        gp.kernels.WhiteNoise(1, variance=torch.tensor(0.01)))
    likelihood = gp.likelihoods.MultiClass(num_classes=3)
    Xu = X[::5].clone()

    gpmodule = gp.models.VariationalSparseGP(X,
                                             y,
                                             kernel,
                                             Xu,
                                             likelihood,
                                             latent_shape=torch.Size([3]),
                                             whiten=whiten)

    gpmodule.Xu.requires_grad_(False)
    gpmodule.kernel.kern1.variance_unconstrained.requires_grad_(False)

    optimizer = torch.optim.Adam(gpmodule.parameters(), lr=0.0001)
    gp.util.train(gpmodule, optimizer, num_steps=num_steps)
Example #22
    def evaluate(self, test_loader, device, n_samples=10, seeds_list=None):
        self.device = device
        self.basenet.device = device
        self.to(device)
        self.basenet.to(device)

        random.seed(0)
        pyro.set_rng_seed(0)

        bnn_seeds = list(range(n_samples)) if seeds_list is None else seeds_list

        with torch.no_grad():

            correct_predictions = 0.0
            for x_batch, y_batch in test_loader:

                x_batch = x_batch.to(device)
                outputs = self.forward(x_batch,
                                       n_samples=n_samples,
                                       seeds=bnn_seeds)
                predictions = outputs.argmax(-1)
                labels = y_batch.to(device).argmax(-1)
                correct_predictions += (predictions == labels).sum().item()

            accuracy = 100 * correct_predictions / len(test_loader.dataset)
            print("Accuracy: %.2f%%" % (accuracy))
            return accuracy
Example #23
def seed_random(_seed):
    pyro.set_rng_seed(_seed)
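    # pyro.set_rng_seed already seeds torch, Python's random and (when
    # available) numpy; the explicit calls below keep that guarantee visible.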
    torch.manual_seed(_seed)
    # TODO: making cuDNN deterministic seems to have no effect; Dropout layers
    # still differ across runs
    torch.cuda.manual_seed_all(_seed)
    torch.backends.cudnn.deterministic = True
    np.random.seed(_seed)
Example #24
def main(*,
         raw_expr,
         encoded_expr,
         save_file,
         topics=32,
         hidden_layers=128,
         learning_rate=1e-3,
         epochs=100,
         batch_size=32,
         posterior_samples=20,
         initial_counts=50):

    raw_expr = np.load(raw_expr)
    encoded_expr = np.load(encoded_expr)

    seed = 2556
    torch.manual_seed(seed)
    pyro.set_rng_seed(seed)

    rna_topic = scVLXCM(raw_expr.shape[-1],
                        num_topics=topics,
                        dropout=0.2,
                        hidden=hidden_layers,
                        initial_counts=initial_counts)

    rna_topic.train(raw_expr=raw_expr,
                    encoded_expr=encoded_expr,
                    num_epochs=epochs,
                    batch_size=batch_size,
                    learning_rate=learning_rate,
                    posterior_samples=posterior_samples)

    rna_topic.write_trace(save_file)
Example #25
def test_serialization(jit, feature_dim, outcome_dist):
    x, t, y = generate_data(num_data=32, feature_dim=feature_dim)
    if outcome_dist == "exponential":
        y.clamp_(min=1e-20)
    cevae = CEVAE(feature_dim, outcome_dist=outcome_dist, num_samples=1000, hidden_dim=32)
    cevae.fit(x, t, y, num_epochs=4, batch_size=8)
    pyro.set_rng_seed(0)
    expected_ite = cevae.ite(x)

    if jit:
        traced_cevae = cevae.to_script_module()
        f = io.BytesIO()
        torch.jit.save(traced_cevae, f)
        f.seek(0)
        loaded_cevae = torch.jit.load(f)
    else:
        f = io.BytesIO()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=UserWarning)
            torch.save(cevae, f)
        f.seek(0)
        loaded_cevae = torch.load(f)

    pyro.set_rng_seed(0)
    actual_ite = loaded_cevae.ite(x)
    assert_close(actual_ite, expected_ite, atol=0.1)
Example #26
def main():
    inputs, calendar = load_input()
    logger.info('Inference')
    covariates, covariate_dim, data = inputs.values()
    data, covariates = map(jax_to_torch, [data, covariates])
    data = torch.log(1 + data.double())
    assert pyro.__version__.startswith('1.3.1')
    pyro.enable_validation(True)
    T0 = 0  # beginning
    T2 = data.size(-2)  # end
    T1 = T2 - 500  # train/test split
    pyro.set_rng_seed(1)
    pyro.clear_param_store()
    data = data.permute(-2, -1)
    covariates = covariates.reshape(data.size(-1), T2, -1)
    # covariates = torch.zeros(len(data), 0)  # empty
    forecaster = Forecaster(Model4(), data[:T1], covariates[:, :T1],
                            learning_rate=0.09, num_steps=2000)
    samples = forecaster(data[:T1], covariates[:,:T2], num_samples=336)
    samples.clamp_(min=0)  # apply domain knowledge: the samples must be positive
    p10, p50, p90 = quantile(samples[:, 0], [0.1, 0.5, 0.9]).squeeze(-1)
    crps = eval_crps(samples, data[T1:T2])
    print(samples.shape, p10.shape)
    fig, axes = plt.subplots(data.size(-1), 1, figsize=(9, 10), sharex=True)
    plt.subplots_adjust(hspace=0)
    axes[0].set_title("Sales (CRPS = {:0.3g})".format(crps))
    for i, ax in enumerate(axes):
        ax.fill_between(torch.arange(T1, T2), p10[:, i], p90[:, i], color="red", alpha=0.3)
        ax.plot(torch.arange(T1, T2), p50[:, i], 'r-', lw=1, label='forecast')
        ax.plot(torch.arange(T0, T2), data[:T2, i], 'k-', lw=1, label='truth')
        ax.set_ylabel(f"item: {i}")
    axes[0].legend(loc="best")
    plt.savefig('figures/pyro_forecast.png')
    plt.show()
Example #27
def test_nmc_eig_finite_space_model(finite_space_model, one_point_design, true_eig):
    pyro.set_rng_seed(42)
    pyro.clear_param_store()
    estimated_eig = nmc_eig(
        finite_space_model, one_point_design, "y", "theta", M=40, N=40 * 40
    )
    assert_equal(estimated_eig, true_eig, prec=1e-2)
Example #28
def initialize(seed, model, scheduler, model_args):
    pyro.set_rng_seed(seed)
    pyro.clear_param_store()
    guide = autoguide.AutoDiagonalNormal(model)
    svi = SVI(model, guide, scheduler, loss=Trace_ELBO())
    loss = svi.loss(model, guide, **model_args)
    return loss, guide, svi
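A sketch of how a helper like this is typically driven (assuming model, scheduler and model_args are in scope, following the seed-search pattern in Pyro's GMM tutorial): try many seeds, keep the one with the lowest initial loss, then re-initialize with it before training.

best_seed = min(range(100),
                key=lambda seed: initialize(seed, model, scheduler, model_args)[0])
# Re-initialize with the winning seed before training.
loss, guide, svi = initialize(best_seed, model, scheduler, model_args)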
Example #29
def main(args):
    pyro.enable_validation(__debug__)
    if args.cuda:
        torch.set_default_tensor_type('torch.cuda.FloatTensor')

    # Generate synthetic data.
    pyro.set_rng_seed(args.seed)
    x_train, t_train, y_train, _ = generate_data(args)

    # Train.
    pyro.set_rng_seed(args.seed)
    pyro.clear_param_store()
    cevae = CEVAE(feature_dim=args.feature_dim,
                  latent_dim=args.latent_dim,
                  hidden_dim=args.hidden_dim,
                  num_layers=args.num_layers,
                  num_samples=10)
    cevae.fit(x_train, t_train, y_train,
              num_epochs=args.num_epochs,
              batch_size=args.batch_size,
              learning_rate=args.learning_rate,
              learning_rate_decay=args.learning_rate_decay,
              weight_decay=args.weight_decay)

    # Evaluate.
    x_test, t_test, y_test, true_ite = generate_data(args)
    true_ate = true_ite.mean()
    print("true ATE = {:0.3g}".format(true_ate.item()))
    naive_ate = y_test[t_test == 1].mean() - y_test[t_test == 0].mean()
    print("naive ATE = {:0.3g}".format(naive_ate))
    if args.jit:
        cevae = cevae.to_script_module()
    est_ite = cevae.ite(x_test)
    est_ate = est_ite.mean()
    print("estimated ATE = {:0.3g}".format(est_ate.item()))
Example #30
def main(num_vi_steps, num_bo_steps, seed):

    pyro.set_rng_seed(seed)
    pyro.clear_param_store()

    est_ape = partial(estimated_ape, num_vi_steps=num_vi_steps)
    est_ape.__doc__ = "Estimated APE by VI"

    estimators = [true_ape, est_ape]
    noises = [0.0001, 0.25]
    num_acqs = [2, 10]

    for f, noise, num_acquisitions in zip(estimators, noises, num_acqs):
        X = torch.tensor([25., 75.])
        y = f(X)
        gpmodel = gp.models.GPRegression(X,
                                         y,
                                         gp.kernels.Matern52(
                                             input_dim=1,
                                             lengthscale=torch.tensor(10.)),
                                         noise=torch.tensor(noise),
                                         jitter=1e-6)
        gpbo = GPBayesOptimizer(constraints.interval(0, 100),
                                gpmodel,
                                num_acquisitions=num_acquisitions)
        pyro.clear_param_store()
        for i in range(num_bo_steps):
            result = gpbo.get_step(f, None, verbose=True)

        print(f.__doc__)
        print(result)
Example #31
def main(args):
    pyro.set_rng_seed(args.seed)
    pyro.enable_validation(__debug__)

    model = SimpleHarmonicModel(args.process_noise, args.measurement_noise)
    guide = SimpleHarmonicModel_Guide(model)

    smc = SMCFilter(model,
                    guide,
                    num_particles=args.num_particles,
                    max_plate_nesting=0)

    logging.info('Generating data')
    zs, ys = generate_data(args)

    logging.info('Filtering')
    smc.init(initial=torch.tensor([1., 0.]))
    for y in ys[1:]:
        smc.step(y)

    logging.info('Marginals')
    empirical = smc.get_empirical()
    for t in range(1, 1 + args.num_timesteps):
        z = empirical["z_{}".format(t)]
        logging.info("{}\t{}\t{}\t{}".format(t, zs[t], z.mean, z.variance))
Example #32
def pytest_runtest_setup(item):
    pyro.clear_param_store()
    if item.get_closest_marker("disable_validation"):
        pyro.enable_validation(False)
    else:
        pyro.enable_validation(True)
    test_initialize_marker = item.get_closest_marker("init")
    if test_initialize_marker:
        rng_seed = test_initialize_marker.kwargs["rng_seed"]
        pyro.set_rng_seed(rng_seed)
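A test opts into this seeding hook through the custom "init" marker that the hook reads (a minimal sketch; the test name is illustrative):

@pytest.mark.init(rng_seed=123)
def test_something():
    ...  # runs with pyro.set_rng_seed(123) applied by the hook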
Example #33
def test_replay(model, subsample_size):
    pyro.set_rng_seed(0)

    traced_model = poutine.trace(model)
    original = traced_model(subsample_size)

    replayed = poutine.replay(model, trace=traced_model.trace)(subsample_size)
    assert replayed == original

    if subsample_size < 20:
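        # when the subsample is proper (smaller than the full data), a fresh
        # run draws new indices, so it should (almost surely) differ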
        different = traced_model(subsample_size)
        assert different != original
Example #34
def setup(args):
    pyro.set_rng_seed(args.rng_seed)
    train_loader = util.get_data_loader(dataset_name='MNIST',
                                        batch_size=args.batch_size,
                                        is_training_set=True,
                                        shuffle=True)
    test_loader = util.get_data_loader(dataset_name='MNIST',
                                       batch_size=args.batch_size,
                                       is_training_set=False,
                                       shuffle=True)
    global OUTPUT_DIR
    OUTPUT_DIR = os.path.join(RESULTS_DIR, args.impl)
    if not os.path.exists(OUTPUT_DIR):
        os.makedirs(OUTPUT_DIR)
    pyro.clear_param_store()
    return train_loader, test_loader
Example #35
def main(args):
    pyro.set_rng_seed(0)
    pyro.enable_validation()

    optim = Adam({"lr": 0.1})
    inference = SVI(model, guide, optim, loss=Trace_ELBO())
    data = torch.tensor([0.0, 1.0, 2.0, 20.0, 30.0, 40.0])
    k = 2

    print('Step\tLoss')
    loss = 0.0
    for step in range(args.num_epochs):
        if step and step % 10 == 0:
            print('{}\t{:0.5g}'.format(step, loss))
            loss = 0.0
        loss += inference.step(data, k)

    print('Parameters:')
    for name in sorted(pyro.get_param_store().get_all_param_names()):
        print('{} = {}'.format(name, pyro.param(name).detach().cpu().numpy()))
Example #36
        with torch.no_grad():
            test(args, test_loader, gpmodel)
        print("Amount of time spent for epoch {}: {}s\n".format(epoch, int(time.time() - start_time)))


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Pyro GP MNIST Example')
    parser.add_argument('--data-dir', type=str, default='../data', metavar='PATH',
                        help='default directory to cache MNIST data')
    parser.add_argument('--num-inducing', type=int, default=70, metavar='N',
                        help='number of inducing input (default: 70)')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=10, metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--cuda', action='store_true', default=False,
                        help='enables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    args = parser.parse_args()

    pyro.set_rng_seed(args.seed)

    main(args)
Example #37
def main(**kwargs):

    args = argparse.Namespace(**kwargs)

    if 'save' in args:
        if os.path.exists(args.save):
            raise RuntimeError('Output file "{}" already exists.'.format(args.save))

    if args.seed is not None:
        pyro.set_rng_seed(args.seed)

    X, true_counts = load_data()
    X_size = X.size(0)
    if args.cuda:
        X = X.cuda()

    # Build a function to compute z_pres prior probabilities.
    if args.z_pres_prior_raw:
        def base_z_pres_prior_p(t):
            return args.z_pres_prior
    else:
        base_z_pres_prior_p = make_prior(args.z_pres_prior)

    # Wrap with logic to apply any annealing.
    def z_pres_prior_p(opt_step, time_step):
        p = base_z_pres_prior_p(time_step)
        if args.anneal_prior == 'none':
            return p
        else:
            decay = dict(lin=lin_decay, exp=exp_decay)[args.anneal_prior]
            return decay(p, args.anneal_prior_to, args.anneal_prior_begin,
                         args.anneal_prior_duration, opt_step)

    model_arg_keys = ['window_size',
                      'rnn_hidden_size',
                      'decoder_output_bias',
                      'decoder_output_use_sigmoid',
                      'baseline_scalar',
                      'encoder_net',
                      'decoder_net',
                      'predict_net',
                      'embed_net',
                      'bl_predict_net',
                      'non_linearity',
                      'pos_prior_mean',
                      'pos_prior_sd',
                      'scale_prior_mean',
                      'scale_prior_sd']
    model_args = {key: getattr(args, key) for key in model_arg_keys if key in args}
    air = AIR(
        num_steps=args.model_steps,
        x_size=50,
        use_masking=not args.no_masking,
        use_baselines=not args.no_baselines,
        z_what_size=args.encoder_latent_size,
        use_cuda=args.cuda,
        **model_args
    )

    if args.verbose:
        print(air)
        print(args)

    if 'load' in args:
        print('Loading parameters...')
        air.load_state_dict(torch.load(args.load))

    vis = visdom.Visdom(env=args.visdom_env)
    # Viz sample from prior.
    if args.viz:
        z, x = air.prior(5, z_pres_prior_p=partial(z_pres_prior_p, 0))
        vis.images(draw_many(x, tensor_to_objs(latents_to_tensor(z))))

    def per_param_optim_args(module_name, param_name):
        lr = args.baseline_learning_rate if 'bl_' in param_name else args.learning_rate
        return {'lr': lr}

    svi = SVI(air.model, air.guide,
              optim.Adam(per_param_optim_args),
              loss=TraceGraph_ELBO())

    # Do inference.
    t0 = time.time()
    examples_to_viz = X[5:10]

    for i in range(1, args.num_steps + 1):

        loss = svi.step(X, args.batch_size, z_pres_prior_p=partial(z_pres_prior_p, i))

        if args.progress_every > 0 and i % args.progress_every == 0:
            print('i={}, epochs={:.2f}, elapsed={:.2f}, elbo={:.2f}'.format(
                i,
                (i * args.batch_size) / X_size,
                (time.time() - t0) / 3600,
                loss / X_size))

        if args.viz and i % args.viz_every == 0:
            trace = poutine.trace(air.guide).get_trace(examples_to_viz, None)
            z, recons = poutine.replay(air.prior, trace=trace)(examples_to_viz.size(0))
            z_wheres = tensor_to_objs(latents_to_tensor(z))

            # Show data with inferred object positions.
            vis.images(draw_many(examples_to_viz, z_wheres))
            # Show reconstructions of data.
            vis.images(draw_many(recons, z_wheres))

        if args.eval_every > 0 and i % args.eval_every == 0:
            # Measure accuracy on subset of training data.
            acc, counts, error_z, error_ix = count_accuracy(X, true_counts, air, 1000)
            print('i={}, accuracy={}, counts={}'.format(i, acc, counts.numpy().tolist()))
            if args.viz and error_ix.size(0) > 0:
                vis.images(draw_many(X[error_ix[0:5]], tensor_to_objs(error_z[0:5])),
                           opts=dict(caption='errors ({})'.format(i)))

        if 'save' in args and i % args.save_every == 0:
            print('Saving parameters...')
            torch.save(air.state_dict(), args.save)
Example #38
hyper-parameters) of running HMC on different problems.

[1] Carpenter B. (2016), ["Hierarchical Partial Pooling for Repeated Binary Trials"]
    (http://mc-stan.org/users/documentation/case-studies/pool-binary-trials.html).
[2] Efron B., Morris C. (1975), "Data analysis using Stein's estimator and its
    generalizations", J. Amer. Statist. Assoc., 70, 311-319.
[3] Neal, R. (2012), "MCMC using Hamiltonian Dynamics",
    (https://arxiv.org/pdf/1206.1901.pdf)
[4] Hoffman, M. D. and Gelman, A. (2014), "The No-U-turn sampler: Adaptively setting
    path lengths in Hamiltonian Monte Carlo", (https://arxiv.org/abs/1111.4246)
"""

logging.basicConfig(format='%(message)s', level=logging.INFO)
# Enable validation checks
pyro.enable_validation(True)
pyro.set_rng_seed(1)
DATA_URL = "https://d2fefpcigoriu7.cloudfront.net/datasets/EfronMorrisBB.txt"


# ===================================
#               MODELS
# ===================================


def fully_pooled(at_bats):
    """
    Number of hits in $K$ at bats for each player has a Binomial
    distribution with a common probability of success, $\phi$.

    :param (torch.Tensor) at_bats: Number of at bats for each player.
    :return: Number of hits predicted by the model.
Example #39
def test_custom_subsample(model):
    pyro.set_rng_seed(0)

    subsample = [1, 3, 5, 7]
    assert model(subsample) == subsample
    assert poutine.trace(model)(subsample) == subsample
Example #40
def pytest_runtest_setup(item):
    test_initialize_marker = item.get_closest_marker("init")
    if test_initialize_marker:
        rng_seed = test_initialize_marker.kwargs["rng_seed"]
        pyro.set_rng_seed(rng_seed)
Example #41
            lam, v = np.linalg.eig(cov)
            lam = np.sqrt(lam)
            ell = Ellipse(xy=(x[sig_ix], y[sig_ix]),
                          width=lam[0]*4, height=lam[1]*4,
                          angle=np.rad2deg(np.arccos(v[0, 0])),
                          color='blue')
            ell.set_facecolor('none')
            ax.add_artist(ell)

    # Save figure
    fig.savefig(figname)


if __name__ == "__main__":
    pyro.enable_validation(True)
    pyro.set_rng_seed(42)

    # Create our model with a fixed number of components
    K = 2

    data = get_samples()

    global_guide = AutoDelta(poutine.block(model, expose=['weights', 'locs', 'scales']))
    global_guide = config_enumerate(global_guide, 'parallel')
    _, svi = initialize(data)

    true_colors = [0] * 100 + [1] * 100
    plot(data, colors=true_colors, figname='pyro_init.png')

    for i in range(151):
        svi.step(data)
Example #42
from collections import namedtuple

import torch

import pyro
from pyro.contrib.gp.kernels import Matern52, WhiteNoise
from pyro.contrib.gp.util import conditional
from tests.common import assert_equal

T = namedtuple("TestConditional", ["Xnew", "X", "kernel", "f_loc", "f_scale_tril",
                                   "loc", "cov"])

Xnew = torch.tensor([[2., 3.], [4., 6.]])
X = torch.tensor([[1., 5.], [2., 1.], [3., 2.]])
kernel = Matern52(input_dim=2)
Kff = kernel(X) + torch.eye(3) * 1e-6
Lff = torch.linalg.cholesky(Kff)  # replaces the removed Tensor.potrf(upper=False)
pyro.set_rng_seed(123)
f_loc = torch.rand(3)
f_scale_tril = torch.rand(3, 3).tril(-1) + torch.rand(3).exp().diag()
f_cov = f_scale_tril.matmul(f_scale_tril.t())

TEST_CASES = [
    T(
        Xnew, X, kernel, torch.zeros(3), Lff, torch.zeros(2), None
    ),
    T(
        Xnew, X, kernel, torch.zeros(3), None, torch.zeros(2), None
    ),
    T(
        Xnew, X, kernel, f_loc, Lff, None, kernel(Xnew)
    ),
    T(