Example #1
File: scm.py Project: bel2scm/bel2scm
    def update_noise_svi(self, conditioned_model):

        """
        this performs stochastic variational inference for noise
        Args:
            conditioned_model:
            noise:
        Returns: Not sure now
        """

        exogenous_dist_dict = self.exogenous_dist_dict

        def guide(exogenous_dist_dict):
            mu_constraints = constraints.interval(-3., 3.)
            sigma_constraints = constraints.interval(.0001, 3)

            for exg_name, exg_dist in exogenous_dist_dict.items():
                # mu_guide = pyro.param("mu_{}".format(exg_name), torch.tensor(exg_dist.loc), constraint=mu_constraints)
                # sigma_guide = pyro.param("sigma_{}".format(exg_name), torch.tensor(exg_dist.scale), constraint=sigma_constraints)

                mu_guide = pyro.param("mu_{}".format(exg_name), torch.tensor(0.0), constraint=mu_constraints)
                sigma_guide = pyro.param("sigma_{}".format(exg_name), torch.tensor(1.0),
                                         constraint=sigma_constraints)

                # [Todo] support the binary parent
                noise_dist = pyro.distributions.Normal
                pyro.sample(exg_name, noise_dist(mu_guide, sigma_guide))

        pyro.clear_param_store()

        svi = SVI(
            model=conditioned_model,
            guide=guide,
            optim=SGD({"lr": 0.001, "momentum": 0.1}),
            loss=Trace_ELBO()
            # optim=Adam({"lr": 0.005, "betas": (0.95, 0.999)}),
            # loss=Trace_ELBO(retain_graph=True)
        )
        losses = []
        num_steps = 1000
        samples = defaultdict(list)
        for t in range(num_steps):
            losses.append(svi.step(exogenous_dist_dict))
            for noise in exogenous_dist_dict.keys():
                mu = 'mu_{}'.format(noise)
                sigma = 'sigma_{}'.format(noise)
                samples[mu].append(pyro.param(mu).item())
                samples[sigma].append(pyro.param(sigma).item())
        means = {k: statistics.mean(v) for k, v in samples.items()}

        updated_noise = {}

        # [Todo] support the binary parent
        noise_distribution = pyro.distributions.Normal
        for n in exogenous_dist_dict.keys():
            updated_noise[n] = noise_distribution(means["mu_{}".format(n)], means["sigma_{}".format(n)])

        return updated_noise, losses
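A sketch of the imports this method appears to rely on; the original scm.py may organize them differently, and `constraints` could equally come from `pyro.distributions`:

import statistics
from collections import defaultdict

import torch
from torch.distributions import constraints

import pyro
from pyro.infer import SVI, Trace_ELBO
from pyro.optim import SGD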
Example #2
    def fit(self,
            model_name,
            model_param_names,
            data_input,
            fitter=None,
            init_values=None):
        verbose = self.verbose
        message = self.message
        learning_rate = self.learning_rate
        seed = self.seed
        num_steps = self.num_steps
        learning_rate_total_decay = self.learning_rate_total_decay

        pyro.set_rng_seed(seed)
        if fitter is None:
            fitter = get_pyro_model(model_name)  # abstract
        model = fitter(data_input)  # concrete

        # Perform MAP inference using an AutoDelta guide.
        pyro.clear_param_store()
        guide = AutoDelta(model)
        optim = ClippedAdam({
            "lr": learning_rate,
            "lrd": learning_rate_total_decay**(1 / num_steps),
            "betas": (0.5, 0.8)
        })
        elbo = Trace_ELBO()
        loss_elbo = list()
        svi = SVI(model, guide, optim, elbo)
        for step in range(num_steps):
            loss = svi.step()
            loss_elbo.append(loss)
            if verbose and step % message == 0:
                print("step {: >4d} loss = {:0.5g}".format(step, loss))

        # Extract point estimates.
        values = guide()
        values.update(pyro.poutine.condition(model, values)())

        # Convert from torch.Tensors to numpy.ndarrays.
        extract = {
            name: value.detach().numpy()
            for name, value in values.items()
        }

        # make sure that model param names are a subset of the extracted keys
        invalid_model_param = set(model_param_names) - set(list(
            extract.keys()))
        if invalid_model_param:
            raise EstimatorException(
                "Pyro model definition does not contain required parameters")

        # the MAP extraction above returns all defined parameters;
        # filter out unnecessary keys
        posteriors = {param: extract[param] for param in model_param_names}
        training_metrics = {'loss_elbo': np.array(loss_elbo)}

        return posteriors, training_metrics
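The `lrd` entry passed to `ClippedAdam` multiplies the learning rate by that factor on every step, so `learning_rate_total_decay ** (1 / num_steps)` makes the rate decay by a total factor of `learning_rate_total_decay` over the full run. A quick check of the arithmetic (values illustrative):

learning_rate_total_decay = 0.1
num_steps = 500
lrd = learning_rate_total_decay ** (1 / num_steps)
# num_steps multiplicative decays compound to the target total decay
assert abs(lrd ** num_steps - learning_rate_total_decay) < 1e-9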
Example #3
    def train(self, num_iters):
        optim = Adam({"lr": 0.0001})
        svi = SVI(self.model, self.guide, optim, loss=Trace_ELBO())
        losses = []

        pyro.clear_param_store()
        for _ in tqdm(range(num_iters)):
            loss = svi.step()
            losses.append(loss)
        return losses  # return the loss trace so callers can inspect convergence
Example #4
    def fit(self, x_data, y_data, prior_conc, scale, num_iter=4000):
        self.prior_conc = prior_conc
        self.scale = scale
        pyro.clear_param_store()
        # pyro.optim.Adam requires an optim_args dict; {} falls back to torch defaults
        svi = SVI(self._model, self._guide, Adam({}), Trace_ELBO())
        iters = tqdm(range(num_iter))
        for _ in iters:
            elbo = svi.step(x_data, y_data)  # svi.step returns a Python float
            iters.set_description('ELBO: {:.4f}'.format(elbo))
Example #5
def train(device, dataloaders, dataset_sizes, learning_rate, num_epochs,
          early_stop_patience, model_path, pre_trained_baseline_net):

    # clear param store
    pyro.clear_param_store()

    cvae_net = CVAE(200, 500, 500, pre_trained_baseline_net)
    cvae_net.to(device)
    optimizer = pyro.optim.Adam({"lr": learning_rate})
    svi = SVI(cvae_net.model, cvae_net.guide, optimizer, loss=Trace_ELBO())

    best_loss = np.inf
    early_stop_count = 0
    Path(model_path).parent.mkdir(parents=True, exist_ok=True)

    for epoch in range(num_epochs):
        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            running_loss = 0.0
            num_preds = 0

            # Iterate over data.
            bar = tqdm(dataloaders[phase],
                       desc='CVAE Epoch {} {}'.format(epoch, phase).ljust(20))
            for i, batch in enumerate(bar):
                inputs = batch['input'].to(device)
                outputs = batch['output'].to(device)

                if phase == 'train':
                    loss = svi.step(inputs, outputs)
                else:
                    loss = svi.evaluate_loss(inputs, outputs)

                # statistics
                running_loss += loss / inputs.size(0)
                num_preds += 1
                if i % 10 == 0:
                    bar.set_postfix(loss='{:.2f}'.format(running_loss / num_preds),
                                    early_stop_count=early_stop_count)

            epoch_loss = running_loss / dataset_sizes[phase]
            # deep copy the model
            if phase == 'val':
                if epoch_loss < best_loss:
                    best_loss = epoch_loss
                    torch.save(cvae_net.state_dict(), model_path)
                    early_stop_count = 0
                else:
                    early_stop_count += 1

        if early_stop_count >= early_stop_patience:
            break

    # Save model weights
    cvae_net.load_state_dict(torch.load(model_path))
    cvae_net.eval()
    return cvae_net
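The validation branch relies on `svi.evaluate_loss`, which estimates the ELBO without updating parameters, while `svi.step` both estimates the loss and takes a gradient step. A minimal self-contained sketch of that split (toy model and guide, illustrative):

import torch
import pyro
import pyro.distributions as dist
from pyro.infer import SVI, Trace_ELBO
from pyro.optim import Adam

def model(x):
    loc = pyro.sample("loc", dist.Normal(0., 1.))
    with pyro.plate("data", x.shape[0]):
        pyro.sample("obs", dist.Normal(loc, 1.), obs=x)

def guide(x):
    q_loc = pyro.param("q_loc", torch.tensor(0.))
    pyro.sample("loc", dist.Normal(q_loc, 1.))

svi = SVI(model, guide, Adam({"lr": 0.01}), loss=Trace_ELBO())
x = torch.randn(8)
train_loss = svi.step(x)         # estimates the ELBO and updates q_loc
val_loss = svi.evaluate_loss(x)  # estimates the ELBO only; parameters unchanged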
Example #6
def deepstan_svi_sampler(model_code):
    model_guided = PyroModel(model_code)
    optimizer = Adam({"lr": 0.005, "betas": (0.95, 0.999)})
    svi = model_guided.svi(optimizer, Trace_ELBO())
    for step in tqdm.tqdm(range(svi_steps)):
        svi.step({})
    samples = pd.Series([
        float(model_guided.module.guide()["theta"]) for _ in range(iterations)
    ])
    return samples
Example #7
def test_laplace_linear_model(linear_model, one_point_design):
    pyro.set_rng_seed(42)
    pyro.clear_param_store()
    # You can use 1 final sample here because linear models have a posterior entropy that is independent of `y`
    estimated_eig = laplace_eig(linear_model, one_point_design, "y", "w",
                                guide=laplace_guide, num_steps=250, final_num_samples=1,
                                optim=optim.Adam({"lr": 0.05}),
                                loss=Trace_ELBO().differentiable_loss)
    expected_eig = linear_model_ground_truth(linear_model, one_point_design, "y", "w")
    assert_equal(estimated_eig, expected_eig, prec=5e-2)
Example #8
    def train(self, epochs, optimizer):

        svi = SVI(self.model, self.guide, optimizer, loss=Trace_ELBO())
        pyro.clear_param_store()

        for epoch in range(epochs):
            loss = svi.step(self.X_train, self.y_train)
            if epoch % 100 == 0:
                print("[iteration %04d] loss: %.4f" % (epoch, loss))
        return
Example #9
File: bnn.py Project: TyXe-BDL/TyXe
    def __init__(self,
                 net,
                 prior,
                 guide_builder=None,
                 name="",
                 closed_form_kl=True):
        super().__init__(net, prior, guide_builder=guide_builder, name=name)
        self.cached_output = None
        self.cached_kl_loss = None
        self._loss = TraceMeanField_ELBO() if closed_form_kl else Trace_ELBO()
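`TraceMeanField_ELBO` computes the KL terms analytically whenever the guide/prior pair admits a closed form, which typically lowers gradient variance relative to the sampled KL inside `Trace_ELBO`; hence the `closed_form_kl` switch. A small self-contained sketch contrasting the two estimators (toy model, illustrative):

import pyro
import pyro.distributions as dist
from pyro.infer import Trace_ELBO, TraceMeanField_ELBO
from pyro.infer.autoguide import AutoNormal

def model():
    pyro.sample("z", dist.Normal(0., 1.))

guide = AutoNormal(model)
mc_loss = Trace_ELBO().loss(model, guide)           # Monte Carlo KL estimate
cf_loss = TraceMeanField_ELBO().loss(model, guide)  # analytic KL where available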
Example #10
def train(model, guide, lr=0.01):
    pyro.clear_param_store()
    adam = pyro.optim.Adam({"lr": lr})
    svi = SVI(model, guide, adam, loss=Trace_ELBO())

    n_steps = 101
    for step in range(n_steps):
        loss = svi.step(data)
        if step % 50 == 0:
            print('[iter {}]  loss: {:.4f}'.format(step, loss))
Example #11
    def fit(self, X, y):

        # create the guide
        self.guide = self._create_guide(self.model)

        # initialise the svi
        self.svi = SVI(self.model, self.guide, self.optimiser, Trace_ELBO())

        # train the model
        self._train(X, y)
Example #12
    def __init__(self, model, data, covariates, *,
                 guide=None,
                 init_loc_fn=init_to_sample,
                 init_scale=0.1,
                 create_plates=None,
                 optim=None,
                 learning_rate=0.01,
                 betas=(0.9, 0.99),
                 learning_rate_decay=0.1,
                 clip_norm=10.0,
                 dct_gradients=False,
                 subsample_aware=False,
                 num_steps=1001,
                 num_particles=1,
                 vectorize_particles=True,
                 warm_start=False,
                 log_every=100):
        assert data.size(-2) == covariates.size(-2)
        super().__init__()
        self.model = model
        if guide is None:
            guide = AutoNormal(self.model, init_loc_fn=init_loc_fn, init_scale=init_scale,
                               create_plates=create_plates)
        self.guide = guide

        # Initialize.
        if warm_start:
            model = PrefixWarmStartMessenger()(model)
            guide = PrefixWarmStartMessenger()(guide)
        if dct_gradients:
            model = MarkDCTParamMessenger("time")(model)
            guide = MarkDCTParamMessenger("time")(guide)
        elbo = Trace_ELBO(num_particles=num_particles,
                          vectorize_particles=vectorize_particles)
        elbo._guess_max_plate_nesting(model, guide, (data, covariates), {})
        elbo.max_plate_nesting = max(elbo.max_plate_nesting, 1)  # force a time plate

        losses = []
        if num_steps:
            if optim is None:
                optim = DCTAdam({"lr": learning_rate, "betas": betas,
                                 "lrd": learning_rate_decay ** (1 / num_steps),
                                 "clip_norm": clip_norm,
                                 "subsample_aware": subsample_aware})
            svi = SVI(self.model, self.guide, optim, elbo)
            for step in range(num_steps):
                loss = svi.step(data, covariates) / data.numel()
                if log_every and step % log_every == 0:
                    logger.info("step {: >4d} loss = {:0.6g}".format(step, loss))
                    print("step {: >4d} loss = {:0.6g}".format(step, loss))
                losses.append(loss)

        self.guide.create_plates = None  # Disable subsampling after training.
        self.max_plate_nesting = elbo.max_plate_nesting
        self.losses = losses
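`Trace_ELBO(num_particles=K, vectorize_particles=True)` averages K ELBO samples per step in one vectorized pass, trading memory for lower gradient variance. A minimal sketch of that option in isolation (toy model, illustrative):

import pyro
import pyro.distributions as dist
from pyro.infer import SVI, Trace_ELBO
from pyro.infer.autoguide import AutoNormal
from pyro.optim import Adam

def model():
    pyro.sample("z", dist.Normal(0., 1.))

guide = AutoNormal(model)
elbo = Trace_ELBO(num_particles=32, vectorize_particles=True)
svi = SVI(model, guide, Adam({"lr": 0.01}), elbo)
loss = svi.step()  # one step whose gradient averages 32 vectorized ELBO samples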
Example #13
    def fit(self, model_name, model_param_names, data_input, init_values=None):
        verbose = self.verbose
        message = self.message
        learning_rate = self.learning_rate
        num_sample = self.num_sample
        seed = self.seed
        num_steps = self.num_steps

        pyro.set_rng_seed(seed)
        Model = get_pyro_model(model_name)  # abstract
        model = Model(data_input)  # concrete

        # Perform stochastic variational inference using an auto guide.
        pyro.clear_param_store()
        guide = AutoLowRankMultivariateNormal(model)
        optim = ClippedAdam({"lr": learning_rate})
        elbo = Trace_ELBO(num_particles=100, vectorize_particles=True)
        svi = SVI(model, guide, optim, elbo)

        for step in range(num_steps):
            loss = svi.step()
            if verbose and step % message == 0:
                scale_rms = guide._loc_scale()[1].detach().pow(
                    2).mean().sqrt().item()
                print("step {: >4d} loss = {:0.5g}, scale = {:0.5g}".format(
                    step, loss, scale_rms))

        # Extract samples.
        vectorize = pyro.plate("samples",
                               num_sample,
                               dim=-1 - model.max_plate_nesting)
        with pyro.poutine.trace() as tr:
            samples = vectorize(guide)()
        with pyro.poutine.replay(trace=tr.trace):
            samples.update(vectorize(model)())

        # Convert from torch.Tensors to numpy.ndarrays.
        extract = {
            name: value.detach().squeeze().numpy()
            for name, value in samples.items()
        }

        # make sure that model param names are a subset of the extracted keys
        invalid_model_param = set(model_param_names) - set(list(
            extract.keys()))
        if invalid_model_param:
            raise EstimatorException(
                "Pyro model definition does not contain required parameters")

        # the vectorized sampling above returns all defined parameters;
        # filter out unnecessary keys
        extract = {param: extract[param] for param in model_param_names}

        return extract
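The trace/replay block is the vectorized-sampling idiom: run the guide under an outer `pyro.plate`, record its trace, then replay the model against that trace so its outputs are computed from the same latent draws. A minimal sketch of the same idiom (toy model returning a dict, as the snippet's model does):

import pyro
import pyro.distributions as dist
from pyro.infer.autoguide import AutoNormal

def model():
    z = pyro.sample("z", dist.Normal(0., 1.))
    return {"z2": 2 * z}

guide = AutoNormal(model)
guide()  # one call to initialize the guide's parameters

vectorize = pyro.plate("samples", 100, dim=-1)
with pyro.poutine.trace() as tr:
    samples = vectorize(guide)()        # {"z": tensor of shape (100,)}
with pyro.poutine.replay(trace=tr.trace):
    samples.update(vectorize(model)())  # adds "z2" computed from the same draws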
Example #14
    def __init_guide(self, guide):
        print("setup guide")

        optimizer = pyro.optim.Adam({"lr": 1e-2})  # optimiser for stochastic optimization, use default parameters

        svi = SVI(self.model, guide, optimizer, loss=Trace_ELBO())  # Stochastic Variational Inference

        pyro.clear_param_store()
        for _ in trange(5000):
            svi.step(self.boards_trainings_data, self.winners_trainings_data)
        guide.requires_grad_(False)
        return guide
Example #15
def variational_fit(model, guide, data, num_epochs=2500, lr=0.001):
    """ Use Stochastic Variational Inference for inferring latent variables """
    svi = pyro.infer.SVI(model=model,
                         guide=guide,
                         optim=pyro.optim.Adam({'lr': lr}),
                         loss=Trace_ELBO())
    losses = []
    for i in tqdm(range(num_epochs)):
        losses.append(svi.step(data))

    return losses, dict(pyro.get_param_store())
Example #16
def test_subsample_smoke(Reparam, subsample):
    def model():
        with poutine.reparam(config={"x": Reparam()}):
            with pyro.plate("plate", 10):
                return pyro.sample("x", dist.Stable(1.5, 0))

    def create_plates():
        return pyro.plate("plate", 10, subsample_size=3)

    guide = AutoNormal(model, create_plates=create_plates if subsample else None)
    Trace_ELBO().loss(model, guide)  # smoke test
Example #17
    def _traces(self, *args, **kwargs):
        # find good initial trace
        model_trace = poutine.trace(self.model).get_trace(*args, **kwargs)
        best_log_prob = model_trace.log_prob_sum()
        for i in range(20):
            trace = poutine.trace(self.model).get_trace(*args, **kwargs)
            log_prob = trace.log_prob_sum()
            if log_prob > best_log_prob:
                best_log_prob = log_prob
                model_trace = trace

        # lift model
        prior, unpacked = {}, {}
        param_constraints = pyro.get_param_store().get_state()["constraints"]
        for name, node in model_trace.nodes.items():
            if node["type"] == "param":
                if param_constraints[name] is constraints.positive:
                    prior[name] = dist.HalfCauchy(2)
                else:
                    prior[name] = dist.Normal(0, 10)
                unpacked[name] = pyro.param(name).unconstrained()
            elif name in self.start:
                unpacked[name] = self.start[name]
            elif node["type"] == "sample" and not node["is_observed"]:
                unpacked[name] = transform_to(node["fn"].support).inv(
                    node["value"])
        lifted_model = poutine.lift(self.model, prior)

        # define guide
        packed = torch.cat(
            [v.clone().detach().reshape(-1) for v in unpacked.values()])
        pyro.param("auto_loc", packed)
        delta_guide = AutoLaplaceApproximation(lifted_model)

        # train guide
        optimizer = torch.optim.LBFGS(
            (pyro.param("auto_loc").unconstrained(), ), lr=0.1, max_iter=500)
        loss_and_grads = Trace_ELBO().loss_and_grads

        def closure():
            optimizer.zero_grad()
            return loss_and_grads(lifted_model, delta_guide, *args, **kwargs)

        optimizer.step(closure)
        guide = delta_guide.laplace_approximation(*args, **kwargs)

        # get posterior
        for i in range(self.num_samples):
            guide_trace = poutine.trace(guide).get_trace(*args, **kwargs)
            model_poutine = poutine.trace(
                poutine.replay(lifted_model, trace=guide_trace))
            yield model_poutine.get_trace(*args, **kwargs), 1.0

        pyro.clear_param_store()
Example #18
def get_pyro_model(return_all=False):
    regression_model = RegressionModel(p=1)
    model = model_fn(regression_model)
    guide = AutoDiagonalNormal(model)
    # guide = guide_fn(regression_model)
    optimizer = Adam({'lr': 0.05})
    svi = SVI(model, guide, optimizer, loss=Trace_ELBO(), num_samples=1000)
    if return_all:
        return svi, model, guide
    else:
        return svi
Example #19
def test_posterior_predictive_svi_auto_delta_guide(parallel):
    true_probs = torch.ones(5) * 0.7
    num_trials = torch.ones(5) * 1000
    num_success = dist.Binomial(num_trials, true_probs).sample()
    conditioned_model = poutine.condition(model, data={"obs": num_success})
    guide = AutoDelta(conditioned_model)
    svi = SVI(conditioned_model, guide, optim.Adam(dict(lr=1.0)), Trace_ELBO())
    for i in range(1000):
        svi.step(num_trials)
    posterior_predictive = Predictive(model, guide=guide, num_samples=10000, parallel=parallel)
    marginal_return_vals = posterior_predictive.get_samples(num_trials)["obs"]
    assert_close(marginal_return_vals.mean(dim=0), torch.ones(5) * 700, rtol=0.05)
Example #20
def train(model, guide, dataloader, num_epochs=50):
    optim = Adam({"lr": 0.01})
    svi = SVI(model, guide, optim, loss=Trace_ELBO())
    for epoch in range(num_epochs):
        print(f"Training epoch: {epoch + 1}")
        tqdm_dataloader = tqdm(dataloader)
        for i, (X, y) in enumerate(tqdm_dataloader):
            X = X.view((X.shape[0], -1))
            y = y.view((y.shape[0], -1))
            loss = svi.step(X, y)
            if i % 10 == 0:
                tqdm_dataloader.set_description(f"Loss: {loss}")
Example #21
    def __init__(self, binary_features, continuous_features, z_dim, hidden_dim,
                 hidden_layers, optimizer, activation, cuda):
        pyro.clear_param_store()
        vae = VAE(binary_features, continuous_features, z_dim, hidden_dim,
                  hidden_layers, activation, cuda)
        vae = vae.double()
        self.vae = vae
        self.svi = SVI(vae.model, vae.guide, optimizer, loss=Trace_ELBO())
        self.cuda = cuda

        self.train_stats = Statistics()
        self.test_stats = Statistics()
Example #22
    def do_test_auto(self, N, reparameterized, n_steps):
        logger.debug("\nGoing to do AutoGaussianChain test...")
        pyro.clear_param_store()
        self.setUp()
        self.setup_chain(N)
        self.compute_target(N)
        self.guide = AutoMultivariateNormal(self.model)
        logger.debug(
            "target auto_loc: {}".format(
                self.target_auto_mus[1:].detach().cpu().numpy()
            )
        )
        logger.debug(
            "target auto_diag_cov: {}".format(
                self.target_auto_diag_cov[1:].detach().cpu().numpy()
            )
        )

        # TODO speed up with parallel num_particles > 1
        adam = optim.Adam({"lr": 0.01, "betas": (0.95, 0.999)})
        elbo = Trace_ELBO(num_particles=100, vectorize_particles=True)
        svi = SVI(self.model, self.guide, adam, elbo)

        for k in range(n_steps):
            loss = svi.step(reparameterized)
            assert np.isfinite(loss), loss

            if k % 100 == 0 and k > 0 or k == n_steps - 1:
                logger.debug(
                    "[step {}] guide mean parameter: {}".format(
                        k, self.guide.loc.detach().cpu().numpy()
                    )
                )
                L = self.guide.scale_tril * self.guide.scale[:, None]
                diag_cov = torch.mm(L, L.t()).diag()
                logger.debug(
                    "[step {}] auto_diag_cov: {}".format(
                        k, diag_cov.detach().cpu().numpy()
                    )
                )

        assert_equal(
            self.guide.loc.detach(),
            self.target_auto_mus[1:],
            prec=0.05,
            msg="guide mean off",
        )
        assert_equal(
            diag_cov,
            self.target_auto_diag_cov[1:],
            prec=0.07,
            msg="guide covariance off",
        )
Example #23
    def fit(self, models, items, responses, num_epochs):
        optim = Adam({'lr': 0.1})
        if self.priors == 'vague':
            svi = SVI(self.model_vague,
                      self.guide_vague,
                      optim,
                      loss=Trace_ELBO())
        else:
            svi = SVI(self.model_hierarchical,
                      self.guide_hierarchical,
                      optim,
                      loss=Trace_ELBO())

        pyro.clear_param_store()
        for j in range(num_epochs):
            loss = svi.step(models, items, responses)
            if j % 100 == 0 and self.verbose:
                print("[epoch %04d] loss: %.4f" % (j + 1, loss))

        print("[epoch %04d] loss: %.4f" % (j + 1, loss))
        values = ['loc_diff', 'scale_diff', 'loc_ability', 'scale_ability']
Example #24
def auto_variational_fit(model, data, num_epochs=2500, lr=0.001):
    """ Use Stochastic Variational Inference for inferring latent variables """
    guide = AutoMultivariateNormal(model)
    svi = pyro.infer.SVI(model=model,
                         guide=guide,
                         optim=pyro.optim.Adam({'lr': lr}),
                         loss=Trace_ELBO())
    losses = []
    for i in tqdm(range(num_epochs)):
        losses.append(svi.step(data))

    return losses, guide.get_posterior()
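`AutoMultivariateNormal.get_posterior()` returns the fitted joint normal over the guide's packed latent variables in unconstrained space, which is what this helper hands back to the caller. A small sketch of the call (toy model, illustrative):

import torch
import pyro
import pyro.distributions as dist
from pyro.infer.autoguide import AutoMultivariateNormal

def model(data):
    loc = pyro.sample("loc", dist.Normal(0., 1.))
    with pyro.plate("data", data.shape[0]):
        pyro.sample("obs", dist.Normal(loc, 1.), obs=data)

data = torch.randn(5)
guide = AutoMultivariateNormal(model)
guide(data)  # one call to initialize the guide's parameters
posterior = guide.get_posterior()  # MultivariateNormal over packed unconstrained latents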
Example #25
def test_posterior_predictive_svi_one_hot():
    pseudocounts = torch.ones(3) * 0.1
    true_probs = torch.tensor([0.15, 0.6, 0.25])
    classes = dist.OneHotCategorical(true_probs).sample((10000,))
    guide = AutoDelta(one_hot_model)
    svi = SVI(one_hot_model, guide, optim.Adam(dict(lr=0.1)), Trace_ELBO())
    for i in range(1000):
        svi.step(pseudocounts, classes=classes)
    posterior_samples = Predictive(guide, num_samples=10000).get_samples(pseudocounts)
    posterior_predictive = Predictive(one_hot_model, posterior_samples)
    marginal_return_vals = posterior_predictive.get_samples(pseudocounts)["obs"]
    assert_close(marginal_return_vals.mean(dim=0), true_probs.unsqueeze(0), rtol=0.1)
Example #26
def forecast(X, y):
    
    # Creating the RBF kernel with the desired lengthscale and variance using pyro.
    
    k1 = gp.kernels.RBF(input_dim=2, lengthscale=torch.tensor(50.0),
                        variance=torch.tensor(0.5))
    pyro.enable_validation(True)       
    optim = Adam({"lr": 0.01}) 
    pyro.clear_param_store()
    
    # Creating an array of offsets from the last observed value out to 10 weeks ahead (in days).
    
    plus_arr = np.max(X)+np.array([7.,14.,21.,28.,35.,42.,49.,56.,63.,70.])
    
    # Changing numpy arrays into pytorch tensors (pyro's GP models expect tensors).

    X2 = (torch.from_numpy(X))
    y2 = (torch.from_numpy(y-np.mean(y)))
    
    # Adding the new prediction dates into the array and then transforming into pytorch tensor.
    
    Xtest_use = np.append(X,plus_arr)
    Xtest_use2 = (torch.from_numpy(Xtest_use))


    # Running the GP RBF kernel model on the known data
    
    gpr = gp.models.GPRegression(X2, y2,k1, noise=torch.tensor(0.01))
    
    # Stochastic variational inference to optimise the loss function,
    # essentially minimising the errors on the model

    svi = SVI(gpr.model, gpr.guide, optim, loss=Trace_ELBO())
    losses = []
    
    # Choosing how many times to iterate over the optimisation

    num_steps = 10

    for k in range(num_steps):
        losses.append(svi.step())

    # Putting the results into numpy arrays to be outputted.

    with torch.no_grad():
        if type(gpr) == gp.models.VariationalSparseGP:
            mean, cov = gpr(Xtest_use2, full_cov=True)
        else:
            mean, cov = gpr(Xtest_use2, full_cov=False, noiseless=False)

    mean = mean.detach().numpy()+np.mean(y)
    
    return mean, Xtest_use 
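Because `GPRegression` stores `X2` and `y2` internally, `gpr.model` and `gpr.guide` take no arguments, so `svi.step()` is called with none. An equivalent way to run the same optimisation with a plain torch optimizer and a differentiable ELBO, sketched under the assumption that `gpr` and `num_steps` are as defined above:

optimizer = torch.optim.Adam(gpr.parameters(), lr=0.01)
loss_fn = pyro.infer.Trace_ELBO().differentiable_loss
for k in range(num_steps):
    optimizer.zero_grad()
    loss = loss_fn(gpr.model, gpr.guide)  # a tensor, so backward() works
    loss.backward()
    optimizer.step()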
Example #27
def test_end_to_end(model):
    # Test training.
    model = AutoReparam()(model)
    guide = AutoNormal(model)
    svi = SVI(model, guide, Adam({"lr": 1e-9}), Trace_ELBO())
    for step in range(3):
        svi.step()

    # Test prediction.
    predictive = Predictive(model, guide=guide, num_samples=2)
    samples = predictive()
    assert set("abc").issubset(samples.keys())
Example #28
File: gf_scm.py Project: xuel12/ode2scm
    def update_noise_svi(self, observed_steady_state, initial_noise):
        def guide(noise):
            noise_terms = list(noise.keys())
            mu_constraints = constraints.interval(-3., 3.)
            sigma_constraints = constraints.interval(.0001, 3)
            mu = {
                k: pyro.param('{}_mu'.format(k),
                              tensor(0.),
                              constraint=mu_constraints)
                for k in noise_terms
            }
            sigma = {
                k: pyro.param('{}_sigma'.format(k),
                              tensor(1.),
                              constraint=sigma_constraints)
                for k in noise_terms
            }
            for noise in noise_terms:
                sample(noise, Normal(mu[noise], sigma[noise]))

        observation_model = condition(self.noisy_model, observed_steady_state)
        pyro.clear_param_store()
        svi = SVI(model=observation_model,
                  guide=guide,
                  optim=SGD({
                      "lr": 0.001,
                      "momentum": 0.1
                  }),
                  loss=Trace_ELBO())

        losses = []
        num_steps = 1000
        samples = defaultdict(list)
        for t in range(num_steps):
            losses.append(svi.step(initial_noise))
            for noise in initial_noise.keys():
                mu = '{}_mu'.format(noise)
                sigma = '{}_sigma'.format(noise)
                samples[mu].append(pyro.param(mu).item())
                samples[sigma].append(pyro.param(sigma).item())
        means = {k: statistics.mean(v) for k, v in samples.items()}
        updated_noise = {
            'N_SOS': Normal(means['N_SOS_mu'], means['N_SOS_sigma']),
            'N_Ras': Normal(means['N_Ras_mu'], means['N_Ras_sigma']),
            'N_PI3K': Normal(means['N_PI3K_mu'], means['N_PI3K_sigma']),
            'N_AKT': Normal(means['N_AKT_mu'], means['N_AKT_sigma']),
            'N_Raf': Normal(means['N_Raf_mu'], means['N_Raf_sigma']),
            'N_Mek': Normal(means['N_Mek_mu'], means['N_Mek_sigma']),
            'N_Erk': Normal(means['N_Erk_mu'], means['N_Erk_sigma'])
        }

        return updated_noise, losses
Example #29
def get_pyro_model(return_all=True):
    nn_model = NN_Model(input_size=100, hidden_size=400, output_size=1)
    model = model_fn(nn_model)
    guide = guide_fn(nn_model)
    AdamArgs = { 'lr': 3e-3 }
    optimizer = torch.optim.Adam
    scheduler = pyro.optim.ExponentialLR({'optimizer': optimizer, 'optim_args': AdamArgs, 'gamma': 0.99995 })
    svi = SVI(model, guide, scheduler, loss=Trace_ELBO(), num_samples=1000)
    
    if return_all:
        return svi, model, guide
    else:
        return svi
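With a `pyro.optim` scheduler such as `ExponentialLR`, the learning-rate decay is applied only when the scheduler itself is stepped; `svi.step()` alone optimises at the current rate. A self-contained sketch of one common pattern, stepping the scheduler once per SVI step (toy model, illustrative):

import torch
import pyro
import pyro.distributions as dist
from pyro.infer import SVI, Trace_ELBO
from pyro.infer.autoguide import AutoNormal

def model():
    pyro.sample("z", dist.Normal(0., 1.))

guide = AutoNormal(model)
scheduler = pyro.optim.ExponentialLR({
    'optimizer': torch.optim.Adam,
    'optim_args': {'lr': 3e-3},
    'gamma': 0.99995,
})
svi = SVI(model, guide, scheduler, loss=Trace_ELBO())
for step in range(1000):
    svi.step()
    scheduler.step()  # apply one gamma decay per SVI step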
Example #30
    def run_inference(self, X, y):
        N = X.shape[0]

        pyro.clear_param_store()
        optim = Adam({"lr": 0.002})
        svi = SVI(self.model, self.guide, optim, loss=Trace_ELBO())
        for j in range(1000):
            epoch_loss = svi.step(X, y)
            #set_trace()
            if j % 100 == 0:
                print("[{}] Loss: {}".format(j, epoch_loss / float(N)))
        for name in pyro.get_param_store().get_all_param_names():
            print("[{}]: {}".format(name, pyro.param(name).data.numpy()))