is_observed=True)
        test_images = EmpiricalVariable(
            dataset["data"][ind[dataset_size:], :].astype("float32"),
            indices=test_indices,
            name="x_test",
            is_observed=True)
        test_labels = EmpiricalVariable(
            dataset["target"][ind[dataset_size:]].astype("int32"),
            indices=test_indices,
            name="labels",
            is_observed=True)
        test_model = ProbabilisticModel([test_images, test_labels])

        for model_index in range(num_particles):
            s = 0
            model.set_posterior_model(
                inference_method.sampler_model[model_index])
            scores_0 = []
            test_image_list = []
            test_label_list = []
            for _ in range(num_images):
                test_sample = test_model._get_sample(1)
                test_image, test_label = test_sample[test_images], test_sample[
                    test_labels]
                test_image_list.append(test_image)
                test_label_list.append(test_label)

            for test_image, test_label in zip(test_image_list,
                                              test_label_list):
                model_output = np.reshape(np.mean(model._get_posterior_sample(
                    30, input_values={x: test_image})[k].detach().numpy(),
                    axis=0), newshape=(number_output_classes,))
                output_label = int(np.argmax(model_output))
                scores_0.append(1 if output_label == int(test_label.detach().numpy()) else 0)
                s += 1 if output_label == int(test_label.detach().numpy()) else 0
            print("Accuracy {}: {} %".format(model_index, 100 * s / float(num_images)))
Example #2
Qb2 = NormalVariable(np.zeros((number_output_classes, 1)),
                     0.2 * np.ones((number_output_classes, 1)),
                     "b2",
                     learnable=True)
Qweights1 = NormalVariable(np.zeros((number_hidden_units, number_pixels)),
                           0.2 * np.ones((number_hidden_units, number_pixels)),
                           "weights1",
                           learnable=True)
Qweights2 = NormalVariable(np.zeros((number_output_classes, number_hidden_units)),
                           0.2 * np.ones((number_output_classes, number_hidden_units)),
                           "weights2",
                           learnable=True)
variational_model = ProbabilisticModel([Qb1, Qb2, Qweights1, Qweights2])
model.set_posterior_model(variational_model)

# Inference
inference.perform_inference(model,
                            number_iterations=2000,
                            number_samples=50,
                            optimizer='Adam',
                            lr=0.005)  #0.05

# Test accuracy
num_images = 500
test_size = len(test)
test_indices = RandomIndices(dataset_size=test_size,
                             batch_size=1,
                             name="test_indices",
                             is_observed=True)
Example #3
# Initialize encoder and decoders
encoder = BF.BrancherFunction(EncoderArchitecture(image_size=image_size, latent_size=latent_size))
decoder = BF.BrancherFunction(DecoderArchitecture(latent_size=latent_size, image_size=image_size))

# Generative model
z = NormalVariable(np.zeros((latent_size,)), np.ones((latent_size,)), name="z")
decoder_output = decoder(z)
x = NormalVariable(decoder_output["mean"], decoder_output["sd"], name="x")
model = ProbabilisticModel([x, z])

# Amortized variational distribution
Qx = EmpiricalVariable(dataset, batch_size=50, name="x", is_observed=True)
encoder_output = encoder(Qx)
Qz = NormalVariable(encoder_output["mean"], encoder_output["sd"], name="z")
model.set_posterior_model(ProbabilisticModel([Qx, Qz]))

# Joint-contrastive inference
inference.perform_inference(model,
                            number_iterations=5000,
                            number_samples=1,
                            optimizer="Adam",
                            lr=0.005)
loss_list = model.diagnostics["loss curve"]

# Plot results
plt.plot(loss_list)
plt.show()

sample = model.get_sample(1)
plt.imshow(np.reshape(sample["x"][0], newshape=(28, 28)))
plt.show()
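
This snippet assumes that EncoderArchitecture and DecoderArchitecture are PyTorch modules whose forward pass returns a dictionary with "mean" and "sd" entries, which is what the indexing decoder_output["mean"] relies on. A minimal sketch of such modules follows; the hidden-layer size and the softplus link are illustrative assumptions, not taken from the original.

import torch.nn as nn
import torch.nn.functional as F

class EncoderArchitecture(nn.Module):
    """Hypothetical encoder: flattened image -> parameters of q(z|x)."""
    def __init__(self, image_size, latent_size, hidden_size=256):
        super().__init__()
        self.hidden = nn.Linear(image_size, hidden_size)
        self.mean = nn.Linear(hidden_size, latent_size)
        self.pre_sd = nn.Linear(hidden_size, latent_size)

    def forward(self, x):
        h = F.relu(self.hidden(x))
        # softplus keeps the standard deviation strictly positive
        return {"mean": self.mean(h), "sd": F.softplus(self.pre_sd(h))}

class DecoderArchitecture(nn.Module):
    """Hypothetical decoder: latent code -> parameters of p(x|z)."""
    def __init__(self, latent_size, image_size, hidden_size=256):
        super().__init__()
        self.hidden = nn.Linear(latent_size, hidden_size)
        self.mean = nn.Linear(hidden_size, image_size)
        self.pre_sd = nn.Linear(hidden_size, image_size)

    def forward(self, z):
        h = F.relu(self.hidden(z))
        return {"mean": self.mean(h), "sd": F.softplus(self.pre_sd(h))}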
Example #4
            Qh.append(
                NormalVariable(BF.sigmoid(Qhlambda[t]) * new_h +
                               (1 - BF.sigmoid(Qhlambda[t])) * Qh_mean[t],
                               2 * driving_noise,
                               h_names[t],
                               learnable=True))

            Qz.append(
                NormalVariable(BF.sigmoid(Qzlambda[t]) * new_z +
                               (1 - BF.sigmoid(Qzlambda[t])) * Qz_mean[t],
                               2 * driving_noise,
                               z_names[t],
                               learnable=True))

        variational_posterior = ProbabilisticModel(Qx + Qh + Qz)
        AR_model.set_posterior_model(variational_posterior)

        # Inference #
        inference.perform_inference(AR_model,
                                    number_iterations=N_itr_PC,
                                    number_samples=N_smpl,
                                    optimizer=optimizer,
                                    lr=lr)

        loss_list1 = AR_model.diagnostics["loss curve"]

        # ELBO
        #ELBO1.append(float(AR_model.estimate_log_model_evidence(N_ELBO_smpl).detach().numpy()))
        #print("PC {}".format(ELBO1[-1]))

        # MSE
Example #5
# Observations
sample = model.get_sample(1)
data = sample.filter(regex="^score").filter(regex="^((?!scale).)*$")  # keep "score*" columns that do not contain "scale"
model.observe(data)

# Variational model
Qgroup_means = [
    Normal(0., 4., "group_mean_{}".format(n), learnable=True)
    for n in range(N_groups)
]
Qpeople_means = [
    Normal(0., 0.1, "person_{}".format(m), learnable=True)
    for m, assignment_list in enumerate(assignment_matrix)
]
model.set_posterior_model(ProbabilisticModel(Qpeople_means + Qgroup_means))

# Inference #
N_itr = 300
N_smpl = 50
optimizer = "SGD"
lr = 0.00001
inference.perform_inference(model,
                            number_iterations=N_itr,
                            number_samples=N_smpl,
                            optimizer=optimizer,
                            lr=lr)
loss_list2 = model.diagnostics["loss curve"]

N_ELBO = 1000
ELBO2 = model.estimate_log_model_evidence(N_ELBO)
print(sample)

# Print samples from single variable
x_sample = x.get_sample(10)
print(x_sample)

# Print samples conditional on an input
in_sample = model.get_sample(10, input_values={mu: 100.})
print(in_sample)

# Generate data
data = x_real._get_sample(number_samples=50)

# Observe data
x.observe(data[x_real][:, 0, :])

# Variational model
Qnu = LogNormalVariable(0., 1., "nu", learnable=True)
Qmu = NormalVariable(0., 1., "mu", learnable=True)
model.set_posterior_model(ProbabilisticModel([Qmu, Qnu]))

# Inference
inference.stochastic_variational_inference(model,
                                           number_iterations=100,
                                           number_samples=50,
                                           optimizer=chainer.optimizers.Adam(0.1))
loss_list = model.diagnostics["loss curve"]

# print posterior sample
post_samples = model.get_posterior_sample(10)
print(post_samples)
#samples = model._get_sample(300)
#model.calculate_log_probability(samples)

# Observations
k.observe(labels)

#observed_model = inference.get_observed_model(model)
#observed_samples = observed_model._get_sample(number_samples=1, observed=True)

# Variational Model
Qweights = NormalVariable(np.zeros((1, number_regressors)),
                          np.ones((1, number_regressors)),
                          "weights",
                          learnable=True)
model.set_posterior_model(ProbabilisticModel([Qweights]))

# Inference
inference.perform_inference(model,
                            number_iterations=200,
                            number_samples=100,
                            optimizer='Adam',
                            lr=0.05)
loss_list = model.diagnostics["loss curve"]
plt.plot(loss_list)
plt.show()

# Statistics
posterior_samples = model._get_posterior_sample(50)
weights_posterior_samples = posterior_samples[weights].cpu().detach().numpy()
Example #8
    encoder_output1 = DeterministicVariable(encoder1(Qx),
                                            name="encoder_output1")
    Qz3 = NormalVariable(encoder_output1["mean"],
                         encoder_output1["sd"],
                         name="z3")
    encoder_output2 = DeterministicVariable(encoder2(encoder_output1["mean"]),
                                            name="encoder_output2")
    Qz2 = NormalVariable(encoder_output2["mean"],
                         encoder_output2["sd"],
                         name="z2")
    encoder_output3 = DeterministicVariable(encoder3(encoder_output2["mean"]),
                                            name="encoder_output3")
    Qz1 = NormalVariable(encoder_output3["mean"],
                         encoder_output3["sd"],
                         name="z1")
    model.set_posterior_model(ProbabilisticModel([Qx, Qz1, Qz2, Qz3, Qlabels]))

    # Joint-contrastive inference
    inference.perform_inference(
        model,
        inference_method=ReverseKL(
            gradient_estimator=PathwiseDerivativeEstimator),
        number_iterations=num_itr,
        number_samples=1,
        optimizer="Adam",
        lr=0.0005)
    loss_list1.append(np.array(model.diagnostics["loss curve"]))

    ELBO1 = []
    for n in range(N_ELBO_ITR):
        ELBO1.append(
            float(model.estimate_log_model_evidence(N_ELBO_smpl).detach().numpy()))
# Local variational models
plt.plot(loss_list)
plt.show()

# Test accuracy
num_images = 2000
test_size = len(test)
test_indices = RandomIndices(dataset_size=test_size, batch_size=1, name="test_indices", is_observed=True)
test_images = EmpiricalVariable(np.array([np.reshape(image[0], newshape=(number_pixels, 1)) for image in test]).astype("float32"),
                                indices=test_indices, name="x_test", is_observed=True)
test_labels = EmpiricalVariable(np.array([image[1]*np.ones((1, 1))
                                          for image in test]).astype("int32"), indices=test_indices, name="labels", is_observed=True)
test_model = ProbabilisticModel([test_images, test_labels])

s = 0
model.set_posterior_model(variational_samplers[0])
scores_0 = []

test_image_list = []
test_label_list = []
for _ in range(num_images):
    test_sample = test_model._get_sample(1)
    test_image, test_label = test_sample[test_images], test_sample[test_labels]
    test_image_list.append(test_image)
    test_label_list.append(test_label)

for test_image, test_label in zip(test_image_list, test_label_list):
    model_output = np.reshape(np.mean(model._get_posterior_sample(
        10, input_values={x: test_image})[k].data, axis=0), newshape=(10,))
    output_label = int(np.argmax(model_output))
    scores_0.append(1 if output_label == int(test_label.data) else 0)
    s += 1 if output_label == int(test_label.data) else 0
Example #10
def perform_inference(joint_model,
                      number_iterations,
                      number_samples=1,
                      optimizer='Adam',
                      input_values={},
                      inference_method=None,
                      posterior_model=None,
                      sampler_model=None,
                      pretraining_iterations=0,
                      **opt_params):  #TODO: input values
    """
    Summary

    Parameters
    ---------
    """
    if isinstance(joint_model, StochasticProcess):
        joint_model = joint_model.active_submodel
    if isinstance(joint_model, Variable):
        joint_model = ProbabilisticModel([joint_model])
    if not inference_method:
        warnings.warn(
            "The inference method was not specified, using the default reverse KL variational inference"
        )
        inference_method = ReverseKL()
    if not posterior_model:
        # do not overwrite an explicitly supplied posterior model
        if joint_model.posterior_model is not None:
            posterior_model = joint_model.posterior_model
        else:
            posterior_model = inference_method.construct_posterior_model(
                joint_model)
    if not sampler_model:
        try:
            sampler_model = inference_method.sampler_model
        except AttributeError:
            try:
                sampler_model = joint_model.posterior_sampler
            except AttributeError:
                sampler_model = None

    joint_model.update_observed_submodel()

    def append_prob_optimizer(model, optimizer, **opt_params):
        prob_opt = ProbabilisticOptimizer(
            model, optimizer, **opt_params
        )  # TODO: this should be better! handling models with no params
        if prob_opt.optimizer:
            optimizers_list.append(prob_opt)

    optimizers_list = []
    if inference_method.learnable_posterior:
        append_prob_optimizer(posterior_model, optimizer, **opt_params)
    if inference_method.learnable_model:
        append_prob_optimizer(joint_model, optimizer, **opt_params)
    if inference_method.learnable_sampler:
        append_prob_optimizer(sampler_model, optimizer, **opt_params)

    loss_list = []

    inference_method.check_model_compatibility(joint_model, posterior_model,
                                               sampler_model)

    for iteration in tqdm(range(number_iterations)):
        loss = inference_method.compute_loss(joint_model, posterior_model,
                                             sampler_model, number_samples)

        if torch.isfinite(loss.detach()).all().item():
            for opt in optimizers_list:
                opt.zero_grad()
            loss.backward()
            inference_method.correct_gradient(joint_model, posterior_model,
                                              sampler_model, number_samples)
            optimizers_list[0].update()
            if iteration > pretraining_iterations:
                # the remaining optimizers only start updating after pretraining
                for opt in optimizers_list[1:]:
                    opt.update()
            loss_list.append(loss.cpu().detach().numpy().flatten())
        else:
            warnings.warn("Numerical error, skipping sample")
    joint_model.diagnostics.update({"loss curve": np.array(loss_list)})

    inference_method.post_process(
        joint_model)  #TODO: this could be implemented with a with block

    if joint_model.posterior_model is None and inference_method.learnable_posterior:
        joint_model.set_posterior_model(posterior_model)
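
For reference, a minimal usage sketch of perform_inference on a toy model; the names mu, x and Qmu and the synthetic data are illustrative, and Brancher's usual import paths are assumed. When inference_method is omitted, the function falls back to reverse KL variational inference, as the warning above indicates.

import numpy as np
from brancher.variables import ProbabilisticModel
from brancher.standard_variables import NormalVariable
from brancher import inference

# Generative model: x ~ N(mu, 1) with unknown mean mu ~ N(0, 10)
mu = NormalVariable(0., 10., name="mu")
x = NormalVariable(mu, 1., name="x")
model = ProbabilisticModel([x])

# Synthetic observations (shape conventions may differ across versions)
x.observe(np.random.normal(2., 1., (50, 1)))

# Mean-field variational posterior over mu
Qmu = NormalVariable(0., 1., name="mu", learnable=True)
model.set_posterior_model(ProbabilisticModel([Qmu]))

# No inference_method given, so this defaults to ReverseKL
inference.perform_inference(model,
                            number_iterations=500,
                            number_samples=20,
                            optimizer="Adam",
                            lr=0.01)
loss_list = model.diagnostics["loss curve"]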
# Observe
observable_data = sample[[x.name
                          for x in x_series] + [y.name for y in y_series]]
dynamic_causal_model.observe(observable_data)

# Variational model
Qa = LogNormalVariable(0., 0.5, name="a", learnable=True)
Qb = LogNormalVariable(0., 0.5, name="b", learnable=True)
Qc = NormalVariable(0., 0.1, name="c", learnable=True)
Qd = NormalVariable(0., 0.1, name="d", learnable=True)
Qe = NormalVariable(0., 5., name="e", learnable=True)
Qxi = LogNormalVariable(0.1, 0.1, name="xi", learnable=True)
Qchi = LogNormalVariable(0.1, 0.1, name="chi", learnable=True)
variational_posterior = ProbabilisticModel([Qa, Qb, Qc, Qd, Qe, Qxi, Qchi])
dynamic_causal_model.set_posterior_model(variational_posterior)

# Inference #
inference.perform_inference(dynamic_causal_model,
                            number_iterations=100,
                            number_samples=5,
                            optimizer='Adam',
                            lr=0.01)
loss_list = dynamic_causal_model.diagnostics["loss curve"]
plt.plot(loss_list)
plt.show()

# Plot posterior
plot_posterior(dynamic_causal_model,
               variables=["a", "b", "c", "d", "e", "xi", "chi"])
plt.show()

# Test accuracy
test_size = len(ind[dataset_size:])
num_images = test_size*3
test_indices = RandomIndices(dataset_size=test_size, batch_size=1, name="test_indices", is_observed=True)
test_images = EmpiricalVariable(dataset["data"][ind[dataset_size:], :].astype("float32"),
                                indices=test_indices, name="x_test", is_observed=True)
test_labels = EmpiricalVariable(dataset["target"][ind[dataset_size:]].astype("int32"),
                                indices=test_indices, name="labels", is_observed=True)
test_model = ProbabilisticModel([test_images, test_labels])


for model_index in range(num_particles):
    s = 0
    model.set_posterior_model(particles[model_index])
    scores_0 = []
    test_image_list = []
    test_label_list = []
    for _ in range(num_images):
        test_sample = test_model._get_sample(1)
        test_image, test_label = test_sample[test_images], test_sample[test_labels]
        test_image_list.append(test_image)
        test_label_list.append(test_label)

    for test_image, test_label in zip(test_image_list, test_label_list):
        model_output = np.reshape(np.mean(model._get_posterior_sample(
            80, input_values={x: test_image})[k].detach().numpy(), axis=0),
            newshape=(number_output_classes,))
        output_label = int(np.argmax(model_output))
        scores_0.append(1 if output_label == int(test_label.detach().numpy()) else 0)
        s += 1 if output_label == int(test_label.detach().numpy()) else 0
    print("Accuracy {}: {} %".format(model_index, 100*s/float(num_images)))
# Forward pass
final_activations = BF.matmul(weights, x)
k = CategoricalVariable(logits=final_activations, name="k")

# Probabilistic model
model = ProbabilisticModel([k])

# Observations
k.observe(labels)

# Variational model
initial_weights = np.random.normal(0., 1.,
                                   (number_output_classes, number_regressors))
model.set_posterior_model(
    ProbabilisticModel(
        [RootVariable(initial_weights, name="weights", learnable=True)]))

# Inference
inference.perform_inference(model,
                            inference_method=MAP(),
                            number_iterations=3000,
                            number_samples=100,
                            optimizer="SGD",
                            lr=0.0025)
loss_list = model.diagnostics["loss curve"]
plt.show()

# Test accuracy
test_size = len(ind[dataset_size:])
num_images = test_size * 3