Example #1
        # Inference #
        inference.perform_inference(AR_model,
                                    number_iterations=N_itr_PC,
                                    number_samples=N_smpl,
                                    optimizer=optimizer,
                                    lr=lr)

        loss_list1 = AR_model.diagnostics["loss curve"]

        # ELBO
        #ELBO1.append(float(AR_model.estimate_log_model_evidence(N_ELBO_smpl).detach().numpy()))
        #print("PC {}".format(ELBO1[-1]))

        # MSE
        posterior_samples = AR_model._get_posterior_sample(2000)

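        # Summarize each latent state's posterior with its mean and a one-standard-deviation band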
        x_mean1 = []
        lower_bound1 = []
        upper_bound1 = []
        for xt in x:
            x_posterior_samples = posterior_samples[xt].detach().numpy().flatten()
            mean = np.mean(x_posterior_samples)
            sd = np.std(x_posterior_samples)
            x_mean1.append(mean)
            lower_bound1.append(mean - sd)
            upper_bound1.append(mean + sd)
        MSE = np.mean((np.array(ground_truth) - np.array(x_mean1))**2)
        var = (0.5 * (np.array(upper_bound1) - np.array(lower_bound1)))**2  # squared half-width, i.e. the posterior variance
        Lk = np.mean(0.5 *
Example #2
test_indices = RandomIndices(dataset_size=test_size,
                             batch_size=1,
                             name="test_indices",
                             is_observed=True)
test_images = EmpiricalVariable(np.reshape(test.test_data.numpy(),
                                           newshape=(test.test_data.shape[0],
                                                     number_pixels, 1)),
                                indices=test_indices,
                                name="x_test",
                                is_observed=True)
test_labels = EmpiricalVariable(test.test_labels.numpy(),
                                indices=test_indices,
                                name="labels",
                                is_observed=True)
test_model = ProbabilisticModel([test_images, test_labels])

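# Estimate test accuracy: classify each test image by the argmax of its posterior-mean output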
s = 0
for _ in range(num_images):
    test_sample = test_model._get_sample(1)
    test_image, test_label = test_sample[test_images], test_sample[test_labels]
    model_output = np.reshape(np.mean(model._get_posterior_sample(
        10, input_values={x: test_image})[k].cpu().detach().numpy(),
                                      axis=0),
                              newshape=(10, ))
    s += 1 if int(np.argmax(model_output)) == int(
        test_label.cpu().detach().numpy()) else 0
print("Accuracy: {} %".format(100 * s / float(num_images)))

#weight_map = variational_model._get_sample(1)[Qweights1].data[0, 0, 0, :]
#plt.imshow(np.reshape(weight_map, (28, 28)))
#plt.show()

plt.plot(model.diagnostics["loss curve"])
plt.show()
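The accuracy loop above recurs almost verbatim in the examples below. As a hedged sketch (the helper name and its parameters are illustrative and not part of brancher; it only reuses the _get_sample, _get_posterior_sample and tensor-conversion calls already shown above), the pattern can be factored out as:

# Illustrative helper, not part of the original examples.
import numpy as np

def posterior_accuracy(model, test_model, test_images, test_labels, x, k,
                       num_images=100, n_samples=10, n_classes=10):
    """Classify each sampled test image by the argmax of its posterior-mean output."""
    hits = 0
    for _ in range(num_images):
        sample = test_model._get_sample(1)
        image, label = sample[test_images], sample[test_labels]
        out = model._get_posterior_sample(n_samples, input_values={x: image})[k]
        probs = np.reshape(np.mean(out.cpu().detach().numpy(), axis=0),
                           (n_classes, ))
        hits += int(np.argmax(probs)) == int(label.cpu().detach().numpy())
    return 100.0 * hits / float(num_images)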
Example #3

    model.set_posterior_model(variational_model)

    # Inference

    from brancher import inference

    # Inference #
    inference.perform_inference(model,
                                number_iterations=n_itr,
                                number_samples=3,
                                optimizer="Adam",
                                lr=0.01)

    loss_list1.append(model.diagnostics["loss curve"])

    psamples = model._get_posterior_sample(1)

    images1.append([
        np.reshape(psamples[img[t]].detach().numpy(),
                   (3, image_size, image_size)) for t in range(T)
    ])
    # plt.show()

    #### 2 Mean field ####

    Qz = [
        NormalVariable(np.zeros((h_size, 1)),
                       np.ones((h_size, 1)),
                       "z0",
                       learnable=True)
    ]
Example #4
test_indices = RandomIndices(dataset_size=test_size,
                             batch_size=1,
                             name="test_indices",
                             is_observed=True)
test_images = EmpiricalVariable(np.reshape(test.test_data.numpy(),
                                           newshape=(test.test_data.shape[0],
                                                     number_pixels, 1)),
                                indices=test_indices,
                                name="x_test",
                                is_observed=True)
test_labels = EmpiricalVariable(test.test_labels.numpy(),
                                indices=test_indices,
                                name="labels",
                                is_observed=True)
test_model = ProbabilisticModel([test_images, test_labels])

s = 0
for _ in range(num_images):
    test_sample = test_model._get_sample(1)
    test_image, test_label = test_sample[test_images], test_sample[test_labels]
    model_output = model._get_posterior_sample(
        10, input_values={x: test_image})[k].cpu().detach().numpy()
    output_probs = np.reshape(np.mean(model_output, axis=0), newshape=(10, ))
    s += 1 if int(np.argmax(output_probs)) == int(
        test_label.cpu().detach().numpy()) else 0
print("Accuracy: {} %".format(100 * s / float(num_images)))

#weight_map = variational_model._get_sample(1)[Qweights1].detach().numpy()[0, 0, 0, :]
#plt.imshow(np.reshape(weight_map, (28, 28)))
#plt.show()
                          "weights",
                          learnable=True)
model.set_posterior_model(ProbabilisticModel([Qweights]))

# Inference
inference.perform_inference(model,
                            number_iterations=200,
                            number_samples=100,
                            optimizer='Adam',
                            lr=0.05)
loss_list = model.diagnostics["loss curve"]
plt.plot(loss_list)
plt.show()

# Statistics
posterior_samples = model._get_posterior_sample(50)
weights_posterior_samples = posterior_samples[weights].cpu().detach().numpy()

# Two subplots, unpack the axes array immediately
f, (ax1, ax2) = plt.subplots(1, 2)
ax1.plot(np.array(loss_list))
ax1.set_title("Convergence")
ax1.set_xlabel("Iteration")
x_range = np.linspace(-2, 2, 200)
ax2.scatter(input_variable[:, 0, 0],
            input_variable[:, 1, 0],
            c=output_labels.flatten())
for w in weights_posterior_samples:
    coeff = -float(w[0, 0, 0]) / float(w[0, 0, 1])
    ax2.plot(x_range, coeff * x_range, alpha=0.3)
ax2.set_xlim(-2, 2)
Example #6
# Observe data
data = np.reshape(ground_samples[y].cpu().detach().numpy(), newshape=(n, 1, 1))
y.observe(data)

# Inference
inference.perform_inference(model,
                            number_iterations=5000,
                            number_samples=100,
                            optimizer='Adam')

# Plot
#plt.plot(model.diagnostics["loss curve"])
#plt.show()

n_post_samples = 1000
post_samples = model._get_posterior_sample(n_post_samples)
s_x1 = np.reshape(x1.value.cpu().detach().numpy(), newshape=(n, ))
s_x2 = np.reshape(x2.value.cpu().detach().numpy(), newshape=(n, ))
post_mean = 0.
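# Each posterior sample defines one regression function; accumulate them to form the posterior-mean prediction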
for k in range(n_post_samples):
    s_b = float(post_samples[b].cpu().detach().numpy()[k, :])
    s_w1 = float(post_samples[w1].cpu().detach().numpy()[k, :])
    s_w2 = float(post_samples[w2].cpu().detach().numpy()[k, :])
    s_w12 = float(post_samples[w12].cpu().detach().numpy()[k, :])
    sample_function = s_b + s_w1 * s_x1 + s_w2 * s_x2 + s_w12 * s_x1 * s_x2
    post_mean += sample_function
    plt.plot(np.reshape(x_range, newshape=(n, )),
             sample_function,
             c="b",
             alpha=0.05)
post_mean /= float(n_post_samples)
Example #7

test_model = ProbabilisticModel([test_images, test_labels])

s = 0
model.set_posterior_model(variational_samplers[0])
scores_0 = []

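# Draw a fixed set of test images so that both variational samplers are scored on the same data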
test_image_list = []
test_label_list = []
for _ in range(num_images):
    test_sample = test_model._get_sample(1)
    test_image, test_label = test_sample[test_images], test_sample[test_labels]
    test_image_list.append(test_image)
    test_label_list.append(test_label)

for test_image, test_label in zip(test_image_list, test_label_list):
    model_output = np.reshape(np.mean(model._get_posterior_sample(
        10, input_values={x: test_image})[k].data,
                                      axis=0),
                              newshape=(10, ))
    output_label = int(np.argmax(model_output))
    scores_0.append(1 if output_label == int(test_label.data) else 0)
    s += 1 if output_label == int(test_label.data) else 0
print("Accuracy 0: {} %".format(100 * s / float(num_images)))

s = 0
model.set_posterior_model(variational_samplers[1])
scores_1 = []
for test_image, test_label in zip(test_image_list, test_label_list):
    model_output = np.reshape(np.mean(model._get_posterior_sample(
        10, input_values={x: test_image})[k].data,
                                      axis=0),
                              newshape=(10, ))
    output_label = int(np.argmax(model_output))
    scores_1.append(1 if output_label == int(test_label.data) else 0)
    s += 1 if output_label == int(test_label.data) else 0
print("Accuracy 1: {} %".format(100 * s / float(num_images)))
Example #8

for model_index in range(num_particles):
    s = 0
    model.set_posterior_model(inference_method.sampler_model[model_index])
    scores_0 = []
    test_image_list = []
    test_label_list = []
    for _ in range(num_images):
        test_sample = test_model._get_sample(1)
        test_image, test_label = test_sample[test_images], test_sample[test_labels]
        test_image_list.append(test_image)
        test_label_list.append(test_label)

    for test_image, test_label in zip(test_image_list, test_label_list):
        model_output = np.reshape(np.mean(model._get_posterior_sample(
            30, input_values={x: test_image})[k].detach().numpy(),
                                          axis=0),
                                  newshape=(number_output_classes, ))
        output_label = int(np.argmax(model_output))
        scores_0.append(
            1 if output_label == int(test_label.detach().numpy()) else 0)
        s += 1 if output_label == int(test_label.detach().numpy()) else 0
    print("Accuracy {}: {} %, weight: {}".format(
        model_index, 100 * s / float(num_images),
        inference_method.weights[model_index]))

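# Ensemble prediction: combine the particles' outputs, weighted by the inference weights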
s = 0
scores_ne = []
for test_image, test_label in zip(test_image_list, test_label_list):
    model_output_list = []
    for model_index in range(num_particles):
        model.set_posterior_model(inference_method.sampler_model[model_index])
        model_output_list.append(
            np.reshape(np.mean(model._get_posterior_sample(
                30, input_values={x: test_image})[k].detach().numpy(),
                               axis=0),
                       newshape=(number_output_classes, )))

    model_output = sum([
        output * w
        for output, w in zip(model_output_list, inference_method.weights)
    ])
Example #9
# Observe data
k.observe(data[k_real][:, 0, :])

# Variational distribution
Qp = BetaVariable(1., 1., "p", learnable=True)
model.set_posterior_model(ProbabilisticModel([Qp]))

# Inference
inference.perform_inference(model,
                            number_iterations=3000,
                            number_samples=100,
                            lr=0.01,
                            optimizer='Adam')
loss_list = model.diagnostics["loss curve"]

# Statistics
p_posterior_samples = model._get_posterior_sample(
    2000)[p].cpu().detach().numpy().flatten()

# Two subplots, unpack the axes array immediately
f, (ax1, ax2) = plt.subplots(1, 2)
ax1.plot(np.array(loss_list))
ax1.set_title("Convergence")
ax1.set_xlabel("Iteration")
ax2.hist(p_posterior_samples, 25)
ax2.axvline(x=p_real, lw=2, c="r")
ax2.set_title("Posterior samples (b)")
ax2.set_xlim(0, 1)
plt.show()
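The snippets on this page are excerpts, so the model definitions are often cut off above the shown code. As a minimal self-contained sketch of the same variational workflow (the Gaussian-mean model here is illustrative and stands in for the truncated definitions; it uses only brancher calls that already appear in these examples):

# Minimal illustrative sketch, not taken from the original example.
import numpy as np
from brancher.variables import ProbabilisticModel
from brancher.standard_variables import NormalVariable
from brancher import inference

# Generative model: latent mean mu with a single observed variable x
mu = NormalVariable(0., 1., "mu")
x = NormalVariable(mu, 0.5, "x")
model = ProbabilisticModel([x])

# Observe synthetic data in the (samples, batch, dim) layout used above
x.observe(np.random.normal(1., 0.5, (50, 1, 1)))

# Variational posterior and stochastic variational inference
Qmu = NormalVariable(0., 1., "mu", learnable=True)
model.set_posterior_model(ProbabilisticModel([Qmu]))
inference.perform_inference(model,
                            number_iterations=500,
                            number_samples=50,
                            optimizer="Adam",
                            lr=0.01)
loss_list = model.diagnostics["loss curve"]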
Example #10

                                      covariance_matrix=np.identity(number_regressors),
                                      name="weights", learnable=True)
variational_model = ProbabilisticModel([Qweights])
model.set_posterior_model(variational_model)

# Inference
inference.perform_inference(model,
                            number_iterations=3000,
                            number_samples=50,
                            optimizer='Adam',
                            lr=0.001)

loss_list = model.diagnostics["loss curve"]

# Statistics
posterior_samples = model._get_posterior_sample(1000)
weights_posterior_samples = posterior_samples[weights].detach().numpy()

# Two subplots, unpack the axes array immediately
f, (ax1, ax2, ax3) = plt.subplots(1, 3)
ax1.plot(np.array(loss_list))
ax1.set_title("Convergence")
ax1.set_xlabel("Iteration")
ax2.scatter(input_variable[:, 0, 0], input_variable[:, 1, 0], c=labels.flatten())
for w in weights_posterior_samples:
    coeff = -float(w[0, 0, 0])/float(w[0, 0, 1])
    x_range = np.linspace(-2, 2, 200)
    ax2.plot(x_range, coeff*x_range, alpha=0.1)
ax2.set_xlim(-2, 2)
ax2.set_ylim(-2, 2)
ax3.scatter(weights_posterior_samples[:, 0, 0, 0], weights_posterior_samples[:, 0, 0, 1], alpha=0.5)
Example #11
data = k_real._get_sample(number_samples=50)

# Observe data
k.observe(data[k_real][:, 0, :])

# Variational distribution
Qp = LogitNormalVariable(0.2, 2., "p", learnable=True)
model.set_posterior_model(ProbabilisticModel([Qp]))

# Inference
inference.stochastic_variational_inference(
    model,
    number_iterations=150,
    number_samples=100,
    optimizer=chainer.optimizers.Adam(0.05))
loss_list = model.diagnostics["loss curve"]

# Statistics
p_posterior_samples = model._get_posterior_sample(2000)[p].data.flatten()

# Two subplots, unpack the axes array immediately
f, (ax1, ax2) = plt.subplots(1, 2)
ax1.plot(np.array(loss_list))
ax1.set_title("Convergence")
ax1.set_xlabel("Iteration")
ax2.hist(p_posterior_samples, 25)
ax2.axvline(x=p_real, lw=2, c="r")
ax2.set_title("Posterior samples (b)")
ax2.set_xlim(0, 1)
plt.show()
Example #12

                             is_observed=True)
test_images = EmpiricalVariable(np.array([
    np.reshape(image[0], newshape=(number_pixels, 1)) for image in test
]).astype("float32"),
                                indices=test_indices,
                                name="x_test",
                                is_observed=True)
test_labels = EmpiricalVariable(np.array(
    [image[1] * np.ones((1, 1)) for image in test]).astype("int32"),
                                indices=test_indices,
                                name="labels",
                                is_observed=True)
test_model = ProbabilisticModel([test_images, test_labels])

s = 0
for _ in range(num_images):
    test_sample = test_model._get_sample(1)
    test_image, test_label = test_sample[test_images], test_sample[test_labels]
    model_output = np.reshape(np.mean(model._get_posterior_sample(
        10, input_values={x: test_image})[k].data,
                                      axis=0),
                              newshape=(10, ))
    s += 1 if int(np.argmax(model_output)) == int(test_label.data) else 0
print("Accuracy: {} %".format(100 * s / float(num_images)))

#weight_map = variational_model._get_sample(1)[Qweights1].data[0, 0, 0, :]
#plt.imshow(np.reshape(weight_map, (28, 28)))
#plt.show()

plt.plot(model.diagnostics["loss curve"])
plt.show()
Example #13
lr = 0.0005
inference.perform_inference(AR_model,
                            number_iterations=N_itr,
                            number_samples=N_smpl,
                            optimizer=optimizer,
                            lr=lr)

loss_list1 = AR_model.diagnostics["loss curve"]

# ELBO
N_ELBO_smpl = 1000
ELBO = AR_model.estimate_log_model_evidence(N_ELBO_smpl)
print("The ELBO is {}".format(ELBO))

# Statistics
posterior_samples1 = AR_model._get_posterior_sample(2000)
omega_posterior_samples1 = posterior_samples1[omega].detach().numpy().flatten()
omega_mean1 = np.mean(omega_posterior_samples1)
omega_sd1 = np.std(omega_posterior_samples1)

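# Posterior mean and a one-standard-deviation band for each time step of the AR process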
x_mean1 = []
lower_bound1 = []
upper_bound1 = []
for xt in x:
    x_posterior_samples1 = posterior_samples1[xt].detach().numpy().flatten()
    mean1 = np.mean(x_posterior_samples1)
    sd1 = np.std(x_posterior_samples1)
    x_mean1.append(mean1)
    lower_bound1.append(mean1 - sd1)
    upper_bound1.append(mean1 + sd1)
print("The estimated coefficient is: {} +- {}".format(omega_mean1,