Example #1
# Imports needed by this snippet
from brancher.variables import ProbabilisticModel
from brancher.standard_variables import NormalVariable, LogNormalVariable
from brancher import inference

# Real model
nu_real = 1.
mu_real = -2.
x_real = NormalVariable(mu_real, nu_real, "x_real")

# Normal model
nu = LogNormalVariable(0., 1., "nu")
mu = NormalVariable(0., 10., "mu")
x = NormalVariable(mu, nu, "x")
model = ProbabilisticModel([x])

print(model)

# Print samples
sample = model.get_sample(10)
print(sample)

# Print samples from single variable
x_sample = x.get_sample(10)
print(x_sample)

# Print samples conditional on an input
in_sample = model.get_sample(10, input_values={mu: 100.})
print(in_sample)

# Generate data (the private _get_sample returns raw tensors keyed by variable)
data = x_real._get_sample(number_samples=50)

# Observe data
x.observe(data[x_real][:, 0, :])
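
# A variational model and inference step would typically follow; a minimal
# sketch mirroring Example #8 below (Qmu/Qnu and the settings are illustrative):
Qnu = LogNormalVariable(0., 1., "nu", learnable=True)
Qmu = NormalVariable(0., 1., "mu", learnable=True)
model.set_posterior_model(ProbabilisticModel([Qmu, Qnu]))
inference.perform_inference(model,
                            number_iterations=300,
                            number_samples=100,
                            optimizer="Adam",
                            lr=0.01)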
Example #2
# (fragment of a VAE example: x is the observed image variable, z the latent code)
model = ProbabilisticModel([x, z])

# Amortized variational distribution
Qx = EmpiricalVariable(dataset, batch_size=50, name="x", is_observed=True)
encoder_output = encoder(Qx)
Qz = NormalVariable(encoder_output["mean"], encoder_output["sd"], name="z")
model.set_posterior_model(ProbabilisticModel([Qx, Qz]))

# Joint-contrastive inference
inference.perform_inference(model,
                            number_iterations=5000,
                            number_samples=1,
                            optimizer="Adam",
                            lr=0.005)
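# diagnostics["loss curve"] stores the training loss (negative ELBO) per iteration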
loss_list = model.diagnostics["loss curve"]

# Plot results
plt.plot(loss_list)
plt.show()

# Generate and plot three samples from the trained model
for _ in range(3):
    sample = model.get_sample(1)
    plt.imshow(np.reshape(sample["x"][0], newshape=(28, 28)))
    plt.show()
Example #3
import matplotlib.pyplot as plt
import numpy as np

from brancher.variables import ProbabilisticModel
from brancher.standard_variables import BetaVariable, BinomialVariable
from brancher import inference
from brancher.visualizations import plot_posterior

# Beta/Binomial model
number_tosses = 1
p = BetaVariable(1., 1., "p")
k = BinomialVariable(number_tosses, probs=p, name="k")
model = ProbabilisticModel([k, p])

# Generate data
p_real = 0.8
data = model.get_sample(number_samples=30, input_values={p: p_real})

# Observe data
k.observe(data)
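
# Variational model (a learnable posterior is typically set before inference;
# this mean-field Qp follows the pattern of the other examples in this listing)
Qp = BetaVariable(1., 1., "p", learnable=True)
model.set_posterior_model(ProbabilisticModel([Qp]))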

# Inference
inference.perform_inference(model,
                            number_iterations=1000,
                            number_samples=500,
                            lr=0.1,
                            optimizer='SGD')
loss_list = model.diagnostics["loss curve"]

# Plot loss
plt.plot(loss_list)
plt.title("Loss (negative ELBO)")
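plt.show()

# plot_posterior is imported above; a natural follow-up (sketch, assuming the
# brancher.visualizations API) is to visualize the approximate posterior over p:
plot_posterior(model, variables=["p"])
plt.show()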
Example #4
# Joint-contrastive inference
inference.perform_inference(
    model,
    inference_method=ReverseKL(gradient_estimator=PathwiseDerivativeEstimator),
    number_iterations=1000,
    number_samples=1,
    optimizer="Adam",
    lr=0.001)
loss_list = model.diagnostics["loss curve"]

# Plot results
plt.plot(loss_list)
plt.show()

# Decode a 30x30 grid of 2D latent codes to visualize the learned manifold;
# the sigmoid maps the decoder's mean output to pixel intensities
sigmoid = lambda x: 1 / (np.exp(-x) + 1)
image_grid = []
z_range = np.linspace(-3, 3, 30)
for z1 in z_range:
    image_row = []
    for z2 in z_range:
        sample = model.get_sample(1, input_values={z: np.array([z1, z2])})
        image = sigmoid(
            np.reshape(sample["decoder_output"].values[0]["mean"],
                       newshape=(28, 28)))
        image_row += [image]
    image_grid += [np.concatenate(image_row, axis=0)]
image_grid = np.concatenate(image_grid, axis=1)
plt.imshow(image_grid)
plt.colorbar()
plt.show()
                       "muy_{}".format(t + 1),
                       is_observed=True))

    mx.append(new_mx)
    my.append(new_my)
    sx.append(new_sx)
    sy.append(new_sy)

model = ProbabilisticModel(w + r + mux + muy)

# Variational model
variational_filter = ProbabilisticModel(Qmux + Qmuy)

# Average reward before training
print(model.get_average_reward(10))

# Train control
num_itr = 3000
inference.perform_inference(model,
                            posterior_model=variational_filter,
                            number_iterations=num_itr,
                            number_samples=9,
                            optimizer="Adam",
                            lr=0.01)
reward_list = model.diagnostics["reward curve"]
# TODO: Very important. Solve the trained-determinant problem
# (it should be possible to specify which parameters are trainable).

print(model.get_sample(20)[["r"]])

plt.plot(reward_list)
plt.show()
print(model.get_average_reward(15))
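# After training, the average reward printed here should exceed the
# pre-training value printed above.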
Example #6
    Qlambda31 = RootVariable(l0 * np.ones((latent_size3, )),
                             'lambda31',
                             learnable=True)
    Qlambda32 = RootVariable(l0 * np.ones((latent_size3, )),
                             'lambda32',
                             learnable=True)
    Qz3 = NormalVariable(
        BF.sigmoid(Qlambda31) * BF.relu(Qdecoder_output2["mean"]) +
        (1 - BF.sigmoid(Qlambda31)) * encoder_output1["mean"],
        BF.sigmoid(Qlambda32) * z3sd +
        (1 - BF.sigmoid(Qlambda32)) * encoder_output1["sd"],
        name="z3")

    model.set_posterior_model(ProbabilisticModel([Qx, Qz1, Qz2, Qz3, Qlabels]))

    # Sanity checks: draw one sample from the model and one from the posterior
    model.get_sample(1)
    model.posterior_model.get_sample(1)

    # Joint-contrastive inference
    inference.perform_inference(
        model,
        inference_method=ReverseKL(
            gradient_estimator=PathwiseDerivativeEstimator),
        number_iterations=num_itr,
        number_samples=1,
        optimizer="Adam",
        lr=0.002)
    loss_list2.append(np.array(model.diagnostics["loss curve"]))
    #
    ELBO2 = []
    for n in range(N_ELBO_ITR):
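        # (loop body truncated in the source; it presumably accumulates
        # per-run ELBO estimates into ELBO2)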
Example #7
# Joint-contrastive inference
inference.perform_inference(
    model,
    inference_method=ReverseKL(gradient_estimator=Taylor2Estimator),
    number_iterations=2000,
    number_samples=1,
    optimizer="SGD",
    lr=0.001)
loss_list = model.diagnostics["loss curve"]

# Plot results
plt.plot(loss_list)
plt.show()

# Decode random binary latent codes (one Bernoulli(0.5) draw per latent unit)
sigmoid = lambda x: 1 / (np.exp(-x) + 1)
image_grid = []
num_images = 30
z_values = [
    np.random.binomial(1, 0.5 * np.ones(latent_size, ))
    for _ in range(num_images)
]
for z_val in z_values:
    sample = model.get_sample(1, input_values={z: z_val})
    image = sigmoid(
        np.reshape(sample["decoder_output"].values[0]["mean"],
                   newshape=(28, 28)))
    image_grid += [image]
image_grid = np.concatenate(image_grid, axis=1)
plt.imshow(image_grid)
plt.colorbar()
plt.show()
Example #8
from brancher.variables import ProbabilisticModel
from brancher.standard_variables import NormalVariable, LogNormalVariable
from brancher import inference

# Normal model
nu = LogNormalVariable(0., 1., "nu")
mu = NormalVariable(0., 10., "mu")
x = NormalVariable(mu, nu, "x")
model = ProbabilisticModel([x])  # to fix: plot_posterior (flatten automatically?)

# Generate data
nu_real = 1.
mu_real = -2.
data = model.get_sample(number_samples=20,
                        input_values={
                            mu: mu_real,
                            nu: nu_real
                        })

# Observe data
x.observe(data)

# Variational model
Qnu = LogNormalVariable(0., 1., "nu", learnable=True)
Qmu = NormalVariable(0., 1., "mu", learnable=True)
model.set_posterior_model(ProbabilisticModel([Qmu, Qnu]))

# Inference
inference.perform_inference(model,
                            number_iterations=300,
                            number_samples=100,
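                            # (call truncated in the source; a plausible
                            # completion following the other examples here)
                            optimizer="Adam",
                            lr=0.05)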
Example #9
# Probabilistic model
T = 2.
N = 100
dt = T/float(N)
time_range = np.linspace(0., T, N)
a = BetaVariable(1., 1., name="a")
b = BetaVariable(1., 1., name="b")
c = NormalVariable(0., 0.1, name="c")
d = NormalVariable(0., 0.1, name="d")
xi = LogNormalVariable(0.1, 0.1, name="xi")
chi = LogNormalVariable(0.1, 0.1, name="chi")
x_series = [NormalVariable(0., 1., name="x_0")]
y_series = [NormalVariable(0., 1., name="y_0")]
for n, t in enumerate(time_range):
    x_new_mean = (1-dt*a)*x_series[-1] + dt*c*y_series[-1]
    y_new_mean = (1-dt*b)*y_series[-1] + dt*d*x_series[-1]
    x_series += [NormalVariable(x_new_mean, np.sqrt(dt)*xi, name="x_{}".format(n+1))]
    y_series += [NormalVariable(y_new_mean, np.sqrt(dt)*chi, name="y_{}".format(n+1))]
dynamic_causal_model = ProbabilisticModel([x_series[-1], y_series[-1]])

# Run dynamics
sample = dynamic_causal_model.get_sample(number_samples=3)

# Observe
observable_data = sample[[x.name for x in x_series] + [y.name for y in y_series]]
dynamic_causal_model.observe(observable_data)

# Variational model (construction omitted in this snippet)

Example #10
# (fragment: begins inside the time loop of a similar coupled dynamical model)
    x_series += [
        NormalVariable(x_new_mean, np.sqrt(dt) * xi, name="x_{}".format(n + 1))
    ]
    y_series += [
        NormalVariable(y_new_mean,
                       np.sqrt(dt) * chi,
                       name="y_{}".format(n + 1))
    ]
dynamic_causal_model = ProbabilisticModel([x_series[-1], y_series[-1]])

# Run dynamics
sample = dynamic_causal_model.get_sample(number_samples=2,
                                         input_values={
                                             a: 1.,
                                             b: 1.,
                                             c: 0.,
                                             d: 3.,
                                             e: 10.,
                                             xi: 2.,
                                             chi: 2.
                                         })

# Plot sample
time_series = sample[[x.name for x in x_series]].transpose().plot()
plt.show()

# Observe
observable_data = sample[[x.name
                          for x in x_series] + [y.name for y in y_series]]
dynamic_causal_model.observe(observable_data)

# Variational model (construction omitted in this snippet)
Example #11
#
# model = ProbabilisticModel([k])
#
# perform_inference(model,
#                   inference_method=MaximumLikelihood(),
#                   number_iterations=100,
#                   optimizer="Adam",
#                   lr=0.001)
# loss_list = model.diagnostics["loss curve"]
# plt.plot(loss_list)
# print(loss_list[-1])
# plt.show()

N = 500

test_size = test.test_data.numpy().shape[0]
test_images = np.reshape(test.test_data.numpy(),
                         newshape=(test_size, image_size * image_size))

pred_labels = np.argmax(
    model.get_sample(1, input_values={x: test_images[:N, :]})["k"][0],
    axis=1)
true_labels = test.test_labels.numpy()[:N]

accuracy = np.mean(pred_labels == true_labels)
print("Accuracy: {}".format(accuracy))
Example #12
# (fragment: begins mid-definition of the convolutional latent variable z)
              Wk2,
              stride=2,
              padding=0), (2, 3)),
                          name="z")
Wl = NormalVariable(loc=np.zeros((num_classes, out_channels2)),
                    scale=np.ones((num_classes, out_channels2)),
                    name="Wl")
b = NormalVariable(loc=np.zeros((num_classes, 1)),
                   scale=np.ones((num_classes, 1)),
                   name="b")
reshaped_z = BF.reshape(z, shape=(out_channels2, 1))
k = CategoricalVariable(logits=BF.linear(reshaped_z, Wl, b), name="k")

# Probabilistic model
model = ProbabilisticModel([k])
samples = model.get_sample(10)  # sanity check: forward samples from the prior

# Observations
k.observe(labels)

# Variational model
num_particles = 4  #10
wk1_locations = [
    np.random.normal(0., 1., (out_channels1, in_channels, 3, 3))
    for _ in range(num_particles)
]
wk2_locations = [
    np.random.normal(0., 1., (out_channels2, out_channels1, 3, 3))
    for _ in range(num_particles)
]
wl_locations = [
Example #13
# (fragment of a receptive-field model; mu_x, mu_y, v, x, y and
# experimental_input are defined earlier in the original file)
nu = LogNormalVariable(-1, 0.01, name="nu")
receptive_field = BF.exp((-(x - mu_x)**2 - (y - mu_y)**2) /
                         (2. * v**2)) / (2. * BF.sqrt(np.pi * v**2))
mean_response = BF.sum(BF.sum(receptive_field * experimental_input,
                              dim=1,
                              keepdim=True),
                       dim=2,
                       keepdim=True)  # TODO: not very intuitive
response = NormalVariable(mean_response, nu, name="response")
model = ProbabilisticModel([response, experimental_input])

# Generate data and observe the model
sample = model.get_sample(15,
                          input_values={
                              mu_x: 1.,
                              mu_y: 2.,
                              v: 0.3,
                              nu: 0.1
                          })[["x", "y", "w1", "w2", "b", "response"]]
model.observe(sample)

# Variational model
Qmu_x = NormalVariable(0., 1., name="mu_x", learnable=True)
Qmu_y = NormalVariable(0., 1., name="mu_y", learnable=True)
Qv = LogNormalVariable(0., 0.1, name="v", learnable=True)
Qnu = LogNormalVariable(-1, 0.01, name="nu", learnable=True)
variational_posterior = ProbabilisticModel([Qmu_x, Qmu_y, Qv, Qnu])
model.set_posterior_model(variational_posterior)

# Inference
inference.perform_inference(model,