import numpy as np
import chainer.links as L

import brancher.functions as BF
from brancher.functions import BrancherFunction as bf
from brancher.variables import DeterministicVariable
from brancher.standard_variables import NormalVariable
from brancher.inference import maximal_likelihood  # assumed import path for the call below

# Parameters
number_regressors = 1
number_observations = 15
real_weights = np.random.normal(0, 1, (number_regressors, 1))
real_sigma = 0.6
input_variable = np.random.normal(0, 1,
                                  (number_observations, number_regressors))

# ProbabilisticModel
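# Bayesian linear regression: the mean of y is a learnable linear map of x and
# the noise scale is parameterized as exp(sigma) so that it stays positive.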
regression_link = bf(L.Linear(number_regressors, 1))
x = DeterministicVariable(input_variable, "x", is_observed=True)
sigma = DeterministicVariable(0.1, "sigma", learnable=True)
y = NormalVariable(regression_link(x), BF.exp(sigma), "y")

# Observations
data = (np.matmul(x.value.data, real_weights) +
        np.random.normal(0, real_sigma, (number_observations, 1)))
y.observe(data)
print(y)

# Maximum likelihood
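# maximal_likelihood fits every learnable parameter (the weights of the linear
# link and sigma) by gradient-based ascent on the log-likelihood of the data.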
loss_list = maximal_likelihood(y, number_iterations=1000)

# Model predictions on a grid of inputs. The loop body is truncated in the
# original; the append below is a completion. sigma is set very negative so
# that exp(sigma) is near zero and the samples are almost noise-free.
a_range = np.linspace(-2, 2, 40)
model_prediction = []
sigma.value = -20.
for a in a_range:
    x.value = a
    model_prediction.append(y._get_sample(1))

# ----- Example 2 -----
minibatch_indices = RandomIndices(dataset_size=dataset_size,
                                  batch_size=minibatch_size,
                                  name="indices",
                                  is_observed=True)
x = EmpiricalVariable(input_variable,
                      indices=minibatch_indices,
                      name="x",
                      is_observed=True)
labels = EmpiricalVariable(output_labels,
                           indices=minibatch_indices,
                           name="labels",
                           is_observed=True)

# Architecture parameters
weights = NormalVariable(
    np.zeros((number_output_classes, number_regressors)), 10 * np.ones(
        (number_output_classes, number_regressors)), "weights")

# Forward pass
final_activations = BF.matmul(weights, x)
k = CategoricalVariable(logits=final_activations, name="k")

# Probabilistic model
model = ProbabilisticModel([k])

# Observations
k.observe(labels)

# Variational model
num_particles = 2  #10
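# Each particle provides an independent initial location for the variational
# posterior, as used by particle-based inference such as WVGD (see Example 24).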
initial_locations = [np.random.normal(0., 1., (number_output_classes, number_regressors))
                     for _ in range(num_particles)]

# ----- Example 3 -----
minibatch_indices = RandomIndices(dataset_size=dataset_size,
                                  batch_size=minibatch_size,
                                  name="indices",
                                  is_observed=True)
x = EmpiricalVariable(input_variable,
                      indices=minibatch_indices,
                      name="x",
                      is_observed=True)
labels = EmpiricalVariable(output_labels,
                           indices=minibatch_indices,
                           name="labels",
                           is_observed=True)

# Architecture parameters
number_hidden_units = 20
b1 = NormalVariable(np.zeros((number_hidden_units, 1)), 10 * np.ones(
    (number_hidden_units, 1)), "b1")
b2 = NormalVariable(np.zeros((number_output_classes, 1)), 10 * np.ones(
    (number_output_classes, 1)), "b2")
weights1 = NormalVariable(np.zeros(
    (number_hidden_units, number_pixels)), 10 * np.ones(
        (number_hidden_units, number_pixels)), "weights1")
weights2 = NormalVariable(
    np.zeros((number_output_classes, number_hidden_units)), 10 * np.ones(
        (number_output_classes, number_hidden_units)), "weights2")

# Forward pass
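# Two-layer network: a tanh hidden layer followed by a linear readout whose
# activations parameterize the categorical likelihood over the output classes.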
hidden_units = BF.tanh(BF.matmul(weights1, x) + b1)
final_activations = BF.matmul(weights2, hidden_units) + b2
k = CategoricalVariable(softmax_p=final_activations, name="k")

# Probabilistic model
model = ProbabilisticModel([k])

# ----- Example 4 -----
import chainer.functions as F

import torch

#from brancher.links import brancher_decorator
from brancher.variables import DeterministicVariable, RandomVariable, ProbabilisticModel
from brancher.standard_variables import NormalVariable
from brancher.functions import BrancherFunction
import brancher.functions as BF
#import brancher.links as BL

##
a = DeterministicVariable(data=1.5, name='a', learnable=True)
b = DeterministicVariable(0.3, 'b')
c = DeterministicVariable(0.3, 'c')
d = NormalVariable((a * b + c), c + a**2, 'd')
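# Arithmetic on variables builds the graph lazily: both parameters of d are
# deterministic functions of its parent variables a, b and c.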

##
print(a._get_sample(10))

##
e1 = BF.cat(
    (a, b), 2
)  #TODO: to change later, so that user does not have to specify dim explicitly (adjust cat)
e2 = BF.cat((a, c), 2)
f = NormalVariable(e1**2, e2**1, 'f')
g = NormalVariable(BF.relu(f), 1., 'g')

##
print(g._get_sample(10))
# ----- Example 5 -----
minibatch_indices = RandomIndices(dataset_size=dataset_size,
                                  batch_size=minibatch_size,
                                  name="indices",
                                  is_observed=True)
x = EmpiricalVariable(input_variable,
                      indices=minibatch_indices,
                      name="x",
                      is_observed=True)
labels = EmpiricalVariable(output_labels,
                           indices=minibatch_indices,
                           name="labels",
                           is_observed=True)

# Architecture parameters
weights = NormalVariable(np.zeros(
    (number_output_classes, number_pixels)), 10 * np.ones(
        (number_output_classes, number_pixels)), "weights")

# Forward pass
final_activations = BF.matmul(weights, x)
k = CategoricalVariable(softmax_p=final_activations, name="k")

# Probabilistic model
model = ProbabilisticModel([k])

# Observations
k.observe(labels)

# Variational Model
Qweights = NormalVariable(np.zeros((number_output_classes, number_pixels)),
                          0.1 * np.ones((number_output_classes, number_pixels)),
                          "weights", learnable=True)  # completion; the original call is truncated here

# ----- Example 6 -----
import chainer
import chainer.functions as F
import matplotlib.pyplot as plt
import numpy as np

from brancher.distributions import NormalDistribution, LogNormalDistribution
from brancher.variables import DeterministicVariable, RandomVariable, ProbabilisticModel
from brancher.standard_variables import NormalVariable, LogNormalVariable
from brancher import inference
import brancher.functions as BF

# Real model
nu_real = 1.
mu_real = -2.
x_real = NormalVariable(mu_real, nu_real, "x_real")

# Normal model
nu = LogNormalVariable(0., 1., "nu")
mu = NormalVariable(0., 10., "mu")
x = NormalVariable(mu, nu, "x")
model = ProbabilisticModel([x])

print(model)

# Print samples
sample = model.get_sample(10)
print(sample)

# Print samples from single variable
x_sample = x.get_sample(10)
print(x_sample)
# ----- Example 7 -----
        self.l3 = nn.Linear(hidden_size, image_size) # Latent log sd output
        self.softplus = nn.Softplus()

    def __call__(self, x):
        h = self.relu(self.l1(x))
        output_mean = self.l2(h)
        output_log_sd = self.l3(h)
        return {"mean": output_mean, "sd": self.softplus(output_log_sd) + 0.01}


# Initialize encoder and decoders
encoder = BF.BrancherFunction(EncoderArchitecture(image_size=image_size, latent_size=latent_size))
decoder = BF.BrancherFunction(DecoderArchitecture(latent_size=latent_size, image_size=image_size))
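# The encoder maps images to the parameters of q(z|x); the decoder maps latent
# codes to the parameters of p(x|z).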

# Generative model
z = NormalVariable(np.zeros((latent_size,)), np.ones((latent_size,)), name="z")
decoder_output = decoder(z)
x = NormalVariable(decoder_output["mean"], decoder_output["sd"], name="x")
model = ProbabilisticModel([x, z])

# Amortized variational distribution
Qx = EmpiricalVariable(dataset, batch_size=50, name="x", is_observed=True)
encoder_output = encoder(Qx)
Qz = NormalVariable(encoder_output["mean"], encoder_output["sd"], name="z")
model.set_posterior_model(ProbabilisticModel([Qx, Qz]))
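# The variational distribution is amortized: one encoder network produces
# per-datapoint posterior parameters for every minibatch drawn from Qx.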

# Joint-contrastive inference
inference.perform_inference(model,
                            number_iterations=5000,
                            number_samples=1,
                            optimizer="Adam")

# ----- Example 8 -----
import matplotlib.pyplot as plt
import numpy as np

from brancher.variables import DeterministicVariable, ProbabilisticModel
from brancher.standard_variables import NormalVariable, EmpiricalVariable
from brancher import inference
import brancher.functions as BF

# Data

# Neural architectures
#Encoder
#Decoder

# Generative model
latent_size = (10, )
z = NormalVariable(np.zeros(latent_size), np.ones(latent_size), name="z")
decoder_output = decoder(z)
x = NormalVariable(decoder_output["mean"],
                   BF.exp(decoder_output["log_var"]),
                   name="x")
model = ProbabilisticModel([x, z])

# Amortized variational distribution
Qx = EmpiricalVariable(dataset, name="x")
encoder_output = encoder(Qx)
Qz = NormalVariable(encoder_output["mean"],
                    BF.exp(encoder_output["log_var"]),
                    name="z")
variational_model = ProbabilisticModel([Qx, Qz])

# ----- Example 9 -----

def bayesian_update(r, w0, w1, mx, my, sx, sy):
    out = brancher_net(r, w0, w1, mx, my, sx, sy)
    return [out[0], out[1], out[2], out[3]]


# Model
T = 20

sigma = 0.1
mx = [DeterministicVariable(0., "mx_0")]
my = [DeterministicVariable(0., "my_0")]
sx = [DeterministicVariable(0., "sx_0")]
sy = [DeterministicVariable(0., "sy_0")]
mux = [NormalVariable(mx[0].value, sx[0].value, "mux_0")]
muy = [NormalVariable(my[0].value, sy[0].value, "muy_0")]
Qmux = [NormalVariable(mx[0], sx[0], "mux_0")]
Qmuy = [NormalVariable(my[0], sy[0], "muy_0")]
w = []
r = []
for t in range(T):

    # Reward
    w.append(
        DirichletVariable(np.ones((2, 1)),
                          "w_{}".format(t),
                          is_policy=True,
                          learnable=True))
    r.append(
        NormalVariable(w[t][0] * mux[t] + w[t][1] * muy[t],
                       sigma, "r_{}".format(t)))  # scale and name assumed; the original call is truncated

# ----- Example 10 -----
        n = np.random.choice(range(N))
        short_y = ts_y[n:n+T]
        short_y = sg.detrend(short_y, type='linear')
        short_y = (short_y - np.mean(short_y))/np.sqrt(np.var(short_y))
        noise = 0.3  # 0.5
        noisy_y = short_y + np.random.normal(0, noise, (T,))

        #plt.plot(noisy_y)
        #plt.show()

        # Probabilistic model #
        transform = lambda x: x + 0.5*x**5
        dt = 0.01
        driving_noise = 0.8 #0.5
        measure_noise = noise
        x0 = NormalVariable(0., 1., 'x0')
        y0 = NormalVariable(x0, measure_noise, 'y0')
        x1 = NormalVariable(0., 1., 'x1')
        y1 = NormalVariable(x1, measure_noise, 'y1')
        b = 50
        f = 9.
        omega = NormalVariable(2 * np.pi * f, 1., "omega")

        x = [x0, x1]
        y = [y0, y1]
        x_names = ["x0", "x1"]
        y_names = ["y0", "y1"]
        y_range = [t for t in range(T) if (t < 15 or t > T - 15)]
        for t in range(2, T):
            x_names.append("x{}".format(t))
            new_mu = (-1 - omega**2*dt**2 + b*dt)*x[t - 2] + (2 - b*dt)*x[t - 1]
            x.append(NormalVariable(new_mu, np.sqrt(dt)*driving_noise, x_names[t]))

# ----- Example 11 -----
number_output_classes = 3
dataset_size = 50
dataset = datasets.load_iris()
ind = list(range(dataset["target"].shape[0]))
np.random.shuffle(ind)
input_variable = dataset["data"][ind[:dataset_size], :].astype("float32")
output_labels = dataset["target"][ind[:dataset_size]].astype("int32")

# Data sampling model
minibatch_size = dataset_size
minibatch_indices = RandomIndices(dataset_size=dataset_size, batch_size=minibatch_size, name="indices", is_observed=True)
x = EmpiricalVariable(input_variable, indices=minibatch_indices, name="x", is_observed=True)
labels = EmpiricalVariable(output_labels, indices=minibatch_indices, name="labels", is_observed=True)

# Architecture parameters
weights = NormalVariable(np.zeros((number_output_classes, number_regressors)),
                         10 * np.ones((number_output_classes, number_regressors)), "weights")

# Forward pass
final_activations = BF.matmul(weights, x)
k = CategoricalVariable(softmax_p=final_activations, name="k")

# Probabilistic model
model = ProbabilisticModel([k])

# Observations
k.observe(labels)

# Variational model
num_particles = 1 #10
initial_locations = [np.random.normal(0., 1., (number_output_classes, number_regressors))
                     for _ in range(num_particles)]
# ----- Example 12 -----
number_pixels = 28*28
number_output_classes = 10
train, test = chainer.datasets.get_mnist()
#dataset_size = len(train)
dataset_size = 50
input_variable = np.array([np.reshape(image[0], newshape=(number_pixels, 1)) for image in train][0:dataset_size]).astype("float32")
output_labels = np.array([image[1]*np.ones((1, 1)) for image in train][0:dataset_size]).astype("int32")

# Data sampling model
minibatch_size = 50
minibatch_indices = RandomIndices(dataset_size=dataset_size, batch_size=minibatch_size, name="indices", is_observed=True)
x = EmpiricalVariable(input_variable, indices=minibatch_indices, name="x", is_observed=True)
labels = EmpiricalVariable(output_labels, indices=minibatch_indices, name="labels", is_observed=True)

# Architecture parameters
weights = NormalVariable(np.zeros((number_output_classes, number_pixels)),
                         10*np.ones((number_output_classes, number_pixels)), "weights")

# Forward pass
final_activations = BF.matmul(weights, x)
k = CategoricalVariable(softmax_p=final_activations, name="k")

# Probabilistic model
model = ProbabilisticModel([k])

# Observations
k.observe(labels)

# Variational model
number_particles = 2
initial_location_1 = np.random.normal(0., 1., (number_output_classes, number_pixels))
initial_location_2 = np.random.normal(0., 1., (number_output_classes, number_pixels))
# ----- Example 13 -----
import matplotlib.pyplot as plt
import numpy as np

from brancher.variables import DeterministicVariable, RandomVariable, ProbabilisticModel
from brancher.standard_variables import NormalVariable, LogNormalVariable, BetaVariable
from brancher import inference
import brancher.functions as BF

# Probabilistic model #
T = 100

nu = LogNormalVariable(0.3, 1., 'nu')
x0 = NormalVariable(0., 1., 'x0')
b = BetaVariable(0.5, 1.5, 'b')

x = [x0]
names = ["x0"]
for t in range(1, T):
    names.append("x{}".format(t))
    x.append(NormalVariable(b * x[t - 1], nu, names[t]))
AR_model = ProbabilisticModel(x)

# Generate data #
data = AR_model._get_sample(number_samples=1)
time_series = [float(data[xt].cpu().detach().numpy()) for xt in x]
true_b = data[b].cpu().detach().numpy()
true_nu = data[nu].cpu().detach().numpy()
print("The true coefficient is: {}".format(float(true_b)))

# Observe data #
for xt in x:
    xt.observe(data[xt][:, 0, :])

# ----- Example 14 -----
import brancher.config as cfg
cfg.set_device("cpu")

import matplotlib.pyplot as plt
import numpy as np

from brancher.variables import ProbabilisticModel
from brancher.standard_variables import NormalVariable, LogNormalVariable
from brancher import inference

# Normal model
nu = LogNormalVariable(0., 1., "nu")
mu = NormalVariable(0., 10., "mu")
x = NormalVariable(mu, nu, "x")
model = ProbabilisticModel(
    [x])  # to fix plot_posterior (flatten automatically?)

# # Generate data
nu_real = 1.
mu_real = -2.
data = model.get_sample(number_samples=20,
                        input_values={
                            mu: mu_real,
                            nu: nu_real
                        })

# Observe data
x.observe(data)

# Variational model
Qnu = LogNormalVariable(0., 1., "nu", learnable=True)
# ----- Example 15 -----

    # (assumed reconstruction: the original snippet starts mid-call)
    decoder1 = BF.BrancherFunction(
        DecoderArchitecture1(latent_size1=latent_size1,
                             latent_size2=latent_size2))
    decoder2 = BF.BrancherFunction(
        DecoderArchitecture2(latent_size2=latent_size2,
                             latent_size3=latent_size3))
    decoder3 = BF.BrancherFunction(
        DecoderArchitecture3(latent_size3=latent_size3, image_size=image_size))
    decoderLabel = BF.BrancherFunction(
        DecoderArchitectureLabel(latent_size2=latent_size2,
                                 num_classes=num_classes))

    # # Generative model
    z1sd = 1.5  # 1
    z2sd = 0.25  # 0.25
    z3sd = 0.15
    z1 = NormalVariable(np.zeros((latent_size1, )),
                        z1sd * np.ones((latent_size1, )),
                        name="z1")
    decoder_output1 = DeterministicVariable(decoder1(z1),
                                            name="decoder_output1")
    z2 = NormalVariable(BF.relu(decoder_output1["mean"]),
                        z2sd * np.ones((latent_size2, )),
                        name="z2")
    label_logits = DeterministicVariable(decoderLabel(z2), "label_logits")
    labels = CategoricalVariable(logits=label_logits, name="labels")
    decoder_output2 = DeterministicVariable(decoder2(z2),
                                            name="decoder_output2")
    z3 = NormalVariable(BF.relu(decoder_output2["mean"]),
                        z3sd * np.ones((latent_size3, )),
                        name="z3")
    decoder_output3 = DeterministicVariable(decoder3(z3),
                                            name="decoder_output3")
# ----- Example 16 -----
import matplotlib.pyplot as plt

from brancher.variables import ProbabilisticModel
from brancher.standard_variables import NormalVariable, LogNormalVariable

from brancher.transformations import truncate_model
from brancher.visualizations import plot_density

# Normal model
mu = NormalVariable(0., 1., "mu")
x = NormalVariable(mu, 0.1, "x")
model = ProbabilisticModel([x])

# decision rule
model_statistics = lambda dic: dic[x].data
truncation_rule = lambda a: ((a > 0.5) & (a < 0.6)) | ((a > -0.6) & (a < -0.5))

# Truncated model
truncated_model = truncate_model(model, truncation_rule, model_statistics)
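# truncate_model restricts the model to the region where truncation_rule holds
# for the chosen statistic (here: |x| between 0.5 and 0.6).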

plot_density(truncated_model, variables=["mu", "x"], number_samples=10000)
plt.show()

# ----- Example 17 -----
N_ELBO_smpl = 1000


for cond, label in zip(condition_list, condition_label):
    ELBO1 = []
    ELBO2 = []
    ELBO3 = []
    ELBO4 = []
    for rep in range(N_rep):
        print("Repetition: {}".format(rep))
        # Probabilistic model #
        T = 40
        dt = 0.01
        driving_noise = 0.5
        measure_noise = 0.2
        x0 = NormalVariable(0., driving_noise, 'x0')
        y0 = NormalVariable(x0, measure_noise, 'y0')
        x1 = NormalVariable(0., driving_noise, 'x1')
        y1 = NormalVariable(x1, measure_noise, 'y1')
        b = 20
        omega = 2*np.pi*8

        x = [x0, x1]
        y = [y0, y1]
        x_names = ["x0", "x1"]
        y_names = ["y0", "y1"]
        y_range = [t for t in range(T) if cond(t)]
        for t in range(2, T):
            x_names.append("x{}".format(t))
            new_mu = (-1 - omega**2*dt**2 + b*dt)*x[t - 2] + (2 - b*dt)*x[t - 1]
            x.append(NormalVariable(new_mu, np.sqrt(dt)*driving_noise, x_names[t]))
# ----- Example 18 -----

# Probabilistic model
minibatch_size = 30
minibatch_indices = RandomIndices(dataset_size=dataset_size,
                                  batch_size=minibatch_size,
                                  name="indices",
                                  is_observed=True)
x = EmpiricalVariable(input_variable,
                      indices=minibatch_indices,
                      name="x",
                      is_observed=True)
labels = EmpiricalVariable(output_labels,
                           indices=minibatch_indices,
                           name="labels",
                           is_observed=True)
weights = NormalVariable(np.zeros((1, number_regressors)), 0.5 * np.ones(
    (1, number_regressors)), "weights")
logit_p = BF.matmul(weights, x)
k = BinomialVariable(1, logit_p=logit_p, name="k")
model = ProbabilisticModel([k])

#samples = model._get_sample(300)
#model.calculate_log_probability(samples)

# Observations
k.observe(labels)

#observed_model = inference.get_observed_model(model)
#observed_samples = observed_model._get_sample(number_samples=1, observed=True)

# Variational Model
Qweights = NormalVariable(np.zeros((1, number_regressors)),
                          0.5 * np.ones((1, number_regressors)),
                          "weights", learnable=True)  # scale assumed; the original call is truncated

# ----- Example 19 -----
    h_size = 120
    W1 = DeterministicVariable(np.random.normal(0., 0.2, (h_size, h_size)),
                               "W1",
                               learnable=False)
    W2 = DeterministicVariable(np.random.normal(0., 0.2, (h_size, h_size)),
                               "W2",
                               learnable=False)
    #V = DeterministicVariable(np.random.normal(0., 1., (100, h_size)), "V", learnable=False)

    f = lambda z, W: z + BF.tanh(BF.matmul(W, z))
    F = lambda z, W1, W2: f(f(z, W1), W2)

    measurement_noise = 2.  #1.5
    z = [
        NormalVariable(np.zeros((h_size, 1)),
                       np.ones((h_size, 1)),
                       "z0",
                       learnable=False)
    ]
    img = [
        DeterministicVariable(decoder(BF.reshape(z[0], (h_size, 1, 1))),
                              "img0",
                              learnable=False)
    ]
    x = [
        NormalVariable(img[0],
                       measurement_noise * np.ones(
                           (3, image_size, image_size)),
                       "x0",
                       learnable=False)
    ]
# ----- Example 20 -----
import matplotlib.pyplot as plt
import numpy as np
import torch

from brancher.variables import ProbabilisticModel
from brancher.standard_variables import NormalVariable, DeterministicVariable, LogNormalVariable
import brancher.functions as BF
from brancher.visualizations import plot_density
from brancher.transformations import PlanarFlow
from brancher import inference
from brancher.visualizations import plot_posterior

# Model
M = 8
y = NormalVariable(torch.zeros((M, )), 1. * torch.ones((M, )), "y")
y0 = DeterministicVariable(y[1], "y0")
d = NormalVariable(y, torch.ones((M, )), "d")
model = ProbabilisticModel([d, y, y0])

# get samples
d.observe(d.get_sample(55, input_values={y: 1. * torch.ones((M, ))}))

# Variational distribution
u1 = DeterministicVariable(torch.normal(0., 1., (M, 1)), "u1", learnable=True)
w1 = DeterministicVariable(torch.normal(0., 1., (M, 1)), "w1", learnable=True)
b1 = DeterministicVariable(torch.normal(0., 1., (1, 1)), "b1", learnable=True)
u2 = DeterministicVariable(torch.normal(0., 1., (M, 1)), "u2", learnable=True)
w2 = DeterministicVariable(torch.normal(0., 1., (M, 1)), "w2", learnable=True)
b2 = DeterministicVariable(torch.normal(0., 1., (1, 1)), "b2", learnable=True)
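# u, w and b parameterize two planar-flow layers of the standard form
# f(z) = z + u * tanh(w^T z + b), applied to enrich the Gaussian posterior.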
z = NormalVariable(torch.zeros((M, 1)),
                   torch.ones((M, 1)),
                   "z")  # name assumed; the original call is truncated here

# ----- Example 21 -----
import matplotlib.pyplot as plt

from brancher.variables import ProbabilisticModel
from brancher.standard_variables import BernulliVariable, NormalVariable
import brancher.functions as BF
from brancher import inference
from brancher.inference import ReverseKL
from brancher.gradient_estimators import BlackBoxEstimator, Taylor1Estimator

#Model
z1 = BernulliVariable(logits=0., name="z1")
z2 = BernulliVariable(logits=0., name="z2")
y = NormalVariable(2 * z1 + z2, 1., name="y")
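# z1 and z2 are discrete, so reparameterization gradients are unavailable; the
# imported estimators (BlackBox, Taylor1) handle the ELBO gradient instead.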
model = ProbabilisticModel([y])

#Generate data
data = y.get_sample(20, input_values={z1: 1, z2: 0})
data.hist(bins=20)
plt.show()

#Observe data
y.observe(data)

#Variational Model
Qz1 = BernulliVariable(logits=0., name="z1", learnable=True)
Qz2 = BernulliVariable(logits=0., name="z2", learnable=True)
variational_model = ProbabilisticModel([Qz1, Qz2])
model.set_posterior_model(variational_model)

# Joint-contrastive inference (completion, assuming the imported ReverseKL
# method with the Taylor1 gradient estimator; the original call is truncated)
inference.perform_inference(model,
                            inference_method=ReverseKL(
                                gradient_estimator=Taylor1Estimator),
                            number_iterations=1000)

# ----- Example 22 -----
import chainer
import matplotlib.pyplot as plt
import numpy as np

from brancher.variables import ProbabilisticModel
from brancher.standard_variables import ConcreteVariable, NormalVariable
from brancher import inference
import brancher.functions as BF

# Probabilistic Model
x = ConcreteVariable(tau=0.1, p=np.ones((2, 1))/2., name="x")
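# Concrete (Gumbel-softmax) relaxation of a one-hot mixture indicator: a small
# tau makes the samples of x close to discrete one-hot vectors.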
mu0 = -2
nu0 = 0.5
mu1 = 2
nu1 = 0.2
y = NormalVariable(x[0]*mu0 + x[1]*mu1, x[0]*nu0 + x[1]*nu1, "y")

samples = y._get_sample(1000)
plt.hist(samples[y].data.flatten(), 60)
print(y.calculate_log_probability(samples))
plt.title("Concrete mixture of Gaussians")
plt.show()
# ----- Example 23 -----
import chainer
import matplotlib.pyplot as plt
import numpy as np

from brancher.variables import DeterministicVariable, RandomVariable, ProbabilisticModel
from brancher.standard_variables import NormalVariable, LogNormalVariable, LogitNormalVariable
from brancher import inference
import brancher.functions as BF

# Probabilistic model #
T = 20
driving_noise = 1.
measure_noise = 0.5
x0 = NormalVariable(0., driving_noise, 'x0')
y0 = NormalVariable(x0, measure_noise, 'y0')
b = LogitNormalVariable(0.5, 1., 'b')

x = [x0]
y = [y0]
x_names = ["x0"]
y_names = ["y0"]
for t in range(1, T):
    x_names.append("x{}".format(t))
    y_names.append("y{}".format(t))
    x.append(NormalVariable(b * x[t - 1], driving_noise, x_names[t]))
    y.append(NormalVariable(x[t], measure_noise, y_names[t]))
AR_model = ProbabilisticModel(x + y)

# Generate data #
data = AR_model._get_sample(number_samples=1)
time_series = [float(data[yt].data) for yt in y]
# ----- Example 24 -----
import numpy as np
import matplotlib.pyplot as plt
import chainer

from brancher.variables import RootVariable, ProbabilisticModel
from brancher.particle_inference_tools import VoronoiSet
from brancher.standard_variables import EmpiricalVariable, NormalVariable, LogNormalVariable
from brancher import inference
from brancher.inference import WassersteinVariationalGradientDescent as WVGD
from brancher.visualizations import ensemble_histogram
from brancher.pandas_interface import reformat_sample_to_pandas

# Model
dimensionality = 1
theta = NormalVariable(loc=0., scale=2., name="theta")
x = NormalVariable(theta**2, scale=0.2, name="x")
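# The likelihood depends on theta only through theta**2, so the posterior over
# theta is bimodal; the WVGD particles below can capture one mode each.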
model = ProbabilisticModel([x, theta])

# Generate data
N = 3
theta_real = 0.1
x_real = NormalVariable(theta_real**2, 0.2, "x")
data = x_real._get_sample(number_samples=N)

# Observe data
x.observe(data[x_real][:, 0, :])

# Variational model
num_particles = 2
initial_locations = [-2, 2]
# ----- Example 25 -----
    Lk4 = []
    MSE1 = []
    MSE2 = []
    MSE3 = []
    MSE4 = []
    for rep in range(N_rep):
        print("Repetition: {}".format(rep))
        # Probabilistic model #
        T = 30  #30
        dt = 0.02
        driving_noise = 0.5
        measure_noise = 2.  #1.
        s = 10.
        r = 28.
        b = 8 / 3.
        x0 = NormalVariable(0., driving_noise, 'x0')
        h0 = NormalVariable(0., driving_noise, 'h0')
        z0 = NormalVariable(0., driving_noise, 'z0')

        x = [x0]
        h = [h0]
        z = [z0]
        y = []
        x_names = ["x0"]
        h_names = ["h0"]
        z_names = ["z0"]
        y_names = ["y0"]
        y_range = [t for t in range(T) if cond(t)]
        if 0 in y_range:
            y0 = NormalVariable(x0, measure_noise, 'y0')
        for t in range(1, T):
            x_names.append("x{}".format(t))
            h_names.append("h{}".format(t))
            z_names.append("z{}".format(t))
            # Completion (assumed): Euler-Maruyama updates of the Lorenz
            # system; the original loop body is truncated here.
            x.append(NormalVariable(x[t - 1] + dt * s * (h[t - 1] - x[t - 1]),
                                    np.sqrt(dt) * driving_noise, x_names[t]))
            h.append(NormalVariable(h[t - 1] + dt * (x[t - 1] * (r - z[t - 1]) - h[t - 1]),
                                    np.sqrt(dt) * driving_noise, h_names[t]))
            z.append(NormalVariable(z[t - 1] + dt * (x[t - 1] * h[t - 1] - b * z[t - 1]),
                                    np.sqrt(dt) * driving_noise, z_names[t]))
            if t in y_range:
                y_names.append("y{}".format(t))
                y.append(NormalVariable(x[t], measure_noise, y_names[-1]))

# ----- Example 26 -----
x_max = 1.
n = 100
x_range = np.linspace(-x_max, x_max, n)
x1 = DeterministicVariable(np.sin(2 * np.pi * 2 * x_range),
                           name="x1",
                           is_observed=True)
x2 = DeterministicVariable(x_range, name="x2", is_observed=True)

# Multivariate Regression
b = Norm(0., 1., name="b")
w1 = Norm(0., 1., name="w1")
w2 = Norm(0., 1., name="w2")
w12 = Norm(0., 1., name="w12")
nu = LogNorm(0.2, 0.5, name="nu")
mean = b + w1 * x1 + w2 * x2 + w12 * x1 * x2
y = Norm(mean, nu, name="y")
model = ProbabilisticModel([y])

# Variational distributions
Qb = Norm(0., 1., name="b", learnable=True)
Qw1 = Norm(0., 1., name="w1", learnable=True)
Qw2 = Norm(0., 1., name="w2", learnable=True)
Qw12 = Norm(0., 1., name="w12", learnable=True)
Qnu = LogNorm(0.2, 0.5, name="nu", learnable=True)
variational_model = ProbabilisticModel([Qb, Qw1, Qw2, Qw12, Qnu])
model.set_posterior_model(variational_model)

# Generate data
ground_samples = model._get_sample(1)

# Observe data (completion assumed; the original snippet is truncated here)
y.observe(ground_samples[y])

# ----- Example 27 -----
        minibatch_indices = RandomIndices(dataset_size=dataset_size,
                                          batch_size=minibatch_size,
                                          name="indices",
                                          is_observed=True)
        x = EmpiricalVariable(input_variable,
                              indices=minibatch_indices,
                              name="x",
                              is_observed=True)
        labels = EmpiricalVariable(output_labels,
                                   indices=minibatch_indices,
                                   name="labels",
                                   is_observed=True)

        # Architecture parameters
        weights1 = NormalVariable(
            np.zeros((number_hidden_nodes, number_regressors)), 10 * np.ones(
                (number_hidden_nodes, number_regressors)), "weights1")
        weights2 = NormalVariable(
            np.zeros((number_output_classes, number_hidden_nodes)),
            10 * np.ones(
                (number_output_classes, number_hidden_nodes)), "weights2")

        # Forward pass
        final_activations = BF.matmul(weights2, BF.tanh(BF.matmul(weights1,
                                                                  x)))
        k = CategoricalVariable(softmax_p=final_activations, name="k")

        # Probabilistic model
        model = ProbabilisticModel([k])

        # Observations
        k.observe(labels)

# ----- Example 28 -----
import numpy as np

from brancher import functions as BF

from brancher.standard_variables import NormalVariable as Normal
from brancher.standard_variables import DeterministicVariable

in_channels = 4
out_channels = 5
a = Normal(loc=np.zeros((in_channels, 28, 28)),
           scale=1.,
           name="a")
W = Normal(loc=np.zeros((out_channels, in_channels, 3, 3)),
           scale=np.ones((out_channels, in_channels, 3, 3)),
           name="W")
y = Normal(BF.conv2d(a, W), 0.1, name="y")
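# BF.conv2d convolves a sampled input with a sampled kernel; with 3x3 kernels
# and no padding, each of the 9 samples of y should have shape (5, 26, 26).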

samples = y.get_sample(9)["y"]
print(samples[0].shape)
print(len(samples))
# ----- Example 29 -----
import numpy as np
import chainer
import chainer.links as L
import chainer.functions as F

#from brancher.links import brancher_decorator
from brancher.variables import DeterministicVariable, RandomVariable, ProbabilisticModel
from brancher.standard_variables import NormalVariable
from brancher.functions import BrancherFunction
import brancher.functions as BF
#import brancher.links as BL

a = DeterministicVariable(1.5, 'a')
b = DeterministicVariable(0.3, 'b')
c = DeterministicVariable(0.3, 'c')
d = NormalVariable((a * b + c), c + a**2, 'd')
e1 = BF.concat((a, b), 2)
e2 = BF.concat((a, c), 2)
f = NormalVariable(e1**2, e2**1, 'f')

f._get_sample(10)

a_val = chainer.Variable(0.25 * np.pi * np.ones((1, 1), dtype="float32"))
b_val = chainer.Variable(0.25 * np.pi * np.ones((1, 1), dtype="float32"))
c_val = chainer.Variable(2 * np.ones((1, 1), dtype="float32"))

#z = BF.sin(a + b)/c

#print(z.fn({a: a_val, b: b_val, c: c_val}))

BLink = BrancherFunction(L.Linear(1, 10))
# ----- Example 30 -----
import matplotlib.pyplot as plt
import numpy as np

from brancher.variables import DeterministicVariable, RandomVariable, ProbabilisticModel
from brancher.standard_variables import NormalVariable, LogNormalVariable, BetaVariable
from brancher import inference
import brancher.functions as BF

# Probabilistic model
T = 2.
N = 100
dt = T/float(N)
time_range = np.linspace(0., T, N)
a = BetaVariable(1., 1., name="a")
b = BetaVariable(1., 1., name="b")
c = NormalVariable(0., 0.1, name="c")
d = NormalVariable(0., 0.1, name="d")
xi = LogNormalVariable(0.1, 0.1, name="xi")
chi = LogNormalVariable(0.1, 0.1, name="chi")
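# Coupled linear SDEs discretized with the Euler scheme below; the sqrt(dt)
# factor gives the correct scaling of the driving noise.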
x_series = [NormalVariable(0., 1., name="x_0")]
y_series = [NormalVariable(0., 1., name="y_0")]
for n, t in enumerate(time_range):
    x_new_mean = (1-dt*a)*x_series[-1] + dt*c*y_series[-1]
    y_new_mean = (1-dt*b)*y_series[-1] + dt*d*x_series[-1]
    x_series += [NormalVariable(x_new_mean, np.sqrt(dt)*xi, name="x_{}".format(n+1))]
    y_series += [NormalVariable(y_new_mean, np.sqrt(dt)*chi, name="y_{}".format(n+1))]
dynamic_causal_model = ProbabilisticModel([x_series[-1], y_series[-1]])

# Run dynamics
sample = dynamic_causal_model.get_sample(number_samples=3)