Example #1
 def construct_deterministic_parents(self, learnable, ranges, kwargs):
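     # Wrap plain numeric/array arguments into DeterministicVariable parents: the stored value
     # is the range's inverse_transform of the input, and the kwarg is then replaced by the
     # forward_transform of that parent, so downstream code receives a variable-like object.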
     for parameter_name, value in kwargs.items():
         if not isinstance(value, (Variable, PartialLink)):
             if isinstance(value, np.ndarray):
                 dim = value.shape[0]  # TODO: This is probably not general enough
             elif isinstance(value, numbers.Number):
                 dim = 1
             else:
                 dim = []  # TODO: You should consider the other possible cases individually
             deterministic_parent = DeterministicVariable(
                 ranges[parameter_name].inverse_transform(value, dim),
                 self.name + "_" + parameter_name,
                 learnable,
                 is_observed=self._observed)
             kwargs.update({
                 parameter_name:
                 ranges[parameter_name].forward_transform(
                     deterministic_parent, dim)
             })
Example #2
#               link=lambda values: {'mu': F.sin(values[a]*values[b] + values[c]), 'sigma': values[c] + values[a]})
import numpy as np

#import chainer.functions as F
import chainer
import chainer.links as L
import chainer.functions as F

#from brancher.links import brancher_decorator
from brancher.variables import DeterministicVariable, RandomVariable, ProbabilisticModel
from brancher.standard_variables import NormalVariable
from brancher.functions import BrancherFunction
import brancher.functions as BF
#import brancher.links as BL

a = DeterministicVariable(1.5, 'a')
b = DeterministicVariable(0.3, 'b')
c = DeterministicVariable(0.3, 'c')
d = NormalVariable((a * b + c), c + a**2, 'd')
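# Concatenate variables along axis 2 to build vector-valued distribution parameters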
e1 = BF.concat((a, b), 2)
e2 = BF.concat((a, c), 2)
f = NormalVariable(e1**2, e2**1, 'f')

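# Draw 10 joint samples from f (its parent variables are sampled as needed)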
f._get_sample(10)

a_val = chainer.Variable(0.25 * np.pi * np.ones((1, 1), dtype="float32"))
b_val = chainer.Variable(0.25 * np.pi * np.ones((1, 1), dtype="float32"))
c_val = chainer.Variable(2 * np.ones((1, 1), dtype="float32"))

#z = BF.sin(a + b)/c
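
A minimal sketch of how the commented-out expression above could feed a new variable: wrapped functions from brancher.functions compose with variables just like the arithmetic used for d and f (assuming BF.sin is available here, as the comment suggests; the 0.1 scale is purely illustrative).

z = NormalVariable(BF.sin(a + b) / c, 0.1, 'z')  # mean built from a BF function applied to variables
z._get_sample(10)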
Example #3
#import chainer.functions as F
import chainer
import chainer.links as L
import chainer.functions as F

import torch

#from brancher.links import brancher_decorator
from brancher.variables import DeterministicVariable, RandomVariable, ProbabilisticModel
from brancher.standard_variables import NormalVariable
from brancher.functions import BrancherFunction
import brancher.functions as BF
#import brancher.links as BL

##
a = DeterministicVariable(data=1.5, name='a', learnable=True)
b = DeterministicVariable(0.3, 'b')
c = DeterministicVariable(0.3, 'c')
d = NormalVariable((a * b + c), c + a**2, 'd')

##
print(a._get_sample(10))

##
e1 = BF.cat((a, b), 2)  # TODO: to change later, so that user does not have to specify dim explicitly (adjust cat)
e2 = BF.cat((a, c), 2)
f = NormalVariable(e1**2, e2**1, 'f')
g = NormalVariable(BF.relu(f), 1., 'g')
Example #4
    y.append(NormalVariable(x[t], measure_noise, y_names[t]))
AR_model = ProbabilisticModel(x + y)

# Generate data #
data = AR_model._get_sample(number_samples=1)
time_series = [float(data[yt].data) for yt in y]
ground_truth = [float(data[xt].data) for xt in x]
true_b = data[b].data
print("The true coefficient is: {}".format(float(true_b)))

# Observe data #
[yt.observe(data[yt][:, 0, :]) for yt in y]

# Autoregressive variational distribution #
Qb = LogitNormalVariable(0.5, 0.5, "b", learnable=True)
logit_b_post = DeterministicVariable(0., 'logit_b_post', learnable=True)
Qx = [NormalVariable(0., 1., 'x0', learnable=True)]
Qx_mean = [DeterministicVariable(0., 'x0_mean', learnable=True)]
for t in range(1, T):
    Qx_mean.append(
        DeterministicVariable(0., x_names[t] + "_mean", learnable=True))
    Qx.append(
        NormalVariable(logit_b_post * Qx[t - 1] + Qx_mean[t],
                       1.,
                       x_names[t],
                       learnable=True))
variational_posterior = ProbabilisticModel([Qb] + Qx)
AR_model.set_posterior_model(variational_posterior)

# Inference #
inference.stochastic_variational_inference(
Example #5
# Forward pass
final_activations = BF.matmul(weights, x)
k = CategoricalVariable(softmax_p=final_activations, name="k")

# Probabilistic model
model = ProbabilisticModel([k])

# Observations
k.observe(labels)

# Variational model
num_particles = 1 #10
initial_locations = [np.random.normal(0., 1., (number_output_classes, number_regressors))
                     for _ in range(num_particles)]
particles = [ProbabilisticModel([DeterministicVariable(location, name="weights", learnable=True)])
             for location in initial_locations]

# Importance sampling distributions
variational_samplers = [ProbabilisticModel([NormalVariable(loc=location, scale=0.1,
                                                           name="weights", learnable=True)])
                        for location in initial_locations]

# Inference
inference_method = WVGD(variational_samplers=variational_samplers,
                        particles=particles,
                        biased=False)
inference.perform_inference(model,
                            inference_method=inference_method,
                            number_iterations=1000,
                            number_samples=100,
Example #6
import matplotlib.pyplot as plt
import numpy as np

from brancher.variables import DeterministicVariable, ProbabilisticModel
from brancher.standard_variables import NormalVariable as Norm
from brancher.standard_variables import LogNormalVariable as LogNorm
from brancher import inference
import brancher.functions as BF

# Regressors
x_max = 1.
n = 100
x_range = np.linspace(-x_max, x_max, n)
x1 = DeterministicVariable(np.sin(2 * np.pi * 2 * x_range),
                           name="x1",
                           is_observed=True)
x2 = DeterministicVariable(x_range, name="x2", is_observed=True)

# Multivariate Regression
b = Norm(0., 1., name="b")
w1 = Norm(0., 1., name="w1")
w2 = Norm(0., 1., name="w2")
w12 = Norm(0., 1., name="w12")
nu = LogNorm(0.2, 0.5, name="nu")
mean = b + w1 * x1 + w2 * x2 + w12 * x1 * x2
y = Norm(mean, nu, name="y")
model = ProbabilisticModel([y])

# Variational distributions
Qb = Norm(0., 1., name="b", learnable=True)
Qw1 = Norm(0., 1., name="w1", learnable=True)
Example #7
import numpy as np

import chainer.links as L

from brancher.variables import DeterministicVariable
from brancher.standard_variables import NormalVariable
from brancher.inference import maximal_likelihood
import brancher.functions as BF
from brancher.functions import BrancherFunction as bf

# Parameters
number_regressors = 1
number_observations = 15
real_weights = np.random.normal(0, 1, (number_regressors, 1))
real_sigma = 0.6
input_variable = np.random.normal(0, 1,
                                  (number_observations, number_regressors))

# ProbabilisticModel
regression_link = bf(L.Linear(number_regressors, 1))
x = DeterministicVariable(input_variable, "x", is_observed=True)
sigma = DeterministicVariable(0.1, "sigma", learnable=True)
y = NormalVariable(regression_link(x), BF.exp(sigma), "y")

# Observations
data = (np.matmul(x.value.data, real_weights) +
        np.random.normal(0, real_sigma, (number_observations, 1)))
y.observe(data)
print(y)

# Maximal Likelihood
loss_list = maximal_likelihood(y, number_iterations=1000)

a_range = np.linspace(-2, 2, 40)
model_prediction = []
for a in a_range:
Example #8
# Forward pass
final_activations = BF.matmul(weights, x)
k = CategoricalVariable(softmax_p=final_activations, name="k")

# Probabilistic model
model = ProbabilisticModel([k])

# Observations
k.observe(labels)

# Variational model
number_particles = 2
initial_location_1 = np.random.normal(0., 1., (number_output_classes, number_pixels))
initial_location_2 = np.random.normal(0., 1., (number_output_classes, number_pixels))
particle_1 = DeterministicVariable(initial_location_1, name="weights", learnable=True)
particle_2 = DeterministicVariable(initial_location_2, name="weights", learnable=True)
particle_locations = [particle_1, particle_2]
particles = [ProbabilisticModel([l]) for l in particle_locations]

# Importance sampling distributions
voronoi_set = VoronoiSet(particle_locations)  # TODO: Bug if you use variables instead of probabilistic models
variational_samplers = [ProbabilisticModel([TruncatedNormalVariable(mu=initial_location_1, sigma=0.1,
                                                truncation_rule=lambda a: voronoi_set(a, 0),
                                                name="weights", learnable=True)]),
                        ProbabilisticModel([TruncatedNormalVariable(mu=initial_location_2, sigma=0.1,
                                                truncation_rule=lambda a: voronoi_set(a, 1),
                                                name="weights", learnable=True)])]

# Inference
inference.perform_inference(model,
Example #9
from brancher import inference
import brancher.functions as BF

# Data
number_regressors = 2
number_samples = 50
x1_input_variable = np.random.normal(1.5, 1.5, (int(number_samples/2), number_regressors, 1))
x1_labels = 0*np.ones((int(number_samples/2), 1))
x2_input_variable = np.random.normal(-1.5, 1.5, (int(number_samples/2), number_regressors, 1))
x2_labels = 1*np.ones((int(number_samples/2),1))
input_variable = np.concatenate((x1_input_variable, x2_input_variable), axis=0)
labels = np.concatenate((x1_labels, x2_labels), axis=0)

# Probabilistic model
weights = NormalVariable(np.zeros((1, number_regressors)), 0.5*np.ones((1, number_regressors)), "weights")
x = DeterministicVariable(input_variable, "x", is_observed=True)
logit_p = BF.matmul(weights, x)
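# A single-trial BinomialVariable is a Bernoulli label variable parameterized by the logits above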
k = BinomialVariable(1, logit_p=logit_p, name="k")
model = ProbabilisticModel([k])

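# Draw 300 samples from the prior predictive model (the labels are observed only below)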
samples = model._get_sample(300)

# Observations
k.observe(labels)

# Variational Model
#Qweights = NormalVariable(np.zeros((1, number_regressors)),
#                          np.ones((1, number_regressors)), "weights", learnable=True)
Qweights = MultivariateNormalVariable(loc=np.zeros((1, number_regressors)),
                                      covariance_matrix=np.identity(number_regressors),
                                      name="weights", learnable=True)
Example #10
        k.observe(labels)

        # Variational model
        num_particles = N
        initial_locations1 = [
            np.random.normal(0., 1., (number_hidden_nodes, number_regressors))
            for _ in range(num_particles)
        ]
        initial_locations2 = [
            np.random.normal(0., 1.,
                             (number_output_classes, number_hidden_nodes))
            for _ in range(num_particles)
        ]
        particles = [
            ProbabilisticModel([
                DeterministicVariable(loc1, name="weights1", learnable=True),
                DeterministicVariable(loc2, name="weights2", learnable=True)
            ]) for loc1, loc2 in zip(initial_locations1, initial_locations2)
        ]

        # Importance sampling distributions
        variational_samplers = [
            ProbabilisticModel([
                NormalVariable(loc=loc1,
                               scale=0.1,
                               name="weights1",
                               learnable=True),
                NormalVariable(loc=loc2,
                               scale=0.1,
                               name="weights2",
                               learnable=True)
Example #11
# Generate data
N = 4
theta_real = 0.5
x_real = NormalVariable(theta_real, 0.4, "x")
data = x_real._get_sample(number_samples=N)

# Observe data
x.observe(data[x_real][:, 0, :])

# Variational model
num_particles = 6
initial_locations = [np.random.normal(0., 1.) for _ in range(num_particles)]
particles = [
    ProbabilisticModel(
        [DeterministicVariable(p, name="theta", learnable=True)])
    for p in initial_locations
]

# Importance sampling distributions
variational_samplers = [
    ProbabilisticModel(
        [NormalVariable(mu=location, sigma=0.1, name="theta", learnable=True)])
    for location in initial_locations
]

# Inference
inference_method = WVGD(variational_samplers=variational_samplers,
                        particles=particles,
                        biased=False,
                        number_post_samples=20000)
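
The snippet stops at constructing the WVGD method; a hedged sketch of how it would typically be handed to the inference driver, mirroring the perform_inference pattern used in the other examples (the model variable, the inference import, and the numeric settings are illustrative assumptions, not part of this snippet).

inference.perform_inference(model,
                            inference_method=inference_method,
                            number_iterations=1000,
                            number_samples=50,
                            optimizer="SGD",
                            lr=0.001)
loss_list = model.diagnostics["loss curve"]  # per-iteration training loss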
Example #12
import numpy as np

from brancher.stochastic_processes import GaussianProcess as GP
from brancher.stochastic_processes import SquaredExponentialCovariance as SquaredExponential
from brancher.stochastic_processes import WhiteNoiseCovariance as WhiteNoise
from brancher.stochastic_processes import HarmonicCovariance as Harmonic
from brancher.stochastic_processes import ConstantMean
from brancher.variables import DeterministicVariable, ProbabilisticModel
from brancher.standard_variables import NormalVariable as Normal
from brancher.standard_variables import LogNormalVariable as LogNormal
from brancher.inference import WassersteinVariationalGradientDescent as WVGD
from brancher import inference
from brancher.visualizations import plot_particles, plot_density
import brancher.functions as BF

num_datapoints = 50
x_range = np.linspace(0, 2, num_datapoints)
x = DeterministicVariable(x_range, name="x")

# Model
length_scale = LogNormal(0., 0.3, name="length_scale")
noise_var = LogNormal(0., 0.3, name="noise_var")
amplitude = LogNormal(0., 0.3, name="amplitude")
mu = ConstantMean(0.5)
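# Covariance functions can be scaled by random variables and summed, like kernels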
cov = amplitude*SquaredExponential(scale=length_scale) + WhiteNoise(magnitude=noise_var)
f = GP(mu, cov, name="f")
y = f(x)
model = ProbabilisticModel([y])

# Observe data
noise_level = 0.3
f0 = 2.5
df = 0.
Example #13
# Forward pass
final_activations = BF.matmul(weights, x)
k = CategoricalVariable(softmax_p=final_activations, name="k")

# Probabilistic model
model = ProbabilisticModel([k])

# Observations
k.observe(labels)

# Variational model
initial_weights = np.random.normal(0., 1.,
                                   (number_output_classes, number_regressors))
model.set_posterior_model(
    ProbabilisticModel([
        DeterministicVariable(initial_weights, name="weights", learnable=True)
    ]))

# Inference
inference.perform_inference(model,
                            inference_method=MAP(),
                            number_iterations=3000,
                            number_samples=100,
                            optimizer="SGD",
                            lr=0.0025)
loss_list = model.diagnostics["loss curve"]
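plt.plot(loss_list)  # plot the training loss curve (assumed intent of the plt.show() below)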
plt.show()

# Test accuracy
test_size = len(ind[dataset_size:])
num_images = test_size * 3
Example #14
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

from brancher.variables import ProbabilisticModel

from brancher.stochastic_processes import GaussianProcess as GP
from brancher.stochastic_processes import SquaredExponentialCovariance as SquaredExponential
from brancher.stochastic_processes import ConstantMean
from brancher.variables import DeterministicVariable
from brancher.standard_variables import NormalVariable as Normal
from brancher import inference

num_datapoints = 20
x_range = np.linspace(-2, 2, num_datapoints)
x = DeterministicVariable(x_range, name="x")

# Model
mu = ConstantMean(0.)
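# jitter adds a small diagonal term to the covariance for numerical stability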
cov = SquaredExponential(scale=0.2, jitter=10**-4)
f = GP(mu, cov, name="f")
y = Normal(f(x), 0.2, name="y")
model = ProbabilisticModel([y])

# Observe data
noise_level = 0.2
data = np.sin(2 * np.pi * 0.4 * x_range) + noise_level * np.random.normal(
    0., 1., (1, num_datapoints))
y.observe(data)

#Variational Model
Example #15
        # Probabilistic model
        model = ProbabilisticModel([k])

        # Observations
        k.observe(labels)

        # Variational model
        num_particles = N
        initial_locations = [
            np.random.normal(0., 1.,
                             (number_output_classes, number_regressors))
            for _ in range(num_particles)
        ]
        particles = [
            ProbabilisticModel([
                DeterministicVariable(location, name="weights", learnable=True)
            ]) for location in initial_locations
        ]

        # Importance sampling distributions
        variational_samplers = [
            ProbabilisticModel([
                NormalVariable(loc=location,
                               scale=0.1,
                               name="weights",
                               learnable=True)
            ]) for location in initial_locations
        ]

        # Inference
        inference_method = WVGD(variational_samplers=variational_samplers,