Example #1
    def construct_biases(self, learnable, ranges, kwargs):
        """
        Method. Constructs a bias variable for each variable in the input. Bias variables are deterministic variables
        that transform the input variables.

        Args:
            learnable: Bool. Set true if the biases should be learnable.

            ranges: Dictionary(str: brancher.GeometricRange). Dictionary of variable names and the ranges that apply to
            those variables.

            kwargs: Named variable list that defines the input variables of this variable. A bias variable will be
            created for each of these variables.

        Returns:
            None.
        """
        for parameter_name, value in kwargs.items():
            if isinstance(value, (Variable, PartialLink, np.ndarray, numbers.Number)):
                if isinstance(value, np.ndarray):
                    dim = value.shape[0]
                elif isinstance(value, numbers.Number):
                    dim = 1
                else:
                    dim = []
                bias = RootVariable(0.,
                                    self.name + "_" + parameter_name + "_" + "bias",
                                    learnable, is_observed=self._observed)
                mixing = RootVariable(5.,
                                      self.name + "_" + parameter_name + "_" + "mixing",
                                      learnable, is_observed=self._observed)
                kwargs.update({parameter_name: ranges[parameter_name].forward_transform(
                    BF.sigmoid(mixing)*value + (1 - BF.sigmoid(mixing))*bias, dim)})
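
A minimal, self-contained sketch (an assumption, not the brancher API) of the gating transform that construct_biases applies to each input parameter: a learnable mixing coefficient is squashed through a sigmoid and used to interpolate between the original value and a learnable bias, and the result is then mapped through the variable's range transform. The identity forward_transform below is a hypothetical stand-in for ranges[parameter_name].forward_transform.

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def biased_value(value, bias, mixing, forward_transform):
    # Sigmoid-gated convex combination of the original value and the bias,
    # mirroring the kwargs.update(...) line in construct_biases above.
    gate = sigmoid(mixing)
    return forward_transform(gate * value + (1.0 - gate) * bias)

# With mixing = 5 the gate is ~0.99, so the output stays close to the input value.
print(biased_value(np.array([0.3, -1.2]), bias=0.0, mixing=5.0,
                   forward_transform=lambda v: v))
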
Example #2
 def construct_biases(self, learnable, ranges, kwargs):
     for parameter_name, value in kwargs.items():
         if isinstance(value, (Variable, PartialLink)):
             if isinstance(value, np.ndarray):
                 dim = value.shape[0]
             elif isinstance(value, numbers.Number):
                 dim = 1
             else:
                 dim = []
             bias = RootVariable(0.,
                                 self.name + "_" + parameter_name + "_" +
                                 "bias",
                                 learnable,
                                 is_observed=self._observed)
             mixing = RootVariable(5.,
                                   self.name + "_" + parameter_name + "_" +
                                   "mixing",
                                   learnable,
                                   is_observed=self._observed)
             kwargs.update({
                 parameter_name:
                 ranges[parameter_name].forward_transform(
                     BF.sigmoid(mixing) * value +
                     (1 - BF.sigmoid(mixing)) * bias, dim)
             })
Example #3
    def construct_deterministic_parents(self, learnable, ranges, kwargs):
        """
        Method. Constructs the deterministic variables for input variables that are numeric or numpy arrays. If a
        variable is an instance of brancher.Variable or brancher.PartialLink, it is assumed to already have its
        deterministic variables initialized.

        Args:
            learnable: Bool. Set true if the root variables should be learnable.

            ranges: Dictionary(str: brancher.GeometricRange). Dictionary of variable names and the ranges that apply to
            those variables.

            kwargs: Named variable list that defines the input variables of this variable.

        Returns:
            None.
        """
        for parameter_name, value in kwargs.items():
            if not isinstance(value, (Variable, PartialLink)):
                if isinstance(value, np.ndarray):
                    dim = value.shape[0]
                elif isinstance(value, numbers.Number):
                    dim = 1
                else:
                    dim = []
                deterministic_parent = RootVariable(
                    ranges[parameter_name].inverse_transform(value, dim),
                    self.name + "_" + parameter_name,
                    learnable,
                    is_observed=self._observed)
                kwargs.update({
                    parameter_name:
                    ranges[parameter_name].forward_transform(
                        deterministic_parent, dim)
                })
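
A toy round-trip (under assumed transforms, not the brancher GeometricRange API) showing why construct_deterministic_parents first pulls a numeric value back through inverse_transform before wrapping it in a learnable RootVariable: the root lives in an unconstrained space, and forward_transform maps it back into the variable's valid range, reproducing the original value at initialization.

import numpy as np

# Hypothetical range for a positive-only parameter: forward = exp, inverse = log.
forward_transform = np.exp
inverse_transform = np.log

value = 0.5                             # numeric input, as handled by the branch above
root_value = inverse_transform(value)   # unconstrained value stored in the learnable RootVariable
parent = forward_transform(root_value)  # value passed on through forward_transform
assert np.isclose(parent, value)        # the transforms round-trip at initialization
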
Example #4
 def construct_deterministic_parents(self, learnable, ranges, kwargs):
     for parameter_name, value in kwargs.items():
         if not isinstance(value, (Variable, PartialLink)):
             if isinstance(value, np.ndarray):
                 dim = value.shape[0]
             elif isinstance(value, numbers.Number):
                 dim = 1
             else:
                 dim = []
             deterministic_parent = RootVariable(ranges[parameter_name].inverse_transform(value, dim),
                                                 self.name + "_" + parameter_name, learnable, is_observed=self._observed)
             kwargs.update({parameter_name: ranges[parameter_name].forward_transform(deterministic_parent, dim)})
Example #5
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

from brancher.variables import ProbabilisticModel

from brancher.stochastic_processes import GaussianProcess as GP
from brancher.stochastic_processes import SquaredExponentialCovariance as SquaredExponential
from brancher.stochastic_processes import ConstantMean
from brancher.variables import RootVariable
from brancher.standard_variables import NormalVariable as Normal
from brancher import inference

num_datapoints = 20
x_range = np.linspace(-2, 2, num_datapoints)
x = RootVariable(x_range, name="x")

# Model
mu = ConstantMean(0.)
cov = SquaredExponential(scale=0.2, jitter=10**-4)
f = GP(mu, cov, name="f")
y = Normal(f(x), 0.2, name="y")
model = ProbabilisticModel([y])

# Observe data
noise_level = 0.2
data = np.sin(2 * np.pi * 0.4 * x_range) + noise_level * np.random.normal(
    0., 1., (1, num_datapoints))
y.observe(data)

#Variational Model
final_activations = BF.matmul(weights, x)
k = CategoricalVariable(softmax_p=final_activations, name="k")

# Probabilistic model
model = ProbabilisticModel([k])

# Observations
k.observe(labels)

# Variational model
number_particles = 2
initial_location_1 = np.random.normal(0., 1.,
                                      (number_output_classes, number_pixels))
initial_location_2 = np.random.normal(0., 1.,
                                      (number_output_classes, number_pixels))
particle_1 = RootVariable(initial_location_1, name="weights", learnable=True)
particle_2 = RootVariable(initial_location_2, name="weights", learnable=True)
particle_locations = [particle_1, particle_2]
particles = [ProbabilisticModel([l]) for l in particle_locations]

# Importance sampling distributions
voronoi_set = VoronoiSet(
    particle_locations
)  # TODO: Bug if you use variables instead of probabilistic models
variational_samplers = [
    ProbabilisticModel([
        TruncatedNormalVariable(mu=initial_location_1,
                                sigma=0.1,
                                truncation_rule=lambda a: voronoi_set(a, 0),
                                name="weights",
                                learnable=True)
Example #7
[yt.observe(timeseries[t]) for yt, t in zip(y, y_range)]

# get time series
#plt.plot([data[xt][:, 0, :] for xt in x])
#plt.scatter(y_range, time_series, c="k")
#plt.show()

# Structured variational distribution #
Qomega = NormalVariable(2 * np.pi * 8, 5., 'omega', learnable=True)
Qdrift = NormalVariable(0., 1., 'drift', learnable=True)
Qx = [
    NormalVariable(0., 1., 'x0', learnable=True),
    NormalVariable(0., 1., 'x1', learnable=True)
]
Qx_mean = [
    RootVariable(0., 'x0_mean', learnable=True),
    RootVariable(0., 'x1_mean', learnable=True)
]
Qlambda = [
    RootVariable(-1, 'x0_lambda', learnable=True),
    RootVariable(-1, 'x1_lambda', learnable=True)
]
Qdriving_noise = [
    RootVariable(driving_noise, 'x0_driving_noise', learnable=True),
    RootVariable(driving_noise, 'x1_driving_noise', learnable=True)
]

for t in range(2, T):
    if t in y_range:
        l = -1.
    else:
Example #8
# Probabilistic model
model = ProbabilisticModel([k])

# Observations
k.observe(labels)

# Variational model
num_particles = 2  #10
initial_locations = [
    np.random.normal(0., 1., (number_output_classes, number_regressors))
    for _ in range(num_particles)
]
particles = [
    ProbabilisticModel(
        [RootVariable(location, name="weights", learnable=True)])
    for location in initial_locations
]

# Importance sampling distributions
variational_samplers = [
    ProbabilisticModel([
        NormalVariable(loc=location, scale=0.1, name="weights", learnable=True)
    ]) for location in initial_locations
]

# Inference
inference_method = WVGD(variational_samplers=variational_samplers,
                        particles=particles,
                        biased=False)
inference.perform_inference(model,
Example #9
                            lr=lr)
loss_list2 = model.diagnostics["loss curve"]

N_ELBO = 1000
ELBO2 = model.estimate_log_model_evidence(N_ELBO)

# Structured NN distribution #
hidden_size = 5
latent_size = 5
out_size = N_groups + N_people
Qepsilon = Normal(np.zeros((latent_size, 1)),
                  np.ones((latent_size, )),
                  'epsilon',
                  learnable=True)
W1 = RootVariable(np.random.normal(0, 0.1, (hidden_size, latent_size)),
                  "W1",
                  learnable=True)
W2 = RootVariable(np.random.normal(0, 0.1, (out_size, hidden_size)),
                  "W2",
                  learnable=True)
pre_x = BF.matmul(W2, BF.sigmoid(BF.matmul(W1, Qepsilon)))

Qgroup_means = [
    Normal(pre_x[n], 4., "group_mean_{}".format(n), learnable=True)
    for n in range(N_groups)
]
Qpeople_means = [
    Normal(pre_x[N_groups + m], 0.1, "person_{}".format(m), learnable=True)
    for m, assignment_list in enumerate(assignment_matrix)
]
Example #10
                           is_observed=True)

    Qlabels = EmpiricalVariable(output_labels,
                                indices=minibatch_indices,
                                name="labels",
                                is_observed=True)

    encoder_output1 = DeterministicVariable(encoder1(Qx),
                                            name="encoder_output1")
    encoder_output2 = DeterministicVariable(encoder2(encoder_output1["mean"]),
                                            name="encoder_output2")
    encoder_output3 = DeterministicVariable(encoder3(encoder_output2["mean"]),
                                            name="encoder_output3")

    Qlambda11 = RootVariable(l1 * np.ones((latent_size1, )),
                             'lambda11',
                             learnable=True)
    Qlambda12 = RootVariable(l1 * np.ones((latent_size1, )),
                             'lambda12',
                             learnable=True)
    Qz1 = NormalVariable((1 - BF.sigmoid(Qlambda11)) * encoder_output3["mean"],
                         BF.sigmoid(Qlambda12) * z2sd +
                         (1 - BF.sigmoid(Qlambda12)) * encoder_output3["sd"],
                         name="z1")

    Qdecoder_output1 = DeterministicVariable(decoder1(Qz1),
                                             name="Qdecoder_output1")

    Qlambda21 = RootVariable(l0 * np.ones((latent_size2, )),
                             'lambda21',
                             learnable=True)
Example #11
    for _ in range(num_particles)
]
wk2_locations = [
    np.random.normal(0., 1., (out_channels2, out_channels1, 3, 3))
    for _ in range(num_particles)
]
wl_locations = [
    np.random.normal(0., 1., (num_classes, out_channels2))
    for _ in range(num_particles)
]
b_locations = [
    np.random.normal(0., 1., (num_classes, 1)) for _ in range(num_particles)
]
particles = [
    ProbabilisticModel([
        RootVariable(wk1, name="Wk1", learnable=True),
        RootVariable(wk2, name="Wk2", learnable=True),
        RootVariable(wl, name="Wl", learnable=True),
        RootVariable(b, name="b", learnable=True)
    ]) for wk1, wk2, wl, b in zip(wk1_locations, wk2_locations, wl_locations,
                                  b_locations)
]

# Importance sampling distributions
variational_samplers = [
    ProbabilisticModel([
        NormalVariable(wk1, 1 + 0 * wk1, name="Wk1", learnable=True),
        NormalVariable(wk2, 1 + 0 * wk2, name="Wk2", learnable=True),
        NormalVariable(wl, 1 + 0 * wl, name="Wl", learnable=True),
        NormalVariable(b, 1 + 0 * b, name="b", learnable=True)
    ]) for wk1, wk2, wl, b in zip(wk1_locations, wk2_locations, wl_locations,
Example #12
import numpy as np

from brancher.variables import RootVariable, RandomVariable, ProbabilisticModel
from brancher.standard_variables import NormalVariable, LogNormalVariable
from brancher import functions as BF
from brancher import inference
from brancher.visualizations import plot_posterior

# Parameters
S = 6.
N = 40
x_range = np.linspace(-S / 2., S / 2., N)
y_range = np.linspace(-S / 2., S / 2., N)
x_mesh, y_mesh = np.meshgrid(x_range, y_range)
#x_mesh, y_mesh = np.expand_dims(x_mesh, 0), np.expand_dims(y_mesh, 0)

# Experimental model
x = RootVariable(x_mesh, name="x")  #TODO: it should create this automatically
y = RootVariable(y_mesh, name="y")
w1 = NormalVariable(0., 1., name="w1")
w2 = NormalVariable(0., 1., name="w2")
b = NormalVariable(0., 1., name="b")
experimental_input = NormalVariable(BF.exp(BF.sin(w1 * x + w2 * y + b)),
                                    0.1,
                                    name="input",
                                    is_observed=True)

# Probabilistic Model
mu_x = NormalVariable(0., 1., name="mu_x")
mu_y = NormalVariable(0., 1., name="mu_y")
v = LogNormalVariable(0., 0.1, name="v")
nu = LogNormalVariable(-1, 0.01, name="nu")
receptive_field = BF.exp((-(x - mu_x)**2 - (y - mu_y)**2) /
Example #13
plt.show()
ground_truth = short_y
#true_b = data[omega].data
#print("The true coefficient is: {}".format(float(true_b)))

# Observe data #
[yt.observe(noisy_y[t]) for t, yt in zip(y_range, y)]

# Structured variational distribution #
Qomega = NormalVariable(2 * np.pi * 7.5, 1., "omega", learnable=True)
Qx = [
    NormalVariable(0., 0.1, 'x0', learnable=True),
    NormalVariable(0., 0.1, 'x1', learnable=True)
]
Qx_mean = [
    RootVariable(0., 'x0_mean', learnable=True),
    RootVariable(0., 'x1_mean', learnable=True)
]
Qlambda = [
    RootVariable(0., 'x0_lambda', learnable=True),
    RootVariable(0., 'x1_lambda', learnable=True)
]

for t in range(2, T):
    if t in y_range:
        l = 0.
    else:
        l = 0.
    Qx_mean.append(RootVariable(0, x_names[t] + "_mean", learnable=True))
    Qlambda.append(RootVariable(l, x_names[t] + "_lambda", learnable=True))
    new_mu = (-1 + b * dt) * Qx[t - 2] - Qomega**2 * dt**2 * (BF.sin(
        data = AR_model._get_sample(number_samples=1)
        time_series = [float(data[yt].data) for yt in y]
        ground_truth = [float(data[xt].data) for xt in x]
        #true_b = data[omega].data
        #print("The true coefficient is: {}".format(float(true_b)))

        # Observe data #
        [yt.observe(data[yt][:, 0, :]) for yt in y]

        # Structured variational distribution #
        Qx = [
            NormalVariable(0., 1., 'x0', learnable=True),
            NormalVariable(0., 1., 'x1', learnable=True)
        ]
        Qx_mean = [
            RootVariable(0., 'x0_mean', learnable=True),
            RootVariable(0., 'x1_mean', learnable=True)
        ]
        Qlambda = [
            RootVariable(0., 'x0_lambda', learnable=True),
            RootVariable(0., 'x1_lambda', learnable=True)
        ]

        for t in range(2, T):
            if t in y_range:
                l = 0.
            else:
                l = 0.
            Qx_mean.append(
                RootVariable(0, x_names[t] + "_mean", learnable=True))
            Qlambda.append(
time_series = [float(data[yt].data) for yt in y]
ground_truth = [float(data[xt].data) for xt in x]
#true_b = data[b].data
#print("The true coefficient is: {}".format(float(true_b)))

# Observe data #
[yt.observe(data[yt][:, 0, :]) for yt in y]

# get time series
#plt.plot([data[xt][:, 0, :] for xt in x])
#plt.scatter(y_range, time_series, c="k")
#plt.show()

# Structured variational distribution #
Qx = [NormalVariable(0., 1., 'x0', learnable=True)]
Qx_mean = [RootVariable(0., 'x0_mean', learnable=True)]
Qlambda = [RootVariable(-1., 'x0_lambda', learnable=True)]

for t in range(1, T):
    if t in y_range:
        l = 0.
    else:
        l = 1.
    Qx_mean.append(RootVariable(0, x_names[t] + "_mean", learnable=True))
    Qlambda.append(RootVariable(l, x_names[t] + "_lambda", learnable=True))
    Qx.append(
        NormalVariable(BF.sigmoid(Qlambda[t]) * Qx[t - 1] +
                       (1 - BF.sigmoid(Qlambda[t])) * Qx_mean[t],
                       np.sqrt(dt) * driving_noise,
                       x_names[t],
                       learnable=True))
Example #16
# Generate data
N = 3
theta_real = 0.1
x_real = NormalVariable(theta_real**2, 0.2, "x")
data = x_real._get_sample(number_samples=N)

# Observe data
x.observe(data[x_real][:, 0, :])

# Variational model
num_particles = 2
initial_locations = [-2, 2]

#initial_locations = [0, 0.1]
particles = [
    ProbabilisticModel([RootVariable(p, name="theta", learnable=True)])
    for p in initial_locations
]

# Importance sampling distributions
variational_samplers = [
    ProbabilisticModel([
        NormalVariable(loc=location, scale=0.2, name="theta", learnable=True)
    ]) for location in initial_locations
]

# Inference
inference_method = WVGD(variational_samplers=variational_samplers,
                        particles=particles,
                        biased=False,
                        number_post_samples=80000)
from brancher import inference
import brancher.functions as BF

# Data
number_regressors = 2
number_samples = 50
x1_input_variable = np.random.normal(1.5, 1.5, (int(number_samples/2), number_regressors, 1))
x1_labels = 0*np.ones((int(number_samples/2), 1))
x2_input_variable = np.random.normal(-1.5, 1.5, (int(number_samples/2), number_regressors, 1))
x2_labels = 1*np.ones((int(number_samples/2),1))
input_variable = np.concatenate((x1_input_variable, x2_input_variable), axis=0)
labels = np.concatenate((x1_labels, x2_labels), axis=0)

# Probabilistic model
weights = NormalVariable(np.zeros((1, number_regressors)), 0.5*np.ones((1, number_regressors)), "weights")
x = RootVariable(input_variable, "x", is_observed=True)
logit_p = BF.matmul(weights, x)
k = BinomialVariable(1, logits=logit_p, name="k")
model = ProbabilisticModel([k])

samples = model._get_sample(300)

# Observations
k.observe(labels)

# Variational Model
Qweights = MultivariateNormalVariable(loc=np.zeros((1, number_regressors)),
                                      covariance_matrix=np.identity(number_regressors),
                                      name="weights", learnable=True)
variational_model = ProbabilisticModel([Qweights])
model.set_posterior_model(variational_model)
    y.append(NormalVariable(x[t], measure_noise, y_names[t]))
AR_model = ProbabilisticModel(x + y)

# Generate data #
data = AR_model._get_sample(number_samples=1)
time_series = [float(data[yt].data) for yt in y]
ground_truth = [float(data[xt].data) for xt in x]
true_b = data[b].data
print("The true coefficient is: {}".format(float(true_b)))

# Observe data #
[yt.observe(data[yt][:, 0, :]) for yt in y]

# Autoregressive variational distribution #
Qb = BetaVariable(0.5, 0.5, "b", learnable=True)
logit_b_post = RootVariable(0., 'logit_b_post', learnable=True)
Qx = [NormalVariable(0., 1., 'x0', learnable=True)]
Qx_mean = [RootVariable(0., 'x0_mean', learnable=True)]
for t in range(1, T):
    Qx_mean.append(RootVariable(0., x_names[t] + "_mean", learnable=True))
    Qx.append(
        NormalVariable(logit_b_post * Qx[t - 1] + Qx_mean[t],
                       1.,
                       x_names[t],
                       learnable=True))
variational_posterior = ProbabilisticModel([Qb] + Qx)
AR_model.set_posterior_model(variational_posterior)

# Inference #
inference.perform_inference(AR_model,
                            number_iterations=200,
# Forward pass
final_activations = BF.matmul(weights, x)
k = CategoricalVariable(logits=final_activations, name="k")

# Probabilistic model
model = ProbabilisticModel([k])

# Observations
k.observe(labels)

# Variational model
initial_weights = np.random.normal(0., 1.,
                                   (number_output_classes, number_regressors))
model.set_posterior_model(
    ProbabilisticModel(
        [RootVariable(initial_weights, name="weights", learnable=True)]))

# Inference
inference.perform_inference(model,
                            inference_method=MAP(),
                            number_iterations=3000,
                            number_samples=100,
                            optimizer="SGD",
                            lr=0.0025)
loss_list = model.diagnostics["loss curve"]
plt.show()

# Test accuracy
test_size = len(ind[dataset_size:])
num_images = test_size * 3
test_indices = RandomIndices(dataset_size=test_size,
        # Generate data #
        data = AR_model._get_sample(number_samples=1)
        time_series = [float(data[yt].data) for yt in y]
        ground_truth = [float(data[xt].data) for xt in x]
        #true_b = data[omega].data
        #print("The true coefficient is: {}".format(float(true_b)))

        # Observe data #
        [yt.observe(data[yt][:, 0, :]) for yt in y]


        # Structured variational distribution #
        Qx = [NormalVariable(0., 1., 'x0', learnable=True),
              NormalVariable(0., 1., 'x1', learnable=True)]
        Qx_mean = [RootVariable(0., 'x0_mean', learnable=True),
                   RootVariable(0., 'x1_mean', learnable=True)]
        Qlambda = [RootVariable(-0.5, 'x0_lambda', learnable=True),
                   RootVariable(-0.5, 'x1_lambda', learnable=True)]


        for t in range(2, T):
            if t in y_range:
                l = 1.
            else:
                l = 1.
            Qx_mean.append(RootVariable(0, x_names[t] + "_mean", learnable=True))
            Qlambda.append(RootVariable(l, x_names[t] + "_lambda", learnable=True))
            new_mu = (-1 - omega ** 2 * dt ** 2 + b * dt) * Qx[t - 2] + (2 - b * dt) * Qx[t - 1]
            Qx.append(NormalVariable(BF.sigmoid(Qlambda[t])*new_mu + (1 - BF.sigmoid(Qlambda[t]))*Qx_mean[t],
                                     np.sqrt(dt) * driving_noise, x_names[t], learnable=True))
        #num_particles = 2 #10
        wk_locations = [
            np.random.normal(0., 0.1, (out_channels, in_channels, 2, 2))
            for _ in range(num_particles)
        ]
        wl_locations = [
            np.random.normal(0., 0.1, (num_classes, out_channels))
            for _ in range(num_particles)
        ]
        b_locations = [
            np.random.normal(0., 0.1, (num_classes, 1))
            for _ in range(num_particles)
        ]
        particles = [
            ProbabilisticModel([
                RootVariable(wk, name="Wk", learnable=True),
                RootVariable(wl, name="Wl", learnable=True),
                RootVariable(b, name="b", learnable=True)
            ]) for wk, wl, b in zip(wk_locations, wl_locations, b_locations)
        ]

        # Importance sampling distributions
        variational_samplers = [
            ProbabilisticModel([
                NormalVariable(wk, 0.1 + 0 * wk, name="Wk", learnable=True),
                NormalVariable(wl, 0.1 + 0 * wl, name="Wl", learnable=True),
                NormalVariable(b, 0.1 + 0 * b, name="b", learnable=True)
            ]) for wk, wl, b in zip(wk_locations, wl_locations, b_locations)
        ]

        # Inference
Example #22
                y_names.append(y_name)
                y.append(NormalVariable(x[t], measure_noise, y_name))
        AR_model = ProbabilisticModel(x + y + z + h)

        # Generate data #
        data = AR_model._get_sample(number_samples=1)
        time_series = [float(data[yt].data) for yt in y]
        ground_truth = [float(data[xt].data) for xt in x]

        # Observe data #
        [yt.observe(data[yt][:, 0, :]) for yt in y]

        # Structured variational distribution #
        mx0 = DeterministicVariable(value=0., name="mx0", learnable=True)
        Qx = [NormalVariable(mx0, 5 * driving_noise, 'x0', learnable=True)]
        Qx_mean = [RootVariable(0., 'x0_mean', learnable=True)]
        Qxlambda = [RootVariable(-1., 'x0_lambda', learnable=True)]

        mh0 = DeterministicVariable(value=0., name="mh0", learnable=True)
        Qh = [NormalVariable(mh0, 5 * driving_noise, 'h0', learnable=True)]
        Qh_mean = [RootVariable(0., 'h0_mean', learnable=True)]
        Qhlambda = [RootVariable(-1., 'h0_lambda', learnable=True)]

        mz0 = DeterministicVariable(value=0., name="mz0", learnable=True)
        Qz = [NormalVariable(mz0, 5 * driving_noise, 'z0', learnable=True)]
        Qz_mean = [RootVariable(0., 'z0_mean', learnable=True)]
        Qzlambda = [RootVariable(-1., 'z0_lambda', learnable=True)]

        for t in range(1, T):
            Qx_mean.append(
                RootVariable(0, x_names[t] + "_mean", learnable=True))
Example #23
# Forward pass
final_activations = BF.matmul(weights, x)
k = CategoricalVariable(softmax_p=final_activations, name="k")

# Probabilistic model
model = ProbabilisticModel([k])

# Observations
k.observe(labels)

# Variational model
num_particles = 5 #10
initial_locations = [np.random.normal(0., 1., (number_output_classes, number_regressors))
                     for _ in range(num_particles)]
particles = [ProbabilisticModel([RootVariable(location, name="weights", learnable=True)])
             for location in initial_locations]
initial_particles = copy.deepcopy(particles)

# Inference
inference_method = SVGD()
inference.perform_inference(model,
                            inference_method=inference_method,
                            number_iterations=3000,
                            number_samples=100,
                            optimizer="SGD",
                            lr=0.0025,
                            posterior_model=particles)
loss_list = model.diagnostics["loss curve"]

# Local variational models
        k.observe(labels)

        # Variational model
        num_particles = N
        initial_locations1 = [
            np.random.normal(0., 1., (number_hidden_nodes, number_regressors))
            for _ in range(num_particles)
        ]
        initial_locations2 = [
            np.random.normal(0., 1.,
                             (number_output_classes, number_hidden_nodes))
            for _ in range(num_particles)
        ]
        particles = [
            ProbabilisticModel([
                RootVariable(loc1, name="weights1", learnable=True),
                RootVariable(loc2, name="weights2", learnable=True)
            ]) for loc1, loc2 in zip(initial_locations1, initial_locations2)
        ]

        # Importance sampling distributions
        variational_samplers = [
            ProbabilisticModel([
                NormalVariable(loc=loc1,
                               scale=0.1,
                               name="weights1",
                               learnable=True),
                NormalVariable(loc=loc2,
                               scale=0.1,
                               name="weights2",
                               learnable=True)
Example #25
import numpy as np

from brancher.variables import RootVariable
from brancher.standard_variables import NormalVariable
from brancher.inference import maximal_likelihood
import brancher.functions as BF
from brancher.functions import BrancherFunction as bf

# Parameters
number_regressors = 1
number_observations = 15
real_weights = np.random.normal(0, 1, (number_regressors, 1))
real_sigma = 0.6
input_variable = np.random.normal(0, 1, (number_observations, number_regressors))

# ProbabilisticModel
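# Note (assumption): L.Linear on the next line is taken to come from a neural-network
# layers module such as chainer.links, imported earlier in the full script.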
regression_link = bf(L.Linear(number_regressors, 1))
x = RootVariable(input_variable, "x", is_observed=True)
sigma = RootVariable(0.1, "sigma", learnable=True)
y = NormalVariable(regression_link(x), BF.exp(sigma), "y")

# Observations
data = (np.matmul(x.value.data, real_weights)
        + np.random.normal(0, real_sigma, (number_observations, 1)))
y.observe(data)
print(y)

# Maximal Likelihood
loss_list = maximal_likelihood(y, number_iterations=1000)

a_range = np.linspace(-2,2,40)
model_prediction = []
for a in a_range: