Example #1
    def __init__(self, scale, jitter=0.):
        self.scale = var2link(scale)
        # Exponential (Ornstein-Uhlenbeck) covariance with a diagonal jitter term.
        covariance = lambda x, y: (BF.exp(-BF.abs(x - y) / self.scale)
                                   + BF.delta(x, y) * jitter)
        super().__init__(covariance=covariance)
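A hedged usage sketch for the kernel above. The original shows only the method body; the enclosing class name `ExponentialCovariance` is an assumption:

from brancher.standard_variables import LogNormalVariable

scale = LogNormalVariable(0., 1., name="scale")      # positive length scale
kernel = ExponentialCovariance(scale, jitter=1e-6)   # class name assumed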
Example #2
    def __init__(self, frequency, scale, jitter=0.):
        self.frequency = var2link(frequency)
        self.scale = var2link(scale)
        # Periodic covariance: exp(-2 sin^2(pi * f * (x - y)) / scale^2) plus jitter.
        covariance = lambda x, y: (BF.exp(-2 * BF.sin(np.pi * self.frequency * (x - y))**2
                                          / self.scale**2)
                                   + BF.delta(x, y) * jitter)
        super().__init__(covariance=covariance)
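A quick pure-NumPy sanity check of the same periodic covariance (no brancher involved; the function name and test values here are illustrative only):

import numpy as np

def periodic_cov(x, y, frequency, scale, jitter=0.):
    # Same formula as the lambda above, on plain arrays.
    return (np.exp(-2 * np.sin(np.pi * frequency * (x - y))**2 / scale**2)
            + (x == y) * jitter)

t = np.linspace(0., 2., 5)
K = periodic_cov(t[:, None], t[None, :], frequency=1., scale=1., jitter=1e-6)
assert np.allclose(K, K.T)                 # symmetric
assert np.allclose(np.diag(K), 1. + 1e-6)  # unit variance plus jitter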
Example #3
import matplotlib.pyplot as plt
import numpy as np

from brancher.variables import DeterministicVariable, ProbabilisticModel
from brancher.standard_variables import NormalVariable, EmpiricalVariable
from brancher import inference
import brancher.functions as BF

# Data: `dataset` (the training observations used below) is defined here
# in the full example

# Neural architectures: `encoder` and `decoder` both map their input to a
# dict with "mean" and "log_var" entries
# Encoder
# Decoder
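# The architectures are omitted here. A minimal sketch of what they might look
# like; all module names and sizes are assumptions, and the BrancherFunction
# wrapping follows the bf(...) pattern of Example #4:
import torch
import torch.nn as nn
from brancher.functions import BrancherFunction

class EncoderNet(nn.Module):  # hypothetical architecture
    def __init__(self, input_size=784, latent_size=10):
        super().__init__()
        self.hidden = nn.Linear(input_size, 128)
        self.mean = nn.Linear(128, latent_size)
        self.log_var = nn.Linear(128, latent_size)

    def forward(self, x):
        h = torch.relu(self.hidden(x))
        return {"mean": self.mean(h), "log_var": self.log_var(h)}

class DecoderNet(nn.Module):  # hypothetical architecture
    def __init__(self, latent_size=10, output_size=784):
        super().__init__()
        self.hidden = nn.Linear(latent_size, 128)
        self.mean = nn.Linear(128, output_size)
        self.log_var = nn.Linear(128, output_size)

    def forward(self, z):
        h = torch.relu(self.hidden(z))
        return {"mean": self.mean(h), "log_var": self.log_var(h)}

encoder = BrancherFunction(EncoderNet())  # wrapping of nn.Modules assumed
decoder = BrancherFunction(DecoderNet())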

# Generative model
latent_size = (10, )
z = NormalVariable(np.zeros(latent_size), np.ones(latent_size))
decoder_output = decoder(z)
x = NormalVariable(decoder_output["mean"],
                   BF.exp(decoder_output["log_var"]),
                   name="x")
model = ProbabilisticModel([x, z])

# Amortized variational distribution
Qx = EmpiricalVariable(dataset, name="x")
encoder_output = encoder(Qx)
Qz = NormalVariable(encoder_output["mean"],
                    BF.exp(encoder_output["log_var"]),
                    name="z")
variational_model = ProbabilisticModel([Qx, Qz])
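To train the VAE, the natural next step is to bind the variational model and run stochastic variational inference. A hedged sketch; the method and argument names follow brancher's documented patterns, but the exact perform_inference signature and the diagnostics key are assumptions:

model.set_posterior_model(variational_model)
inference.perform_inference(model,
                            number_iterations=1000,
                            number_samples=1,
                            optimizer="Adam",
                            lr=0.001)
loss_list = model.diagnostics["loss curve"]  # diagnostics key assumed
plt.plot(loss_list)
plt.show()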
Example #4
import numpy as np
import matplotlib.pyplot as plt
import chainer.links as L  # provides L.Linear below (import path assumed)

from brancher.variables import DeterministicVariable
from brancher.standard_variables import NormalVariable
from brancher.inference import maximal_likelihood  # import path assumed
import brancher.functions as BF
from brancher.functions import BrancherFunction as bf

# Parameters
number_regressors = 1
number_observations = 15
real_weights = np.random.normal(0, 1, (number_regressors, 1))
real_sigma = 0.6
input_variable = np.random.normal(0, 1,
                                  (number_observations, number_regressors))

# ProbabilisticModel
regression_link = bf(L.Linear(number_regressors, 1))  # learnable linear layer
x = DeterministicVariable(input_variable, "x", is_observed=True)
sigma = DeterministicVariable(0.1, "sigma", learnable=True)  # log-scale noise parameter
y = NormalVariable(regression_link(x), BF.exp(sigma), "y")   # BF.exp keeps the scale positive

# Observations
data = (np.matmul(x.value.data, real_weights) +
        np.random.normal(0, real_sigma, (number_observations, 1)))
y.observe(data)
print(y)

# Maximal Likelihood
loss_list = maximal_likelihood(y, number_iterations=1000)
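# A quick convergence check; this assumes maximal_likelihood returns the
# per-iteration loss values, as the name loss_list suggests:
plt.plot(loss_list)
plt.xlabel("iteration")
plt.ylabel("loss")
plt.show()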

a_range = np.linspace(-2, 2, 40)
model_prediction = []
for a in a_range:
    x.value = a
    sigma.value = -20.
Example #5
    def __call__(self, var):
        # Exponential change of variables; log_determinant stores the Jacobian correction.
        return DeterministicVariable(BF.exp(var), log_determinant=-var,
                                     name="Exp({})".format(var.name))
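A hedged usage sketch; the class this `__call__` belongs to is not shown, so the name `ExpTransformation` is an assumption:

from brancher.standard_variables import NormalVariable

z = NormalVariable(0., 1., name="z")
positive_z = ExpTransformation()(z)  # class name assumed
# positive_z is a DeterministicVariable named "Exp(z)" with positive support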
Example #6
import numpy as np
import brancher.functions as BF
from brancher.variables import RootVariable
from brancher.standard_variables import NormalVariable, LogNormalVariable

# Parameters
S = 6.
N = 40
x_range = np.linspace(-S / 2., S / 2., N)
y_range = np.linspace(-S / 2., S / 2., N)
x_mesh, y_mesh = np.meshgrid(x_range, y_range)
#x_mesh, y_mesh = np.expand_dims(x_mesh, 0), np.expand_dims(y_mesh, 0)

# Experimental model
x = RootVariable(x_mesh, name="x")  #TODO: it should create this automatically
y = RootVariable(y_mesh, name="y")
w1 = NormalVariable(0., 1., name="w1")
w2 = NormalVariable(0., 1., name="w2")
b = NormalVariable(0., 1., name="b")
experimental_input = NormalVariable(BF.exp(BF.sin(w1 * x + w2 * y + b)),
                                    0.1,
                                    name="input",
                                    is_observed=True)

# Probabilistic Model
mu_x = NormalVariable(0., 1., name="mu_x")
mu_y = NormalVariable(0., 1., name="mu_y")
v = LogNormalVariable(0., 0.1, name="v")
nu = LogNormalVariable(-1, 0.01, name="nu")
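# Gaussian receptive field centered at (mu_x, mu_y) with width v: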
receptive_field = BF.exp((-(x - mu_x)**2 - (y - mu_y)**2) /
                         (2. * v**2)) / (2. * BF.sqrt(np.pi * v**2))
mean_response = BF.sum(BF.sum(receptive_field * experimental_input,
                              dim=1,
                              keepdim=True),
                       dim=2,