Example #1
import numpy as np
import chainer
import chainer.links as L

#from brancher.links import brancher_decorator
from brancher.variables import DeterministicVariable, RandomVariable, ProbabilisticModel
from brancher.standard_variables import NormalVariable
from brancher.functions import BrancherFunction
import brancher.functions as BF
#import brancher.links as BL

a = DeterministicVariable(1.5, 'a')
b = DeterministicVariable(0.3, 'b')
c = DeterministicVariable(0.3, 'c')
d = NormalVariable((a * b + c), c + a**2, 'd')
e1 = BF.concat((a, b), 2)
e2 = BF.concat((a, c), 2)
f = NormalVariable(e1**2, e2**1, 'f')

f._get_sample(10)

a_val = chainer.Variable(0.25 * np.pi * np.ones((1, 1), dtype="float32"))
b_val = chainer.Variable(0.25 * np.pi * np.ones((1, 1), dtype="float32"))
c_val = chainer.Variable(2 * np.ones((1, 1), dtype="float32"))

#z = BF.sin(a + b)/c

#print(z.fn({a: a_val, b: b_val, c: c_val}))

BLink = BrancherFunction(L.Linear(1, 10))

print(BLink)
#import inspect
#print(inspect.getmro(BLink))
#print(issubclass(BLink, chainer.Link))
Example #2
import numpy as np
import torch

from brancher.variables import DeterministicVariable
from brancher.standard_variables import NormalVariable
from brancher.functions import BrancherFunction
import brancher.functions as BF

# a and b are not defined in this snippet; the values below are assumed to
# match Example #1.
a = DeterministicVariable(1.5, 'a')
b = DeterministicVariable(0.3, 'b')
c = DeterministicVariable(0.3, 'c')
d = NormalVariable((a * b + c), c + a**2, 'd')

##
print(a._get_sample(10))

##
e1 = BF.cat((a, b), 2)  # TODO: change later so that the user does not have to specify dim explicitly (adjust cat)
e2 = BF.cat((a, c), 2)
f = NormalVariable(e1**2, e2**1, 'f')
g = NormalVariable(BF.relu(f), 1., 'g')

##
print(g._get_sample(10))

##
a_val = torch.tensor(0.25 * np.pi * np.ones((1, 1), dtype="float32"))
b_val = torch.tensor(0.25 * np.pi * np.ones((1, 1), dtype="float32"))
c_val = torch.tensor(2 * np.ones((1, 1), dtype="float32"))

##
z = BF.sin(a + b) / c

print(z.fn({a: a_val, b: b_val, c: c_val}))

##
BLink = BrancherFunction(torch.nn.Linear(1, 10))

print(BLink)
Example #3
import chainer
import matplotlib.pyplot as plt
import numpy as np

from brancher.variables import ProbabilisticModel
from brancher.standard_variables import ConcreteVariable, NormalVariable
from brancher import inference
import brancher.functions as BF

# Probabilistic Model
x = ConcreteVariable(tau=0.1, p=np.ones((2, 1))/2., name="x")
mu0 = -2
nu0 = 0.5
mu1 = 2
nu1 = 0.2
y = NormalVariable(x[0]*mu0 + x[1]*mu1, x[0]*nu0 + x[1]*nu1, "y")

samples = y._get_sample(1000)
plt.hist(samples[y].data.flatten(), 60)
print(y.calculate_log_probability(samples))
plt.title("Concrete mixture of Gaussians")
plt.show()
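
# --- Assumed setup for the fragment below (not part of the original source). ---
# The lines that follow reference `model`, `mu`, `nu` and `x_real`, which are
# never defined in this excerpt. One plausible reconstruction is a Gaussian
# model whose mean and scale are to be inferred; all names and numeric values
# here are assumptions.
from brancher.standard_variables import LogNormalVariable

nu = LogNormalVariable(0., 1., "nu")   # prior on the scale
mu = NormalVariable(0., 10., "mu")     # prior on the mean
x = NormalVariable(mu, nu, "x")        # observed variable (redefines the `x` of the mixture example above)
model = ProbabilisticModel([x])

# Assumed "true" data-generating distribution used below to create observations.
x_real = NormalVariable(1.5, 0.5, "x_real")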
print(model)

# Print samples
sample = model.get_sample(10)
print(sample)

# Print samples from single variable
x_sample = x.get_sample(10)
print(x_sample)

# Print samples conditional on an input
in_sample = model.get_sample(10, input_values={mu: 100.})
print(in_sample)

# Generate data
data = x_real._get_sample(number_samples=50)

# Observe data
x.observe(data[x_real][:, 0, :])

# Variational model
Qnu = LogNormalVariable(0., 1., "nu", learnable=True)
Qmu = NormalVariable(0., 1., "mu", learnable=True)
model.set_posterior_model(ProbabilisticModel([Qmu, Qnu]))

# Inference
inference.stochastic_variational_inference(model,
                                           number_iterations=100,
                                           number_samples=50,
                                           optimizer=chainer.optimizers.Adam(0.1))
loss_list = model.diagnostics["loss curve"]
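
# --- Assumed setup for the regression fragment below (not part of the original source). ---
# The following lines use `input_variable`, `regression_link`, `real_weights`,
# `real_sigma`, `number_observations` and `maximal_likelihood` without defining
# them in this excerpt. The sketch below shows one plausible 1-D linear
# regression setup; every name, shape and value is an assumption, and the
# import location of `maximal_likelihood` is assumed as well.
import numpy as np
import chainer.links as L
from brancher.variables import DeterministicVariable
from brancher.functions import BrancherFunction
from brancher.inference import maximal_likelihood  # assumed import path

number_observations = 50
real_weights = np.array([[2.]], dtype="float32")   # assumed true slope
real_sigma = 0.1                                   # assumed observation noise
input_variable = np.random.normal(0., 1., (number_observations, 1)).astype("float32")

# Learnable linear link wrapped as a Brancher function, as in Example #1.
regression_link = BrancherFunction(L.Linear(1, 1))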
x = DeterministicVariable(input_variable, "x", is_observed=True)
sigma = DeterministicVariable(0.1, "sigma", learnable=True)
y = NormalVariable(regression_link(x), BF.exp(sigma), "y")

# Observations
data = (np.matmul(x.value.data, real_weights) +
        np.random.normal(0, real_sigma, (number_observations, 1)))
y.observe(data)
print(y)

# Maximal Likelihood
loss_list = maximal_likelihood(y, number_iterations=1000)

a_range = np.linspace(-2, 2, 40)
model_prediction = []
for a in a_range:
    x.value = a
    sigma.value = -20.
    model_prediction.append(float(y._get_sample()[y].data))

# Two subplots, unpack the axes array immediately
f, (ax1, ax2) = plt.subplots(1, 2)
ax1.plot(np.array(loss_list))
ax1.set_title("Convergence")
ax1.set_xlabel("Iteration")
ax1.set_ylabel("Negative log-likelihood")
ax2.plot(a_range, a_range * real_weights.flatten())
ax2.plot(a_range, np.array(model_prediction), c="red")
ax2.scatter(input_variable.flatten(), data, c="k")
ax2.set_title("ML fit")
plt.show()