Example #1

import numpy as np
import torch

from brancher.standard_variables import NormalVariable
import brancher.functions as BF
from brancher.functions import BrancherFunction  # assumed import location for BrancherFunction

# a, b and c are assumed to be Brancher variables defined earlier in the original script
e1 = BF.cat(
    (a, b), 2
)  # TODO: to change later, so that user does not have to specify dim explicitly (adjust cat)
e2 = BF.cat((a, c), 2)
f = NormalVariable(e1**2, e2**1, 'f')
g = NormalVariable(BF.relu(f), 1., 'g')

##
print(g._get_sample(10))

##
a_val = torch.tensor(0.25 * np.pi * np.ones((1, 1), dtype="float32"))
b_val = torch.tensor(0.25 * np.pi * np.ones((1, 1), dtype="float32"))
c_val = torch.tensor(2 * np.ones((1, 1), dtype="float32"))

##
z = BF.sin(a + b) / c

print(z.fn({a: a_val, b: b_val, c: c_val}))
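
For reference (added here, not part of the original snippet), the same value computed directly with NumPy; the print above should reproduce it:

print(np.sin(0.25 * np.pi + 0.25 * np.pi) / 2.)  # plain NumPy: sin(pi/2) / 2 = 0.5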

##
BLink = BrancherFunction(torch.nn.Linear(1, 10))

print(BLink)
#import inspect
#print(inspect.getmro(BLink))
#print(issubclass(BLink, chainer.Link))

##
print(BLink(a).fn({a: a_val}).detach().numpy())
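
A natural follow-up, not in the original snippet, is to use the wrapped linear layer as the mean of a new variable, in the same way BF.relu(f) is used above:

h = NormalVariable(BLink(a), 1., 'h')  # hypothetical variable; BLink(a) gives a 10-dimensional mean
print(h._get_sample(5))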

##
Example #2
def __init__(self, frequency, scale, jitter=0.):
    self.frequency = var2link(frequency)
    self.scale = var2link(scale)
    covariance = lambda x, y: BF.exp(-2 * BF.sin(np.pi * self.frequency * (
        x - y))**2 / scale**2) + BF.delta(x, y) * jitter
    super().__init__(covariance=covariance)
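
For context (an assumption, since the enclosing class is not shown in the original): an __init__ like the one above only makes sense inside a covariance class whose base stores the covariance callable. A minimal hypothetical sketch of that structure:

class CovarianceFunction:
    """Hypothetical base class: stores a callable covariance(x, y)."""
    def __init__(self, covariance):
        self.covariance = covariance

    def __call__(self, x, y):
        return self.covariance(x, y)

A periodic-kernel subclass would then define the __init__ shown above and call super().__init__(covariance=covariance).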
Example #3

import numpy as np  # needed below for np.pi, np.linspace and np.meshgrid

from brancher.variables import RootVariable  # assumed import location for RootVariable
from brancher.standard_variables import NormalVariable, DeterministicVariable, LogNormalVariable
import brancher.functions as BF

from numpy import sin

##
a = DeterministicVariable(1.5, 'a')
b = DeterministicVariable(0.3, 'b')
c = DeterministicVariable(0.3, 'c')
d = BF.sin(a + b**2) / (3 * c)

##
print(d)
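
As a cross-check (added, not in the original), the same expression evaluated with plain NumPy on the constant values above:

print(np.sin(1.5 + 0.3**2) / (3 * 0.3))  # should agree with the value wrapped by d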
# Parameters
S = 6.
N = 40
x_range = np.linspace(-S / 2., S / 2., N)
y_range = np.linspace(-S / 2., S / 2., N)
x_mesh, y_mesh = np.meshgrid(x_range, y_range)
#x_mesh, y_mesh = np.expand_dims(x_mesh, 0), np.expand_dims(y_mesh, 0)

# Experimental model
x = RootVariable(x_mesh, name="x")  #TODO: it should create this automatically
y = RootVariable(y_mesh, name="y")
w1 = NormalVariable(0., 1., name="w1")
w2 = NormalVariable(0., 1., name="w2")
b = NormalVariable(0., 1., name="b")
experimental_input = NormalVariable(BF.exp(BF.sin(w1 * x + w2 * y + b)),
                                    0.1,
                                    name="input",
                                    is_observed=True)

# Probabilistic Model
mu_x = NormalVariable(0., 1., name="mu_x")
mu_y = NormalVariable(0., 1., name="mu_y")
v = LogNormalVariable(0., 0.1, name="v")
nu = LogNormalVariable(-1, 0.01, name="nu")
receptive_field = BF.exp((-(x - mu_x)**2 - (y - mu_y)**2) /
                         (2. * v**2)) / (2. * BF.sqrt(np.pi * v**2))
mean_response = BF.sum(BF.sum(receptive_field * experimental_input,
                              dim=1,
                              keepdim=True),
                       dim=2,
                       keepdim=True)  # keepdim assumed, mirroring the inner BF.sum call; the original snippet is truncated here
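
The original snippet ends abruptly above; a plausible continuation, assumed rather than taken from the source, would use mean_response as the mean of an observed response variable with noise scale nu:

# Assumed continuation, not from the original file
response = NormalVariable(mean_response, nu, name="response", is_observed=True)
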
Example #5
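
The snippet below uses several names defined earlier in the original script (T, dt, driving_noise, measure_noise, transform, plt). The setup values here are assumptions added only to make the snippet mostly self-contained; short_y (the observed sub-series used as ground truth near the end) is left undefined, as in the original.

import numpy as np
import matplotlib.pyplot as plt

from brancher.variables import ProbabilisticModel  # assumed import location
from brancher.standard_variables import NormalVariable
import brancher.functions as BF

# Assumed example values, not from the original source
T = 100                          # number of time steps
dt = 0.01                        # integration step
driving_noise = 0.5              # process noise scale
measure_noise = 0.2              # observation noise scale
transform = lambda x: BF.sin(x)  # assumed observation transform
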
x0 = NormalVariable(0., driving_noise, 'x0')
y0 = NormalVariable(x0, measure_noise, 'y0')
x1 = NormalVariable(0., driving_noise, 'x1')
y1 = NormalVariable(x1, measure_noise, 'y1')
b = 35
omega = NormalVariable(2 * np.pi * 7.5, 1., "omega")

x = [x0, x1]
y = [y0, y1]
x_names = ["x0", "x1"]
y_names = ["y0", "y1"]
y_range = [t for t in range(T) if (t < 15 or t > T - 15)]
for t in range(2, T):
    x_names.append("x{}".format(t))
    #new_mu = (-1 - omega**2*dt**2 + b*dt)*x[t - 2] + (2 - b*dt)*x[t - 1]
    new_mu = (-1 + b * dt) * x[t - 2] - omega**2 * dt**2 * (BF.sin(
        x[t - 2])) + (2 - b * dt) * x[t - 1]
    x.append(NormalVariable(new_mu, np.sqrt(dt) * driving_noise, x_names[t]))
    if t in y_range:
        y_name = "y{}".format(t)
        y_names.append(y_name)
        y.append(NormalVariable(transform(x[t]), measure_noise, y_name))
AR_model = ProbabilisticModel(x + y)

# Generate data #
data = AR_model._get_sample(number_samples=1)
time_series = [float(data[xt].data) for xt in x]
plt.plot(time_series)
plt.show()
ground_truth = short_y
#true_b = data[omega].data
#print("The true coefficient is: {}".format(float(true_b)))