# Exemplo n.º 1 (scraped example-site separator)
# 0
import numpy as np
import matplotlib.pyplot as plt

import torch

from modules.distributions import NormalDistribution, BernoulliDistribution
from modules.models import HierarchicalModel
from modules.networks import TriResNet

# Hyperparameters for a hierarchical model (consumed by modules.models /
# modules.distributions imported above).
mean_sigma = 1.
scale_mu = 0.1
scale_sigma = 0.1
n_children = 10
# One emission-noise value per child (floats are immutable, so sharing is safe).
emission_sigma_list = [0.5] * n_children
d_x = 2  # latent dimensionality
mean_dist = NormalDistribution()
scale_dist = NormalDistribution()
children_dist = NormalDistribution()


def mean_link(x):
    """Identity link for the mean parameter."""
    return x


def scale_link(x):
    """Exponential link: maps an unconstrained value to a positive scale."""
    return torch.exp(x)


def emission(x, r):
    """Affine emission x[:, 0] * r + x[:, 1]: the first latent coordinate
    acts as a slope on the regressor r, the second as an intercept."""
    slope, intercept = x[:, 0], x[:, 1]
    return slope * r + intercept

# Gaussian observation model plus one fixed random regressor vector per child.
emission_distribution = NormalDistribution()
M = 100  # observations per child
regressors = [
    torch.tensor(np.random.normal(loc=0., scale=2., size=(1, M)))
    for _child in range(n_children)
]
from modules.distributions import NormalDistribution
from modules.emissions import SingleCoordinateEmission

# Simulation parameters
num_iterations = 3000
batch_size = 200

# Model
T = 100  # time-series length (passed as T= to the model below)
dt = 0.5  # integration step size
# Transition noise std scales with sqrt(dt), as in a discretized diffusion.
sigma = np.sqrt(dt) * 0.5
initial_sigma = 3.
initial_mean = 0.
d_x = 2  #Number of latent variables
d_eps = 10  # auxiliary noise dimensionality — presumably fed to TriResNet's d_epsilon; verify
dist = NormalDistribution()
lk_sigma = 3.  # used below as the scale of the emission distribution
#transition_model = lambda x,m: x #
# NOTE(review): VolterraTransition is not imported anywhere in the visible part
# of this file — presumably it lives in modules.dynamics; confirm before running.
transition_model = VolterraTransition(dt=dt)

# Likelihood
observation_gain = 1.
emission_model = SingleCoordinateEmission(k=0, gain=observation_gain)
emission_dist = NormalDistribution(scale=lk_sigma)

### Prior model ###
prior_model = DynamicModel(sigma=sigma,
                           initial_sigma=initial_sigma,
                           distribution=dist,
                           d_x=d_x,
                           transition=transition_model,
# Exemplo n.º 3 (scraped example-site separator; the DynamicModel call above
# is truncated by the scrape)
# 0
import numpy as np
import matplotlib.pyplot as plt

import torch
import torch.optim as optim

from modules.networks import LinearGaussianTree, TriResNet, ASVIupdate
from modules.models import ColliderModel, MeanField, GlobalFlow, MultivariateNormal
from modules.distributions import NormalDistribution
from modules.eval_utils import evaluate_multi_likelihood

# Parameters
depth = 2  #3
#join_link = lambda x, y: x - y


def join_link(x, y, k=2.):
    """Saturating pairwise link: tanh(k * x) - tanh(k * y).

    Bounded in (-2, 2); k controls how sharply each input saturates.
    """
    return torch.tanh(k * x) - torch.tanh(k * y)
dist = NormalDistribution()
num_iterations = 7000  #10000
batch_size = 80
sigma = 0.05  # passed to ColliderModel below as the transition noise scale
in_sigma = 0.1  #0.2  # passed to ColliderModel below (input/root noise scale)
num_samples = 20000  # used below when sampling from the prior

# Prior: a collider-structured model joined through the tanh link above.
collider_config = dict(
    depth=depth,
    sigma=sigma,
    in_sigma=in_sigma,
    join_link=join_link,
    transition_distribution=NormalDistribution(),
)
prior_model = ColliderModel(**collider_config)

# Data: a single draw from the prior serves as the ground-truth sample.
true_smpl, _, _, _ = prior_model.sample(1)
# Exemplo n.º 4 (scraped example-site separator)
# 0
#plt.scatter(norm(sm[:,0,0]), norm(sm[:,0,1000]), c="g", alpha=0.1)
#plt.scatter(norm(sm[:,0,0]), norm(sm[:,0,1]), c="r", alpha=0.1)

#plt.scatter(norm(sm[:,0,0]), norm(sm[:,0,20]), c="g", alpha=0.1)
# Correlation of every unit with the reference unit `node`
# (`sm` and `node` are defined elsewhere — TODO confirm).
corr = []
for j in range(sm.shape[2]):
    corr.append(np.corrcoef(sm[:, 0, node], sm[:, 0, j])[0, 1])
plt.plot(corr)

# Sanity check with linear links: additive join and identity emission
# (named lambdas replaced with defs per PEP 8 / E731).
def join_link(x, y):
    """Additive join of two parent values."""
    return x + y


def emission(x):
    """Identity emission."""
    return x


# NOTE(review): `tree` is not defined in the visible part of this file —
# presumably a LinearGaussianTree built earlier; confirm before running.
prior_model = ColliderModel(depth=depth,
                            sigma=0.1,
                            in_sigma=1.,
                            join_link=join_link,
                            transition_distribution=NormalDistribution(),
                            eps_generator=tree)

smpl, _, _, _ = prior_model.sample(num_samples)
smpl = torch.cat(smpl, 1).detach().numpy()
# Correlation of every unit with the reference unit `node`.
corr = [
    np.corrcoef(smpl[:, node], smpl[:, k])[0, 1] for k in range(smpl.shape[1])
]
plt.plot(corr)

transformations = [
    TriResNet(d_x=1,
              d_epsilon=d_eps,
              epsilon_nu=0.001,
              in_pre_lambda=0.5,
              scale_w=0.4) for _ in range(2**depth - 1)
# Exemplo n.º 5 (scraped example-site separator; the `transformations` list
# above is truncated by the scrape)
# 0
import numpy as np
import matplotlib.pyplot as plt

from modules.distributions import NormalDistribution, BernoulliDistribution
from modules.models import DynamicModel
from modules.dynamics import LorentzTransition
from modules.emissions import SingleCoordinateEmission


# Lorenz-dynamics demo: 3-D latent state, Bernoulli observation of coordinate 0.
T = 40  # time-series length passed to DynamicModel below
dt = 0.02  # step size for LorentzTransition
sigma = np.sqrt(dt)*2.  # transition noise std scales with sqrt(dt)
initial_sigma = 1.
observation_gain = 2.
d_x = 3  # 3 latent variables, matching the Lorenz system's dimensionality
dist = NormalDistribution()
# NOTE(review): the emission distribution is Bernoulli while the emission is the
# gain-scaled first coordinate (real-valued) — presumably BernoulliDistribution
# applies a link internally; verify its parametrization.
bm = DynamicModel(sigma=sigma, initial_sigma=initial_sigma, distribution=dist, d_x=d_x,
                  transition=LorentzTransition(dt=dt),
                  emission=SingleCoordinateEmission(k=0, gain=observation_gain),
                  emission_distribution=BernoulliDistribution(), observation_gain=observation_gain, T=T)
N = 12  # number of trajectories to sample
bm_sample,_ ,_ ,_ ,_ ,_ = bm.sample_timeseries(N)

# Plot the first latent coordinate of every sampled trajectory over time.
plt.plot(np.transpose(bm_sample[:,0,:].detach().numpy()))
plt.show()

X_true, Y, mu =  bm.sample_observations(N)

print(1.)

#bm_obs_sample = bm.sample_observations(1)
from modules.dynamics import LorentzTransition
from modules.emissions import SingleCoordinateEmission
from modules.models import MeanField, MultivariateNormal, GlobalFlow, Autoregressive
from modules.training_tools import variational_update
from modules.eval_utils import evaluate_model
from modules.plot_tools import plot_model
from modules.networks import TriResNet, ASVIupdate, LinearNet, DeepNet

# Defining dynamical and emission model
T = 40  # time-series length passed to DynamicModel below
dt = 0.02
sigma = np.sqrt(dt) * 2.  # transition noise std scales with sqrt(dt)
initial_sigma = 1.
observation_gain = 1.
d_x = 1  #Number of latent variables
dist = NormalDistribution()


def transition_model(x, m):
    """Identity transition (ignores m): the latent follows a pure random walk,
    all dynamics coming from the transition noise.  (Named lambda replaced
    with a def per PEP 8 / E731.)"""
    return x


bm = DynamicModel(sigma=sigma,
                  initial_sigma=initial_sigma,
                  distribution=dist,
                  d_x=d_x,
                  transition=transition_model,
                  emission=SingleCoordinateEmission(k=0,
                                                    gain=observation_gain),
                  emission_distribution=NormalDistribution(0.1),
                  observation_gain=observation_gain,
                  T=T)

num_repetitions = 10
num_iterations = 2000  #10000
batch_size = 50