Example #1
   Still in development -- working out the most natural way of calling fit on
   the product object. So far all this note does is demonstrate that the
   product operator gathers the MLFM objects together and provides a method
   for flattening them back out.

"""
import numpy as np
import matplotlib.pyplot as plt
from pydygp.liealgebras import so
from pydygp.linlatentforcemodels import MLFMAdapGrad
from pydygp.probabilitydistributions import (Normal, Laplace)
from sklearn.gaussian_process.kernels import RBF

N_manifolds = 3

mlfm1 = MLFMAdapGrad(so(2), R=1, lf_kernels=[RBF(), ])
mlfm2 = MLFMAdapGrad(so(2), R=1, lf_kernels=[RBF(), ])
mlfm3 = MLFMAdapGrad(so(2), R=1, lf_kernels=[RBF(), ])

mmlfm = mlfm1 * mlfm2 * mlfm3
mlfms = mmlfm.flatten()
for item in mlfms:
    print(hasattr(item, 'fit'), item.dim.D)

######################################################################
#
# Simulation
# ==========
#
# We simulate from this model by specifying the values of the
# :py:obj:`beta_i` for each of the manifolds defining the product.
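#
# As a sketch of the ingredients (the numerical values below are purely
# illustrative), each so(2) factor gets its own coefficient array with one
# row per latent force plus the offset term and one column per so(2) basis
# element; the product model then receives one such array per manifold.

beta_1 = np.array([[0.], [1.]])
beta_2 = np.array([[0.], [0.7]])
beta_3 = np.array([[-0.3], [1.]])

# one coefficient array per manifold in the product, as in the Gibbs
# product example later in this collection
beta = (beta_1, beta_2, beta_3)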
Example #2
"""
import matplotlib.pyplot as plt
import numpy as np
from pydygp.probabilitydistributions import (GeneralisedInverseGaussian,
                                             InverseGamma, Normal)
from sklearn.gaussian_process.kernels import RBF
from pydygp.liealgebras import so
from pydygp.linlatentforcemodels import (MLFMAdapGrad, GibbsMLFMAdapGrad,
                                         VarMLFMAdapGrad)
np.random.seed(15)
np.set_printoptions(precision=3, suppress=True)
###########################################################################
# Our first step is to initialise the models and then simulate some data.
#
# Make the model
vmlfm = VarMLFMAdapGrad(so(3), R=1, lf_kernels=[
    RBF(),
])
gmlfm = GibbsMLFMAdapGrad(so(3), R=1, lf_kernels=[
    RBF(),
])

beta = np.row_stack(([0.] * 3, np.random.normal(size=3)))

# simulate some initial conditions
x0 = np.random.normal(size=6).reshape(2, 3)
x0 /= np.linalg.norm(x0, axis=1)[:, None]

# Time points to solve the model at
tt = np.linspace(0., 6, 7)
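
# Data and true forces; this mirrors the simulation call used in the
# companion note later in this collection.
Data, g0 = vmlfm.sim(x0, tt, beta=beta, size=2)

# vectorise and stack the data, one column per simulated realisation
Y = np.column_stack([y.T.ravel() for y in Data])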
Example #3
Variational Inference
=====================
"""
import numpy as np
import matplotlib.pyplot as plt
from pydygp.probabilitydistributions import (Normal,
                                             GeneralisedInverseGaussian,
                                             Gamma)
from sklearn.gaussian_process.kernels import RBF
from pydygp.liealgebras import so
from pydygp.linlatentforcemodels import VarMLFMAdapGrad
                                             
np.random.seed(17)
np.set_printoptions(precision=3, suppress=True)

mlfm = VarMLFMAdapGrad(so(3), R=1, lf_kernels=(RBF(), ))

beta = np.row_stack(([0.]*3,
                     np.random.normal(size=3)))

x0 = np.eye(3)

# Time points to solve the model at
tt = np.linspace(0., 6., 9)

# Data and true forces
Data, lf = mlfm.sim(x0, tt, beta=beta, size=3)

# vectorise and stack the data
Y = np.column_stack([y.T.ravel() for y in Data])
Example #4
The :ref:`previous note <tutorials-mlfmsa-motiv-part1>` demonstrated that it
is possible to recover the latent forces by inverting the trajectories formed
by the operator evaluated at the known true values. This is obviously of
limited use, so in this note we expand the construction into an iterative
procedure for estimating the unknown forces.

We set up the model and simulate the data exactly as was done previously

"""
import numpy as np
from pydygp.liealgebras import so
from sklearn.gaussian_process.kernels import RBF
from pydygp.linlatentforcemodels import MLFMSA

mlfm = MLFMSA(so(3), R=1, lf_kernels=[RBF(), ], order=10)
beta = np.array([[0.1, 0., 0.],
                 [-0.5, 0.31, 0.11]])
t1 = np.linspace(0., 5.5, 7)
t2 = np.linspace(0., 5.5, 11)
x0 = np.eye(3)
Y1, g = mlfm.sim(x0[0, :], t1, beta=beta)
Y2, _ = mlfm.sim(x0[1, :], t2, beta=beta, latent_forces=g)

mlfm._setup_times([t1, t2], h=.25, multi_output=True)

#################################################################
#
# We now consider the iterative process for constructing an
# estimate of the latent force
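#
# As a concrete point of reference, the operator itself can be sketched
# directly with numpy.  Everything below is illustrative only: it assumes
# the operator is the Picard (successive-approximation) map suggested by
# the ``order`` argument above, and uses a hypothetical stand-in force
# ``g_fun`` with a simple trapezoidal rule, rather than anything exposed
# by :class:`MLFMSA` itself.

basis = [np.asarray(Ld) for Ld in so(3)]


def g_fun(t):
    # hypothetical latent force, standing in for the simulated force ``g``
    return np.sin(t)


def coeff_matrix(t):
    # A(t) = sum_d beta[0, d] L_d + g(t) * sum_d beta[1, d] L_d
    A0 = sum(b * Ld for b, Ld in zip(beta[0], basis))
    A1 = sum(b * Ld for b, Ld in zip(beta[1], basis))
    return A0 + g_fun(t) * A1


def picard(z0, tt, order):
    # successive approximations z_0, ..., z_M of the trajectory,
    # z_{m+1}(t) = z0 + int_0^t A(s) z_m(s) ds  (trapezoidal quadrature)
    A = np.stack([coeff_matrix(t) for t in tt])
    z = np.tile(z0, (len(tt), 1))                 # z_0(t) = z0 for all t
    for _ in range(order):
        dz = np.einsum('nij,nj->ni', A, z)        # A(t_n) z_m(t_n)
        incr = np.concatenate(
            [[np.zeros_like(z0)],
             np.cumsum(0.5 * (dz[1:] + dz[:-1]) * np.diff(tt)[:, None],
                       axis=0)])
        z = z0 + incr
    return z


z_hat = picard(x0[0, :], np.linspace(0., 5.5, 50), order=10)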
Example #5
import numpy as np
import matplotlib.pyplot as plt
from pydygp.pydygp_examples import MocapExample
from pydygp.liealgebras import so
from pydygp.linlatentforcemodels import MLFMAdapGrad
from pydygp.probabilitydistributions import Normal
from sklearn.gaussian_process.kernels import RBF
from scipy.interpolate import interp1d

Data = MocapExample.load_data('lhumerus')

motions = [str(i) for i in range(1, 6)]

tt = Data['times']
Y = np.column_stack([Data[m].T.ravel() for m in motions])

mlfm = MLFMAdapGrad(so(4), R=2, lf_kernels=[
    RBF(),
] * 2)

# Define the priors
beta_prior = Normal(scale=5) * ((mlfm.dim.R + 1) * mlfm.dim.D)
map_res = mlfm.fit(tt, Y, beta_prior=beta_prior, logtau_is_fixed=False)

fig = plt.figure()
for k in range(mlfm.dim.K):
    ax = fig.add_subplot(2, 2, k + 1)
    for m in motions:
        ax.plot(Data['times'], Data[m][:, k], '+')
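
##############################################################################
# The fitted latent forces can be evaluated on a dense grid with
# ``predict_lf``, as in the basic MAP tutorial.  The reshaping below is a
# sketch only: it assumes ``predict_lf`` returns the posterior mean of the
# forces evaluated at the requested times.
ttd = np.linspace(tt[0], tt[-1], 100)
gpred = np.asarray(mlfm.predict_lf(ttd)).squeeze()
if gpred.ndim == 1:
    gpred = gpred[None, :]            # a single force
elif gpred.shape[0] == ttd.size:
    gpred = gpred.T                   # (n_times, R) -> (R, n_times)

fig2, ax2 = plt.subplots()
for r, g in enumerate(gpred):
    ax2.plot(ttd, g, label='latent force {}'.format(r + 1))
ax2.legend()
plt.show()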

##############################################################################
#
Example #6
Gibbs Sampling in the Product Model
===================================
"""
import numpy as np
import matplotlib.pyplot as plt
from pydygp.liealgebras import so
from pydygp.linlatentforcemodels import GibbsMLFMAdapGrad
from pydygp.probabilitydistributions import (Normal, Laplace)
from sklearn.gaussian_process.kernels import RBF



N_manifolds = 2

mlfm1 = GibbsMLFMAdapGrad(so(2), R=1, lf_kernels=[RBF(), ])
mlfm2 = GibbsMLFMAdapGrad(so(2), R=1, lf_kernels=[RBF(), ])

mmlfm = mlfm1 * mlfm2


x0_0 = [1., 0.]
x0_0_1 = [0., 1.]
x0_1 = [0., 1.]
beta_0 = np.array([[0., ], [1, ]])
beta_1 = np.array([[-0.3, ], [1, ]])

beta = (beta_0, beta_1, )

tt = np.linspace(0., 5., 7)
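
# The product object's own simulation interface is not exercised here; as a
# sketch, each so(2) factor can be simulated separately with its own initial
# condition and coefficient array through the shared ``sim`` method.
y1, g1 = mlfm1.sim(np.asarray(x0_0), tt, beta=beta_0)
y2, g2 = mlfm2.sim(np.asarray(x0_1), tt, beta=beta_1)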
Example #7
"""
Variational Inference
=====================
"""
import numpy as np
import matplotlib.pyplot as plt
from pydygp.probabilitydistributions import (Normal,
                                             GeneralisedInverseGaussian, Gamma)
from sklearn.gaussian_process.kernels import RBF
from pydygp.liealgebras import so
from pydygp.linlatentforcemodels import VarMLFMAdapGrad

np.random.seed(17)
np.set_printoptions(precision=3, suppress=True)

mlfm = VarMLFMAdapGrad(so(3), R=1, lf_kernels=(RBF(), ))

beta = np.row_stack(([0.] * 3, np.random.normal(size=3)))

x0 = np.eye(3)

# Time points to solve the model at
tt = np.linspace(0., 6., 9)

# Data and true forces
Data, lf = mlfm.sim(x0, tt, beta=beta, size=3)

# vectorise and stack the data
Y = np.column_stack([y.T.ravel() for y in Data])

logpsi_prior = GeneralisedInverseGaussian(a=5, b=5, p=-1).logtransform()
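
# Priors for the remaining parameters follow the pattern used in the MAP
# examples in this collection, for instance a product of independent Normal
# priors over the entries of beta; the exact keywords eventually passed to
# the variational fit are not shown here.
beta_prior = Normal(scale=5) * ((mlfm.dim.R + 1) * mlfm.dim.D)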
Example #8
   Still in development -- working out the most natural way of calling fit on
   the product object. So far all this note does is demonstrate that the
   product operator gathers the MLFM objects together and provides a method
   for flattening them back out.

"""
import numpy as np
import matplotlib.pyplot as plt
from pydygp.liealgebras import so
from pydygp.linlatentforcemodels import MLFMAdapGrad
from pydygp.probabilitydistributions import (Normal, Laplace)
from sklearn.gaussian_process.kernels import RBF

N_manifolds = 3

mlfm1 = MLFMAdapGrad(so(2), R=1, lf_kernels=[
    RBF(),
])
mlfm2 = MLFMAdapGrad(so(2), R=1, lf_kernels=[
    RBF(),
])
mlfm3 = MLFMAdapGrad(so(2), R=1, lf_kernels=[
    RBF(),
])

mmlfm = mlfm1 * mlfm2 * mlfm3
mlfms = mmlfm.flatten()
for item in mlfms:
    print(hasattr(item, 'fit'), item.dim.D)

######################################################################
Example #9
import numpy as np
from pydygp.probabilitydistributions import (GeneralisedInverseGaussian,
                                             InverseGamma,
                                             Normal)
from sklearn.gaussian_process.kernels import RBF
from pydygp.liealgebras import so
from pydygp.linlatentforcemodels import (MLFMAdapGrad,
                                         GibbsMLFMAdapGrad,
                                         VarMLFMAdapGrad)
np.random.seed(15)
np.set_printoptions(precision=3, suppress=True)
###########################################################################
# Our first step is to initialise the models and then simulate some data.
#
# Make the model
vmlfm = VarMLFMAdapGrad(so(3), R=1, lf_kernels=[RBF(),])
gmlfm = GibbsMLFMAdapGrad(so(3), R=1, lf_kernels=[RBF(),])

beta = np.row_stack(([0.]*3,
                     np.random.normal(size=3)))

# simulate some initial conditions
x0 = np.random.normal(size=6).reshape(2, 3)
x0 /= np.linalg.norm(x0, axis=1)[:, None]

# Time points to solve the model at
tt = np.linspace(0., 6, 7)

# Data and true forces
Data, g0 = vmlfm.sim(x0, tt, beta=beta, size=2)
# vectorise and stack the data
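# (one column per simulated realisation, as in the other notes)
Y = np.column_stack([y.T.ravel() for y in Data])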
Example #10
# representation of the coefficient matrix
#
# .. math::
#
#     \mathbf{A}(t) = \sum_{d=1}^3 \beta_{0d}\mathbf{L}_d +
#     \sum_{r=1}^R g_r(t) \sum_{d=1}^3 \beta_{rd}\mathbf{L}_d,
#
# where :math:`\{\mathbf{L}_d \}` is a basis of the Lie algebra
# :math:`\mathfrak{so}(3)`, and the collection :math:`\{ g_r(t) \}_{r=1}^{R}`
# is a set of smooth Gaussian processes. To construct this model in the
# :py:obj:`pydygp` package we provide the :class:`pydygp.liealgebras.so` class,
# which can be used to return basis elements of the Lie algebra of the special
# orthogonal group :math:`SO(n)`. For example, importing and calling
# :py:obj:`so(3)` gives the following output
from pydygp.liealgebras import so
for d, item in enumerate(so(3)):
    print('\nL{}'.format(d + 1))
    print(item)
##############################################################################
# Having imported the basis matrices for the Lie algebra, we also need to
# choose how many latent forces we want the model to have. For this example
# we are going to consider a single latent force with an RBF kernel. We can
# then construct the :class:`MLFMAdapGrad` object

# Tuple of kernel objects for the latent forces, for r=1,..., R
lf_kernels = (RBF(), )

# construct the MLFM object
mlfm = MLFMAdapGrad(so(3), R=1, lf_kernels=lf_kernels)
##############################################################################
#
Example #11
---------------------
We take a break from the model to discuss how to start putting some of
the ideas discussed above into code. For the Kalman Filter we are going
to use the code in the
`PyKalman package <https://pykalman.github.io/>`_, hacked a little to
allow for filtering and smoothing of independent sequences with a
common transition matrix.
"""
import numpy as np
from pydygp.liealgebras import so
from sklearn.gaussian_process.kernels import RBF
from pydygp.linlatentforcemodels import MLFMSA

np.random.seed(123)

mlfm = MLFMSA(so(3), R=1, lf_kernels=[
    RBF(),
], order=10)
beta = np.array([[0.1, 0., 0.], [-0.5, 0.31, 0.11]])
tt = np.linspace(0., 6., 7)
x0 = np.eye(3)
Data, g = mlfm.sim(x0, tt, beta=beta, size=3)
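
######################################
# As a point of reference, the stock :py:obj:`pykalman` interface smooths a
# single sequence at a time; the "hack" mentioned above amounts to sharing
# one transition matrix across several independent sequences.  The sketch
# below shows only the unmodified usage this builds on -- every numerical
# value in it is illustrative and unrelated to the MLFM simulation above.
from pykalman import KalmanFilter

A_trans = np.array([[1., 0.1],
                    [0., 1.]])          # common transition matrix
C_obs = np.eye(2)                       # observe the state directly

kf = KalmanFilter(transition_matrices=A_trans,
                  observation_matrices=C_obs,
                  transition_covariance=0.01 * np.eye(2),
                  observation_covariance=0.1 * np.eye(2),
                  initial_state_mean=np.zeros(2),
                  initial_state_covariance=np.eye(2))

# three independent observation sequences, each of length 20
sequences = [np.random.randn(20, 2) for _ in range(3)]

# smoothing each sequence with the same KalmanFilter object is the simplest
# way of imposing a common transition matrix
smoothed_means = [kf.smooth(seq)[0] for seq in sequences]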

######################################
# Expectation Maximisation
# ------------------------
#
# So we have introduced a large collection of uninteresting latent variables,
# the set of successive approximations :math:`\{ z_0, \ldots, z_M \}`, and
# so we need to integrate them out. If we define the statistics
#
Example #12
   \dot{\mathbf{x}}(t) = \mathbf{A}(t)\mathbf{x}(t)

We do the usual imports and generate some simulated data
"""
import numpy as np
import matplotlib.pyplot as plt
from pydygp.probabilitydistributions import (Normal,
                                             GeneralisedInverseGaussian,
                                             ChiSquare, Gamma, InverseGamma)
from sklearn.gaussian_process.kernels import RBF
from pydygp.liealgebras import so
from pydygp.linlatentforcemodels import GibbsMLFMAdapGrad

np.random.seed(15)

gmlfm = GibbsMLFMAdapGrad(so(3), R=1, lf_kernels=(RBF(), ))

beta = np.row_stack(([0.] * 3, np.random.normal(size=3)))

x0 = np.eye(3)

# Time points to solve the model at
tt = np.linspace(0., 6., 9)

# Data and true forces
Data, lf = gmlfm.sim(x0, tt, beta=beta, size=3)

# vectorise and stack the data
Y = np.column_stack([y.T.ravel() for y in Data])

logpsi_prior = GeneralisedInverseGaussian(a=5, b=5, p=-1).logtransform()
Example #13
import numpy as np
import matplotlib.pyplot as plt
from pydygp.pydygp_examples import MocapExample
from pydygp.liealgebras import so
from pydygp.linlatentforcemodels import MLFMAdapGrad
from pydygp.probabilitydistributions import Normal
from sklearn.gaussian_process.kernels import RBF
from scipy.interpolate import interp1d


Data = MocapExample.load_data('lhumerus')

motions = [str(i) for i in range(1, 6)]

tt = Data['times']
Y = np.column_stack([Data[m].T.ravel() for m in motions])


mlfm = MLFMAdapGrad(so(4), R=2, lf_kernels=[RBF(),]*2)

# Define the priors
beta_prior = Normal(scale=5)*((mlfm.dim.R+1)*mlfm.dim.D)
map_res = mlfm.fit(tt, Y,
                   beta_prior=beta_prior,
                   logtau_is_fixed=False)

fig = plt.figure()
for k in range(mlfm.dim.K):
    ax = fig.add_subplot(2, 2, k+1)
    for m in motions:
        ax.plot(Data['times'], Data[m][:, k], '+')

##############################################################################
#
Example #14
# representation of the coefficient matrix
#
# .. math::
#
#     \mathbf{A}(t) = \sum_{d=1}^3 \beta_{0d}\mathbf{L}_d +
#     \sum_{r=1}^R g_r(t) \sum_{d=1}^3 \beta_{rd}\mathbf{L}_d,
#
# where :math:`\{\mathbf{L}_d \}` is a basis of the Lie algebra
# :math:`\mathfrak{so}(3)`, and the collection :math:`\{ g_r(t) \}_{r=1}^{R}`
# is a set of smooth Gaussian processes. To construct this model in the
# :py:obj:`pydygp` package we provide the :class:`pydygp.liealgebras.so` class,
# which can be used to return basis elements of the Lie algebra of the special
# orthogonal group :math:`SO(n)`. For example, importing and calling
# :py:obj:`so(3)` gives the following output
from pydygp.liealgebras import so
for d, item in enumerate(so(3)):
    print('\nL{}'.format(d + 1))
    print(item)
##############################################################################
# Having imported the basis matrices for the Lie algebra, we also need to
# choose how many latent forces we want the model to have. For this example
# we are going to consider a single latent force with an RBF kernel. We can
# then construct the :class:`MLFMAdapGrad` object

# Tuple of kernel objects for the latent forces, for r=1,..., R
lf_kernels = (RBF(), )

# construct the MLFM object
mlfm = MLFMAdapGrad(so(3), R=1, lf_kernels=lf_kernels)
##############################################################################
# 
Example #15
"""
import numpy as np
import matplotlib.pyplot as plt
from pydygp.probabilitydistributions import (Normal,
                                             GeneralisedInverseGaussian,
                                             ChiSquare,
                                             Gamma,
                                             InverseGamma)
from sklearn.gaussian_process.kernels import RBF
from pydygp.liealgebras import so
from pydygp.linlatentforcemodels import GibbsMLFMAdapGrad

np.random.seed(15)


gmlfm = GibbsMLFMAdapGrad(so(3), R=1, lf_kernels=(RBF(), ))

beta = np.row_stack(([0.]*3,
                     np.random.normal(size=3)))

x0 = np.eye(3)

# Time points to solve the model at
tt = np.linspace(0., 6., 9)

# Data and true forces
Data, lf = gmlfm.sim(x0, tt, beta=beta, size=3)

# vectorise and stack the data
Y = np.column_stack([y.T.ravel() for y in Data])
Example #16
Linear Gaussian Model
---------------------
We take a break from the model to discuss how to start putting some of
the ideas discussed above into code. For the Kalman Filter we are going
to use the code in the
`PyKalman package <https://pykalman.github.io/>`_, hacked a little to
allow for filtering and smoothing of independent sequences with a
common transition matrix.
"""
import numpy as np
from pydygp.liealgebras import so
from sklearn.gaussian_process.kernels import RBF
from pydygp.linlatentforcemodels import MLFMSA
np.random.seed(123)

mlfm = MLFMSA(so(3), R=1, lf_kernels=[RBF(), ], order=10)
beta = np.array([[0.1, 0., 0.],
                 [-0.5, 0.31, 0.11]])
tt = np.linspace(0., 6., 7)
x0 = np.eye(3)
Data, g = mlfm.sim(x0, tt, beta=beta, size=3)

######################################
# Expectation Maximisation
# ------------------------
#
# So we have introduced a large collection of uninteresting latent variables,
# the set of successive approximations :math:`\{ z_0, \ldots, z_M \}`, and
# so we need to integrate them out. If we define the statistics
#
# .. math::
Example #17
===============

This note continues on from the :ref:`basic MAP tutorial<tutorials-mlfm-ag>`,
examining the Adaptive Gradient matching approximation to the MLFM.

"""
import numpy as np
import matplotlib.pyplot as plt
from pydygp.liealgebras import so
from pydygp.linlatentforcemodels import MLFMAdapGrad
from pydygp.probabilitydistributions import Normal, Laplace
from sklearn.gaussian_process.kernels import RBF

np.random.seed(12345)

mlfm = MLFMAdapGrad(so(2), R=1, lf_kernels=(RBF(), ))


x0 = np.array([1., 0.])
beta = np.array([[0., ], [1., ]])

ttd = np.linspace(0., 5., 100)
data, lf = mlfm.sim(x0, ttd, beta=beta)

tt = ttd[::10]
Y = data[::10, :]

mapres = mlfm.fit(tt, Y.T.ravel(),
                  logpsi_is_fixed=True,
                  beta_is_fixed=True, beta0=beta)
gpred = mlfm.predict_lf(ttd)
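
##############################################################################
# A quick visual check of the recovered force on the dense grid.  This is a
# sketch only: it assumes ``predict_lf`` returns the posterior mean of the
# force evaluated at the requested times.
fig, ax = plt.subplots()
ax.plot(ttd, np.asarray(gpred).squeeze(), label='estimated latent force')
ax.legend()
plt.show()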