Example 1
 def __init__(self, dim, a=None, b=None, c=None, D=None, Q=None, name=None):
     _Q = np.eye(dim) * 0.5 if Q is None else Q
     super().__init__(dim=dim, Q=_Q, name=name)
     self.a = Param(np.ones(self.dim) * 0.8 if a is None else a)
     self.b = Param(np.ones(self.dim) * 0.2 if b is None else b)
     self.c = Param(np.ones(self.dim) * 5.0 if c is None else c)
     self.D = Param(np.eye(self.dim) * 2.0 if D is None else D)
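Each parameter above uses the `default if arg is None else arg` idiom rather than `arg or default`, which would raise for numpy arrays (their truth value is ambiguous). A runnable sketch of the idiom with illustrative values:

import numpy as np

def default_or(value, default):
    # Mirrors the `default if value is None else value` idiom used above;
    # `value or default` would raise for numpy arrays (ambiguous truth value).
    return default if value is None else value

Q = default_or(None, np.eye(3) * 0.5)              # falls back to the scaled identity
a = default_or(np.full(3, 0.9), np.ones(3) * 0.8)  # an explicit value wins
assert np.allclose(Q, np.eye(3) * 0.5) and np.allclose(a, 0.9)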
Example 2
    def __init__(self, state_dim, state_idxs=None, W=None, t=None):
        Cost.__init__(self)
        self.state_dim = state_dim
        if W is not None:
            self.W = Param(np.reshape(W, (state_dim, state_dim)),
                           trainable=False)
        else:
            self.W = Param(np.ones((state_dim, state_dim)), trainable=False)
        if t is not None:
            self.t = Param(np.reshape(t, (1, state_dim)), trainable=False)
        else:
            self.t = Param(np.zeros((1, state_dim)), trainable=False)

        if state_idxs is None:
            state_idxs = np.arange(state_dim)  # default: track every state dimension

        self.m_idxs = []
        self.s_idxs = []

        for index in state_idxs:
            self.m_idxs.append([0, index])
            element = []
            for j in state_idxs:
                element.append([index, j])
            self.s_idxs.append(element)
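The double loop at the end builds gather indices: for each tracked state index i, m_idxs picks entry [0, i] of the (1, state_dim) mean, and s_idxs picks the full [i, j] block of the covariance. A standalone check with hypothetical indices:

state_idxs = [0, 2]   # illustrative: track dimensions 0 and 2 of a 3-D state
m_idxs = [[0, i] for i in state_idxs]
s_idxs = [[[i, j] for j in state_idxs] for i in state_idxs]
assert m_idxs == [[0, 0], [0, 2]]
assert s_idxs == [[[0, 0], [0, 2]], [[2, 0], [2, 2]]]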
Example 3
    def __init__(self, dim, input_dim=0, kern=None, Z=None, n_ind_pts=100,
                 mean_fn=None, Q_diag=None, Umu=None, Ucov_chol=None,
                 jitter=gps.numerics.jitter_level, name=None):
        super().__init__(name=name)
        self.OBSERVATIONS_AS_INPUT = False
        self.dim = dim
        self.input_dim = input_dim
        self.jitter = jitter

        self.Q_sqrt = Param(np.ones(self.dim) if Q_diag is None else Q_diag ** 0.5, transform=gtf.positive)

        self.n_ind_pts = n_ind_pts if Z is None else (Z[0].shape[-2] if isinstance(Z, list) else Z.shape[-2])

        if isinstance(Z, np.ndarray) and Z.ndim == 2:
            self.Z = mf.SharedIndependentMof(gp.features.InducingPoints(Z))
        else:
            Z_list = [np.random.randn(self.n_ind_pts, self.dim + self.input_dim)
                      for _ in range(self.dim)] if Z is None else [z for z in Z]
            self.Z = mf.SeparateIndependentMof([gp.features.InducingPoints(z) for z in Z_list])

        if isinstance(kern, gp.kernels.Kernel):
            self.kern = mk.SharedIndependentMok(kern, self.dim)
        else:
            kern_list = kern or [gp.kernels.Matern32(self.dim + self.input_dim, ARD=True) for _ in range(self.dim)]
            self.kern = mk.SeparateIndependentMok(kern_list)

        self.mean_fn = mean_fn or mean_fns.Identity(self.dim)
        self.Umu = Param(np.zeros((self.dim, self.n_ind_pts)) if Umu is None else Umu)  # Lm^-1(Umu - m(Z))
        transform = gtf.LowerTriangular(self.n_ind_pts, num_matrices=self.dim, squeeze=False)
        self.Ucov_chol = Param(np.tile(np.eye(self.n_ind_pts)[None, ...], [self.dim, 1, 1])
                               if Ucov_chol is None else Ucov_chol, transform=transform)  # Lm^-1 Lu
        self._Kzz = None
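The Z handling above dispatches between gpflow's shared and separate multi-output features: a single 2-D array is reused for every output dimension, while None (or a list) yields one inducing-point set per dimension. The same dispatch in a gpflow-free, runnable sketch:

import numpy as np

def pick_inducing_structure(Z, dim, input_dim, n_ind_pts):
    # A single (M, D) array means one set of inducing points shared across outputs.
    if isinstance(Z, np.ndarray) and Z.ndim == 2:
        return 'shared', [Z]
    # Otherwise: one set per output dimension (randomly initialized if absent).
    Z_list = ([np.random.randn(n_ind_pts, dim + input_dim) for _ in range(dim)]
              if Z is None else [z for z in Z])
    return 'separate', Z_list

kind, zs = pick_inducing_structure(None, dim=2, input_dim=1, n_ind_pts=5)
assert kind == 'separate' and len(zs) == 2 and zs[0].shape == (5, 3)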
Example 4
 def __init__(self, dim, input_dim=None, A=None, B=None, C=None, D=None, Q=None, name=None):
     _input_dim = input_dim or dim
     _Q = np.eye(dim) * np.sqrt(10.) if Q is None else Q
     super().__init__(dim=dim, input_dim=_input_dim, Q=_Q, name=name)
     self.A = Param(np.eye(self.dim) * 0.5 if A is None else A)
     self.B = Param(np.eye(self.dim) * 25. if B is None else B)
     self.C = Param(np.eye(self.dim) * 8.0 if C is None else C)
     self.D = Param(np.eye(self.dim, self.input_dim) * 1.2 if D is None else D)
Example 5
 def __init__(self, latent_dim, input_dim, A=None, B=None, C=None, d=None, Q=None, name=None):
     _Q = np.eye(latent_dim) * 0.2 if Q is None else Q
     super().__init__(dim=latent_dim, input_dim=input_dim, Q=_Q, name=name)
     self.OBSERVATIONS_AS_INPUT = True
     self.A = Param(np.eye(self.dim) * 0.2 if A is None else A)
     self.B = Param(np.eye(self.dim, self.input_dim) * (-0.2) if B is None else B)
     self.C = Param(np.eye(self.dim, self.input_dim) * 0.1 if C is None else C)
     self.d = Param(np.zeros(self.dim) if d is None else d)
Example 6
    def _build_encoder(self, layers):
        Ws, bs = [], []
        dims = [self.X_dim + self.Y_dim] + layers + [self.latent_dim * 2]
        for dim_in, dim_out in zip(dims[:-1], dims[1:]):
            init_xavier_std = (2.0 / (dim_in + dim_out)) ** 0.5
            Ws.append(Param((np.random.randn(dim_in, dim_out) * init_xavier_std).astype(np.float32)))
            bs.append(Param(np.zeros(dim_out).astype(np.float32)))

        self.Ws, self.bs = ParamList(Ws), ParamList(bs)
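The encoder weights use Xavier/Glorot initialization, std = sqrt(2 / (dim_in + dim_out)), chosen so that signal variance neither explodes nor vanishes as it passes through the stack. A quick numpy check of the scale, with illustrative dimensions:

import numpy as np

dim_in, dim_out = 256, 128
std = (2.0 / (dim_in + dim_out)) ** 0.5            # Xavier/Glorot scaling
W = (np.random.randn(dim_in, dim_out) * std).astype(np.float32)
x = np.random.randn(10000, dim_in).astype(np.float32)
# Per-unit output variance is dim_in * std**2 = 2 * dim_in / (dim_in + dim_out),
# i.e. the same order as the unit-variance input.
print(np.var(x @ W))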
Example 7
 def __init__(self, dim, input_dim=0, Q=None, name=None):
     super().__init__(name=name)
     self.OBSERVATIONS_AS_INPUT = False
     self.dim = dim
     self.input_dim = input_dim
     if Q is None or Q.ndim == 2:
         self.Qchol = Param(np.eye(self.dim) if Q is None else np.linalg.cholesky(Q),
                            gtf.LowerTriangular(self.dim, squeeze=True))
     elif Q.ndim == 1:
         self.Qchol = Param(Q ** 0.5)
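Example 7 parameterizes the noise covariance by its Cholesky factor when Q is a full matrix (kept valid by the LowerTriangular transform) and by an element-wise square root when Q is a 1-D diagonal. A numpy sketch of the two storage conventions:

import numpy as np

Q_full = np.array([[2.0, 0.5],
                   [0.5, 1.0]])
L = np.linalg.cholesky(Q_full)
assert np.allclose(L @ L.T, Q_full)               # full case: store L, recover Q = L @ L.T

Q_diag = np.array([2.0, 1.0])                     # diagonal case: store element-wise sqrt
assert np.allclose((Q_diag ** 0.5) ** 2, Q_diag)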
Example 8
 def __init__(self, state_dim, W=None, t=None):
     Reward.__init__(self)
     self.state_dim = state_dim
     if W is not None:
         self.W = Param(np.reshape(W, (state_dim, state_dim)), trainable=False)
     else:
         self.W = Param(np.eye(state_dim), trainable=False)
     if t is not None:
         self.t = Param(np.reshape(t, (1, state_dim)), trainable=False)
     else:
         self.t = Param(np.zeros((1, state_dim)), trainable=False)
Example 9
 def __init__(self,
              input_dim,
              variance=1.0,
              frequency=np.array([1.0, 1.0]),
              lengthscale=1.0,
              correlation=0.0,
              max_freq=1.0,
              active_dims=None):
     assert input_dim == 1  # the derivations are valid only for one-dimensional input
     Kernel.__init__(self, input_dim=input_dim, active_dims=active_dims)
     self.variance = Param(variance, transforms.positive)
     self.frequency = Param(frequency, transforms.Logistic(0.0, max_freq))
     self.lengthscale = Param(lengthscale, transforms.positive)
     correlation = np.clip(correlation, 1e-4,
                           1 - 1e-4)  # clip for numerical reasons
     self.correlation = Param(correlation, transforms.Logistic())
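The clip on correlation exists because Logistic(a, b) reaches its endpoints only at infinite unconstrained values, so initializing at exactly 0 or 1 would be numerically ill-defined. A sketch of the transform pair, assuming the standard gpflow-style sigmoid definition:

import numpy as np

def logistic_forward(x, a=0.0, b=1.0):
    # Maps an unconstrained value onto the open interval (a, b).
    return a + (b - a) / (1.0 + np.exp(-x))

def logistic_backward(y, a=0.0, b=1.0):
    # Inverse map; diverges as y approaches either endpoint.
    return -np.log((b - a) / (y - a) - 1.0)

y = np.clip(1.0, 1e-4, 1 - 1e-4)  # same clipping as above
assert np.isfinite(logistic_backward(y))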
Example 10
 def __init__(self,
              input_dim,
              variance=1.0,
              lengthscales=None,
              frequency=1.0,
              active_dims=None,
              ARD=False):
     Stationary.__init__(self,
                         input_dim=input_dim,
                         variance=variance,
                         lengthscales=lengthscales,
                         active_dims=active_dims,
                         ARD=ARD)
     self.frequency = Param(frequency,
                            transforms.positive,
                            dtype=float_type)
     self.frequency.prior = gpflow.priors.Exponential(1.0)
     self.variance.prior = gpflow.priors.LogNormal(0, 1)
Example 11
 def __init__(self, state_dim):
     Reward.__init__(self)
     self.state_dim = state_dim
     self.W = Param(np.random.rand(state_dim, state_dim), trainable=False)
     self.t = Param(np.random.rand(1, state_dim), trainable=False)
Example 12
 def __init__(self, state_dim, W):
     Reward.__init__(self)
     self.state_dim = state_dim
     self.W = Param(np.reshape(W, (state_dim, 1)), trainable=False)
Example 13
 def __init__(self, state_dim):
     Reward.__init__(self)
     self.state_dim = state_dim
     self.W = Param(np.ones((state_dim, state_dim)), trainable=False)
     self.t = Param(np.zeros((1, state_dim)), trainable=False)
Example 14
 def to_param_list(var_list, name):
     param_list = []
     for idx, var in enumerate(var_list):
         name_idx = '{name}_{idx}'.format(name=name, idx=idx)
         param_list.append(Param(var, dtype=float_type, name=name_idx))
     return ParamList(param_list)
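A hypothetical usage of to_param_list, wrapping a list of arrays into individually named Params (Param, ParamList, and float_type as in the surrounding snippets):

import numpy as np

layers = [np.random.randn(4, 8), np.random.randn(8, 2)]  # illustrative arrays
weights = to_param_list(layers, name='W')
# weights is a ParamList holding Params named 'W_0' and 'W_1'.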
Example 15
    def __init__(self,
                 latent_dim,
                 Y,
                 inputs=None,
                 emissions=None,
                 px1_mu=None,
                 px1_cov=None,
                 kern=None,
                 Z=None,
                 n_ind_pts=100,
                 mean_fn=None,
                 Q_diag=None,
                 Umu=None,
                 Ucov_chol=None,
                 qx1_mu=None,
                 qx1_cov=None,
                 As=None,
                 bs=None,
                 Ss=None,
                 n_samples=100,
                 batch_size=None,
                 chunking=False,
                 seed=None,
                 parallel_iterations=10,
                 jitter=gp.settings.numerics.jitter_level,
                 name=None):

        super().__init__(latent_dim,
                         Y[0],
                         inputs=None if inputs is None else inputs[0],
                         emissions=emissions,
                         px1_mu=px1_mu,
                         px1_cov=None,
                         kern=kern,
                         Z=Z,
                         n_ind_pts=n_ind_pts,
                         mean_fn=mean_fn,
                         Q_diag=Q_diag,
                         Umu=Umu,
                         Ucov_chol=Ucov_chol,
                         qx1_mu=qx1_mu,
                         qx1_cov=None,
                         As=None,
                         bs=None,
                         Ss=False if Ss is False else None,
                         n_samples=n_samples,
                         seed=seed,
                         parallel_iterations=parallel_iterations,
                         jitter=jitter,
                         name=name)

        self.T = [Y_s.shape[0] for Y_s in Y]
        self.T_tf = tf.constant(self.T, dtype=gp.settings.int_type)
        self.max_T = max(self.T)
        self.sum_T = float(sum(self.T))
        self.n_seq = len(self.T)
        self.batch_size = batch_size
        self.chunking = chunking

        if self.batch_size is None:
            self.Y = ParamList(Y, trainable=False)
        else:
            _Y = np.stack([
                np.concatenate(
                    [Ys, np.zeros((self.max_T - len(Ys), self.obs_dim))])
                for Ys in Y
            ])
            self.Y = Param(_Y, trainable=False)

        if inputs is not None:
            if self.batch_size is None:
                self.inputs = ParamList(inputs, trainable=False)
            else:
                desired_length = self.max_T if self.chunking else self.max_T - 1
                _inputs = [
                    np.concatenate([
                        inputs[s],
                        np.zeros(
                            (desired_length - len(inputs[s]), self.input_dim))
                    ]) for s in range(self.n_seq)
                ]  # pad the inputs
                self.inputs = Param(_inputs, trainable=False)

        if qx1_mu is None:
            self.qx1_mu = Param(np.zeros((self.n_seq, self.latent_dim)))

        self.qx1_cov_chol = Param(
            np.tile(np.eye(self.latent_dim)[None, ...], [self.n_seq, 1, 1])
            if qx1_cov is None else np.linalg.cholesky(qx1_cov),
            transform=gtf.LowerTriangular(self.latent_dim,
                                          num_matrices=self.n_seq))

        _As = [np.ones((T_s - 1, self.latent_dim))
               for T_s in self.T] if As is None else As
        _bs = [np.zeros((T_s - 1, self.latent_dim))
               for T_s in self.T] if bs is None else bs
        if Ss is not False:
            _S_chols = [np.tile(self.Q_sqrt.value.copy()[None, ...], [T_s - 1, 1]) for T_s in self.T] if Ss is None \
                else [np.sqrt(S) if S.ndim == 2 else np.linalg.cholesky(S) for S in Ss]

        if self.batch_size is None:
            self.As = ParamList(_As)
            self.bs = ParamList(_bs)
            if Ss is not False:
                self.S_chols = ParamList([
                    Param(Sc,
                          transform=gtf.positive if Sc.ndim == 2 else
                          gtf.LowerTriangular(self.latent_dim,
                                              num_matrices=Sc.shape[0]))
                    for Sc in _S_chols
                ])
        else:
            _As = np.stack([
                np.concatenate(
                    [_A,
                     np.zeros((self.max_T - len(_A) - 1, *_A.shape[1:]))])
                for _A in _As
            ])
            _bs = np.stack([
                np.concatenate([
                    _b,
                    np.zeros((self.max_T - len(_b) - 1, self.latent_dim))
                ]) for _b in _bs
            ])
            self.As = Param(_As)
            self.bs = Param(_bs)
            if Ss is not False:
                _S_chols = [
                    np.concatenate([
                        _S,
                        np.zeros((self.max_T - len(_S) - 1, *_S.shape[1:]))
                    ]) for _S in _S_chols
                ]
                _S_chols = np.stack(_S_chols)
                self.S_chols = Param(_S_chols, transform=gtf.positive if _S_chols.ndim == 3 else \
                    gtf.LowerTriangular(self.latent_dim, num_matrices=(self.n_seq, self.max_T - 1)))

        self.multi_diag_px1_cov = False
        if isinstance(px1_cov, list):  # different prior for each sequence
            _x1_cov = np.stack(px1_cov)
            _x1_cov = np.sqrt(
                _x1_cov) if _x1_cov.ndim == 2 else np.linalg.cholesky(_x1_cov)
            _transform = None if _x1_cov.ndim == 2 else gtf.LowerTriangular(
                self.latent_dim, num_matrices=self.n_seq)
            self.multi_diag_px1_cov = _x1_cov.ndim == 2
        elif isinstance(px1_cov, np.ndarray):  # same prior for each sequence
            assert px1_cov.ndim < 3
            _x1_cov = np.sqrt(
                px1_cov) if px1_cov.ndim == 1 else np.linalg.cholesky(px1_cov)
            _transform = None if px1_cov.ndim == 1 else gtf.LowerTriangular(
                self.latent_dim, squeeze=True)

        self.px1_cov_chol = None if px1_cov is None else Param(
            _x1_cov, trainable=False, transform=_transform)

        if self.chunking:
            px1_mu_check = len(self.px1_mu.shape) == 1
            px1_cov_check_1 = not self.multi_diag_px1_cov
            px1_cov_check_2 = self.px1_cov_chol is None or len(
                self.px1_cov_chol.shape) < 3
            assert px1_mu_check and px1_cov_check_1 and px1_cov_check_2, \
                'Only one prior over x1 allowed for chunking'
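When batch_size is set, Example 15 zero-pads every sequence to the longest length max_T so that ragged sequences stack into one dense array; the true lengths kept in self.T_tf let downstream code mask out the padding. The padding step in isolation, with hypothetical shapes:

import numpy as np

obs_dim = 3
Y = [np.random.randn(T_s, obs_dim) for T_s in (5, 7, 6)]  # ragged sequences
max_T = max(len(Ys) for Ys in Y)
Y_padded = np.stack([np.concatenate([Ys, np.zeros((max_T - len(Ys), obs_dim))])
                     for Ys in Y])
assert Y_padded.shape == (3, 7, 3)  # (n_seq, max_T, obs_dim)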
Example 16
 def __init__(self, state_dim, x_goal, Q):
     Reward.__init__(self)
     self.state_dim = state_dim
     self.Q = Param(np.reshape(Q, (state_dim, state_dim)), trainable=False)
     self.x_goal = Param(np.reshape(x_goal, (1, state_dim)),
                         trainable=False)
Example 17
    def __init__(self,
                 X,
                 Y,
                 kernf,
                 kerng,
                 likelihood,
                 Zf,
                 Zg,
                 mean_function=None,
                 minibatch_size=None,
                 name='model'):
        Model.__init__(self, name)
        self.mean_function = mean_function or Zero()
        self.kernf = kernf
        self.kerng = kerng
        self.likelihood = likelihood
        self.whiten = False
        self.q_diag = True

        # save initial attributes for future plotting purposes
        Xtrain = DataHolder(X)
        Ytrain = DataHolder(Y)
        self.Xtrain, self.Ytrain = Xtrain, Ytrain

        # sort out the X, Y into MiniBatch objects.
        if minibatch_size is None:
            minibatch_size = X.shape[0]
        self.num_data = X.shape[0]
        self.num_latent = Y.shape[1]  # num_latent will be 1
        self.X = MinibatchData(X, minibatch_size, np.random.RandomState(0))
        self.Y = MinibatchData(Y, minibatch_size, np.random.RandomState(0))

        # Add variational parameters
        self.Zf = Param(Zf)
        self.Zg = Param(Zg)
        self.num_inducing_f = Zf.shape[0]
        self.num_inducing_g = Zg.shape[0]

        # init variational parameters
        self.u_fm = Param(
            np.random.randn(self.num_inducing_f, self.num_latent) * 0.01)
        self.u_gm = Param(
            np.random.randn(self.num_inducing_g, self.num_latent) * 0.01)

        if self.q_diag:
            self.u_fs_sqrt = Param(
                np.ones((self.num_inducing_f, self.num_latent)),
                transforms.positive)
            self.u_gs_sqrt = Param(
                np.ones((self.num_inducing_g, self.num_latent)),
                transforms.positive)
        else:
            u_fs_sqrt = np.array([
                np.eye(self.num_inducing_f) for _ in range(self.num_latent)
            ]).swapaxes(0, 2)
            self.u_fs_sqrt = Param(
                u_fs_sqrt, transforms.LowerTriangular(u_fs_sqrt.shape[2]))

            u_gs_sqrt = np.array([
                np.eye(self.num_inducing_g) for _ in range(self.num_latent)
            ]).swapaxes(0, 2)
            self.u_gs_sqrt = Param(
                u_gs_sqrt, transforms.LowerTriangular(u_gs_sqrt.shape[2]))
Example 18
class ExponentialReward(Reward):
    def __init__(self, state_dim, W=None, t=None):
        Reward.__init__(self)
        self.state_dim = state_dim
        if W is not None:
            self.W = Param(np.reshape(W, (state_dim, state_dim)), trainable=False)
        else:
            self.W = Param(np.eye(state_dim), trainable=False)
        if t is not None:
            self.t = Param(np.reshape(t, (1, state_dim)), trainable=False)
        else:
            self.t = Param(np.zeros((1, state_dim)), trainable=False)

    def update_target(self, t):
        self.t.assign(np.reshape(t, (1, self.state_dim)))

    def update_weights(self, w):
        self.W.assign(np.reshape(w, (self.state_dim, self.state_dim)))

    @params_as_tensors
    def compute_reward(self, m, s):
        '''
        Reward function, calculating the mean and variance of the reward,
        given the mean and variance of the state distribution, along with
        the target state and a weight matrix.
        Input m : [1, k]
        Input s : [k, k]
        Output M : [1, 1]
        Output S  : [1, 1]
        '''
        # For the robot arm: use only the first 9 state dimensions.
        m = m[:, :9]
        s = s[:9, :9]

        SW = s @ self.W

        iSpW = tf.transpose(
                tf.matrix_solve( (tf.eye(self.state_dim, dtype=float_type) + SW),
                tf.transpose(self.W), adjoint=True))

        muR = tf.exp(-(m-self.t) @  iSpW @ tf.transpose(m-self.t)/2) / \
                tf.sqrt( tf.linalg.det(tf.eye(self.state_dim, dtype=float_type) + SW) )

        i2SpW = tf.transpose(
                tf.matrix_solve( (tf.eye(self.state_dim, dtype=float_type) + 2*SW),
                tf.transpose(self.W), adjoint=True))

        r2 =  tf.exp(-(m-self.t) @ i2SpW @ tf.transpose(m-self.t)) / \
                tf.sqrt( tf.linalg.det(tf.eye(self.state_dim, dtype=float_type) + 2*SW) )

        sR = r2 - muR @ muR
        muR.set_shape([1, 1])
        sR.set_shape([1, 1])
        return muR, sR
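For reference, compute_reward implements the closed-form Gaussian expectation of an exponential reward: with x ~ N(m, s), E[exp(-(x-t) W (x-t)^T / 2)] = exp(-(m-t) W (I + sW)^{-1} (m-t)^T / 2) / sqrt(det(I + sW)). A numpy Monte Carlo sanity check of that identity, with hypothetical values:

import numpy as np

np.random.seed(0)
k = 2
m = np.zeros((1, k)); s = 0.3 * np.eye(k)
t = np.ones((1, k));  W = np.eye(k)

# Closed form, mirroring compute_reward.
SW = s @ W
iSpW = W @ np.linalg.inv(np.eye(k) + SW)
muR = (np.exp(-(m - t) @ iSpW @ (m - t).T / 2)
       / np.sqrt(np.linalg.det(np.eye(k) + SW)))

# Monte Carlo estimate under x ~ N(m, s).
x = np.random.multivariate_normal(m.ravel(), s, size=200000)
d = x - t
mc = np.mean(np.exp(-0.5 * np.einsum('ni,ij,nj->n', d, W, d)))
assert abs(muR.item() - mc) < 1e-2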
Example 19
    def __init__(self,
                 latent_dim,
                 Y,
                 inputs=None,
                 emissions=None,
                 px1_mu=None,
                 px1_cov=None,
                 kern=None,
                 Z=None,
                 n_ind_pts=100,
                 mean_fn=None,
                 Q_diag=None,
                 Umu=None,
                 Ucov_chol=None,
                 qx1_mu=None,
                 qx1_cov=None,
                 As=None,
                 bs=None,
                 Ss=None,
                 n_samples=100,
                 seed=None,
                 parallel_iterations=10,
                 jitter=gps.numerics.jitter_level,
                 name=None):

        super().__init__(name=name)

        self.latent_dim = latent_dim
        self.T, self.obs_dim = Y.shape
        self.Y = Param(Y, trainable=False)

        self.inputs = None if inputs is None else Param(inputs,
                                                        trainable=False)
        self.input_dim = 0 if self.inputs is None else self.inputs.shape[1]

        self.qx1_mu = Param(
            np.zeros(self.latent_dim) if qx1_mu is None else qx1_mu)
        self.qx1_cov_chol = Param(
            np.eye(self.latent_dim)
            if qx1_cov is None else np.linalg.cholesky(qx1_cov),
            transform=gtf.LowerTriangular(self.latent_dim, squeeze=True))

        self.As = Param(
            np.ones((self.T - 1, self.latent_dim)) if As is None else As)
        self.bs = Param(
            np.zeros((self.T - 1, self.latent_dim)) if bs is None else bs)

        self.Q_sqrt = Param(
            np.ones(self.latent_dim) if Q_diag is None else Q_diag**0.5,
            transform=gtf.positive)
        if Ss is False:
            self._S_chols = None
        else:
            self.S_chols = Param(
                np.tile(self.Q_sqrt.value.copy()[None, ...], [self.T - 1, 1])
                if Ss is None else
                (np.sqrt(Ss) if Ss.ndim == 2 else np.linalg.cholesky(Ss)),
                transform=gtf.positive if
                (Ss is None or Ss.ndim == 2) else gtf.LowerTriangular(
                    self.latent_dim, num_matrices=self.T - 1, squeeze=False))

        self.emissions = emissions or GaussianEmissions(
            latent_dim=self.latent_dim, obs_dim=self.obs_dim)

        self.px1_mu = Param(
            np.zeros(self.latent_dim) if px1_mu is None else px1_mu,
            trainable=False)
        self.px1_cov_chol = None if px1_cov is None else \
            Param(np.sqrt(px1_cov) if px1_cov.ndim == 1 else np.linalg.cholesky(px1_cov), trainable=False,
                  transform=gtf.positive if px1_cov.ndim == 1 else gtf.LowerTriangular(self.latent_dim, squeeze=True))

        self.n_samples = n_samples
        self.seed = seed
        self.parallel_iterations = parallel_iterations
        self.jitter = jitter

        # Inference-specific attributes (see gpssm_models.py for appropriate choices):
        nans = tf.constant(np.full(
            (self.T, self.n_samples, self.latent_dim), np.nan),
                           dtype=gps.float_type)
        self.sample_fn = lambda **kwargs: (nans, None)
        self.sample_kwargs = {}
        self.KL_fn = lambda *fs: tf.constant(np.nan, dtype=gps.float_type)

        # GP Transitions:
        self.n_ind_pts = n_ind_pts if Z is None else (
            Z[0].shape[-2] if isinstance(Z, list) else Z.shape[-2])

        if isinstance(Z, np.ndarray) and Z.ndim == 2:
            self.Z = mf.SharedIndependentMof(gp.features.InducingPoints(Z))
        else:
            Z_list = [
                np.random.randn(self.n_ind_pts, self.latent_dim +
                                self.input_dim) for _ in range(self.latent_dim)
            ] if Z is None else [z for z in Z]
            self.Z = mf.SeparateIndependentMof(
                [gp.features.InducingPoints(z) for z in Z_list])

        if isinstance(kern, gp.kernels.Kernel):
            self.kern = mk.SharedIndependentMok(kern, self.latent_dim)
        else:
            kern_list = kern or [
                gp.kernels.Matern32(self.latent_dim + self.input_dim, ARD=True)
                for _ in range(self.latent_dim)
            ]
            self.kern = mk.SeparateIndependentMok(kern_list)

        self.mean_fn = mean_fn or mean_fns.Identity(self.latent_dim)
        self.Umu = Param(
            np.zeros((self.latent_dim, self.n_ind_pts))
            if Umu is None else Umu)  # (Lm^-1)(Umu - m(Z))
        LT_transform = gtf.LowerTriangular(self.n_ind_pts,
                                           num_matrices=self.latent_dim,
                                           squeeze=False)
        self.Ucov_chol = Param(np.tile(
            np.eye(self.n_ind_pts)[None, ...], [self.latent_dim, 1, 1])
                               if Ucov_chol is None else Ucov_chol,
                               transform=LT_transform)  # (Lm^-1)Lu
        self._Kzz = None
Example 20
 def __init__(self, dims):
     Parameterized.__init__(self)
     self.dims = dims
     for i, (dim_in, dim_out) in enumerate(zip(dims[:-1], dims[1:])):
         setattr(self, 'W_{}'.format(i), Param(xavier(dim_in, dim_out)))
         setattr(self, 'b_{}'.format(i), Param(np.zeros(dim_out)))
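The xavier helper is not defined in this snippet; a definition consistent with the scaling in Example 6 would be the following (an assumption, not necessarily the original):

import numpy as np

def xavier(dim_in, dim_out):
    # Glorot-normal draw: std = sqrt(2 / (fan_in + fan_out)),
    # matching the scaling used in Example 6.
    return np.random.randn(dim_in, dim_out) * np.sqrt(2.0 / (dim_in + dim_out))

W0 = xavier(4, 8)  # illustrative layer shape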
Example 21
def _create_params(input_dim, output_dim):
    def initializer():
        limit = np.sqrt(6. / (input_dim + output_dim))
        return np.random.uniform(-limit, +limit, (input_dim, output_dim))
    return Param(initializer(), dtype=float_type, prior=gpflow.priors.Gaussian(0, 1)), \
           Param(np.zeros(output_dim), dtype=float_type)
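_create_params uses the uniform variant of Glorot initialization: the limit sqrt(6 / (input_dim + output_dim)) makes the uniform draw's variance equal 2 / (input_dim + output_dim), the same variance as the normal-based draws in Examples 6 and 20. A quick numpy check:

import numpy as np

input_dim, output_dim = 64, 32
limit = np.sqrt(6. / (input_dim + output_dim))
samples = np.random.uniform(-limit, limit, (input_dim, output_dim))
# Uniform(-limit, limit) has variance limit**2 / 3 = 2 / (input_dim + output_dim).
print(samples.var(), 2. / (input_dim + output_dim))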