Code Example #1
File: binary_ops.py Project: sharadmv/nvmp
 def _statistic(self, stat):
     if stat == Stats.X:
         return Stats.X(self.left) + Stats.X(self.right)
     elif stat == Stats.XXT:
         return (Stats.XXT(self.left) + Stats.XXT(self.right) +
                 T.outer(Stats.X(self.left), Stats.X(self.right)) +
                 T.outer(Stats.X(self.right), Stats.X(self.left)))
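
The Stats.XXT branch encodes the second-moment identity for a sum of independent variables: E[(l + r)(l + r)^T] = E[ll^T] + E[rr^T] + E[l]E[r]^T + E[r]E[l]^T. A minimal NumPy check of that identity (np.outer standing in for T.outer; all numbers made up):

import numpy as np

# Made-up means and second moments for two independent 2-D variables.
E_l, E_r = np.array([1.0, 2.0]), np.array([3.0, -1.0])
E_llT = np.outer(E_l, E_l) + np.eye(2)        # E[ll^T] = Cov(l) + E[l]E[l]^T
E_rrT = np.outer(E_r, E_r) + 2.0 * np.eye(2)  # E[rr^T] = Cov(r) + E[r]E[r]^T

# Second moment of the sum; the cross terms factorize under independence.
E_ssT = E_llT + E_rrT + np.outer(E_l, E_r) + np.outer(E_r, E_l)

# Cross-check: E[ss^T] = Cov(l) + Cov(r) + (E[l]+E[r])(E[l]+E[r])^T.
cov_s = (E_llT - np.outer(E_l, E_l)) + (E_rrT - np.outer(E_r, E_r))
assert np.allclose(E_ssT, cov_s + np.outer(E_l + E_r, E_l + E_r))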
Code Example #2
File: blds.py Project: yuchen8807/parasol
 def em(i, q_dyn_natparam, q_X_natparam, _, curr_elbo):
     q_X_ = stats.LDS(q_X_natparam, 'natural')
     ess = q_X_.expected_sufficient_statistics()
     batch_size = T.shape(ess)[0]
     yyT = ess[..., :-1, ds:2 * ds, ds:2 * ds]
     xxT = ess[..., :-1, :ds, :ds]
     yxT = ess[..., :-1, ds:2 * ds, :ds]
     x = ess[..., :-1, -1, :ds]
     y = ess[..., :-1, -1, ds:2 * ds]
     xaT = T.outer(x, a)
     yaT = T.outer(y, a)
     xaxaT = T.concatenate([
         T.concatenate([xxT, xaT], -1),
         T.concatenate([T.matrix_transpose(xaT), aaT], -1),
     ], -2)
     ess = [
         yyT,
         T.concatenate([yxT, yaT], -1), xaxaT,
         T.ones([batch_size, self.horizon - 1])
     ]
     q_dyn_natparam = [
         T.sum(a, [0]) * data_strength + b
         for a, b in zip(ess, initial_dyn_natparam)
     ]
     q_dyn_ = stats.MNIW(q_dyn_natparam, 'natural')
     q_stats = q_dyn_.expected_sufficient_statistics()
     p_X = stats.LDS((q_stats, state_prior, None,
                      q_A.expected_value(), self.horizon))
     q_X_ = stats.LDS((q_stats, state_prior, q_X,
                       q_A.expected_value(), self.horizon))
     elbo = (T.sum(stats.kl_divergence(q_X_, p_X)) +
             T.sum(stats.kl_divergence(q_dyn_, prior_dyn)))
     return i + 1, q_dyn_.get_parameters(
         'natural'), q_X_.get_parameters('natural'), curr_elbo, elbo
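
The slicing at the top of em assumes a packing convention for the LDS expected sufficient statistics: the trailing two axes hold a block matrix over the stacked vector [x_t; x_{t+1}; 1], so the second moments yyT, xxT, yxT sit in the corner blocks and the means in the last row. A sketch of that convention as inferred from the slices above (sizes made up):

import numpy as np

ds, horizon, batch = 2, 5, 3
# One (2*ds + 1) x (2*ds + 1) block matrix per (batch, time) pair.
ess = np.random.randn(batch, horizon, 2 * ds + 1, 2 * ds + 1)

xxT = ess[..., :-1, :ds, :ds]              # E[x_t x_t^T]
yyT = ess[..., :-1, ds:2 * ds, ds:2 * ds]  # E[x_{t+1} x_{t+1}^T]
yxT = ess[..., :-1, ds:2 * ds, :ds]        # E[x_{t+1} x_t^T]
x = ess[..., :-1, -1, :ds]                 # E[x_t], read off the last row
y = ess[..., :-1, -1, ds:2 * ds]           # E[x_{t+1}]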
Code Example #3
File: lds.py Project: yuchen8807/parasol
    def get_statistics(self, q_Xt, q_At, q_Xt1):
        Xt1_Xt1T, Xt1 = stats.Gaussian.unpack(q_Xt1.expected_sufficient_statistics())

        Xt_XtT, Xt = stats.Gaussian.unpack(q_Xt.expected_sufficient_statistics())
        At_AtT, At = stats.Gaussian.unpack(q_At.expected_sufficient_statistics())

        XtAt = T.concatenate([Xt, At], -1)
        XtAt_XtAtT = T.concatenate([
            T.concatenate([Xt_XtT, T.outer(Xt, At)], -1),
            T.concatenate([T.outer(At, Xt), At_AtT], -1),
        ], -2)
        return (XtAt_XtAtT, XtAt), (Xt1_Xt1T, Xt1)
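
get_statistics assembles the joint second moment of the concatenated state-action vector [Xt; At] as a 2x2 block matrix. The same layout in plain NumPy for a single time step (dimensions made up; np.block replaces the nested T.concatenate calls, and the deterministic case makes the identity exact):

import numpy as np

ds, da = 3, 2                    # made-up state / action dimensions
Xt, At = np.random.randn(ds), np.random.randn(da)
Xt_XtT, At_AtT = np.outer(Xt, Xt), np.outer(At, At)

# Joint second moment of the stacked vector [Xt; At].
XtAt_XtAtT = np.block([
    [Xt_XtT,           np.outer(Xt, At)],
    [np.outer(At, Xt), At_AtT],
])
XtAt = np.concatenate([Xt, At])
assert np.allclose(XtAt_XtAtT, np.outer(XtAt, XtAt))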
Code Example #4
File: stats.py Project: sharadmv/nvmp
def get_stat(x, name, feed_dict={}):
    # Use the node's own statistic if x is tracked in the current graph.
    node = get_current_graph().get_node(x)
    if node is not None:
        return node.get_stat(name, feed_dict=feed_dict)
    # Otherwise fall back to computing the statistic from the raw value.
    if name == 'x':
        return x
    elif name == 'xxT':
        return T.outer(x, x)
    elif name == '-0.5S^-1':
        return -0.5 * T.matrix_inverse(x)
    elif name == '-0.5log|S|':
        return -0.5 * T.logdet(x)
    raise ValueError("unknown statistic: %s" % name)
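
When x is not a node in the graph, the fallback treats 'x' and 'xxT' as the point's Gaussian sufficient statistics and '-0.5S^-1' / '-0.5log|S|' as functions of a covariance matrix. A standalone NumPy sketch of just that fallback dispatch (hypothetical helper, outside the nvmp graph):

import numpy as np

def fallback_stat(x, name):
    if name == 'x':
        return x                              # first moment of a point mass
    if name == 'xxT':
        return np.outer(x, x)                 # second moment of a point mass
    if name == '-0.5S^-1':
        return -0.5 * np.linalg.inv(x)        # x interpreted as an SPD matrix S
    if name == '-0.5log|S|':
        return -0.5 * np.linalg.slogdet(x)[1]
    raise ValueError("unknown statistic: %s" % name)

S = np.array([[2.0, 0.3], [0.3, 1.0]])
print(fallback_stat(S, '-0.5log|S|'))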
Code Example #5
File: blds.py Project: yuchen8807/parasol
 def kl_gradients(self, q_X, q_A, _, num_data):
     if self.smooth:
         ds = self.ds
         ess = q_X.expected_sufficient_statistics()
         yyT = ess[..., :-1, ds:2 * ds, ds:2 * ds]
         xxT = ess[..., :-1, :ds, :ds]
         yxT = ess[..., :-1, ds:2 * ds, :ds]
         aaT, a = stats.Gaussian.unpack(
             q_A.expected_sufficient_statistics())
         aaT, a = aaT[:, :-1], a[:, :-1]
         x = ess[..., :-1, -1, :ds]
         y = ess[..., :-1, -1, ds:2 * ds]
         xaT = T.outer(x, a)
         yaT = T.outer(y, a)
         xaxaT = T.concatenate([
             T.concatenate([xxT, xaT], -1),
             T.concatenate([T.matrix_transpose(xaT), aaT], -1),
         ], -2)
         batch_size = T.shape(ess)[0]
         num_batches = T.to_float(num_data) / T.to_float(batch_size)
         ess = [
             yyT,
             T.concatenate([yxT, yaT], -1), xaxaT,
             T.ones([batch_size, self.horizon - 1])
         ]
     else:
         q_Xt = q_X.__class__([
             q_X.get_parameters('regular')[0][:, :-1],
             q_X.get_parameters('regular')[1][:, :-1],
         ])
         q_At = q_A.__class__([
             q_A.get_parameters('regular')[0][:, :-1],
             q_A.get_parameters('regular')[1][:, :-1],
         ])
         q_Xt1 = q_X.__class__([
             q_X.get_parameters('regular')[0][:, 1:],
             q_X.get_parameters('regular')[1][:, 1:],
         ])
         (XtAt_XtAtT, XtAt), (Xt1_Xt1T,
                              Xt1) = self.get_statistics(q_Xt, q_At, q_Xt1)
         batch_size = T.shape(XtAt)[0]
         num_batches = T.to_float(num_data) / T.to_float(batch_size)
         ess = [
             Xt1_Xt1T,
             T.einsum('nha,nhb->nhba', XtAt, Xt1), XtAt_XtAtT,
             T.ones([batch_size, self.horizon - 1])
         ]
     if self.time_varying:
         ess = [
             T.sum(ess[0], [0]),
             T.sum(ess[1], [0]),
             T.sum(ess[2], [0]),
             T.sum(ess[3], [0]),
         ]
     else:
         ess = [
             T.sum(ess[0], [0, 1]),
             T.sum(ess[1], [0, 1]),
             T.sum(ess[2], [0, 1]),
             T.sum(ess[3], [0, 1]),
         ]
     return [
         -(a + num_batches * b - c) / T.to_float(num_data)
         for a, b, c in zip(
             self.A_prior.get_parameters('natural'),
             ess,
             self.A_variational.get_parameters('natural'),
         )
     ]
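
The returned list has the form of a stochastic natural-gradient step for the dynamics KL: prior natural parameters plus the minibatch sufficient statistics rescaled to the full dataset, minus the current variational parameters, all divided by the number of datapoints. A scalar NumPy sketch of that rescaling (all values made up):

import numpy as np

prior_natparam = np.array([1.0, -0.5])   # hypothetical prior natural parameters
var_natparam = np.array([4.0, -2.0])     # current variational natural parameters
batch_ess = np.array([0.3, -0.1])        # sufficient statistics from one minibatch
num_data, batch_size = 10000, 100
num_batches = num_data / batch_size      # rescales the minibatch to the dataset

grad = -(prior_natparam + num_batches * batch_ess - var_natparam) / num_data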
Code Example #6
File: gmm-svi.py Project: sharadmv/vi-demos
        map(lambda x: np.array(x).astype(T.floatx()), [
            np.tile(np.eye(D)[None] * 100, [K, 1, 1]),
            np.random.multivariate_normal(
                mean=np.zeros([D]), cov=np.eye(D) * 20, size=[K]),
            np.ones(K),
            np.ones(K) * (D + 1)
        ])))

sigma, mu = Gaussian(q_theta.expected_sufficient_statistics(),
                     parameter_type='natural').get_parameters('regular')
alpha = Categorical(q_pi.expected_sufficient_statistics(),
                    parameter_type='natural').get_parameters('regular')

pi_cmessage = q_pi.expected_sufficient_statistics()
x_tmessage = NIW.pack([
    T.outer(X, X),
    X,
    T.ones([batch_size]),
    T.ones([batch_size]),
])
x_stats = Gaussian.pack([
    T.outer(X, X),
    X,
])
theta_cmessage = q_theta.expected_sufficient_statistics()

num_batches = N / T.to_float(batch_size)
nat_scale = 10.0

parent_z = q_pi.expected_sufficient_statistics()[None]
new_z = T.einsum('iab,jab->ij', x_tmessage, theta_cmessage) + parent_z
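
The einsum 'iab,jab->ij' contracts the two trailing axes, i.e. it takes the Frobenius inner product of every data message with every component message. The same contraction via np.tensordot (shapes made up; the real messages are NIW-packed blocks):

import numpy as np

x_tmessage = np.random.randn(5, 3, 3)      # 5 per-datapoint messages
theta_cmessage = np.random.randn(2, 3, 3)  # 2 per-component messages

pairwise = np.einsum('iab,jab->ij', x_tmessage, theta_cmessage)
same = np.tensordot(x_tmessage, theta_cmessage, axes=([1, 2], [1, 2]))
assert np.allclose(pairwise, same)         # result has shape (5, 2)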
Code Example #7
 def activate(self, X):
     shape = T.shape(X)
     return stats.NIW.pack(
         [T.outer(X, X), X,
          T.ones(shape[:-1]),
          T.ones(shape[:-1])])
Code Example #8
 def compute(self, x):
     return T.outer(x, x)
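
Across these examples T.outer is applied to batched arguments (e.g. x of shape [batch, horizon - 1, ds] in examples #2 and #5), so it apparently takes the outer product over the last axis and broadcasts the leading ones. A sketch of that assumed convention with einsum (not the actual T.outer implementation):

import numpy as np

def batched_outer(x, y):
    # Outer product over the trailing axis; leading axes broadcast together.
    return np.einsum('...a,...b->...ab', x, y)

x = np.random.randn(4, 9, 3)  # e.g. [batch, horizon - 1, ds]
a = np.random.randn(4, 9, 2)  # e.g. [batch, horizon - 1, da]
assert batched_outer(x, a).shape == (4, 9, 3, 2)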