Example #1
    def test_pairwise_dists(self):
        """Tests `_compute_pairwise_dists` method.
    """
        max_seq_len = 5
        n_seq = 4
        seq_lens = [2, 4, 3, 2]
        n_dims = 3
        n_labels = 2

        x = np.random.randn(n_seq, max_seq_len, n_dims) / 3
        x = tf.constant(x)

        inputs = grid.InputsGrid(n_dims, left=-1., right=1., npoints=5)
        mu_ranks = 5
        projector = Identity(n_dims)
        cov = SE_multidim(n_labels, .7, .3, .1, projector)
        gp = TTGPstruct(cov, inputs, mu_ranks)

        ans_ = gp._compute_pairwise_dists(x)
        with self.test_session() as sess:
            gp.initialize(sess)
            ans = sess.run(ans_)

            x_np = sess.run(x)
            ans_np = []
            for i in range(x_np.shape[0]):
                ans_np.append(cdist(x_np[i], x_np[i])[None, :, :])
            ans_np = np.vstack(ans_np)**2
            self.assertAllClose(ans_np, ans)
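The check above compares `_compute_pairwise_dists` against per-sequence `scipy.spatial.distance.cdist` calls. As a standalone sketch (not the library's implementation), the same batch of squared-distance matrices can be obtained with the broadcasting identity ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b:

import numpy as np
from scipy.spatial.distance import cdist

# Toy batch with the same shapes as the test: (n_seq, max_seq_len, n_dims).
x = np.random.randn(4, 5, 3) / 3

# Reference: per-sequence squared Euclidean distances via scipy, as in the test.
ref = np.stack([cdist(xi, xi) ** 2 for xi in x])

# Broadcasting equivalent of the squared pairwise distances.
sq_norms = np.sum(x ** 2, axis=-1)                     # (n_seq, max_seq_len)
dists = (sq_norms[:, :, None] + sq_norms[:, None, :]
         - 2 * np.einsum('sid,sjd->sij', x, x))

print(np.allclose(ref, dists))                         # True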
Example #2
    def test_unary_complexity_penalty(self):
        """Tests `_unary_complexity_penalty` method.
    """
        n_dims = 3
        n_labels = 2
        inputs = grid.InputsGrid(n_dims, left=-1., right=1., npoints=5)
        mu_ranks = 5
        projector = Identity(n_dims)
        cov = SE_multidim(n_labels, .7, .3, .1, projector)
        gp = TTGPstruct(cov, inputs, mu_ranks)
        ans_ = gp._unary_complexity_penalty()
        with self.test_session() as sess:
            gp.initialize(sess)
            ans = sess.run(ans_)

            Kmm = sess.run(t3f.full(gp._K_mms()))
            mu = sess.run(t3f.full(gp.mus))
            sigma = sess.run(
                t3f.full(
                    t3f.tt_tt_matmul(gp.sigma_ls, t3f.transpose(gp.sigma_ls))))
            mu_prior = np.zeros_like(mu)
            ans_np = 0
            for i in range(mu.shape[0]):
                ans_np += KL(mu[i], sigma[i], mu_prior[i], Kmm[i])
            self.assertAllClose(-ans_np[0, 0], ans)
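`_unary_complexity_penalty` is checked here against the negated sum, over labels, of KL divergences between the variational posteriors N(mu, Sigma) and the zero-mean priors N(0, K_mm). The `KL` helper comes from the surrounding test module; a minimal sketch of the closed-form Gaussian KL it is assumed to compute (hypothetical name and signature, not the gptt_embed code):

import numpy as np

def kl_gauss(mu_q, sigma_q, mu_p, sigma_p):
    """KL( N(mu_q, sigma_q) || N(mu_p, sigma_p) ) for full covariance matrices.

    Hypothetical stand-in for the `KL` helper used in the test above.
    """
    mu_q, mu_p = mu_q.reshape(-1, 1), mu_p.reshape(-1, 1)
    k = mu_q.shape[0]
    sigma_p_inv = np.linalg.inv(sigma_p)
    diff = mu_p - mu_q
    return 0.5 * (np.trace(sigma_p_inv @ sigma_q)
                  + (diff.T @ sigma_p_inv @ diff).item()
                  - k
                  + np.linalg.slogdet(sigma_p)[1]
                  - np.linalg.slogdet(sigma_q)[1])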
Example #3
import tensorflow as tf

from gptt_embed.covariance import SE_multidim
from gptt_embed.projectors import FeatureTransformer, LinearProjector, Identity
from gptt_embed.gpc_runner import GPCRunner

with tf.Graph().as_default():
    data_dir = "data/"
    n_inputs = 10
    mu_ranks = 10
    D = 14
    d = 10
    projector = LinearProjector(D=D, d=d)
    #projector = Identity(D=D)
    C = 2

    cov = SE_multidim(C, 0.7, 0.2, 0.1, projector)

    lr = 5e-3
    decay = (10, 0.2)
    n_epoch = 20
    batch_size = 200
    data_type = 'numpy'
    log_dir = 'log'
    save_dir = None  # 'models/gpnn_100_100_4.ckpt'
    model_dir = None  # save_dir
    load_model = False  # True
    
    runner = GPCRunner(data_dir, n_inputs, mu_ranks, cov,
                       lr=lr, decay=decay, n_epoch=n_epoch, batch_size=batch_size,
                       data_type=data_type, log_dir=log_dir, save_dir=save_dir,
                       model_dir=model_dir, load_model=load_model, batch_test=False)
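In this example the classifier works on a learned low-dimensional representation: `LinearProjector(D=14, d=10)` maps 14 raw features onto 10 kernel inputs. A rough NumPy sketch of the idea (illustrative only; in gptt_embed the projection matrix is presumably a trainable TensorFlow variable):

import numpy as np

D, d = 14, 10
W = np.random.randn(D, d) / np.sqrt(D)   # hypothetical projection matrix
x_batch = np.random.randn(200, D)        # a batch of raw 14-dimensional features
x_proj = x_batch @ W                     # (200, 10), what the SE kernel sees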
Example #4
    def test_latent_vars_distribution(self):
        """Tests `_latent_vars_distribution` method.
    """
        max_seq_len = 5
        n_seq = 4
        seq_lens = [2, 4, 3, 2]
        n_dims = 3
        n_labels = 2

        x = np.random.randn(n_seq, max_seq_len, n_dims) / 3
        x = tf.constant(x)

        inputs = grid.InputsGrid(n_dims, left=-1., right=1., npoints=5)
        mu_ranks = 5
        projector = Identity(n_dims)
        cov = SE_multidim(n_labels, .7, .3, .1, projector)
        gp = TTGPstruct(cov, inputs, mu_ranks)

        m_un, S_un, m_bin, S_bin = gp._latent_vars_distribution(
            x, tf.constant(seq_lens))

        with self.test_session() as sess:
            gp.initialize(sess)
            m_un_tf, S_un_tf, m_bin_tf, S_bin_tf = sess.run(
                [m_un, S_un, m_bin, S_bin])

            x_np = sess.run(x)
            mus, Sigma_ls, mu_bin, Sigma_bin_l = sess.run([
                t3f.full(gp.mus),
                t3f.full(gp.sigma_ls), gp.bin_mu, gp.bin_sigma_l
            ])

            batch_size, max_len, d = x.get_shape().as_list()
            sequence_mask = tf.sequence_mask(seq_lens, maxlen=max_len)
            indices = tf.cast(tf.where(sequence_mask), tf.int32)
            x_flat = tf.gather_nd(x, indices)

            w = sess.run(
                t3f.full(gp.inputs.interpolate_on_batch(
                    gp.cov.project(x_flat))))
            K_nn = sess.run(gp._K_nns(x))
            K_mm = sess.run(t3f.full(gp._K_mms()))

        # Now let's compute the parameters with numpy.

        mus = mus[:, :, 0]
        w = w[:, :, 0]
        m_un_np_flat = mus.dot(w.T)
        m_un_np = np.zeros([n_labels, n_seq, max_seq_len])
        S_un_np = K_nn
        for label in range(n_labels):
            prev_seq_len = 0
            Sigma = Sigma_ls[label].dot(Sigma_ls[label].T)
            for seq, seq_len in enumerate(seq_lens):
                # m_un
                m_un_np[label, seq, :seq_len] = m_un_np_flat[
                    label, prev_seq_len:prev_seq_len + seq_len]
                w_cur = w[prev_seq_len:prev_seq_len + seq_len, :]
                cur_cov = w_cur.dot(Sigma.dot(w_cur.T))
                cur_cov -= w_cur.dot(K_mm[label].dot(w_cur.T))
                S_un_np[label, seq, :seq_len, :seq_len] += cur_cov
                S_un_np[label, seq, seq_len:, :] = 0
                S_un_np[label, seq, :, seq_len:] = 0

                prev_seq_len += seq_len

        m_bin_np = mu_bin
        S_bin_np = Sigma_bin_l.dot(Sigma_bin_l.T)

        self.assertAllClose(S_un_tf, S_un_np)
        self.assertAllClose(m_un_tf, m_un_np)
        self.assertAllClose(m_bin_tf, m_bin_np)
        self.assertAllClose(S_bin_tf, S_bin_np)
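The `tf.sequence_mask` / `tf.where` / `tf.gather_nd` combination in the test above simply drops padded time steps before interpolating on the batch. A standalone NumPy sketch of the same flattening, with the toy shapes used in the test:

import numpy as np

seq_lens = [2, 4, 3, 2]
x_np = np.random.randn(4, 5, 3)          # (n_seq, max_seq_len, n_dims)

# Boolean mask marking the valid (unpadded) time steps of every sequence.
mask = np.arange(5)[None, :] < np.array(seq_lens)[:, None]
x_flat = x_np[mask]                      # (sum(seq_lens), n_dims) == (11, 3)
print(x_flat.shape)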