def test_orthogonal_matrix_covariance():
    """An orthogonal matrix's self cross-covariance should be smaller
    than that of an i.i.d. Gaussian matrix of the same shape."""
    failure_msg = "Orthogonal matrix should have less covariance than a random matrix"
    orthogonal = Variable(fake_data.orthogonal_matrix([20, 20]).astype('float32'))
    gaussian = Variable(np.random.randn(20, 20).astype('float32'))
    cc_orthogonal = cross_covariance(orthogonal, orthogonal).data
    cc_gaussian = cross_covariance(gaussian, gaussian).data
    assert cc_orthogonal < cc_gaussian, failure_msg
 def check_type(self, y_data, z_data):
     """Gradients produced by backward() must keep the input dtypes."""
     var_y = chainer.Variable(y_data)
     var_z = chainer.Variable(z_data)
     functions.cross_covariance(var_y, var_z).backward()
     self.assertEqual(y_data.dtype, var_y.grad.dtype)
     self.assertEqual(z_data.dtype, var_z.grad.dtype)
    def check_forward(self, y_data, z_data):
        """Forward pass returns a float32 scalar that matches a naive
        per-element reference: 0.5 * sum_ij cov(y_i, z_j)^2."""
        y = chainer.Variable(y_data)
        z = chainer.Variable(z_data)
        loss = functions.cross_covariance(y, z)
        self.assertEqual(loss.data.shape, ())
        self.assertEqual(loss.data.dtype, numpy.float32)
        actual = float(cuda.to_cpu(loss.data))

        # Reference computation on the host with centered columns.
        y_host, z_host = cuda.to_cpu(y_data), cuda.to_cpu(z_data)
        y_centered = y_host - y_host.mean(axis=0)
        z_centered = z_host - z_host.mean(axis=0)
        batch = y_host.shape[0]

        expected = 0
        for col_y in six.moves.xrange(y_host.shape[1]):
            for col_z in six.moves.xrange(z_host.shape[1]):
                cov = 0.
                for row in six.moves.xrange(batch):
                    cov += y_centered[row, col_y] * z_centered[row, col_z]
                cov /= batch
                expected += cov ** 2
        expected *= 0.5

        self.assertAlmostEqual(expected, actual, places=5)
 def check_type(self, y_data, z_data):
     """The gradient arrays must inherit the dtype of the inputs."""
     yv, zv = chainer.Variable(y_data), chainer.Variable(z_data)
     out = functions.cross_covariance(yv, zv)
     out.backward()
     self.assertEqual(y_data.dtype, yv.grad.dtype)
     self.assertEqual(z_data.dtype, zv.grad.dtype)
    def check_forward(self, y_data, z_data):
        """Check the forward pass of cross_covariance against a naive
        reference implementation.

        The loss must be a float32 scalar equal (to 5 decimal places) to
        0.5 * sum_ij cov_ij^2, where cov_ij is the empirical covariance
        between column i of ``y_data`` and column j of ``z_data``.
        """
        y = chainer.Variable(y_data)
        z = chainer.Variable(z_data)
        loss = functions.cross_covariance(y, z)
        self.assertEqual(loss.data.shape, ())
        self.assertEqual(loss.data.dtype, numpy.float32)
        # Move to the host before converting: loss.data may live on the
        # GPU (this matches the other GPU-aware check_forward variants).
        loss_value = float(cuda.to_cpu(loss.data))

        # Compute expected value with explicit per-element loops.
        y_data, z_data = cuda.to_cpu(y_data), cuda.to_cpu(z_data)
        y_mean = y_data.mean(axis=0)
        z_mean = z_data.mean(axis=0)
        N = y_data.shape[0]

        loss_expect = 0
        for i in six.moves.xrange(y_data.shape[1]):
            for j in six.moves.xrange(z_data.shape[1]):
                ij_loss = 0.
                for n in six.moves.xrange(N):
                    ij_loss += (y_data[n, i] - y_mean[i]) * (
                        z_data[n, j] - z_mean[j])
                ij_loss /= N
                loss_expect += ij_loss ** 2
        loss_expect *= 0.5

        self.assertAlmostEqual(loss_expect, loss_value, places=5)
Example #6
0
 def _priors(self):
     """ Measure likelihood of seeing topic proportions"""
     total = None
     for feature_name, spec in self.categorical_features.items():
         embedding, transform, loss_func, penalty = spec
         mixture = self[feature_name + "_mixture"]
         prior = dirichlet_likelihood(mixture.weights)
         if penalty:
             # Penalize correlated topic factors via cross-covariance.
             weight_matrix = mixture.factors.W
             prior += F.cross_covariance(weight_matrix, weight_matrix)
         total = prior if total is None else prior + total
     return total
Example #7
0
 def _priors(self):
     """ Measure likelihood of seeing topic proportions"""
     loss = None
     for cat_feat_name, vals in self.categorical_features.items():
         penalty = vals[3]
         name = cat_feat_name + "_mixture"
         dl = dirichlet_likelihood(self[name].weights)
         if penalty:
             # Add a decorrelation penalty on the factor weights.
             W = self[name].factors.W
             dl = dl + F.cross_covariance(W, W)
         loss = dl if loss is None else dl + loss
     return loss
    def check_backward(self, y_data, z_data, use_cudnn=True):
        """Compare analytic gradients of cross_covariance with numerical
        gradients.

        Args:
            y_data, z_data: input arrays (CPU or GPU).
            use_cudnn (bool): forwarded to ``functions.cross_covariance``.
        """
        y = chainer.Variable(y_data)
        z = chainer.Variable(z_data)
        loss = functions.cross_covariance(y, z, use_cudnn)
        loss.backward()

        func = loss.creator

        # PEP 8 (E731): use a def, not a lambda bound to a name.
        def f():
            return func.forward((y.data, z.data))

        gy, gz = gradient_check.numerical_grad(f, (y.data, z.data),
                                               (1, ), eps=0.02)

        gradient_check.assert_allclose(gy, y.grad)
        gradient_check.assert_allclose(gz, z.grad)
Example #9
0
    def check_forward(self, y_data, z_data):
        """Forward result matches the _cross_covariance reference helper
        for the configured ``self.reduce`` mode."""
        loss = functions.cross_covariance(
            chainer.Variable(y_data), chainer.Variable(z_data), self.reduce)

        self.assertEqual(loss.shape, self.gloss.shape)
        self.assertEqual(loss.data.dtype, numpy.float32)
        actual = cuda.to_cpu(loss.data)

        # Reference value computed on the host.
        expected = _cross_covariance(y_data, z_data)
        if self.reduce == 'half_squared_sum':
            expected = numpy.sum(expected ** 2) * 0.5
        numpy.testing.assert_allclose(
            expected, actual, rtol=1e-4, atol=1e-4)
Example #10
0
    def check_forward(self, y_data, z_data):
        """Forward result agrees with _cross_covariance computed in
        ``self.dtype`` under the configured ``self.reduce`` mode."""
        y_var = chainer.Variable(y_data)
        z_var = chainer.Variable(z_data)
        out = functions.cross_covariance(y_var, z_var, self.reduce)

        self.assertEqual(out.shape, self.gloss.shape)
        self.assertEqual(out.data.dtype, self.dtype)
        observed = cuda.to_cpu(out.data)

        # Host-side reference computation in the same dtype.
        reference = _cross_covariance(y_data, z_data, dtype=self.dtype)
        if self.reduce == 'half_squared_sum':
            reference = numpy.sum(reference ** 2) * 0.5
        numpy.testing.assert_allclose(
            reference, observed, **self.forward_options)
Example #11
0
File: models.py  Project: kzky/works
    def __call__(self, x_recon, x, enc_hiddens, dec_hiddens, scale=True):
        """Accumulate a size-normalized cross-covariance loss over paired
        encoder/decoder hidden activations.

        Parameters
        -----------------
        x_recon: Variable to be reconstructed as label
        x: Variable to be reconstructed as label
        enc_hiddens: list of Variable
        dec_hiddens: list of Variable
        """
        total = 0

        # Lateral Recon Loss: pair encoder activations (reversed) with
        # decoder activations; normalize each term by batch size and
        # feature dimensionality.
        if self.rc and enc_hiddens is not None:
            for enc_h, dec_h in zip(enc_hiddens[::-1], dec_hiddens):
                batch = enc_h.shape[0]
                dim = np.prod(enc_h.shape[1:])
                total += F.cross_covariance(enc_h, dec_h) / batch / dim

        self.loss = total
        return self.loss
Example #12
0
File: models.py  Project: kzky/works
    def __call__(self, x_recon, x, enc_hiddens, dec_hiddens, scale=True):
        """Return the summed, size-normalized cross-covariance loss
        between encoder and decoder hidden states.

        Parameters
        -----------------
        x_recon: Variable to be reconstructed as label
        x: Variable to be reconstructed as label
        enc_hiddens: list of Variable
        dec_hiddens: list of Variable
        """
        loss_sum = 0

        # Lateral Recon Loss
        if self.rc and enc_hiddens is not None:
            for h_enc, h_dec in zip(enc_hiddens[::-1], dec_hiddens):
                batch_size = h_enc.shape[0]
                feat_dim = np.prod(h_enc.shape[1:])
                term = F.cross_covariance(h_enc, h_dec)
                loss_sum = loss_sum + term / batch_size / feat_dim

        self.loss = loss_sum
        return self.loss
Example #13
0
 def f(y, z):
     """Apply cross_covariance with the configured reduce mode."""
     reduce_mode = self.reduce
     return functions.cross_covariance(y, z, reduce_mode)
Example #14
0
    def check_invalid_option(self, xp):
        """An unknown ``reduce`` option must raise ValueError."""
        y_arr, z_arr = xp.asarray(self.y), xp.asarray(self.z)

        with self.assertRaises(ValueError):
            functions.cross_covariance(y_arr, z_arr, 'invalid_option')
Example #15
0
 def f(y, z):
     """Forward wrapper passing the test's reduce mode through."""
     out = functions.cross_covariance(y, z, self.reduce)
     return out
Example #16
0
    def check_invalid_option(self, xp):
        """Passing an unsupported reduce string raises ValueError."""
        y_input = xp.asarray(self.y)
        z_input = xp.asarray(self.z)

        with self.assertRaises(ValueError):
            functions.cross_covariance(y_input, z_input, 'invalid_option')