  def Test(self):
    with self.test_session():
      np.random.seed(1)
      m = np.random.uniform(low=1.0,
                            high=100.0,
                            size=np.prod(shape_)).reshape(shape_).astype(dtype_)
      a = tf.constant(m)
      epsilon = np.finfo(dtype_).eps
      # Optimal stepsize for central difference is O(epsilon^{1/3}).
      delta = epsilon**(1.0 / 3.0)

      # Tolerance chosen by inspecting actual differences via
      # np.linalg.norm(theoretical - numerical, np.inf) on an -mavx build.
      tol = 1e-3

      if len(shape_) == 2:
        c = tf.matrix_determinant(a)
      else:
        c = tf.batch_matrix_determinant(a)

      out_shape = shape_[:-2]  # last two dimensions hold matrices
      theoretical, numerical = tf.test.compute_gradient(a,
                                                        shape_,
                                                        c,
                                                        out_shape,
                                                        delta=delta)

      self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
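
# shape_ and dtype_ above are free variables bound by a test generator: the
# TensorFlow test suite creates one such method per (shape, dtype) pair. A
# minimal sketch of that pattern (generator and class names are hypothetical):
def _GetDeterminantGradientTest(shape_, dtype_):
  def Test(self):
    ...  # body as above, closing over shape_ and dtype_
  return Test

for shape_ in [(3, 3), (2, 5, 5)]:
  for dtype_ in [np.float32, np.float64]:
    name = '_'.join(map(str, shape_)) + '_' + dtype_.__name__
    setattr(DeterminantGradientTest, 'testGradient_' + name,
            _GetDeterminantGradientTest(shape_, dtype_))
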
  def _compareDeterminant(self, matrix_x):
    with self.test_session():
      # Check the batch version, which should work for ndim >= 2.
      self._compareDeterminantBase(
          matrix_x, tf.batch_matrix_determinant(matrix_x))
      if matrix_x.ndim == 2:
        # Check the simple version.
        self._compareDeterminantBase(matrix_x, tf.matrix_determinant(matrix_x))
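
  # _compareDeterminantBase is not shown in this snippet; judging from the
  # standalone _compareDeterminant variant further down, a minimal sketch
  # (an assumption, not the original helper) would be:
  def _compareDeterminantBase(self, matrix_x, tf_ans):
    out = tf_ans.eval()
    np_ans = np.array(np.linalg.det(matrix_x)).astype(matrix_x.dtype)
    self.assertAllClose(np_ans, out)
    self.assertShapeEqual(np_ans, tf_ans)
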
  def test_determinants(self):
    with self.test_session():
      for batch_shape in [(), (2, 3,)]:
        for k in [1, 4]:
          operator, mat = self._build_operator_and_mat(batch_shape, k)
          expected_det = tf.batch_matrix_determinant(mat).eval()

          self._compare_results(expected_det, operator.det())
          self._compare_results(np.log(expected_det), operator.log_det())
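
  # _build_operator_and_mat and _compare_results are harness helpers not shown
  # here; presumably the former returns an operator together with its dense
  # batch matrix `mat` of shape batch_shape + (k, k), so operator.det() and
  # operator.log_det() can be checked against the dense
  # tf.batch_matrix_determinant result.
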
def get_function(points, mu, sigma):  # f_ik [n,k]
    # n, k and coef are closure variables from the enclosing scope.
    div = coef * tf.rsqrt(tf.batch_matrix_determinant(sigma))  # ((2pi)^p*|S_k|)^-1/2  [k]
    div = tf.tile(tf.reshape(div, [1, k]), [n, 1])  # [n,k]
    diff = tf.sub(tf.tile(points, [k, 1, 1]), tf.tile(mu, [n, 1, 1]))  # x_i-u_k [n*k, p, 1]
    sigma = tf.tile(sigma, [n, 1, 1])  # [n*k,p,p]
    exp = tf.exp(-0.5 * tf.batch_matmul(
        tf.transpose(diff, perm=[0, 2, 1]),
        tf.batch_matmul(tf.batch_matrix_inverse(sigma), diff)))  # e^(-d'*S^-1*d/2)_ik [n*k, 1, 1]
    exp = tf.reshape(exp, [n, k])
    # Multivariate normal density of each point under each cluster's
    # parameters, hence the [n, k] shape.
    return tf.mul(div, exp)
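
# Usage sketch (hypothetical sizes; get_function closes over n, k and coef,
# where coef is presumably (2*pi)**(-p/2.0) given the comment on div):
n, k, p = 100, 3, 2
coef = (2 * np.pi) ** (-p / 2.0)
points = tf.constant(np.random.rand(n, p, 1))       # n column vectors x_i
mu = tf.constant(np.random.rand(k, p, 1))           # k cluster means u_k
sigma = tf.constant(np.tile(np.eye(p), (k, 1, 1)))  # k covariance matrices S_k
f_ik = get_function(points, mu, sigma)              # densities, shape [n, k]
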
  def testBatchGradientUnknownSize(self):
    with self.test_session():
      batch_size = tf.constant(3)
      matrix_size = tf.constant(4)
      batch_identity = tf.tile(
          tf.expand_dims(
              tf.diag(tf.ones([matrix_size])), 0), [batch_size, 1, 1])
      determinants = tf.batch_matrix_determinant(batch_identity)
      reduced = tf.reduce_sum(determinants)
      sum_grad = tf.gradients(reduced, batch_identity)[0]
      self.assertAllClose(batch_identity.eval(), sum_grad.eval())
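
  # Why the identity is the expected gradient: by Jacobi's formula,
  # d(det A)/dA = det(A) * inv(A)^T, which is I when A = I. Summing the batch
  # of determinants therefore gives each matrix a gradient of exactly I.
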
  def _compareDeterminant(self, matrix_x):
    with self.test_session():
      if matrix_x.ndim == 2:
        tf_ans = tf.matrix_determinant(matrix_x)
      else:
        tf_ans = tf.batch_matrix_determinant(matrix_x)
      out = tf_ans.eval()
    shape = matrix_x.shape
    if shape[-1] == 0 and shape[-2] == 0:
      np_ans = np.ones(shape[:-2]).astype(matrix_x.dtype)
    else:
      np_ans = np.array(np.linalg.det(matrix_x)).astype(matrix_x.dtype)
    self.assertAllClose(np_ans, out)
    self.assertShapeEqual(np_ans, tf_ans)
import numpy as np
import tensorflow as tf

# tf.matmul
x = np.random.rand(2, 3)
y = np.random.rand(3, 2)
z_matmul = tf.matmul(x, y)

# tf.batch_matmul
x = np.random.rand(10, 2, 3)
y = np.random.rand(10, 3, 2)
z_batch_matmul = tf.batch_matmul(x, y)

# tf.matrix_determinant
x = np.random.rand(5, 5)
z_matrix_determinant = tf.matrix_determinant(x)

# tf.batch_matrix_determinant
batch_x = np.random.rand(10, 5, 5)
z_batch_matrix_determinant = tf.batch_matrix_determinant(batch_x)

# tf.matrix_inverse
x = np.random.rand(10, 10)
z_matrix_inverse = tf.matrix_inverse(x)

# tf.batch_matrix_inverse
batch_x = np.random.rand(10, 5, 5)
z_batch_matrix_inverse = tf.batch_matrix_inverse(batch_x)

# tf.cholesky (input must be symmetric positive definite)
x = np.random.rand(10, 10)
x = np.dot(x, x.T) + 10 * np.eye(10)  # make x symmetric positive definite
z_cholesky = tf.cholesky(x)

# tf.batch_cholesky
batch_x = np.random.rand(10, 5, 5)
batch_x = np.array([np.dot(m, m.T) + np.eye(5) for m in batch_x])  # SPD batch
z_batch_cholesky = tf.batch_cholesky(batch_x)
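
# The snippets above only build graph ops; with this pre-1.0 TensorFlow API,
# evaluating them requires a session. A minimal sketch:
with tf.Session() as sess:
    dets = sess.run(z_batch_matrix_determinant)
    print(dets.shape)  # (10,) -- one determinant per 5x5 matrix in the batch
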
  def test_BatchMatrixDeterminant(self):
    t = tf.batch_matrix_determinant(self.random(2, 3, 4, 3, 3))
    self.check(t)
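
  # self.random and self.check are harness helpers; self.random(2, 3, 4, 3, 3)
  # presumably yields a (2, 3, 4) batch of 3x3 matrices, so the determinant op
  # should produce a tensor of shape (2, 3, 4).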