Example #1
    def benchmarkMatrixDeterminantOp(self):
        for shape in self.shapes:
            with ops.Graph().as_default(), session.Session(
                    config=benchmark.benchmark_config()) as sess, ops.device(
                        "/cpu:0"):
                matrix = self._GenerateMatrix(shape)
                d = linalg_ops.matrix_determinant(matrix)
                variables.global_variables_initializer().run()
                self.run_op_benchmark(
                    sess,
                    control_flow_ops.group(d, ),
                    min_iters=25,
                    name="matrix_determinant_cpu_{shape}".format(shape=shape))

            if test.is_gpu_available(True):
                with ops.Graph().as_default(), session.Session(
                        config=benchmark.benchmark_config(
                        )) as sess, ops.device("/gpu:0"):
                    matrix = self._GenerateMatrix(shape)
                    d = linalg_ops.matrix_determinant(matrix)
                    variables.global_variables_initializer().run()
                    self.run_op_benchmark(
                        sess,
                        control_flow_ops.group(d, ),
                        min_iters=25,
                        name="matrix_determinant_gpu_{shape}".format(
                            shape=shape))
Example #2
  def benchmarkMatrixDeterminantOp(self):
    for shape in self.shapes:
      with ops.Graph().as_default(), session.Session(
          config=benchmark.benchmark_config()) as sess, ops.device("/cpu:0"):
        matrix = self._GenerateMatrix(shape)
        d = linalg_ops.matrix_determinant(matrix)
        variables.global_variables_initializer().run()
        self.run_op_benchmark(
            sess,
            control_flow_ops.group(d),
            min_iters=25,
            name="matrix_determinant_cpu_{shape}".format(shape=shape))

      if test.is_gpu_available(True):
        with ops.Graph().as_default(), session.Session(
            config=benchmark.benchmark_config()) as sess, ops.device("/gpu:0"):
          matrix = self._GenerateMatrix(shape)
          d = linalg_ops.matrix_determinant(matrix)
          variables.global_variables_initializer().run()
          self.run_op_benchmark(
              sess,
              control_flow_ops.group(d),
              min_iters=25,
              name="matrix_determinant_gpu_{shape}".format(shape=shape))
Example #3
 def testConcurrentExecutesWithoutError(self):
     with self.session(use_gpu=True) as sess:
         matrix1 = random_ops.random_normal([5, 5], seed=42)
         matrix2 = random_ops.random_normal([5, 5], seed=42)
         det1 = linalg_ops.matrix_determinant(matrix1)
         det2 = linalg_ops.matrix_determinant(matrix2)
         det1_val, det2_val = sess.run([det1, det2])
         self.assertEqual(det1_val, det2_val)
Example #4
 def testConcurrentExecutesWithoutError(self):
   with self.session(use_gpu=True) as sess:
     matrix1 = random_ops.random_normal([5, 5], seed=42)
     matrix2 = random_ops.random_normal([5, 5], seed=42)
     det1 = linalg_ops.matrix_determinant(matrix1)
     det2 = linalg_ops.matrix_determinant(matrix2)
     det1_val, det2_val = sess.run([det1, det2])
     self.assertEqual(det1_val, det2_val)
Example #5
 def _determinant(self):
   logging.warn(
       "Using (possibly slow) default implementation of determinant."
       "  Requires conversion to a dense matrix and O(N^3) operations.")
   if self._can_use_cholesky():
     return math_ops.exp(self.log_abs_determinant())
   return linalg_ops.matrix_determinant(self._matrix)
Example #6
 def _determinant(self):
     logging.warn(
         "Using (possibly slow) default implementation of determinant."
         "  Requires conversion to a dense matrix and O(N^3) operations.")
     if self._can_use_cholesky():
         return math_ops.exp(self.log_abs_determinant())
     return linalg_ops.matrix_determinant(self.to_dense())
Example #7
def _det_large_enough_mask(x, det_bounds):
  """Returns whether the input matches the given determinant limit.

  Args:
    x: A floating-point `Tensor` of shape `[B1, ..., Bn, M, M]`.
    det_bounds: A floating-point `Tensor` that must broadcast to shape
      `[B1, ..., Bn]`, giving the desired lower bound on the
      determinants in `x`.

  Returns:
    mask: A floating-point `Tensor` of shape [B1, ..., Bn].  Each
      scalar is 1 if the corresponding matrix had determinant above
      the corresponding bound, otherwise 0.
  """
  # For the curious: I wonder whether it is possible and desirable to
  # use a Cholesky decomposition-based algorithm for this, since the
  # only matrices whose determinant this code cares about will be PSD.
  # Didn't figure out how to code that in TensorFlow.
  #
  # Expert opinion is that it would be about twice as fast since
  # Cholesky is roughly half the cost of Gaussian Elimination with
  # Partial Pivoting. But this is less of an impact than the switch in
  # _psd_mask.
  return math_ops.cast(
      linalg_ops.matrix_determinant(x) > det_bounds, dtype=x.dtype)
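A note on the Cholesky idea floated in the comment above: for a symmetric
positive definite matrix A with Cholesky factor L (A = L @ L.T), det(A) is the
squared product of L's diagonal. A minimal NumPy sketch of that identity (the
variable names are illustrative, not from the original code):

import numpy as np

rng = np.random.default_rng(0)
b = rng.standard_normal((4, 4))
a = b @ b.T + 4.0 * np.eye(4)  # symmetric positive definite by construction

chol = np.linalg.cholesky(a)   # lower-triangular factor L, with a == L @ L.T
det_via_cholesky = np.prod(np.diag(chol)) ** 2
print(np.allclose(det_via_cholesky, np.linalg.det(a)))  # prints True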
Example #8
  def testDeterminants(self):
    with self.test_session():
      for batch_shape in [(), (2, 3)]:
        for k in [1, 4]:
          operator, mat = self._build_operator_and_mat(batch_shape, k)
          expected_det = linalg_ops.matrix_determinant(mat).eval()

          self._compare_results(expected_det, operator.det())
          self._compare_results(np.log(expected_det), operator.log_det())
Example #9
  def testDeterminants(self):
    with self.test_session():
      for batch_shape in [(), (2, 3)]:
        for k in [1, 4]:
          operator, mat = self._build_operator_and_mat(batch_shape, k)
          expected_det = linalg_ops.matrix_determinant(mat).eval()

          self._compare_results(expected_det, operator.det())
          self._compare_results(np.log(expected_det), operator.log_det())
Example #10
 def test_det(self):
   with self.session(graph=ops.Graph()) as sess:
     sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
     operator, mat = self.operator_and_matrix(
         shapes_info, dtype, use_placeholder=use_placeholder)
     op_det = operator.determinant()
     if not use_placeholder:
       self.assertAllEqual(shapes_info.shape[:-2], op_det.get_shape())
     op_det_v, mat_det_v = sess.run(
         [op_det, linalg_ops.matrix_determinant(mat)])
     self.assertAC(op_det_v, mat_det_v)
Example #11
 def test_det(self):
     with self.session(graph=ops.Graph()) as sess:
         sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
         operator, mat = self.operator_and_matrix(
             shapes_info, dtype, use_placeholder=use_placeholder)
         op_det = operator.determinant()
         if not use_placeholder:
             self.assertAllEqual(shapes_info.shape[:-2], op_det.shape)
         op_det_v, mat_det_v = sess.run(
             [op_det, linalg_ops.matrix_determinant(mat)])
         self.assertAC(op_det_v, mat_det_v)
Example #12
 def testBatchGradientUnknownSize(self):
   with self.cached_session():
     batch_size = constant_op.constant(3)
     matrix_size = constant_op.constant(4)
     batch_identity = array_ops.tile(
         array_ops.expand_dims(
             array_ops.diag(array_ops.ones([matrix_size])), 0),
         [batch_size, 1, 1])
     determinants = linalg_ops.matrix_determinant(batch_identity)
     reduced = math_ops.reduce_sum(determinants)
     sum_grad = gradients_impl.gradients(reduced, batch_identity)[0]
     self.assertAllClose(batch_identity.eval(), self.evaluate(sum_grad))
Example #13
 def testBatchGradientUnknownSize(self):
   with self.test_session():
     batch_size = constant_op.constant(3)
     matrix_size = constant_op.constant(4)
     batch_identity = array_ops.tile(
         array_ops.expand_dims(
             array_ops.diag(array_ops.ones([matrix_size])), 0),
         [batch_size, 1, 1])
     determinants = linalg_ops.matrix_determinant(batch_identity)
     reduced = math_ops.reduce_sum(determinants)
     sum_grad = gradients_impl.gradients(reduced, batch_identity)[0]
     self.assertAllClose(batch_identity.eval(), sum_grad.eval())
Example #14
 def _determinant(self):
     if self.is_positive_definite:
         return math_ops.exp(self.log_abs_determinant())
     # The matrix determinant lemma gives
     # https://en.wikipedia.org/wiki/Matrix_determinant_lemma
     #   det(L + UDV^H) = det(D^{-1} + V^H L^{-1} U) det(D) det(L)
     #                  = det(C) det(D) det(L)
     # where C is sometimes known as the capacitance matrix,
     #   C := D^{-1} + V^H L^{-1} U
     det_c = linalg_ops.matrix_determinant(self._make_capacitance())
     det_d = self.diag_operator.determinant()
     det_l = self.base_operator.determinant()
     return det_c * det_d * det_l
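The capacitance-matrix formula above is the matrix determinant lemma. A hedged
NumPy check of the identity det(L + U D V^H) = det(D^{-1} + V^H L^{-1} U) *
det(D) * det(L), using real matrices and illustrative names:

import numpy as np

rng = np.random.default_rng(1)
n, k = 5, 2
base = rng.standard_normal((n, n)) + n * np.eye(n)  # well-conditioned L
u = rng.standard_normal((n, k))
v = rng.standard_normal((n, k))
d = np.diag(rng.uniform(0.5, 2.0, size=k))          # nonsingular diagonal D

lhs = np.linalg.det(base + u @ d @ v.T)
capacitance = np.linalg.inv(d) + v.T @ np.linalg.inv(base) @ u  # C
rhs = np.linalg.det(capacitance) * np.linalg.det(d) * np.linalg.det(base)
print(np.allclose(lhs, rhs))  # prints True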
Example #15
    def benchmarkMatrixDeterminantOp(self):
        for size in self.sizes:
            data = self._GenerateData(size)

            with ops.Graph().as_default(), session.Session(
            ) as sess, ops.device("/cpu:0"):
                d = linalg_ops.matrix_determinant(data)
                self.run_op_benchmark(
                    sess,
                    control_flow_ops.group(d, ),
                    min_iters=25,
                    name="matrix_determinant_cpu_{size}".format(size=size))

            if test.is_gpu_available(True):
                with ops.Graph().as_default(), session.Session(
                ) as sess, ops.device("/gpu:0"):
                    d = linalg_ops.matrix_determinant(data)
                    self.run_op_benchmark(
                        sess,
                        control_flow_ops.group(d, ),
                        min_iters=25,
                        name="matrix_determinant_gpu_{size}".format(size=size))
 def _determinant(self):
   if self.is_positive_definite:
     return math_ops.exp(self.log_abs_determinant())
   # The matrix determinant lemma gives
   # https://en.wikipedia.org/wiki/Matrix_determinant_lemma
   #   det(L + UDV^H) = det(D^{-1} + V^H L^{-1} U) det(D) det(L)
   #                  = det(C) det(D) det(L)
   # where C is sometimes known as the capacitance matrix,
   #   C := D^{-1} + V^H L^{-1} U
   det_c = linalg_ops.matrix_determinant(self._capacitance)
   det_d = self.diag_operator.determinant()
   det_l = self.base_operator.determinant()
   return det_c * det_d * det_l
Example #17
 def test_det(self):
   self._skip_if_tests_to_skip_contains("det")
   for use_placeholder in self._use_placeholder_options:
     for build_info in self._operator_build_infos:
       for dtype in self._dtypes_to_test:
         with self.session(graph=ops.Graph()) as sess:
           sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
           operator, mat = self._operator_and_matrix(
               build_info, dtype, use_placeholder=use_placeholder)
           op_det = operator.determinant()
           if not use_placeholder:
             self.assertAllEqual(build_info.shape[:-2], op_det.get_shape())
           op_det_v, mat_det_v = sess.run(
               [op_det, linalg_ops.matrix_determinant(mat)])
           self.assertAC(op_det_v, mat_det_v)
Example #18
 def test_det(self):
   self._skip_if_tests_to_skip_contains("det")
   for use_placeholder in self._use_placeholder_options:
     for build_info in self._operator_build_infos:
       for dtype in self._dtypes_to_test:
         with self.session(graph=ops.Graph()) as sess:
           sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
           operator, mat = self._operator_and_matrix(
               build_info, dtype, use_placeholder=use_placeholder)
           op_det = operator.determinant()
           if not use_placeholder:
             self.assertAllEqual(build_info.shape[:-2], op_det.get_shape())
           op_det_v, mat_det_v = sess.run(
               [op_det, linalg_ops.matrix_determinant(mat)])
           self.assertAC(op_det_v, mat_det_v)
Example #19
  def _log_abs_determinant(self):
    # Recall
    #   det(L + UDV^H) = det(D^{-1} + V^H L^{-1} U) det(D) det(L)
    #                  = det(C) det(D) det(L)
    log_abs_det_d = self.diag_operator.log_abs_determinant()
    log_abs_det_l = self.base_operator.log_abs_determinant()

    if self._use_cholesky:
      chol_cap_diag = array_ops.matrix_diag_part(self._chol_capacitance)
      log_abs_det_c = 2 * math_ops.reduce_sum(
          math_ops.log(chol_cap_diag), reduction_indices=[-1])
    else:
      det_c = linalg_ops.matrix_determinant(self._capacitance)
      log_abs_det_c = math_ops.log(math_ops.abs(det_c))

    return log_abs_det_c + log_abs_det_d + log_abs_det_l
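The Cholesky branch above relies on log|det(C)| = 2 * sum(log(diag(chol(C))))
for a positive definite capacitance C. A small NumPy sketch of that identity
(the matrix `c` is an illustrative stand-in for the capacitance):

import numpy as np

rng = np.random.default_rng(2)
b = rng.standard_normal((3, 3))
c = b @ b.T + 3.0 * np.eye(3)  # positive definite stand-in for the capacitance

log_abs_det_c = 2.0 * np.sum(np.log(np.diag(np.linalg.cholesky(c))))
sign, logdet = np.linalg.slogdet(c)
print(np.allclose(log_abs_det_c, logdet))  # prints True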
Example #20
 def test_det(self):
   self._skip_if_tests_to_skip_contains("det")
   for use_placeholder in False, True:
     for shape in self._shapes_to_test:
       for dtype in self._dtypes_to_test:
         with self.test_session(graph=ops.Graph()) as sess:
           sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
           operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
               shape, dtype, use_placeholder=use_placeholder)
           op_det = operator.determinant()
           if not use_placeholder:
             self.assertAllEqual(shape[:-2], op_det.get_shape())
           op_det_v, mat_det_v = sess.run(
               [op_det, linalg_ops.matrix_determinant(mat)],
               feed_dict=feed_dict)
           self.assertAC(op_det_v, mat_det_v)
Example #21
    def _log_abs_determinant(self):
        # Recall
        #   det(L + UDV^H) = det(D^{-1} + V^H L^{-1} U) det(D) det(L)
        #                  = det(C) det(D) det(L)
        log_abs_det_d = self.diag_operator.log_abs_determinant()
        log_abs_det_l = self.base_operator.log_abs_determinant()

        if self._use_cholesky:
            chol_cap_diag = array_ops.matrix_diag_part(self._chol_capacitance)
            log_abs_det_c = 2 * math_ops.reduce_sum(
                math_ops.log(chol_cap_diag), reduction_indices=[-1])
        else:
            det_c = linalg_ops.matrix_determinant(self._capacitance)
            log_abs_det_c = math_ops.log(math_ops.abs(det_c))

        return log_abs_det_c + log_abs_det_d + log_abs_det_l
Example #22
    def _log_abs_determinant(self):
        # Recall
        #   det(L + UDV^H) = det(D^{-1} + V^H L^{-1} U) det(D) det(L)
        #                  = det(C) det(D) det(L)
        log_abs_det_d = self.diag_operator.log_abs_determinant()
        log_abs_det_l = self.base_operator.log_abs_determinant()

        if self._use_cholesky:
            chol_cap_diag = array_ops.matrix_diag_part(
                linalg_ops.cholesky(self._make_capacitance()))
            log_abs_det_c = 2 * math_ops.reduce_sum(
                math_ops.log(chol_cap_diag), axis=[-1])
        else:
            det_c = linalg_ops.matrix_determinant(self._make_capacitance())
            log_abs_det_c = math_ops.log(math_ops.abs(det_c))
            if self.dtype.is_complex:
                log_abs_det_c = math_ops.cast(log_abs_det_c, dtype=self.dtype)

        return log_abs_det_c + log_abs_det_d + log_abs_det_l
Example #23
 def test_det(self):
   self._maybe_skip("det")
   with self.test_session() as sess:
     for use_placeholder in False, True:
       for shape in self._shapes_to_test:
         for dtype in self._dtypes_to_test:
           if dtype.is_complex:
             self.skipTest(
                 "tf.matrix_determinant does not work with complex, so this "
                 "test is being skipped.")
           operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
               shape, dtype, use_placeholder=use_placeholder)
           op_det = operator.determinant()
           if not use_placeholder:
             self.assertAllEqual(shape[:-2], op_det.get_shape())
           op_det_v, mat_det_v = sess.run(
               [op_det, linalg_ops.matrix_determinant(mat)],
               feed_dict=feed_dict)
           self.assertAC(op_det_v, mat_det_v)
Example #24
 def test_det(self):
     self._maybe_skip("det")
     with self.test_session() as sess:
         for use_placeholder in False, True:
             for shape in self._shapes_to_test:
                 for dtype in self._dtypes_to_test:
                     if dtype.is_complex:
                         self.skipTest(
                             "tf.matrix_determinant does not work with complex, so this "
                             "test is being skipped.")
                     operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
                         shape, dtype, use_placeholder=use_placeholder)
                     op_det = operator.determinant()
                     if not use_placeholder:
                         self.assertAllEqual(shape[:-2], op_det.get_shape())
                     op_det_v, mat_det_v = sess.run(
                         [op_det,
                          linalg_ops.matrix_determinant(mat)],
                         feed_dict=feed_dict)
                     self.assertAC(op_det_v, mat_det_v)
Example #25
  def sqrt_log_abs_det(self):
    """Computes (log o abs o det)(X) for matrix X.

    Doesn't actually do the sqrt! Named as such to agree with API.

    To compute det(M + V D V.T), we use the matrix determinant lemma:
      det(Tril + V D V.T) = det(C) det(D) det(M)
    where C is defined as in `_inverse`, ie,
      C = inv(D) + V.T inv(M) V.

    See: https://en.wikipedia.org/wiki/Matrix_determinant_lemma

    Returns:
      log_abs_det: `Tensor`.
    """
    log_det_c = math_ops.log(math_ops.abs(
        linalg_ops.matrix_determinant(self._woodbury_sandwiched_term())))
    # Reduction is ok because we always prepad inputs to this class.
    log_det_m = math_ops.reduce_sum(math_ops.log(math_ops.abs(
        array_ops.matrix_diag_part(self._m))), axis=[-1])
    return log_det_c + 2. * self._d.sqrt_log_abs_det() + log_det_m
Example #26
 def test_log_abs_det(self):
   self._skip_if_tests_to_skip_contains("log_abs_det")
   for use_placeholder in False, True:
     for shape in self._shapes_to_test:
       for dtype in self._dtypes_to_test:
         if dtype.is_complex:
           self.skipTest(
               "tf.matrix_determinant does not work with complex, so this "
               "test is being skipped.")
         with self.test_session(graph=ops.Graph()) as sess:
           sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
           operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
               shape, dtype, use_placeholder=use_placeholder)
           op_log_abs_det = operator.log_abs_determinant()
           mat_log_abs_det = math_ops.log(
               math_ops.abs(linalg_ops.matrix_determinant(mat)))
           if not use_placeholder:
             self.assertAllEqual(shape[:-2], op_log_abs_det.get_shape())
           op_log_abs_det_v, mat_log_abs_det_v = sess.run(
               [op_log_abs_det, mat_log_abs_det],
               feed_dict=feed_dict)
           self.assertAC(op_log_abs_det_v, mat_log_abs_det_v)
Example #27
def log_noninformative_covariance_prior(covariance):
    """Compute a relatively uninformative prior for noise parameters.

  Helpful for avoiding noise over-estimation, where noise otherwise decreases
  very slowly during optimization.

  See:
    Villegas, C. On the A Priori Distribution of the Covariance Matrix.
    Ann. Math. Statist. 40 (1969), no. 3, 1098--1099.

  Args:
    covariance: A covariance matrix.
  Returns:
    For a [p x p] matrix:
      log(det(covariance)^(-(p + 1) / 2))
  """
    # Avoid zero/negative determinants due to numerical errors
    covariance += array_ops.diag(1e-8 * array_ops.ones(
        shape=[array_ops.shape(covariance)[0]], dtype=covariance.dtype))
    power = -(math_ops.cast(
        array_ops.shape(covariance)[0] + 1, covariance.dtype) / 2.)
    return power * math_ops.log(linalg_ops.matrix_determinant(covariance))
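A quick NumPy sanity check of the docstring's formula (names are illustrative):
for a p x p covariance, log(det(cov)^(-(p + 1) / 2)) equals
-(p + 1) / 2 * log(det(cov)), which is what the return line computes.

import numpy as np

rng = np.random.default_rng(3)
p = 3
b = rng.standard_normal((p, p))
cov = b @ b.T + np.eye(p)  # positive definite covariance

prior = -(p + 1) / 2.0 * np.log(np.linalg.det(cov))
direct = np.log(np.linalg.det(cov) ** (-(p + 1) / 2.0))
print(np.isclose(prior, direct))  # prints True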
Example #28
 def test_log_abs_det(self):
   self._skip_if_tests_to_skip_contains("log_abs_det")
   for use_placeholder in False, True:
     for shape in self._shapes_to_test:
       for dtype in self._dtypes_to_test:
         if dtype.is_complex:
           self.skipTest(
               "tf.matrix_determinant does not work with complex, so this "
               "test is being skipped.")
         with self.test_session(graph=ops.Graph()) as sess:
           sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
           operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
               shape, dtype, use_placeholder=use_placeholder)
           op_log_abs_det = operator.log_abs_determinant()
           mat_log_abs_det = math_ops.log(
               math_ops.abs(linalg_ops.matrix_determinant(mat)))
           if not use_placeholder:
             self.assertAllEqual(shape[:-2], op_log_abs_det.get_shape())
           op_log_abs_det_v, mat_log_abs_det_v = sess.run(
               [op_log_abs_det, mat_log_abs_det],
               feed_dict=feed_dict)
           self.assertAC(op_log_abs_det_v, mat_log_abs_det_v)
Example #29
def log_noninformative_covariance_prior(covariance):
  """Compute a relatively uninformative prior for noise parameters.

  Helpful for avoiding noise over-estimation, where noise otherwise decreases
  very slowly during optimization.

  See:
    Villegas, C. On the A Priori Distribution of the Covariance Matrix.
    Ann. Math. Statist. 40 (1969), no. 3, 1098--1099.

  Args:
    covariance: A covariance matrix.
  Returns:
    For a [p x p] matrix:
      log(det(covariance)^(-(p + 1) / 2))
  """
  # Avoid zero/negative determinants due to numerical errors
  covariance += array_ops.diag(1e-8 * array_ops.ones(
      shape=[array_ops.shape(covariance)[0]], dtype=covariance.dtype))
  power = -(math_ops.cast(array_ops.shape(covariance)[0] + 1,
                          covariance.dtype) / 2.)
  return power * math_ops.log(linalg_ops.matrix_determinant(covariance))
Example #30
    def sqrt_log_abs_det(self):
        """Computes (log o abs o det)(X) for matrix X.

    Doesn't actually do the sqrt! Named as such to agree with API.

    To compute det(M + V D V.T), we use the matrix determinant lemma:
      det(Tril + V D V.T) = det(C) det(D) det(M)
    where C is defined as in `_inverse`, ie,
      C = inv(D) + V.T inv(M) V.

    See: https://en.wikipedia.org/wiki/Matrix_determinant_lemma

    Returns:
      log_abs_det: `Tensor`.
    """
        log_det_c = math_ops.log(
            math_ops.abs(
                linalg_ops.matrix_determinant(
                    self._woodbury_sandwiched_term())))
        # Reduction is ok because we always prepad inputs to this class.
        log_det_m = math_ops.reduce_sum(math_ops.log(
            math_ops.abs(array_ops.matrix_diag_part(self._m))),
                                        axis=[-1])
        return log_det_c + 2. * self._d.sqrt_log_abs_det() + log_det_m
Example #31
 def _determinant(self):
   if self._can_use_cholesky():
     return math_ops.exp(self.log_abs_determinant())
   return linalg_ops.matrix_determinant(self._matrix)
Example #32
 def _compareDeterminant(self, matrix_x):
   with test_util.use_gpu():
     self._compareDeterminantBase(matrix_x,
                                  linalg_ops.matrix_determinant(matrix_x))
     self._compareLogDeterminantBase(
         matrix_x, gen_linalg_ops.log_matrix_determinant(matrix_x))
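For context on the comparison above: a log-determinant op returns the
determinant as a (sign, log_abs_det) pair, which avoids overflow for large
matrices. A hedged NumPy illustration of the decomposition
det(A) = sign * exp(log_abs_det), using np.linalg.slogdet as a stand-in:

import numpy as np

a = np.array([[0., 2.], [1., 1.]])  # det(a) = -2
sign, log_abs_det = np.linalg.slogdet(a)
print(np.isclose(sign * np.exp(log_abs_det), np.linalg.det(a)))  # prints True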
Example #33
 def _forward_log_det_jacobian(self, x):
   return -linalg_ops.matrix_determinant(x)
Example #34
 def _compareDeterminant(self, matrix_x):
     with self.cached_session(use_gpu=True):
         self._compareDeterminantBase(
             matrix_x, linalg_ops.matrix_determinant(matrix_x))
         self._compareLogDeterminantBase(
             matrix_x, gen_linalg_ops.log_matrix_determinant(matrix_x))
Example #35
 def testNonSquareMatrix(self):
   # Attempting the determinant of a non-square matrix should raise an error.
   with self.assertRaises(ValueError):
     linalg_ops.matrix_determinant(
         np.array([[1., 2., 3.], [3., 5., 4.]]).astype(np.float32))
Example #36
 def _inverse_log_det_jacobian(self, x):
   return linalg_ops.matrix_determinant(x)
Example #37
 def _compareDeterminant(self, matrix_x):
     with test_util.use_gpu():
         self._compareDeterminantBase(
             matrix_x, linalg_ops.matrix_determinant(matrix_x))
         self._compareLogDeterminantBase(
             matrix_x, gen_linalg_ops.log_matrix_determinant(matrix_x))
Example #38
 def testNonSquareMatrix(self):
     # Attempting the determinant of a non-square matrix should raise an error.
     with self.assertRaises(ValueError):
         linalg_ops.matrix_determinant(
             np.array([[1., 2., 3.], [3., 5., 4.]]).astype(np.float32))
Example #39
 def _determinant(self):
   if self._is_spd:
     return math_ops.exp(self.log_abs_determinant())
   return linalg_ops.matrix_determinant(self._matrix)
Example #40
 def testWrongDimensions(self):
   # The input to the determinant should be a 2-dimensional tensor.
   tensor1 = constant_op.constant([1., 2.])
   with self.assertRaises(ValueError):
     linalg_ops.matrix_determinant(tensor1)
Example #41
 def _compareDeterminant(self, matrix_x):
   with self.cached_session(use_gpu=True):
     self._compareDeterminantBase(matrix_x,
                                  linalg_ops.matrix_determinant(matrix_x))
     self._compareLogDeterminantBase(
         matrix_x, gen_linalg_ops.log_matrix_determinant(matrix_x))
Example #42
 def _compareDeterminant(self, matrix_x):
     with self.test_session():
         self._compareDeterminantBase(
             matrix_x, linalg_ops.matrix_determinant(matrix_x))
Example #43
 def _compareDeterminant(self, matrix_x):
   with self.test_session():
     self._compareDeterminantBase(matrix_x,
                                  linalg_ops.matrix_determinant(matrix_x))
Example #44
 def testWrongDimensions(self):
     # The input to the determinant should be a 2-dimensional tensor.
     tensor1 = constant_op.constant([1., 2.])
     with self.assertRaises(ValueError):
         linalg_ops.matrix_determinant(tensor1)
Example #45
 def _determinant(self):
   if self._is_spd:
     return math_ops.exp(self.log_abs_determinant())
   return linalg_ops.matrix_determinant(self._matrix)