Example #1
    def testPivoting(self):
        with test_util.use_gpu():
            # This matrix triggers partial pivoting because the first diagonal entry
            # is small.
            data = np.array([[1e-9, 1., 0.], [1., 0., 0], [0., 1., 5]])
            self._verifyLu(data.astype(np.float32))

            for dtype in (np.float32, np.float64):
                self._verifyLu(data.astype(dtype))
                _, p = linalg_ops.lu(data.astype(dtype))
                p_val = self.evaluate([p])
                # Make sure p_val is not the identity permutation.
                self.assertNotAllClose(np.arange(3), p_val)

            # rocBLAS on the ROCm stack doesn't support the complex64 and
            # complex128 types.
            if not test.is_built_with_rocm():
                for dtype in (np.complex64, np.complex128):
                    complex_data = np.tril(1j * data, -1).astype(dtype)
                    complex_data += np.triu(-1j * data, 1).astype(dtype)
                    complex_data += data
                    self._verifyLu(complex_data)
                    _, p = linalg_ops.lu(complex_data)
                    p_val = self.evaluate([p])
                    # Make sure p_val is not the identity permutation.
                    self.assertNotAllClose(np.arange(3), p_val)
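
For readers outside the test harness: the same Px = LU property can be checked with the public tf.linalg.lu API (the tests above use the internal linalg_ops alias). A minimal sketch, assuming TF 2.x eager mode:

import numpy as np
import tensorflow as tf

# The tiny leading entry forces a row swap under partial pivoting.
a = tf.constant([[1e-9, 1., 0.], [1., 0., 0.], [0., 1., 5.]], tf.float32)
lu, perm = tf.linalg.lu(a)

# Unpack the packed result: unit-lower-triangular L and upper-triangular U.
lower = tf.linalg.set_diag(tf.linalg.band_part(lu, -1, 0), tf.ones([3]))
upper = tf.linalg.band_part(lu, 0, -1)

# Px = LU: gathering the rows of `a` by `perm` reproduces lower @ upper.
np.testing.assert_allclose(
    tf.gather(a, perm).numpy(), (lower @ upper).numpy(), atol=1e-6)
print(perm.numpy())  # not [0 1 2]: pivoting reordered the rows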
Example #2
    def benchmarkLuOp(self):
        for shape in self.shapes:
            with ops.Graph().as_default(), \
                session.Session(config=benchmark.benchmark_config()) as sess, \
                ops.device("/cpu:0"):
                matrix = variables.Variable(self._GenerateMatrix(shape))
                lu, p = linalg_ops.lu(matrix)
                variables.global_variables_initializer().run()
                self.run_op_benchmark(
                    sess,
                    control_flow_ops.group(lu, p),
                    min_iters=25,
                    name="lu_cpu_{shape}".format(shape=shape))

            if test.is_gpu_available(True):
                with ops.Graph().as_default(), \
                    session.Session(config=benchmark.benchmark_config()) as sess, \
                    ops.device("/device:GPU:0"):
                    matrix = variables.Variable(self._GenerateMatrix(shape))
                    lu, p = linalg_ops.lu(matrix)
                    variables.global_variables_initializer().run()
                    self.run_op_benchmark(
                        sess,
                        control_flow_ops.group(lu, p),
                        min_iters=25,
                        name="lu_gpu_{shape}".format(shape=shape))
Example #4
  def testConcurrentExecutesWithoutError(self):
    matrix1 = random_ops.random_normal([5, 5], seed=42)
    matrix2 = random_ops.random_normal([5, 5], seed=42)
    lu1, p1 = linalg_ops.lu(matrix1)
    lu2, p2 = linalg_ops.lu(matrix2)
    lu1_val, p1_val, lu2_val, p2_val = self.evaluate([lu1, p1, lu2, p2])
    self.assertAllEqual(lu1_val, lu2_val)
    self.assertAllEqual(p1_val, p2_val)
Example #6
    def testConcurrentExecutesWithoutError(self):
        with self.session(use_gpu=True) as sess:
            matrix1 = random_ops.random_normal([5, 5], seed=42)
            matrix2 = random_ops.random_normal([5, 5], seed=42)
            lu1, p1 = linalg_ops.lu(matrix1)
            lu2, p2 = linalg_ops.lu(matrix2)
            lu1_val, p1_val, lu2_val, p2_val = sess.run([lu1, p1, lu2, p2])
            self.assertAllEqual(lu1_val, lu2_val)
            self.assertAllEqual(p1_val, p2_val)
Example #7
  def testConcurrentExecutesWithoutError(self):
    matrix_shape = [5, 5]
    seed = [42, 24]
    matrix1 = stateless_random_ops.stateless_random_normal(
        shape=matrix_shape, seed=seed)
    matrix2 = stateless_random_ops.stateless_random_normal(
        shape=matrix_shape, seed=seed)
    self.assertAllEqual(matrix1, matrix2)
    lu1, p1 = linalg_ops.lu(matrix1)
    lu2, p2 = linalg_ops.lu(matrix2)
    lu1_val, p1_val, lu2_val, p2_val = self.evaluate([lu1, p1, lu2, p2])
    self.assertAllEqual(lu1_val, lu2_val)
    self.assertAllEqual(p1_val, p2_val)
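
The two inputs here are bit-identical by construction: a stateless RNG is a pure function of shape and seed. The same effect through the public API, as a minimal sketch assuming TF 2.x:

import tensorflow as tf

seed = [42, 24]
m1 = tf.random.stateless_normal([5, 5], seed=seed)
m2 = tf.random.stateless_normal([5, 5], seed=seed)
print(bool(tf.reduce_all(tf.equal(m1, m2))))  # True: same seed, same tensor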
Example #8
  def testInvalidMatrix(self):
    # LU factorization raises an error when the input is singular.
    # Note: the op may also return for a singular input without raising, but
    # the result won't be a valid factorization.
    for dtype in self.float_types:
      with self.assertRaises(errors.InvalidArgumentError):
        self.evaluate(
            linalg_ops.lu(
                np.array([[1., 2., 3.], [2., 4., 6.], [2., 3., 4.]],
                         dtype=dtype)))
      with self.assertRaises(errors.InvalidArgumentError):
        self.evaluate(
            linalg_ops.lu(
                np.array([[[1., 2., 3.], [2., 4., 6.], [1., 2., 3.]],
                          [[1., 2., 3.], [3., 4., 5.], [5., 6., 7.]]],
                         dtype=dtype)))
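
The same check can be probed with the public API. A sketch assuming TF 2.x eager mode; as the comment above warns, some kernels return (invalid) factors for singular input instead of raising:

import numpy as np
import tensorflow as tf

# Singular: the second row is twice the first.
singular = np.array([[1., 2., 3.], [2., 4., 6.], [2., 3., 4.]], np.float32)
try:
    lu, p = tf.linalg.lu(singular)
    print("no error raised; factors may be invalid")
except tf.errors.InvalidArgumentError as e:
    print("singular input rejected:", e.message)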
Example #10
    def _verifyLu(self, x, output_idx_type=dtypes.int64):
        # Verify that Px = LU.
        lu, perm = linalg_ops.lu(x, output_idx_type=output_idx_type)

        # Prepare the lower factor of shape num_rows x num_rows
        lu_shape = np.array(lu.shape.as_list())
        batch_shape = lu_shape[:-2]
        num_rows = lu_shape[-2]
        num_cols = lu_shape[-1]

        lower = array_ops.matrix_band_part(lu, -1, 0)

        if num_rows > num_cols:
            eye = linalg_ops.eye(num_rows,
                                 batch_shape=batch_shape,
                                 dtype=lower.dtype)
            lower = array_ops.concat([lower, eye[..., num_cols:]], axis=-1)
        elif num_rows < num_cols:
            lower = lower[..., :num_rows]

        # Fill the diagonal with ones.
        ones_diag = array_ops.ones(np.append(batch_shape, num_rows),
                                   dtype=lower.dtype)
        lower = array_ops.matrix_set_diag(lower, ones_diag)

        # Prepare the upper factor.
        upper = array_ops.matrix_band_part(lu, 0, -1)

        verification = test_util.matmul_without_tf32(lower, upper)

        # Permute the rows of the product of the triangular factors.
        if num_rows > 0:
            # Reshape the product of the triangular factors and permutation indices
            # to a single batch dimension. This makes it easy to apply
            # invert_permutation and gather_nd ops.
            perm_reshaped = array_ops.reshape(perm, [-1, num_rows])
            verification_reshaped = array_ops.reshape(verification,
                                                      [-1, num_rows, num_cols])
            # Invert the permutation in each batch.
            inv_perm_reshaped = map_fn.map_fn(array_ops.invert_permutation,
                                              perm_reshaped)
            batch_size = perm_reshaped.shape.as_list()[0]
            # Prepare the batch indices with the same shape as the permutation.
            # The corresponding batch index is paired with each of the `num_rows`
            # permutation indices.
            batch_indices = math_ops.cast(array_ops.broadcast_to(
                math_ops.range(batch_size)[:, None], perm_reshaped.shape),
                                          dtype=output_idx_type)
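            # map_fn may return an empty tensor of shape [0] here; fall back to
            # zeros shaped like batch_indices so the stack below still works.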
            if inv_perm_reshaped.shape == [0]:
                inv_perm_reshaped = array_ops.zeros_like(batch_indices)
            permuted_verification_reshaped = array_ops.gather_nd(
                verification_reshaped,
                array_ops.stack([batch_indices, inv_perm_reshaped], axis=-1))

            # Reshape the verification matrix back to the original shape.
            verification = array_ops.reshape(permuted_verification_reshaped,
                                             lu_shape)

        self._verifyLuBase(x, lower, upper, perm, verification,
                           output_idx_type)
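
The helper rebuilds Px = LU by hand; TensorFlow also ships tf.linalg.lu_reconstruct, which undoes the factorization directly. A minimal round-trip check, assuming TF 2.x eager mode:

import tensorflow as tf

a = tf.random.normal([2, 4, 4])  # a small batch of square matrices
lu, perm = tf.linalg.lu(a)
reconstructed = tf.linalg.lu_reconstruct(lu, perm)
print(tf.reduce_max(tf.abs(reconstructed - a)).numpy())  # ~1e-6 in float32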
Example #11
  def _verifyLu(self, x, output_idx_type=dtypes.int64):
    # Verify that Px = LU.
    lu, perm = linalg_ops.lu(x, output_idx_type=output_idx_type)

    # Prepare the lower factor of shape num_rows x num_rows
    lu_shape = np.array(lu.shape.as_list())
    batch_shape = lu_shape[:-2]
    num_rows = lu_shape[-2]
    num_cols = lu_shape[-1]

    lower = array_ops.matrix_band_part(lu, -1, 0)

    if num_rows > num_cols:
      eye = linalg_ops.eye(
          num_rows, batch_shape=batch_shape, dtype=lower.dtype)
      lower = array_ops.concat([lower, eye[..., num_cols:]], axis=-1)
    elif num_rows < num_cols:
      lower = lower[..., :num_rows]

    # Fill the diagonal with ones.
    ones_diag = array_ops.ones(
        np.append(batch_shape, num_rows), dtype=lower.dtype)
    lower = array_ops.matrix_set_diag(lower, ones_diag)

    # Prepare the upper factor.
    upper = array_ops.matrix_band_part(lu, 0, -1)

    verification = math_ops.matmul(lower, upper)

    # Permute the rows of the product of the triangular factors.
    if num_rows > 0:
      # Reshape the product of the triangular factors and permutation indices
      # to a single batch dimension. This makes it easy to apply
      # invert_permutation and gather_nd ops.
      perm_reshaped = array_ops.reshape(perm, [-1, num_rows])
      verification_reshaped = array_ops.reshape(verification,
                                                [-1, num_rows, num_cols])
      # Invert the permutation in each batch.
      inv_perm_reshaped = map_fn.map_fn(array_ops.invert_permutation,
                                        perm_reshaped)
      batch_size = perm_reshaped.shape.as_list()[0]
      # Prepare the batch indices with the same shape as the permutation.
      # The corresponding batch index is paired with each of the `num_rows`
      # permutation indices.
      batch_indices = math_ops.cast(
          array_ops.broadcast_to(
              math_ops.range(batch_size)[:, None], perm_reshaped.shape),
          dtype=output_idx_type)
      permuted_verification_reshaped = array_ops.gather_nd(
          verification_reshaped,
          array_ops.stack([batch_indices, inv_perm_reshaped], axis=-1))

      # Reshape the verification matrix back to the original shape.
      verification = array_ops.reshape(permuted_verification_reshaped,
                                       lu_shape)

    self._verifyLuBase(x, lower, upper, perm, verification,
                       output_idx_type)
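
The permutation plumbing above can be tried in isolation: tf.math.invert_permutation swaps indices and values, so gathering by a permutation and then by its inverse is the identity. A small sketch, assuming TF 2.x:

import tensorflow as tf

perm = tf.constant([2, 0, 1])
inv = tf.math.invert_permutation(perm)
print(inv.numpy())  # [1 2 0]
x = tf.constant([10., 20., 30.])
print(tf.gather(tf.gather(x, perm), inv).numpy())  # [10. 20. 30.]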
Example #12
    def testPivoting(self):
        # This matrix triggers partial pivoting because the first diagonal entry
        # is small.
        data = np.array([[1e-9, 1., 0.], [1., 0., 0], [0., 1., 5]])
        self._verifyLu(data.astype(np.float32))

        for dtype in (np.float32, np.float64):
            self._verifyLu(data.astype(dtype))
            _, p = linalg_ops.lu(data.astype(dtype))
            p_val = self.evaluate([p])
            # Make sure p_val is not the identity permutation.
            self.assertNotAllClose(np.arange(3), p_val)

        for dtype in (np.complex64, np.complex128):
            complex_data = np.tril(1j * data, -1).astype(dtype)
            complex_data += np.triu(-1j * data, 1).astype(dtype)
            complex_data += data
            self._verifyLu(complex_data)
            _, p = linalg_ops.lu(complex_data)
            p_val = self.evaluate([p])
            # Make sure p_val is not the identity permutation.
            self.assertNotAllClose(np.arange(3), p_val)
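
The complex input keeps the real data on the diagonal and puts opposite-sign imaginary parts above and below it, so the tiny |a[0, 0]| still triggers pivoting. A quick NumPy check of the construction (illustration only, not part of the test):

import numpy as np

data = np.array([[1e-9, 1., 0.], [1., 0., 0.], [0., 1., 5.]])
complex_data = np.tril(1j * data, -1) + np.triu(-1j * data, 1) + data
print(np.allclose(complex_data.diagonal().imag, 0.0))  # True: real diagonal
print(complex_data[1, 0], complex_data[0, 1])          # (1+1j) (1-1j)
print(abs(complex_data[0, 0]))                         # 1e-09, still tiny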