Example #1
0
    def testCompressedRankComputationUsingCorrectDim(self):
        """Rank is computed from the smaller dimension, whichever axis it is on."""
        rank_factor = 200
        expected_rank = 5  # half of the smaller dimension.
        # Both orientations of the same shape must yield the same rank.
        for shape in ((10, 20), (20, 10)):
            rank = compression_op_utils.compute_compressed_rank_from_matrix_shape(
                shape, rank_factor)
            self.assertEqual(expected_rank, rank)
Example #2
0
    def tpu_matrix_compressor(self, a_matrix):
        """Low-rank decomposition of a_matrix using tpu operations.

        For training on tpus, we only use basic tf operations (as py_func is
        not supported).

        Args:
          a_matrix: input matrix (2-D tf tensor).

        Returns:
          A list of two matrices [b_matrix, c_matrix] which is the low-rank
          decomposition of a_matrix. Rank is taken from spec.rank.
        """
        # tf.linalg.svd returns (singular values, left vectors, right vectors).
        s, u, v = tf.linalg.svd(a_matrix)
        logging.info(
            'Inside tpu_matrix_compressor: u,s,v shapes are: %s, %s, %s',
            u.shape, s.shape, v.shape)
        rank = comp_op_utils.compute_compressed_rank_from_matrix_shape(
            tuple(a_matrix.shape.dims), self._spec.rank)
        # Truncate the factors to the compressed rank.
        b_matrix = u[:, :rank]
        c_matrix = tf.transpose(a=v)[:rank, :]
        # Split the singular values evenly between the two factors (sqrt on
        # each side) so that b_matrix @ c_matrix approximates a_matrix.
        s_mat = tf.linalg.tensor_diag(tf.sqrt(s[:rank]))
        b_matrix = tf.matmul(b_matrix, s_mat)
        c_matrix = tf.matmul(s_mat, c_matrix)
        # BUG FIX: the two implicitly-concatenated string literals were missing
        # a separator space, logging "c_matrixshapes are".
        logging.info(
            'Inside tpu_matrix_compressor: a_matrix,b_matrix,c_matrix '
            'shapes are: %s, %s, %s', a_matrix.shape, b_matrix.shape,
            c_matrix.shape)
        return [b_matrix, c_matrix]
Example #3
0
    def static_matrix_compressor(self, a_matrix):
        """Low-rank decomposition of a_matrix.

        Args:
          a_matrix: input matrix.

        Returns:
          A list [b_matrix, c_matrix] which is the low-rank decomposition of
          a_matrix. Rank is taken from spec.rank.
        """
        left, sing_vals, right_t = np.linalg.svd(a_matrix)
        logging.info(
            'Inside static_matrix_compressor: u,s,vh shapes are: %s, %s, %s',
            left.shape, sing_vals.shape, right_t.shape)
        # If matrix dimension is smaller than rank specified then adjust rank.
        rank = comp_op_utils.compute_compressed_rank_from_matrix_shape(
            a_matrix.shape, self._spec.rank)
        # Split the singular values evenly between the two factors (sqrt on
        # each side) so that b_matrix @ c_matrix approximates a_matrix.
        root_s = np.diag(np.sqrt(sing_vals[:rank]))
        b_matrix = np.matmul(left[:, :rank], root_s)
        c_matrix = np.matmul(root_s, right_t[:rank, :])
        logging.info(
            'Inside static_matrix_compressor: a_matrix,b_matrix,c_matrix shapes '
            'are: %s, %s, %s', a_matrix.shape, b_matrix.shape, c_matrix.shape)

        # Record sizes so the caller can report the compression ratio.
        self.uncompressed_size = a_matrix.size
        self.compressed_size = b_matrix.size + c_matrix.size

        return [b_matrix, c_matrix]
Example #4
0
 def testCompressedRankComputationUnevenDivision(self):
     """An odd smaller dimension (9) halves with rounding up, giving 5."""
     shape = (9, 20)
     rank_factor = 200
     computed = compression_op_utils.compute_compressed_rank_from_matrix_shape(
         shape, rank_factor)
     # Expected rank should be half of the original rank -- rounded up.
     self.assertEqual(5, computed)
Example #5
0
 def testCompressedRankComputationLowerBound(self):
     """A rank_factor below 100 must not inflate the rank past the original."""
     shape = (10, 20)
     # A rank_factor of 50 would mean 2x the current rank -- which is not
     # compression. The util method should return the current rank instead.
     rank_factor = 50
     result = compression_op_utils.compute_compressed_rank_from_matrix_shape(
         shape, rank_factor)
     self.assertEqual(10, result)