def testCompressedRankComputationUsingCorrectDim(self):
  """Rank is derived from the smaller dimension regardless of axis order."""
  rank_factor = 200
  expected_rank = 5  # half of the smaller dimension.
  # Both orientations of the same rectangular shape must yield the same rank.
  for shape in ((10, 20), (20, 10)):
    rank = compression_op_utils.compute_compressed_rank_from_matrix_shape(
        shape, rank_factor)
    self.assertEqual(expected_rank, rank)
def tpu_matrix_compressor(self, a_matrix):
  """Low-rank decomposition of a_matrix using tpu operations.

  For training on tpus, we only use basic tf operations (as py_func is not
  supported).

  Args:
    a_matrix: input matrix.

  Returns:
    A list of two matrices [b_matrix, c_matrix] which is the low-rank
    decomposition of a_matrix. Rank is taken from spec.rank.
  """
  s, u, v = tf.svd(a_matrix)
  logging.info(
      'Inside tpu_matrix_compressor: u,s,v shapes are: %s, %s, %s',
      u.shape, s.shape, v.shape)
  # If matrix dimension is smaller than the requested rank, the util method
  # clamps the rank accordingly.
  rank = comp_op_utils.compute_compressed_rank_from_matrix_shape(
      tuple(a_matrix.shape.dims), self._spec.rank)
  b_matrix = u[:, :rank]
  c_matrix = tf.transpose(v)[:rank, :]
  # Split sqrt(s) between both factors so that b_matrix @ c_matrix
  # reconstructs the rank-truncated a_matrix.
  s_mat = tf.diag(tf.sqrt(s[:rank]))
  b_matrix = tf.matmul(b_matrix, s_mat)
  c_matrix = tf.matmul(s_mat, c_matrix)
  # Bug fix: the implicit string concatenation was missing a space and
  # previously logged '...c_matrixshapes are...'.
  logging.info(
      'Inside tpu_matrix_compressor: a_matrix,b_matrix,c_matrix '
      'shapes are: %s, %s, %s',
      a_matrix.shape, b_matrix.shape, c_matrix.shape)
  return [b_matrix, c_matrix]
def static_matrix_compressor(self, a_matrix):
  """Computes a rank-truncated SVD factorization of a_matrix.

  Args:
    a_matrix: input matrix.

  Returns:
    A list [b_matrix, c_matrix] which is the low-rank decomposition of
    a_matrix. Rank is taken from spec.rank.
  """
  left_vecs, sing_vals, right_vecs = np.linalg.svd(a_matrix)
  logging.info(
      'Inside static_matrix_compressor: u,s,vh shapes are: %s, %s, %s',
      left_vecs.shape, sing_vals.shape, right_vecs.shape)
  # If matrix dimension is smaller than rank specified then adjust rank.
  rank = comp_op_utils.compute_compressed_rank_from_matrix_shape(
      a_matrix.shape, self._spec.rank)
  # Split sqrt of the singular values between the two factors so that
  # b_matrix @ c_matrix approximates the rank-truncated a_matrix.
  sqrt_sigma = np.diag(np.sqrt(sing_vals[:rank]))
  b_matrix = np.matmul(left_vecs[:, :rank], sqrt_sigma)
  c_matrix = np.matmul(sqrt_sigma, right_vecs[:rank, :])
  logging.info(
      'Inside static_matrix_compressor: a_matrix,b_matrix,c_matrix shapes '
      'are: %s, %s, %s', a_matrix.shape, b_matrix.shape, c_matrix.shape)
  return [b_matrix, c_matrix]
def testCompressedRankComputationUnevenDivision(self):
  """An odd dimension halved by the rank factor rounds up, not down."""
  # Expected rank should be half of the original rank -- rounded up.
  computed_rank = compression_op_utils.compute_compressed_rank_from_matrix_shape(
      (9, 20), 200)
  self.assertEqual(5, computed_rank)
def testCompressedRankComputationLowerBound(self):
  """A rank factor implying expansion is clamped to the current rank."""
  # Following rank_factor means 2x the current rank -- which is not
  # compression. Util method should return the current rank.
  expansion_factor = 50
  computed_rank = compression_op_utils.compute_compressed_rank_from_matrix_shape(
      (10, 20), expansion_factor)
  self.assertEqual(10, computed_rank)