def test_get_apply_matmul(self):
    """Checks factor-matrix shapes produced by input/output compression.

    Builds an `InputOutputCompressionOp` over a 12x8 weight matrix with
    input/output compression factors of 2 and block size 4, then verifies:
      * the b, c, d factor variables have the expected shapes, and
      * `get_apply_matmul` on a 1x12 operand yields a 1x8 result.
    """
    with tf.Graph().as_default():
      with self.cached_session():
        # Hparam string enables both input and output compression; the
        # step/frequency settings are irrelevant here since only shapes
        # are checked at the initial state.
        hparams = ("name=input_output_compression,"
                   "begin_compression_step=1000,"
                   "end_compression_step=120000,"
                   "compression_frequency=100,"
                   "compress_input=True,"
                   "compress_output=True,"
                   "input_compression_factor=2,"
                   "input_block_size=4,"
                   "output_compression_factor=2,"
                   "output_block_size=4,")
        compression_op_spec = (
            compression_op.InputOutputCompressionOp.get_default_hparams().parse(
                hparams))
        print(compression_op_spec.compression_option)
        compressor_spec = (
            compression_op.LowRankDecompMatrixCompressor.get_default_hparams())
        matrix_compressor = compression_op.LowRankDecompMatrixCompressor(
            spec=compressor_spec)
        global_step = tf.compat.v1.get_variable("global_step", initializer=100)
        apply_comp = compression_wrapper.ApplyCompression(
            scope="default_scope",
            compression_spec=compression_op_spec,
            compressor=matrix_compressor,
            global_step=global_step)
        # outer product - creates an 12x8 matrix
        a_matrix_init = np.outer(
            np.array([1., 2., 3., 7., 8., 9., 1., 2., 5., -2., -7., -1.]),
            np.array([4., 5., 6., 3., 1., 8., 3., 2.]))
        a_matrix = tf.compat.v1.get_variable(
            "a_matrix",
            initializer=a_matrix_init.astype(np.float32),
            dtype=tf.float32)
        # Registers the compression op; the returned compressed tensor is not
        # needed — only the op recorded in apply_comp._compression_ops is used.
        _ = apply_comp.apply_compression(a_matrix, scope="compressor")
        # input is 1x12 vector
        left_operand_init = np.expand_dims(
            np.array([1., 2., 3., 4., 1., 2., 3., 4., 1., 2., 3., 4.]),
            axis=0)
        left_operand = tf.compat.v1.get_variable(
            "left_operand",
            initializer=left_operand_init.astype(np.float32),
            dtype=tf.float32)
        c = apply_comp._compression_ops[-1]
        # All variables (including the compression op's internals) must exist
        # before this initializer runs.
        tf.compat.v1.global_variables_initializer().run()
        compressed_matmul = c.get_apply_matmul(left_operand)
        # check b, c and d matrices have the right shapes
        self.assertSequenceEqual(list(c.b_matrix_tfvar.eval().shape), [4, 2])
        self.assertSequenceEqual(list(c.c_matrix_tfvar.eval().shape), [6, 4])
        self.assertSequenceEqual(list(c.d_matrix_tfvar.eval().shape), [2, 4])
        # check that we get the expected output shape
        self.assertSequenceEqual(list(compressed_matmul.eval().shape), [1, 8])
def testSimhashApplyCompression(self):
    """Exercises the simhash compression lifecycle across global steps.

    Verifies that before `begin_compression_step` (at step 30) alpha is 1.0
    and the b matrix is still zero, and that after compression has kicked in
    (step 2000) alpha has decayed to 0.99, b is populated, its rank is half
    of a's, and it matches `static_matrix_compressor` applied to the same
    initial matrix.
    """
    with self.cached_session():
      hparams = ("name=cifar10_compression,"
                 "begin_compression_step=1000,"
                 "end_compression_step=2001,"
                 "compression_frequency=100,")
      spec = simhash.SimhashCompressionOp.get_default_hparams().parse(hparams)
      spec.set_hparam(
          "compression_option",
          compression_op_utils.CompressionOptions.SIMHASH_MATRIX_COMPRESSION)
      # rank=200 is interpreted as a percentage-style spec: new_rank is half
      # of the original rank (see the rank assertion below).
      matrix_compressor = simhash.SimhashMatrixCompressor(
          spec=compression_op.LowRankDecompMatrixCompressor.get_default_hparams(
          ).parse("num_rows=5,num_cols=5,rank=200"))
      global_step = tf.compat.v1.get_variable("global_step", initializer=30)
      apply_comp = compression_wrapper.ApplyCompression(
          scope="default_scope",
          compression_spec=spec,
          compressor=matrix_compressor,
          global_step=global_step)
      # Need to add initial value for a_matrix so that we would know what to
      # expect back.
      a_matrix_init = np.outer(np.array([1., 2., 3.]), np.array([4., 5., 6.]))
      # Jitter breaks the exact rank-1 structure of the outer product.
      jitter = np.tile([0, 1e-1, 2e-2], (3, 1))
      a_matrix_init += jitter
      a_matrix = tf.compat.v1.get_variable(
          "a_matrix",
          initializer=a_matrix_init.astype(np.float32),
          dtype=tf.float32)
      a_matrix_compressed = apply_comp.apply_compression(
          a_matrix, scope="first_compressor")
      c = apply_comp._compression_ops[0]
      # A second compressed matrix checks that multiple ops under one wrapper
      # are updated together.
      a_matrix2 = tf.compat.v1.get_variable(
          "a_matrix2",
          initializer=a_matrix_init.astype(np.float32),
          dtype=tf.float32)
      _ = apply_comp.apply_compression(a_matrix2, scope="second_compressor")
      c2 = apply_comp._compression_ops[1]
      _ = apply_comp.all_update_op()
      tf.compat.v1.global_variables_initializer().run()
      _ = a_matrix_compressed.eval()
      # Compression won't start until step 1000 + some random_shift amount.
      # Here we make sure output is as expected at step 30.
      self.assertEqual(c._global_step.eval(), 30)
      self.assertEqual(c.alpha.eval(), 1.0)
      self.assertEqual(c2.alpha.eval(), 1.0)
      self.assertEqual(c._last_alpha_update_step.eval(), -1)
      # Before compression: a is non-zero, b is still all zeros.
      self.assertAllEqual(
          np.array([
              np.linalg.norm(c.a_matrix_tfvar.eval()),
              np.linalg.norm(c.b_matrix_tfvar.eval()),
          ]) > 0, [True, False])
      self.assertAllEqual(
          np.array([
              np.linalg.norm(c.a_matrix_tfvar.eval()),
              np.linalg.norm(c.b_matrix_tfvar.eval()),
          ]) < 0.00001, [False, True])
      # At this point compression should have already started being applied;
      # verify at step 2000 all is as expected.
      tf.compat.v1.assign(global_step, 2000).eval()
      apply_comp._all_update_op.run()
      _ = a_matrix_compressed.eval()
      self.assertEqual(c._global_step.eval(), 2000)
      self.assertAlmostEqual(c.alpha.eval(), 0.99)
      self.assertEqual(c._last_alpha_update_step.eval(), 2000)
      # After the update both a and b have non-zero norm.
      self.assertAllEqual(
          np.array([
              np.linalg.norm(c.a_matrix_tfvar.eval()),
              np.linalg.norm(c.b_matrix_tfvar.eval()),
          ]) > 0, [True, True])
      self.assertFalse(
          np.all(np.abs(np.linalg.norm(c.b_matrix_tfvar.eval())) < 0.00001))
      # The static_matrix_compressor was configured with a rank spec of 200 --
      # meaning compression by half, i.e. new_rank = orig_rank / 2.
      self.assertEqual(
          np.linalg.matrix_rank(c.b_matrix_tfvar.eval()),
          np.linalg.matrix_rank(c.a_matrix_tfvar.eval()) / 2)
      # The TF-side b matrix must agree (up to sign) with the numpy
      # reference compressor applied to the same initial matrix.
      b_matrix = matrix_compressor.static_matrix_compressor(a_matrix_init)
      self.assertAllEqual(
          np.linalg.norm(np.abs(b_matrix) -
                         np.abs(c.b_matrix_tfvar.eval())) < 0.00001, True)
def testApplyCompression(self):
    """Exercises the low-rank compression lifecycle across global steps.

    Verifies that before `begin_compression_step` (step 30) alpha is 1.0 and
    the b/c factor matrices are zero; that after the first update past step
    1000 alpha drops to 0.99 and b/c match the numpy reference
    `static_matrix_compressor`; and that a further update at step 2001 decays
    alpha to 0.98 on both registered compression ops.
    """
    with tf.Graph().as_default():
      with self.cached_session():
        compression_hparams = ("name=cifar10_compression,"
                               "begin_compression_step=1000,"
                               "end_compression_step=120000,"
                               "compression_frequency=100,")
        compression_op_spec = (
            compression_op.CompressionOp.get_default_hparams().parse(
                compression_hparams))
        compression_op_spec.set_hparam(
            "compression_option",
            compression_op_utils.CompressionOptions.LOWRANK_MATRIX_COMPRESSION)
        # rank=200 is a percentage-style spec: new_rank = orig_rank / 2
        # (see the sibling simhash test for the explicit rank check).
        compressor_spec = (compression_op.LowRankDecompMatrixCompressor
                           .get_default_hparams().parse(
                               "num_rows=5,num_cols=5,rank=200"))
        matrix_compressor = compression_op.LowRankDecompMatrixCompressor(
            spec=compressor_spec)
        global_step = tf.compat.v1.get_variable("global_step", initializer=30)
        apply_comp = compression_wrapper.ApplyCompression(
            scope="default_scope",
            compression_spec=compression_op_spec,
            compressor=matrix_compressor,
            global_step=global_step)
        # Need to add initial value for a_matrix so that we would know what
        # to expect back.
        a_matrix_init = np.outer(np.array([1., 2., 3.]),
                                 np.array([4., 5., 6.]))
        a_matrix = tf.compat.v1.get_variable(
            "a_matrix",
            initializer=a_matrix_init.astype(np.float32),
            dtype=tf.float32)
        a_matrix_compressed = apply_comp.apply_compression(
            a_matrix, scope="first_compressor")
        c = apply_comp._compression_ops[0]
        # A second compressed matrix checks that multiple ops under one
        # wrapper are updated together.
        a_matrix2 = tf.compat.v1.get_variable(
            "a_matrix2",
            initializer=a_matrix_init.astype(np.float32),
            dtype=tf.float32)
        _ = apply_comp.apply_compression(a_matrix2, scope="second_compressor")
        c2 = apply_comp._compression_ops[1]
        _ = apply_comp.all_update_op()
        tf.compat.v1.global_variables_initializer().run()
        _ = a_matrix_compressed.eval()
        # Before begin_compression_step (global_step == 30): alpha is still
        # 1.0 and no alpha update has happened yet.
        self.assertEqual(c._global_step.eval(), 30)
        self.assertEqual(c.alpha.eval(), 1.0)
        self.assertEqual(c2.alpha.eval(), 1.0)
        self.assertEqual(c._last_alpha_update_step.eval(), -1)
        # a is non-zero; the b and c factor matrices are still all zeros.
        self.assertAllEqual(
            np.array([
                np.linalg.norm(c.a_matrix_tfvar.eval()),
                np.linalg.norm(c.b_matrix_tfvar.eval()),
                np.linalg.norm(c.c_matrix_tfvar.eval())
            ]) > 0, [True, False, False])
        self.assertAllEqual(
            np.all(
                np.abs(np.linalg.norm(c.a_matrix_tfvar.eval())) < 0.00001),
            False)
        self.assertAllEqual(
            np.all(
                np.abs(np.linalg.norm(c.b_matrix_tfvar.eval())) < 0.00001),
            True)
        self.assertAllEqual(
            np.all(
                np.abs(np.linalg.norm(c.c_matrix_tfvar.eval())) < 0.00001),
            True)
        # Advance past begin_compression_step and run the update: alpha
        # should decay one notch to 0.99 and b/c should be populated.
        tf.compat.v1.assign(global_step, 1001).eval()
        # apply_comp_update_op.run()
        apply_comp._all_update_op.run()
        _ = a_matrix_compressed.eval()
        self.assertEqual(c._global_step.eval(), 1001)
        self.assertAlmostEqual(c.alpha.eval(), 0.99)
        self.assertEqual(c._last_alpha_update_step.eval(), 1001)
        self.assertAllEqual(
            np.array([
                np.linalg.norm(c.a_matrix_tfvar.eval()),
                np.linalg.norm(c.b_matrix_tfvar.eval()),
                np.linalg.norm(c.c_matrix_tfvar.eval())
            ]) > 0, [True, True, True])
        self.assertAllEqual(
            np.all(
                np.abs(np.linalg.norm(c.b_matrix_tfvar.eval())) < 0.00001),
            False)
        self.assertAllEqual(
            np.all(
                np.abs(np.linalg.norm(c.c_matrix_tfvar.eval())) < 0.00001),
            False)
        # The TF-side factors must agree (up to sign) with the numpy
        # reference compressor applied to the same initial matrix.
        [b_matrix, c_matrix
        ] = matrix_compressor.static_matrix_compressor(a_matrix_init)
        self.assertAllEqual(
            np.linalg.norm(
                np.abs(b_matrix) - np.abs(c.b_matrix_tfvar.eval())) < 0.00001,
            True)
        self.assertAllEqual(
            np.linalg.norm(
                np.abs(c_matrix) - np.abs(c.c_matrix_tfvar.eval())) < 0.00001,
            True)
        self.assertAllEqual(
            np.all(
                np.abs(np.linalg.norm(c.b_matrix_tfvar.eval())) < 0.00001),
            False)
        self.assertAllEqual(
            np.all(
                np.abs(np.linalg.norm(c.c_matrix_tfvar.eval())) < 0.00001),
            False)
        # NOTE(review): the assign/update/assert sequence below repeats the
        # step-1001 block above with identical expectations (alpha stays
        # 0.99, _last_alpha_update_step stays 1001) — presumably checking
        # that re-running the update at the same global step is a no-op,
        # though it may simply be copy-paste duplication; confirm intent.
        tf.compat.v1.assign(global_step, 1001).eval()
        apply_comp._all_update_op.run()
        _ = a_matrix_compressed.eval()
        self.assertEqual(c._global_step.eval(), 1001)
        self.assertAlmostEqual(c.alpha.eval(), 0.99)
        self.assertEqual(c._last_alpha_update_step.eval(), 1001)
        self.assertAllEqual(
            np.array([
                np.linalg.norm(c.a_matrix_tfvar.eval()),
                np.linalg.norm(c.b_matrix_tfvar.eval()),
                np.linalg.norm(c.c_matrix_tfvar.eval())
            ]) > 0, [True, True, True])
        # A later step (2001) triggers another alpha decay on BOTH
        # registered compression ops: 0.99 -> 0.98.
        tf.compat.v1.assign(global_step, 2001).eval()
        apply_comp._all_update_op.run()
        _ = a_matrix_compressed.eval()
        self.assertEqual(c._global_step.eval(), 2001)
        self.assertAlmostEqual(c.alpha.eval(), 0.98)
        self.assertAlmostEqual(c2.alpha.eval(), 0.98)
        self.assertEqual(c._last_alpha_update_step.eval(), 2001)
        self.assertAllEqual(
            np.array([
                np.linalg.norm(c.a_matrix_tfvar.eval()),
                np.linalg.norm(c.b_matrix_tfvar.eval()),
                np.linalg.norm(c.c_matrix_tfvar.eval())
            ]) > 0, [True, True, True])