def test_semantic_equivalence_for_simple_graphdef(self):
    """Optimizing a plain GraphDef must not change its computed outputs."""
    graph, in_name, out_name = _make_redundant_add_one_graph()
    spec = graph_spec.GraphSpec(graph.as_graph_def(), None, [in_name],
                                [out_name])
    optimized = graph_optimizations.optimize_graph_spec(
        spec, tf.compat.v1.ConfigProto())

    def _evaluate(target):
        # Import the serialized graph into a fresh graph and evaluate the
        # declared outputs with every declared input fed the value 1.
        with tf.Graph().as_default() as imported:
            tf.graph_util.import_graph_def(target.graph_def, name='')
            with tf.compat.v1.Session(graph=imported) as sess:
                return sess.run(
                    target.out_names,
                    feed_dict={name: 1 for name in target.in_names})

    orig_out = _evaluate(spec)
    new_out = _evaluate(optimized)
    self.assertEqual(new_out, orig_out)
def test_reduces_graph_size_in_function_lib(self):
    """Optimization should shrink a graph whose function library holds a tf.function."""

    class StateHolder:
        pass

    holder = StateHolder()
    holder.variable = None

    @tf.function
    def foo(x):
        # Create the variable lazily on first trace so that it lives inside
        # the traced function, placing work in the graph's function library.
        if holder.variable is None:
            holder.variable = tf.Variable(initial_value=0.)
        holder.variable.assign_add(x)
        return holder.variable.read_value()

    with tf.Graph().as_default() as g:
        placeholder = tf.compat.v1.placeholder(shape=[], dtype=tf.float32)
        output = foo(placeholder)
        init = tf.compat.v1.global_variables_initializer()
        graph_def = g.as_graph_def()

    spec = graph_spec.GraphSpec(graph_def, init.name, [placeholder.name],
                                [output.name])
    optimized = graph_optimizations.optimize_graph_spec(
        spec, tf.compat.v1.ConfigProto())

    self.assertIsInstance(optimized, graph_spec.GraphSpec)
    self.assertLess(optimized.graph_def.ByteSize(), graph_def.ByteSize())
def test_semantic_equivalence_for_graphdef_with_variables(self):
    """Optimizing a graph with variables must preserve its outputs."""
    graph, in_name, out_name = _make_foldable_add_variable_number_graph()
    with graph.as_default():
        init_op_name = tf.compat.v1.global_variables_initializer().name
    spec = graph_spec.GraphSpec(graph.as_graph_def(), init_op_name,
                                [in_name], [out_name])
    optimized = graph_optimizations.optimize_graph_spec(
        spec, tf.compat.v1.ConfigProto())

    def _evaluate(target):
        # Import the serialized graph, run its initializer, then evaluate
        # the outputs with every declared input fed the value 1.
        with tf.Graph().as_default() as imported:
            tf.graph_util.import_graph_def(target.graph_def, name='')
            with tf.compat.v1.Session(graph=imported) as sess:
                sess.run(target.init_op)
                return sess.run(
                    target.out_names,
                    feed_dict={name: 1 for name in target.in_names})

    orig_out = _evaluate(spec)
    new_out = _evaluate(optimized)
    self.assertEqual(new_out, orig_out)
def test_reduces_bytesize_for_foldable_graphdef_with_variables(self):
    """Constant folding should deduplicate the repeated 1.0 constants."""
    graph, in_name, out_name = _make_foldable_add_variable_number_graph()
    with graph.as_default():
        init_op_name = tf.compat.v1.global_variables_initializer().name
    graph_def = graph.as_graph_def()

    def _one_valued_consts(gd):
        # One entry per float value equal to 1.0 found in a Const node,
        # mirroring the per-value (not per-node) counting of the original.
        return [
            node for node in gd.node if node.op == 'Const'
            for value in node.attr['value'].tensor.float_val if value == 1.
        ]

    orig_ones = _one_valued_consts(graph_def)
    spec = graph_spec.GraphSpec(graph_def, init_op_name, [in_name],
                                [out_name])
    optimized = graph_optimizations.optimize_graph_spec(
        spec, tf.compat.v1.ConfigProto())
    opt_ones = _one_valued_consts(optimized.graph_def)

    self.assertIsInstance(optimized, graph_spec.GraphSpec)
    self.assertLess(optimized.graph_def.ByteSize(), graph_def.ByteSize())
    self.assertGreater(len(orig_ones), 1)
    self.assertLess(len(opt_ones), len(orig_ones))
def test_reduces_bytesize_for_simple_graphdef(self):
    """Grappler should shrink a graph containing redundant add-one ops."""
    graph, in_name, out_name = _make_redundant_add_one_graph()
    graph_def = graph.as_graph_def()
    # No variables in this graph, hence no initialization op.
    spec = graph_spec.GraphSpec(graph_def, None, [in_name], [out_name])
    optimized = graph_optimizations.optimize_graph_spec(
        spec, tf.compat.v1.ConfigProto())
    self.assertIsInstance(optimized, graph_spec.GraphSpec)
    self.assertLess(optimized.graph_def.ByteSize(), graph_def.ByteSize())
def test_reduces_bytesize_for_dataset_reduction(self):
    """Optimization shrinks a graph that constructs and reduces a dataset."""
    ds_graph, _, ds_out = _make_dataset_constructing_graph()
    graph, _, out_name = _make_manual_reduce_graph(ds_graph, ds_out)
    with graph.as_default():
        init_op_name = tf.compat.v1.global_variables_initializer().name
    graph_def = graph.as_graph_def()
    # The reduction takes no inputs; only the single reduced output matters.
    spec = graph_spec.GraphSpec(graph_def, init_op_name, [], [out_name])
    optimized = graph_optimizations.optimize_graph_spec(
        spec, tf.compat.v1.ConfigProto())
    self.assertIsInstance(optimized, graph_spec.GraphSpec)
    self.assertLess(optimized.graph_def.ByteSize(), graph_def.ByteSize())
def test_semantic_equivalence_for_graphdef_with_function(self):
    """Optimizing a graph with a function library must preserve outputs."""

    class StateHolder:
        pass

    holder = StateHolder()
    holder.variable = None

    @tf.function
    def foo(x):
        # Create the variable lazily on first trace so that it lives inside
        # the traced function's library rather than the outer graph.
        if holder.variable is None:
            holder.variable = tf.Variable(initial_value=0.)
        holder.variable.assign_add(x)
        return holder.variable.read_value()

    with tf.Graph().as_default() as g:
        placeholder = tf.compat.v1.placeholder(shape=[], dtype=tf.float32)
        output = foo(placeholder)
        init = tf.compat.v1.global_variables_initializer()
        graph_def = g.as_graph_def()

    spec = graph_spec.GraphSpec(graph_def, init.name, [placeholder.name],
                                [output.name])
    optimized = graph_optimizations.optimize_graph_spec(
        spec, tf.compat.v1.ConfigProto())

    def _evaluate(target):
        # Import the serialized graph, run its initializer, then evaluate
        # the outputs with every declared input fed the value 1.
        with tf.Graph().as_default() as imported:
            tf.graph_util.import_graph_def(target.graph_def, name='')
            with tf.compat.v1.Session(graph=imported) as sess:
                sess.run(target.init_op)
                return sess.run(
                    target.out_names,
                    feed_dict={name: 1 for name in target.in_names})

    orig_out = _evaluate(spec)
    new_out = _evaluate(optimized)
    self.assertEqual(new_out, orig_out)
def optimize_tensorflow_comp(tf_computation, config_proto):
    """Runs Grappler over the GraphDef backing a TensorFlow computation.

    Args:
      tf_computation: Instance of `building_blocks.CompiledComputation` backed
        by TensorFlow.
      config_proto: Instance of `tf.compat.v1.ConfigProto` specifying the
        optimizations to apply to the graph backing this TensorFlow
        computation.

    Returns:
      A transformed version of `tf_computation`, which has had the
      `tf.compat.v1.GraphDef` backing it run through Grappler with the
      specified configuration.
    """
    py_typecheck.check_type(tf_computation,
                            building_blocks.CompiledComputation)
    proto = tf_computation.proto
    optimized_spec = graph_optimizations.optimize_graph_spec(
        _unpack_proto_into_graph_spec(proto), config_proto)
    packed_graph_def = serialization_utils.pack_graph_def(
        optimized_spec.graph_def)
    original = proto.tensorflow
    # Empty string fields become None so the rebuilt proto leaves them unset
    # instead of explicitly empty; `parameter` is optional and only copied
    # when actually present on the original computation.
    optimized_tensorflow = computation_pb2.TensorFlow(
        graph_def=packed_graph_def,
        initialize_op=original.initialize_op or None,
        session_token_tensor_name=original.session_token_tensor_name or None,
        parameter=(original.parameter
                   if original.HasField('parameter') else None),
        result=original.result)
    optimized_proto = computation_pb2.Computation(
        type=proto.type, tensorflow=optimized_tensorflow)
    return building_blocks.CompiledComputation(
        optimized_proto, type_signature=tf_computation.type_signature)