def export(master_spec_path, params_path, export_path, export_moving_averages,
           build_runtime_graph):
  """Restores a model and exports it in SavedModel form.

  Reads the proto-text master spec at |master_spec_path| and the trained
  parameters at |params_path|, then writes the combined model in SavedModel
  format to |export_path|.

  Args:
    master_spec_path: Path to a proto-text master spec.
    params_path: Path to the parameters file to export.
    export_path: Path to export the SavedModel to.
    export_moving_averages: Whether to export the moving average parameters.
    build_runtime_graph: Whether to build a graph for use by the runtime.
  """
  master_spec = spec_pb2.MasterSpec()
  with tf.gfile.FastGFile(master_spec_path) as spec_file:
    text_format.Parse(spec_file.read(), master_spec)

  export_graph = tf.Graph()

  # Strip any trailing '/' so that the path utilities behave consistently.
  normalized_path = export_path.rstrip('/')
  saver_lib.clean_output_paths(normalized_path)

  shortened_paths = saver_lib.shorten_resource_paths(master_spec)
  saver_lib.export_master_spec(master_spec, export_graph)
  saver_lib.export_to_graph(master_spec, params_path, normalized_path,
                            export_graph, export_moving_averages,
                            build_runtime_graph)
  saver_lib.export_assets(master_spec, shortened_paths, normalized_path)
def testModelExport(self):
  """Exports a model without runtime hooks and checks assets and hook nodes."""
  master_spec = self.LoadSpec('ud-hungarian.master-spec')
  params_path = os.path.join(test_flags.source_root(),
                             'dragnn/python/testdata/ud-hungarian.params')

  # Export via SavedModel. (Here, we maintain a handle to the graph for
  # comparison, but that's usually not necessary.)
  export_path = os.path.join(test_flags.temp_dir(), 'export')
  dragnn_model_saver_lib.clean_output_paths(export_path)

  export_graph = tf.Graph()
  asset_map = dragnn_model_saver_lib.shorten_resource_paths(master_spec)
  dragnn_model_saver_lib.export_master_spec(master_spec, export_graph)
  dragnn_model_saver_lib.export_to_graph(
      master_spec,
      params_path,
      export_path,
      export_graph,
      export_moving_averages=False,
      build_runtime_graph=False)
  dragnn_model_saver_lib.export_assets(master_spec, asset_map, export_path)

  # All assets must land in the exported directory.  This master-spec has
  # exactly 4 unique assets; more would mean they were not uniquified.
  exported_assets = self.ValidateAssetExistence(master_spec, export_path)
  self.assertEqual(len(exported_assets), 4)

  # Restore from the checkpoint into a fresh Graph object.
  loaded_graph = tf.Graph()
  session_config = tf.ConfigProto(
      log_device_placement=False,
      intra_op_parallelism_threads=10,
      inter_op_parallelism_threads=10)
  with tf.Session(graph=loaded_graph, config=session_config) as sess:
    tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING],
                               export_path)

    averaged_hook_name, non_averaged_hook_name, _ = self.GetHookNodeNames(
        master_spec)

    # With build_runtime_graph=False, neither the averaged nor the
    # non-averaged runtime hook node should exist in the restored graph.
    with self.assertRaises(KeyError):
      loaded_graph.get_operation_by_name(averaged_hook_name)
    with self.assertRaises(KeyError):
      loaded_graph.get_operation_by_name(non_averaged_hook_name)
def testModelExport(self):
  """Checks that a plain export succeeds and omits runtime hook nodes."""
  master_spec = self.LoadSpec('ud-hungarian.master-spec')
  params_path = os.path.join(test_flags.source_root(),
                             'dragnn/python/testdata/ud-hungarian.params')

  # Export via SavedModel, keeping a handle to the saver graph for
  # comparison (usually not necessary).
  export_path = os.path.join(test_flags.temp_dir(), 'export')
  dragnn_model_saver_lib.clean_output_paths(export_path)

  export_graph = tf.Graph()
  asset_map = dragnn_model_saver_lib.shorten_resource_paths(master_spec)
  dragnn_model_saver_lib.export_master_spec(master_spec, export_graph)
  dragnn_model_saver_lib.export_to_graph(
      master_spec,
      params_path,
      export_path,
      export_graph,
      export_moving_averages=False,
      build_runtime_graph=False)
  # The assets are exported separately from the graph itself.
  dragnn_model_saver_lib.export_assets(master_spec, asset_map, export_path)

  # Every asset must exist in the exported directory; this master-spec has
  # 4 unique assets, and any extras indicate improper uniquification.
  exported_assets = self.ValidateAssetExistence(master_spec, export_path)
  self.assertEqual(len(exported_assets), 4)

  # Restore the checkpoint into a brand-new Graph object.
  loaded_graph = tf.Graph()
  session_config = tf.ConfigProto(
      log_device_placement=False,
      intra_op_parallelism_threads=10,
      inter_op_parallelism_threads=10)
  with tf.Session(graph=loaded_graph, config=session_config) as sess:
    tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING],
                               export_path)

    averaged_hook_name, non_averaged_hook_name, _ = self.GetHookNodeNames(
        master_spec)

    # Since no runtime graph was built, the averaged hook node must be
    # absent...
    with self.assertRaises(KeyError):
      loaded_graph.get_operation_by_name(averaged_hook_name)
    # ...and so must the non-averaged variant.
    with self.assertRaises(KeyError):
      loaded_graph.get_operation_by_name(non_averaged_hook_name)
def testModelExportProducesRunnableModel(self):
  """Exports a model, restores it, and runs annotation on a dummy sentence."""
  master_spec = self.LoadSpec('ud-hungarian.master-spec')
  params_path = os.path.join(test_flags.source_root(),
                             'dragnn/python/testdata/ud-hungarian.params')

  # Export via SavedModel. (Here, we maintain a handle to the graph for
  # comparison, but that's usually not necessary.)
  export_path = os.path.join(test_flags.temp_dir(), 'export')
  dragnn_model_saver_lib.clean_output_paths(export_path)

  export_graph = tf.Graph()
  asset_map = dragnn_model_saver_lib.shorten_resource_paths(master_spec)
  dragnn_model_saver_lib.export_master_spec(master_spec, export_graph)
  dragnn_model_saver_lib.export_to_graph(
      master_spec,
      params_path,
      export_path,
      export_graph,
      export_moving_averages=False,
      build_runtime_graph=False)
  dragnn_model_saver_lib.export_assets(master_spec, asset_map, export_path)

  # Restore from the checkpoint into a fresh Graph object.
  loaded_graph = tf.Graph()
  session_config = tf.ConfigProto(
      log_device_placement=False,
      intra_op_parallelism_threads=10,
      inter_op_parallelism_threads=10)
  with tf.Session(graph=loaded_graph, config=session_config) as sess:
    tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING],
                               export_path)

    # Build a one-sentence input batch from the dummy test sentence.
    sentence = sentence_pb2.Sentence()
    text_format.Parse(_DUMMY_TEST_SENTENCE, sentence)
    batch = [sentence.SerializeToString()]

    # We don't care about accuracy, only that the run session doesn't crash.
    sess.run(
        'annotation/annotations:0',
        feed_dict={'annotation/ComputeSession/InputBatch:0': batch})
def testModelExportProducesRunnableModel(self):
  """Verifies an exported model can be restored and run end-to-end."""
  master_spec = self.LoadSpec('ud-hungarian.master-spec')
  params_path = os.path.join(test_flags.source_root(),
                             'dragnn/python/testdata/ud-hungarian.params')

  # Export via SavedModel, keeping a handle to the saver graph for
  # comparison (usually not necessary).
  export_path = os.path.join(test_flags.temp_dir(), 'export')
  dragnn_model_saver_lib.clean_output_paths(export_path)

  export_graph = tf.Graph()
  asset_map = dragnn_model_saver_lib.shorten_resource_paths(master_spec)
  dragnn_model_saver_lib.export_master_spec(master_spec, export_graph)
  dragnn_model_saver_lib.export_to_graph(
      master_spec,
      params_path,
      export_path,
      export_graph,
      export_moving_averages=False,
      build_runtime_graph=False)
  # Assets are exported in a separate step.
  dragnn_model_saver_lib.export_assets(master_spec, asset_map, export_path)

  # Restore the checkpoint into a brand-new Graph object.
  loaded_graph = tf.Graph()
  session_config = tf.ConfigProto(
      log_device_placement=False,
      intra_op_parallelism_threads=10,
      inter_op_parallelism_threads=10)
  with tf.Session(graph=loaded_graph, config=session_config) as sess:
    tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING],
                               export_path)

    # Serialize the dummy test sentence into a single-element input batch.
    sentence = sentence_pb2.Sentence()
    text_format.Parse(_DUMMY_TEST_SENTENCE, sentence)
    batch = [sentence.SerializeToString()]

    # Accuracy is irrelevant here; the point is that annotation runs
    # without crashing.
    sess.run(
        'annotation/annotations:0',
        feed_dict={'annotation/ComputeSession/InputBatch:0': batch})
def export(master_spec_path, params_path, resource_path, export_path,
           export_moving_averages):
  """Restores a model and exports it in SavedModel form.

  Loads the graph described by the proto-text spec at |master_spec_path|
  together with the trained parameters at |params_path|, completes the spec
  against |resource_path|, and writes the result as a SavedModel rooted at
  |export_path|.

  Args:
    master_spec_path: Path to a proto-text master spec.
    params_path: Path to the parameters file to export.
    resource_path: Path to resources in the master spec.
    export_path: Path to export the SavedModel to.
    export_moving_averages: Whether to export the moving average parameters.
  """
  # Old CoNLL checkpoints did not need a known-word-map; create a placeholder
  # file if it is missing.
  known_word_map_path = os.path.join(resource_path, 'known-word-map')
  if not tf.gfile.Exists(known_word_map_path):
    with tf.gfile.FastGFile(known_word_map_path, 'w') as out_file:
      out_file.write('This file intentionally left blank.')

  master_spec = spec_pb2.MasterSpec()
  with tf.gfile.FastGFile(master_spec_path) as spec_file:
    text_format.Parse(spec_file.read(), master_spec)

  # Workaround: the segmenter master-spec contained a spurious resource that
  # was not respected in the spec-builder and ended up crashing the saver
  # (since it didn't really exist), so drop all component resources before
  # completing the spec.
  for component in master_spec.component:
    del component.resource[:]
  spec_builder.complete_master_spec(master_spec, None, resource_path)

  export_graph = tf.Graph()

  # Strip any trailing '/' so that the path utilities behave consistently.
  normalized_path = export_path.rstrip('/')
  saver_lib.clean_output_paths(normalized_path)

  shortened_paths = saver_lib.shorten_resource_paths(master_spec)
  saver_lib.export_master_spec(master_spec, export_graph)
  saver_lib.export_to_graph(master_spec, params_path, normalized_path,
                            export_graph, export_moving_averages)
  saver_lib.export_assets(master_spec, shortened_paths, normalized_path)
def testModelExportWithAveragesAndHooks(self):
  """Exports with moving averages + runtime hooks and validates the hooks."""
  master_spec = self.LoadSpec('ud-hungarian.master-spec')
  params_path = os.path.join(test_flags.source_root(),
                             'dragnn/python/testdata/ud-hungarian.params')

  # Export via SavedModel. (Here, we maintain a handle to the graph for
  # comparison, but that's usually not necessary.) Note that the export path
  # must not already exist.
  export_path = os.path.join(test_flags.temp_dir(), 'export2')
  dragnn_model_saver_lib.clean_output_paths(export_path)

  export_graph = tf.Graph()
  asset_map = dragnn_model_saver_lib.shorten_resource_paths(master_spec)
  dragnn_model_saver_lib.export_master_spec(master_spec, export_graph)
  dragnn_model_saver_lib.export_to_graph(
      master_spec,
      params_path,
      export_path,
      export_graph,
      export_moving_averages=True,
      build_runtime_graph=True)
  dragnn_model_saver_lib.export_assets(master_spec, asset_map, export_path)

  # All assets must land in the exported directory.  This master-spec has
  # exactly 4 unique assets; more would mean they were not uniquified.
  exported_assets = self.ValidateAssetExistence(master_spec, export_path)
  self.assertEqual(len(exported_assets), 4)

  # Restore from the checkpoint into a fresh Graph object.
  loaded_graph = tf.Graph()
  session_config = tf.ConfigProto(
      log_device_placement=False,
      intra_op_parallelism_threads=10,
      inter_op_parallelism_threads=10)
  with tf.Session(graph=loaded_graph, config=session_config) as sess:
    tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING],
                               export_path)

    averaged_hook_name, non_averaged_hook_name, cell_subgraph_hook_name = (
        self.GetHookNodeNames(master_spec))

    # The averaged runtime hook node must exist...
    loaded_graph.get_operation_by_name(averaged_hook_name)
    # ...but the non-averaged variant must not.
    with self.assertRaises(KeyError):
      loaded_graph.get_operation_by_name(non_averaged_hook_name)

    # Fetch and parse the serialized cell subgraph.
    subgraph_tensor = loaded_graph.get_tensor_by_name(
        cell_subgraph_hook_name + ':0')
    serialized_subgraph = subgraph_tensor.eval(
        feed_dict={'annotation/ComputeSession/InputBatch:0': []})
    cell_subgraph_spec = export_pb2.CellSubgraphSpec()
    cell_subgraph_spec.ParseFromString(serialized_subgraph)
    tf.logging.info('cell_subgraph_spec = %s', cell_subgraph_spec)

    # Every input must be named, typed, and refer to a real tensor.
    for cell_input in cell_subgraph_spec.input:
      self.assertGreater(len(cell_input.name), 0)
      self.assertGreater(len(cell_input.tensor), 0)
      self.assertNotEqual(cell_input.type,
                          export_pb2.CellSubgraphSpec.Input.TYPE_UNKNOWN)
      loaded_graph.get_tensor_by_name(cell_input.tensor)  # shouldn't raise

    # Every output must be named and refer to a real tensor.
    for cell_output in cell_subgraph_spec.output:
      self.assertGreater(len(cell_output.name), 0)
      self.assertGreater(len(cell_output.tensor), 0)
      loaded_graph.get_tensor_by_name(cell_output.tensor)  # shouldn't raise

    # GetHookNames() finds a component with a fixed feature, so at least the
    # first feature ID should appear among the inputs.
    self.assertTrue(
        any(cell_input.name == 'fixed_channel_0_index_0_ids'
            for cell_input in cell_subgraph_spec.input))
    # Most dynamic components produce a logits layer.
    self.assertTrue(
        any(cell_output.name == 'logits'
            for cell_output in cell_subgraph_spec.output))
def testModelExportWithAveragesAndHooks(self):
  """Checks averaged-parameter export plus the runtime cell-subgraph hooks."""
  master_spec = self.LoadSpec('ud-hungarian.master-spec')
  params_path = os.path.join(test_flags.source_root(),
                             'dragnn/python/testdata/ud-hungarian.params')

  # Export via SavedModel, keeping a handle to the saver graph for
  # comparison (usually not necessary).  The export path must not already
  # exist.
  export_path = os.path.join(test_flags.temp_dir(), 'export2')
  dragnn_model_saver_lib.clean_output_paths(export_path)

  export_graph = tf.Graph()
  asset_map = dragnn_model_saver_lib.shorten_resource_paths(master_spec)
  dragnn_model_saver_lib.export_master_spec(master_spec, export_graph)
  dragnn_model_saver_lib.export_to_graph(
      master_spec,
      params_path,
      export_path,
      export_graph,
      export_moving_averages=True,
      build_runtime_graph=True)
  # Assets are exported in a separate step.
  dragnn_model_saver_lib.export_assets(master_spec, asset_map, export_path)

  # Every asset must exist in the exported directory; this master-spec has
  # 4 unique assets, and any extras indicate improper uniquification.
  exported_assets = self.ValidateAssetExistence(master_spec, export_path)
  self.assertEqual(len(exported_assets), 4)

  # Restore the checkpoint into a brand-new Graph object.
  loaded_graph = tf.Graph()
  session_config = tf.ConfigProto(
      log_device_placement=False,
      intra_op_parallelism_threads=10,
      inter_op_parallelism_threads=10)
  with tf.Session(graph=loaded_graph, config=session_config) as sess:
    tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING],
                               export_path)

    averaged_hook_name, non_averaged_hook_name, cell_subgraph_hook_name = (
        self.GetHookNodeNames(master_spec))

    # With export_moving_averages=True, only the averaged hook node should
    # be present in the restored graph.
    loaded_graph.get_operation_by_name(averaged_hook_name)
    with self.assertRaises(KeyError):
      loaded_graph.get_operation_by_name(non_averaged_hook_name)

    # Evaluate the cell-subgraph hook (with an empty input batch) and parse
    # the resulting CellSubgraphSpec proto.
    subgraph_tensor = loaded_graph.get_tensor_by_name(
        cell_subgraph_hook_name + ':0')
    serialized_subgraph = subgraph_tensor.eval(
        feed_dict={'annotation/ComputeSession/InputBatch:0': []})
    cell_subgraph_spec = export_pb2.CellSubgraphSpec()
    cell_subgraph_spec.ParseFromString(serialized_subgraph)
    tf.logging.info('cell_subgraph_spec = %s', cell_subgraph_spec)

    # Sanity-check inputs: non-empty name/tensor, known type, and the tensor
    # must resolve in the restored graph.
    for cell_input in cell_subgraph_spec.input:
      self.assertGreater(len(cell_input.name), 0)
      self.assertGreater(len(cell_input.tensor), 0)
      self.assertNotEqual(cell_input.type,
                          export_pb2.CellSubgraphSpec.Input.TYPE_UNKNOWN)
      loaded_graph.get_tensor_by_name(cell_input.tensor)  # shouldn't raise

    # Sanity-check outputs: non-empty name/tensor, and the tensor must
    # resolve in the restored graph.
    for cell_output in cell_subgraph_spec.output:
      self.assertGreater(len(cell_output.name), 0)
      self.assertGreater(len(cell_output.tensor), 0)
      loaded_graph.get_tensor_by_name(cell_output.tensor)  # shouldn't raise

    # GetHookNames() finds a component with a fixed feature, so at least the
    # first feature ID should appear among the inputs.
    self.assertTrue(
        any(cell_input.name == 'fixed_channel_0_index_0_ids'
            for cell_input in cell_subgraph_spec.input))
    # Most dynamic components produce a logits layer.
    self.assertTrue(
        any(cell_output.name == 'logits'
            for cell_output in cell_subgraph_spec.output))