def LoadSpec(self, spec_path):
  """Loads a MasterSpec proto from the dragnn/python testdata directory.

  Args:
    spec_path: File name of the spec, relative to the testdata directory.

  Returns:
    The parsed spec_pb2.MasterSpec, with 'TOPDIR' placeholders replaced by
    the dragnn/python root directory.
  """
  base_dir = os.path.join(test_flags.source_root(), 'dragnn/python')
  spec_file = os.path.join(base_dir, 'testdata', spec_path)
  with open(spec_file, 'r') as spec_stream:
    spec_text = spec_stream.read()
  spec = spec_pb2.MasterSpec()
  text_format.Parse(spec_text.replace('TOPDIR', base_dir), spec)
  return spec
def LoadSpec(self, spec_path):
  """Loads a MasterSpec proto from the dragnn/core testdata directory.

  Args:
    spec_path: File name of the spec, relative to the testdata directory.

  Returns:
    The parsed spec_pb2.MasterSpec, with 'TESTDATA' placeholders replaced by
    the testdata directory path.
  """
  data_dir = os.path.join(test_flags.source_root(), 'dragnn/core/testdata')
  spec_file = os.path.join(data_dir, spec_path)
  with open(spec_file, 'r') as spec_stream:
    spec_text = spec_stream.read()
  spec = spec_pb2.MasterSpec()
  text_format.Parse(spec_text.replace('TESTDATA', data_dir), spec)
  return spec
def testModelExport(self):
  """Tests SavedModel export without moving averages or runtime hooks.

  Exports the model with export_moving_averages=False and
  build_runtime_graph=False, then checks that assets are uniquified and that
  neither the averaged nor the non-averaged runtime hook node exists in the
  restored graph.
  """
  # Get the master spec and params for this graph.
  master_spec = self.LoadSpec('ud-hungarian.master-spec')
  params_path = os.path.join(
      test_flags.source_root(),
      'dragnn/python/testdata'
      '/ud-hungarian.params')

  # Export the graph via SavedModel. (Here, we maintain a handle to the graph
  # for comparison, but that's usually not necessary.)
  export_path = os.path.join(test_flags.temp_dir(), 'export')
  dragnn_model_saver_lib.clean_output_paths(export_path)

  saver_graph = tf.Graph()
  shortened_to_original = dragnn_model_saver_lib.shorten_resource_paths(
      master_spec)
  dragnn_model_saver_lib.export_master_spec(master_spec, saver_graph)
  dragnn_model_saver_lib.export_to_graph(
      master_spec,
      params_path,
      export_path,
      saver_graph,
      export_moving_averages=False,
      build_runtime_graph=False)

  # Export the assets as well.
  dragnn_model_saver_lib.export_assets(master_spec, shortened_to_original,
                                       export_path)

  # Validate that the assets are all in the exported directory.
  path_set = self.ValidateAssetExistence(master_spec, export_path)

  # This master-spec has 4 unique assets. If there are more, we have not
  # uniquified the assets properly.
  self.assertEqual(len(path_set), 4)

  # Restore the graph from the checkpoint into a new Graph object.
  restored_graph = tf.Graph()
  restoration_config = tf.ConfigProto(
      log_device_placement=False,
      intra_op_parallelism_threads=10,
      inter_op_parallelism_threads=10)
  with tf.Session(graph=restored_graph, config=restoration_config) as sess:
    tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING],
                               export_path)

    averaged_hook_name, non_averaged_hook_name, _ = self.GetHookNodeNames(
        master_spec)

    # Check that the averaged runtime hook node does not exist.
    with self.assertRaises(KeyError):
      restored_graph.get_operation_by_name(averaged_hook_name)

    # Check that the non-averaged version also does not exist.
    with self.assertRaises(KeyError):
      restored_graph.get_operation_by_name(non_averaged_hook_name)
def testModelExport(self):
  """Tests SavedModel export without moving averages or runtime hooks.

  Exports with export_moving_averages=False and build_runtime_graph=False,
  then verifies asset uniquification and that no runtime hook node (averaged
  or non-averaged) is present in the restored graph.
  """
  # Get the master spec and params for this graph.
  master_spec = self.LoadSpec('ud-hungarian.master-spec')
  params_path = os.path.join(
      test_flags.source_root(),
      'dragnn/python/testdata'
      '/ud-hungarian.params')

  # Export the graph via SavedModel. (Here, we maintain a handle to the graph
  # for comparison, but that's usually not necessary.)
  export_path = os.path.join(test_flags.temp_dir(), 'export')
  dragnn_model_saver_lib.clean_output_paths(export_path)

  saver_graph = tf.Graph()
  shortened_to_original = dragnn_model_saver_lib.shorten_resource_paths(
      master_spec)
  dragnn_model_saver_lib.export_master_spec(master_spec, saver_graph)
  dragnn_model_saver_lib.export_to_graph(
      master_spec,
      params_path,
      export_path,
      saver_graph,
      export_moving_averages=False,
      build_runtime_graph=False)

  # Export the assets as well.
  dragnn_model_saver_lib.export_assets(master_spec, shortened_to_original,
                                       export_path)

  # Validate that the assets are all in the exported directory.
  path_set = self.ValidateAssetExistence(master_spec, export_path)

  # This master-spec has 4 unique assets. If there are more, we have not
  # uniquified the assets properly.
  self.assertEqual(len(path_set), 4)

  # Restore the graph from the checkpoint into a new Graph object.
  restored_graph = tf.Graph()
  restoration_config = tf.ConfigProto(
      log_device_placement=False,
      intra_op_parallelism_threads=10,
      inter_op_parallelism_threads=10)
  with tf.Session(graph=restored_graph, config=restoration_config) as sess:
    tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING],
                               export_path)

    averaged_hook_name, non_averaged_hook_name, _ = self.GetHookNodeNames(
        master_spec)

    # Check that the averaged runtime hook node does not exist.
    with self.assertRaises(KeyError):
      restored_graph.get_operation_by_name(averaged_hook_name)

    # Check that the non-averaged version also does not exist.
    with self.assertRaises(KeyError):
      restored_graph.get_operation_by_name(non_averaged_hook_name)
def setUp(self):
  """Writes a sandboxed task context and builds the lexicon term maps."""
  source_root = test_flags.source_root()
  temp_dir = test_flags.temp_dir()

  # Rewrite the template task context so its paths point at the test sandbox.
  template_path = os.path.join(source_root,
                               'syntaxnet/testdata/context.pbtxt')
  self._task_context = os.path.join(temp_dir, 'context.pbtxt')
  with open(template_path, 'r') as template_file:
    context_text = template_file.read()
  context_text = context_text.replace('SRCDIR', source_root)
  context_text = context_text.replace('OUTPATH', temp_dir)
  with open(self._task_context, 'w') as context_file:
    context_file.write(context_text)

  # Build the term maps required by the parser's feature extractors.
  with self.test_session() as sess:
    gen_parser_ops.lexicon_builder(
        task_context=self._task_context,
        corpus_name='training-corpus').run()
    self._num_features, self._num_feature_ids, _, self._num_actions = (
        sess.run(
            gen_parser_ops.feature_size(
                task_context=self._task_context,
                arg_prefix='brain_parser')))
def testModelExportProducesRunnableModel(self):
  """Tests that an exported SavedModel can be reloaded and run end-to-end.

  Exports the model, restores it into a fresh graph, and feeds a dummy
  serialized Sentence through the annotation pipeline. Only checks that the
  session run completes, not the accuracy of the result.
  """
  # Get the master spec and params for this graph.
  master_spec = self.LoadSpec('ud-hungarian.master-spec')
  params_path = os.path.join(
      test_flags.source_root(),
      'dragnn/python/testdata'
      '/ud-hungarian.params')

  # Export the graph via SavedModel. (Here, we maintain a handle to the graph
  # for comparison, but that's usually not necessary.)
  export_path = os.path.join(test_flags.temp_dir(), 'export')
  dragnn_model_saver_lib.clean_output_paths(export_path)

  saver_graph = tf.Graph()
  shortened_to_original = dragnn_model_saver_lib.shorten_resource_paths(
      master_spec)
  dragnn_model_saver_lib.export_master_spec(master_spec, saver_graph)
  dragnn_model_saver_lib.export_to_graph(
      master_spec,
      params_path,
      export_path,
      saver_graph,
      export_moving_averages=False,
      build_runtime_graph=False)

  # Export the assets as well.
  dragnn_model_saver_lib.export_assets(master_spec, shortened_to_original,
                                       export_path)

  # Restore the graph from the checkpoint into a new Graph object.
  restored_graph = tf.Graph()
  restoration_config = tf.ConfigProto(
      log_device_placement=False,
      intra_op_parallelism_threads=10,
      inter_op_parallelism_threads=10)
  with tf.Session(graph=restored_graph, config=restoration_config) as sess:
    tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING],
                               export_path)

    # Build a one-element input batch from the dummy test sentence.
    test_doc = sentence_pb2.Sentence()
    text_format.Parse(_DUMMY_TEST_SENTENCE, test_doc)
    test_reader_string = test_doc.SerializeToString()
    test_inputs = [test_reader_string]

    tf_out = sess.run(
        'annotation/annotations:0',
        feed_dict={
            'annotation/ComputeSession/InputBatch:0': test_inputs
        })

    # We don't care about accuracy, only that the run sessions don't crash.
    del tf_out
def testModelExportProducesRunnableModel(self):
  """Tests that an exported SavedModel can be reloaded and run end-to-end.

  Exports the model, restores it into a fresh graph, and feeds a dummy
  serialized Sentence through the annotation pipeline. Only checks that the
  session run completes, not the accuracy of the result.
  """
  # Get the master spec and params for this graph.
  master_spec = self.LoadSpec('ud-hungarian.master-spec')
  params_path = os.path.join(
      test_flags.source_root(),
      'dragnn/python/testdata'
      '/ud-hungarian.params')

  # Export the graph via SavedModel. (Here, we maintain a handle to the graph
  # for comparison, but that's usually not necessary.)
  export_path = os.path.join(test_flags.temp_dir(), 'export')
  dragnn_model_saver_lib.clean_output_paths(export_path)

  saver_graph = tf.Graph()
  shortened_to_original = dragnn_model_saver_lib.shorten_resource_paths(
      master_spec)
  dragnn_model_saver_lib.export_master_spec(master_spec, saver_graph)
  dragnn_model_saver_lib.export_to_graph(
      master_spec,
      params_path,
      export_path,
      saver_graph,
      export_moving_averages=False,
      build_runtime_graph=False)

  # Export the assets as well.
  dragnn_model_saver_lib.export_assets(master_spec, shortened_to_original,
                                       export_path)

  # Restore the graph from the checkpoint into a new Graph object.
  restored_graph = tf.Graph()
  restoration_config = tf.ConfigProto(
      log_device_placement=False,
      intra_op_parallelism_threads=10,
      inter_op_parallelism_threads=10)
  with tf.Session(graph=restored_graph, config=restoration_config) as sess:
    tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING],
                               export_path)

    # Build a one-element input batch from the dummy test sentence.
    test_doc = sentence_pb2.Sentence()
    text_format.Parse(_DUMMY_TEST_SENTENCE, test_doc)
    test_reader_string = test_doc.SerializeToString()
    test_inputs = [test_reader_string]

    tf_out = sess.run(
        'annotation/annotations:0',
        feed_dict={'annotation/ComputeSession/InputBatch:0': test_inputs})

    # We don't care about accuracy, only that the run sessions don't crash.
    del tf_out
def setUp(self):
  """Points the test at the mini training corpus and sets the batch size."""
  self.batch_size = 20
  # This dataset contains 54 sentences.
  corpus_root = test_flags.source_root()
  self.filepath = os.path.join(corpus_root,
                               'syntaxnet/testdata/mini-training-set')
def testModelExportWithAveragesAndHooks(self):
  """Tests SavedModel export with moving averages and runtime hooks enabled.

  Exports with export_moving_averages=True and build_runtime_graph=True, then
  verifies that the restored graph contains the averaged hook node (but not
  the non-averaged one) and a well-formed CellSubgraphSpec whose input and
  output tensors resolve in the restored graph.
  """
  # Get the master spec and params for this graph.
  master_spec = self.LoadSpec('ud-hungarian.master-spec')
  params_path = os.path.join(
      test_flags.source_root(),
      'dragnn/python/testdata'
      '/ud-hungarian.params')

  # Export the graph via SavedModel. (Here, we maintain a handle to the graph
  # for comparison, but that's usually not necessary.) Note that the export
  # path must not already exist.
  export_path = os.path.join(test_flags.temp_dir(), 'export2')
  dragnn_model_saver_lib.clean_output_paths(export_path)

  saver_graph = tf.Graph()
  shortened_to_original = dragnn_model_saver_lib.shorten_resource_paths(
      master_spec)
  dragnn_model_saver_lib.export_master_spec(master_spec, saver_graph)
  dragnn_model_saver_lib.export_to_graph(
      master_spec,
      params_path,
      export_path,
      saver_graph,
      export_moving_averages=True,
      build_runtime_graph=True)

  # Export the assets as well.
  dragnn_model_saver_lib.export_assets(master_spec, shortened_to_original,
                                       export_path)

  # Validate that the assets are all in the exported directory.
  path_set = self.ValidateAssetExistence(master_spec, export_path)

  # This master-spec has 4 unique assets. If there are more, we have not
  # uniquified the assets properly.
  self.assertEqual(len(path_set), 4)

  # Restore the graph from the checkpoint into a new Graph object.
  restored_graph = tf.Graph()
  restoration_config = tf.ConfigProto(
      log_device_placement=False,
      intra_op_parallelism_threads=10,
      inter_op_parallelism_threads=10)
  with tf.Session(graph=restored_graph, config=restoration_config) as sess:
    tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING],
                               export_path)

    averaged_hook_name, non_averaged_hook_name, cell_subgraph_hook_name = (
        self.GetHookNodeNames(master_spec))

    # Check that an averaged runtime hook node exists.
    restored_graph.get_operation_by_name(averaged_hook_name)

    # Check that the non-averaged version does not exist.
    with self.assertRaises(KeyError):
      restored_graph.get_operation_by_name(non_averaged_hook_name)

    # Load the cell subgraph. Evaluated with an empty input batch since only
    # the hook's constant output is needed.
    cell_subgraph_bytes = restored_graph.get_tensor_by_name(
        cell_subgraph_hook_name + ':0')
    cell_subgraph_bytes = cell_subgraph_bytes.eval(
        feed_dict={'annotation/ComputeSession/InputBatch:0': []})
    cell_subgraph_spec = export_pb2.CellSubgraphSpec()
    cell_subgraph_spec.ParseFromString(cell_subgraph_bytes)
    tf.logging.info('cell_subgraph_spec = %s', cell_subgraph_spec)

    # Sanity check inputs.
    for cell_input in cell_subgraph_spec.input:
      self.assertGreater(len(cell_input.name), 0)
      self.assertGreater(len(cell_input.tensor), 0)
      self.assertNotEqual(
          cell_input.type, export_pb2.CellSubgraphSpec.Input.TYPE_UNKNOWN)
      restored_graph.get_tensor_by_name(cell_input.tensor)  # shouldn't raise

    # Sanity check outputs.
    for cell_output in cell_subgraph_spec.output:
      self.assertGreater(len(cell_output.name), 0)
      self.assertGreater(len(cell_output.tensor), 0)
      restored_graph.get_tensor_by_name(cell_output.tensor)  # shouldn't raise

    # GetHookNames() finds a component with a fixed feature, so at least the
    # first feature ID should exist.
    self.assertTrue(
        any(cell_input.name == 'fixed_channel_0_index_0_ids'
            for cell_input in cell_subgraph_spec.input))

    # Most dynamic components produce a logits layer.
    self.assertTrue(
        any(cell_output.name == 'logits'
            for cell_output in cell_subgraph_spec.output))
def testModelExportWithAveragesAndHooks(self):
  """Tests SavedModel export with moving averages and runtime hooks enabled.

  Exports with export_moving_averages=True and build_runtime_graph=True, then
  verifies that the restored graph contains the averaged hook node (but not
  the non-averaged one) and a well-formed CellSubgraphSpec whose input and
  output tensors resolve in the restored graph.
  """
  # Get the master spec and params for this graph.
  master_spec = self.LoadSpec('ud-hungarian.master-spec')
  params_path = os.path.join(
      test_flags.source_root(),
      'dragnn/python/testdata'
      '/ud-hungarian.params')

  # Export the graph via SavedModel. (Here, we maintain a handle to the graph
  # for comparison, but that's usually not necessary.) Note that the export
  # path must not already exist.
  export_path = os.path.join(test_flags.temp_dir(), 'export2')
  dragnn_model_saver_lib.clean_output_paths(export_path)

  saver_graph = tf.Graph()
  shortened_to_original = dragnn_model_saver_lib.shorten_resource_paths(
      master_spec)
  dragnn_model_saver_lib.export_master_spec(master_spec, saver_graph)
  dragnn_model_saver_lib.export_to_graph(
      master_spec,
      params_path,
      export_path,
      saver_graph,
      export_moving_averages=True,
      build_runtime_graph=True)

  # Export the assets as well.
  dragnn_model_saver_lib.export_assets(master_spec, shortened_to_original,
                                       export_path)

  # Validate that the assets are all in the exported directory.
  path_set = self.ValidateAssetExistence(master_spec, export_path)

  # This master-spec has 4 unique assets. If there are more, we have not
  # uniquified the assets properly.
  self.assertEqual(len(path_set), 4)

  # Restore the graph from the checkpoint into a new Graph object.
  restored_graph = tf.Graph()
  restoration_config = tf.ConfigProto(
      log_device_placement=False,
      intra_op_parallelism_threads=10,
      inter_op_parallelism_threads=10)
  with tf.Session(graph=restored_graph, config=restoration_config) as sess:
    tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING],
                               export_path)

    averaged_hook_name, non_averaged_hook_name, cell_subgraph_hook_name = (
        self.GetHookNodeNames(master_spec))

    # Check that an averaged runtime hook node exists.
    restored_graph.get_operation_by_name(averaged_hook_name)

    # Check that the non-averaged version does not exist.
    with self.assertRaises(KeyError):
      restored_graph.get_operation_by_name(non_averaged_hook_name)

    # Load the cell subgraph. Evaluated with an empty input batch since only
    # the hook's constant output is needed.
    cell_subgraph_bytes = restored_graph.get_tensor_by_name(
        cell_subgraph_hook_name + ':0')
    cell_subgraph_bytes = cell_subgraph_bytes.eval(
        feed_dict={'annotation/ComputeSession/InputBatch:0': []})
    cell_subgraph_spec = export_pb2.CellSubgraphSpec()
    cell_subgraph_spec.ParseFromString(cell_subgraph_bytes)
    tf.logging.info('cell_subgraph_spec = %s', cell_subgraph_spec)

    # Sanity check inputs.
    for cell_input in cell_subgraph_spec.input:
      self.assertGreater(len(cell_input.name), 0)
      self.assertGreater(len(cell_input.tensor), 0)
      self.assertNotEqual(cell_input.type,
                          export_pb2.CellSubgraphSpec.Input.TYPE_UNKNOWN)
      restored_graph.get_tensor_by_name(cell_input.tensor)  # shouldn't raise

    # Sanity check outputs.
    for cell_output in cell_subgraph_spec.output:
      self.assertGreater(len(cell_output.name), 0)
      self.assertGreater(len(cell_output.tensor), 0)
      restored_graph.get_tensor_by_name(cell_output.tensor)  # shouldn't raise

    # GetHookNames() finds a component with a fixed feature, so at least the
    # first feature ID should exist.
    self.assertTrue(
        any(cell_input.name == 'fixed_channel_0_index_0_ids'
            for cell_input in cell_subgraph_spec.input))

    # Most dynamic components produce a logits layer.
    self.assertTrue(
        any(cell_output.name == 'logits'
            for cell_output in cell_subgraph_spec.output))