def _v1_multi_metagraph_saved_model(self):
  """Builds a TF1 SavedModel containing two meta graphs over one variable.

  The "first" meta graph (saved with variables) exposes signature
  `first_key` mapping start -> start * v; the "second" meta graph exposes
  `second_key` returning v. Returns the export directory path.
  """
  export_graph = ops.Graph()
  with export_graph.as_default():
    start = array_ops.placeholder(
        shape=[None], dtype=dtypes.float32, name="start")
    v = resource_variable_ops.ResourceVariable(21.)
    first_output = array_ops.identity(start * v, name="first_output")
    second_output = array_ops.identity(v, name="second_output")
    with session_lib.Session() as session:
      session.run(v.initializer)
      # Unique path per call (ops.uid()) so repeated exports do not collide.
      path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
      builder = builder_impl.SavedModelBuilder(path)
      # Only this first call writes the variable values; the add_meta_graph
      # call below reuses them.
      builder.add_meta_graph_and_variables(
          session,
          tags=["first"],
          signature_def_map={
              "first_key":
                  signature_def_utils.build_signature_def(
                      {"first_start": utils_impl.build_tensor_info(start)},
                      {"first_output":
                           utils_impl.build_tensor_info(first_output)})})
      builder.add_meta_graph(
          tags=["second"],
          signature_def_map={
              "second_key":
                  signature_def_utils.build_signature_def(
                      {"second_start": utils_impl.build_tensor_info(start)},
                      {"second_output":
                           utils_impl.build_tensor_info(second_output)})})
      builder.save()
  return path
def setUp(self):
  """Write test SavedModels to a temp directory."""
  with session.Session(graph=ops.Graph()) as sess:
    x = variables.Variable(5, name="x")
    y = variables.Variable(11, name="y")
    z = x + y
    sess.run(variables.global_variables_initializer())

    # Signature "foo": x -> z; signature "bar": (x, y) -> z.
    foo_sig_def = signature_def_utils.build_signature_def(
        {"foo_input": utils.build_tensor_info(x)},
        {"foo_output": utils.build_tensor_info(z)})
    bar_sig_def = signature_def_utils.build_signature_def(
        {"bar_x": utils.build_tensor_info(x),
         "bar_y": utils.build_tensor_info(y)},
        {"bar_z": utils.build_tensor_info(z)})

    # Plain SavedModel with both signatures under tag "foo_graph".
    builder = saved_model_builder.SavedModelBuilder(SIMPLE_ADD_SAVED_MODEL)
    builder.add_meta_graph_and_variables(
        sess, ["foo_graph"], {"foo": foo_sig_def, "bar": bar_sig_def})
    builder.save()

    # Write SavedModel with a main_op (groups an assign of y = 7).
    assign_op = control_flow_ops.group(state_ops.assign(y, 7))
    builder = saved_model_builder.SavedModelBuilder(SAVED_MODEL_WITH_MAIN_OP)
    builder.add_meta_graph_and_variables(
        sess, ["foo_graph"], {"foo": foo_sig_def, "bar": bar_sig_def},
        main_op=assign_op)
    builder.save()
def testSignatureDefs(self):
  """Round-trips two meta graphs and verifies their SignatureDef maps."""
  export_dir = os.path.join(tf.test.get_temp_dir(), "test_signature_defs")
  builder = saved_model_builder.SavedModelBuilder(export_dir)

  # Graph with a single variable and a single entry in the signature def map.
  # SavedModel is invoked to add with weights.
  with self.test_session(graph=tf.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 42)
    # Build and populate an empty SignatureDef for testing.
    foo_signature = signature_def_utils.build_signature_def(
        dict(), dict(), "foo")
    builder.add_meta_graph_and_variables(
        sess, ["foo"], signature_def_map={"foo_key": foo_signature})

  # Graph with the same single variable and multiple entries in the signature
  # def map. No weights are saved by SavedModel.
  with self.test_session(graph=tf.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 43)
    # Build and populate a different SignatureDef for testing.
    bar_signature = signature_def_utils.build_signature_def(
        dict(), dict(), "bar")
    # Also, build a different SignatureDef corresponding to "foo_key" defined
    # in the previous graph.
    foo_new_signature = signature_def_utils.build_signature_def(
        dict(), dict(), "foo_new")
    builder.add_meta_graph(["bar"], signature_def_map={
        "bar_key": bar_signature,
        "foo_key": foo_new_signature
    })

  # Save the SavedModel to disk.
  builder.save()

  # Restore the graph with tag "foo". The single entry in the SignatureDef map
  # corresponding to "foo_key" should exist.
  with self.test_session(graph=tf.Graph()) as sess:
    foo_graph = loader.load(sess, ["foo"], export_dir)
    # Variable value 42 from the first (weight-saving) meta graph is restored.
    self.assertEqual(
        42, tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)[0].eval())
    foo_signature = foo_graph.signature_def
    self.assertEqual(len(foo_signature), 1)
    self.assertEqual("foo", foo_signature["foo_key"].method_name)

  # Restore the graph with tag "bar". The SignatureDef map should have two
  # entries. One corresponding to "bar_key" and another corresponding to the
  # new value of "foo_key".
  with self.test_session(graph=tf.Graph()) as sess:
    bar_graph = loader.load(sess, ["bar"], export_dir)
    self.assertEqual(
        42, tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)[0].eval())
    bar_signature = bar_graph.signature_def
    self.assertEqual(len(bar_signature), 2)
    self.assertEqual("bar", bar_signature["bar_key"].method_name)
    self.assertEqual("foo_new", bar_signature["foo_key"].method_name)
def setUp(self):
  """Write test SavedModels to a temp directory."""
  with session.Session(graph=ops.Graph()) as sess:
    x = variables.VariableV1(5, name="x")
    y = variables.VariableV1(11, name="y")
    z = x + y
    self.evaluate(variables.global_variables_initializer())

    # Signature "foo": x -> z; signature "bar": (x, y) -> z.
    foo_sig_def = signature_def_utils.build_signature_def(
        {"foo_input": utils.build_tensor_info(x)},
        {"foo_output": utils.build_tensor_info(z)})
    bar_sig_def = signature_def_utils.build_signature_def(
        {"bar_x": utils.build_tensor_info(x),
         "bar_y": utils.build_tensor_info(y)},
        {"bar_z": utils.build_tensor_info(z)})

    # Plain SavedModel with both signatures under tag "foo_graph".
    builder = saved_model_builder.SavedModelBuilder(SIMPLE_ADD_SAVED_MODEL)
    builder.add_meta_graph_and_variables(
        sess, ["foo_graph"], {"foo": foo_sig_def, "bar": bar_sig_def})
    builder.save()

    # Write SavedModel with a main_op (groups an assign of y = 7).
    assign_op = control_flow_ops.group(state_ops.assign(y, 7))
    builder = saved_model_builder.SavedModelBuilder(SAVED_MODEL_WITH_MAIN_OP)
    builder.add_meta_graph_and_variables(
        sess, ["foo_graph"], {"foo": foo_sig_def, "bar": bar_sig_def},
        main_op=assign_op)
    builder.save()
def testSignatureDefs(self):
  """Round-trips two meta graphs and verifies their SignatureDef maps."""
  export_dir = self._get_export_dir("test_signature_defs")
  builder = saved_model_builder.SavedModelBuilder(export_dir)

  # Graph with a single variable and a single entry in the signature def map.
  # SavedModel is invoked to add with weights.
  with self.test_session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 42)
    # Build and populate an empty SignatureDef for testing.
    foo_signature = signature_def_utils.build_signature_def(dict(), dict(),
                                                            "foo")
    builder.add_meta_graph_and_variables(
        sess, ["foo"], signature_def_map={"foo_key": foo_signature})

  # Graph with the same single variable and multiple entries in the signature
  # def map. No weights are saved by SavedModel.
  with self.test_session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 43)
    # Build and populate a different SignatureDef for testing.
    bar_signature = signature_def_utils.build_signature_def(dict(), dict(),
                                                            "bar")
    # Also, build a different SignatureDef corresponding to "foo_key" defined
    # in the previous graph.
    foo_new_signature = signature_def_utils.build_signature_def(dict(), dict(),
                                                                "foo_new")
    builder.add_meta_graph(
        ["bar"],
        signature_def_map={
            "bar_key": bar_signature,
            "foo_key": foo_new_signature
        })

  # Save the SavedModel to disk.
  builder.save()

  # Restore the graph with tag "foo". The single entry in the SignatureDef map
  # corresponding to "foo_key" should exist.
  with self.test_session(graph=ops.Graph()) as sess:
    foo_graph = loader.load(sess, ["foo"], export_dir)
    # Variable value 42 from the first (weight-saving) meta graph is restored.
    self.assertEqual(
        42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
    foo_signature = foo_graph.signature_def
    self.assertEqual(len(foo_signature), 1)
    self.assertEqual("foo", foo_signature["foo_key"].method_name)

  # Restore the graph with tag "bar". The SignatureDef map should have two
  # entries. One corresponding to "bar_key" and another corresponding to the
  # new value of "foo_key".
  with self.test_session(graph=ops.Graph()) as sess:
    bar_graph = loader.load(sess, ["bar"], export_dir)
    self.assertEqual(
        42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
    bar_signature = bar_graph.signature_def
    self.assertEqual(len(bar_signature), 2)
    self.assertEqual("bar", bar_signature["bar_key"].method_name)
    self.assertEqual("foo_new", bar_signature["foo_key"].method_name)
def testGetSignatureDefByKey(self):
  """Looks up SignatureDefs by key, including a missing key."""
  x = array_ops.placeholder(dtypes.float32, 1, name="x")
  x_tensor_info = utils.build_tensor_info(x)
  y = array_ops.placeholder(dtypes.float32, name="y")
  y_tensor_info = utils.build_tensor_info(y)

  foo_signature_def = signature_def_utils.build_signature_def(
      {"foo-input": x_tensor_info}, {"foo-output": y_tensor_info},
      "foo-method-name")
  bar_signature_def = signature_def_utils.build_signature_def(
      {"bar-input": x_tensor_info}, {"bar-output": y_tensor_info},
      "bar-method-name")
  meta_graph_def = meta_graph_pb2.MetaGraphDef()
  self._add_to_signature_def_map(meta_graph_def, {
      "foo": foo_signature_def,
      "bar": bar_signature_def
  })

  # Look up a key that does not exist in the SignatureDefMap.
  missing_key = "missing-key"
  with self.assertRaisesRegexp(
      ValueError,
      "No SignatureDef with key '%s' found in MetaGraphDef" % missing_key):
    signature_def_contrib_utils.get_signature_def_by_key(
        meta_graph_def, missing_key)

  # Look up the key, `foo` which exists in the SignatureDefMap.
  foo_signature_def = signature_def_contrib_utils.get_signature_def_by_key(
      meta_graph_def, "foo")
  # BUGFIX: was assertTrue("foo-method-name", ...), which always passes
  # because the second argument is only the failure message; assertEqual
  # actually compares the method names.
  self.assertEqual("foo-method-name", foo_signature_def.method_name)

  # Check inputs in signature def.
  self.assertEqual(1, len(foo_signature_def.inputs))
  self._check_tensor_info(foo_signature_def.inputs, "foo-input", "x:0")

  # Check outputs in signature def.
  self.assertEqual(1, len(foo_signature_def.outputs))
  self._check_tensor_info(foo_signature_def.outputs, "foo-output", "y:0")

  # Look up the key, `bar` which exists in the SignatureDefMap.
  bar_signature_def = signature_def_contrib_utils.get_signature_def_by_key(
      meta_graph_def, "bar")
  # BUGFIX: same assertTrue -> assertEqual fix as above.
  self.assertEqual("bar-method-name", bar_signature_def.method_name)

  # Check inputs in signature def.
  self.assertEqual(1, len(bar_signature_def.inputs))
  self._check_tensor_info(bar_signature_def.inputs, "bar-input", "x:0")

  # Check outputs in signature def.
  self.assertEqual(1, len(bar_signature_def.outputs))
  self._check_tensor_info(bar_signature_def.outputs, "bar-output", "y:0")
def testGetSignatureDefByKey(self):
  """Looks up SignatureDefs by key, including a missing key."""
  x = array_ops.placeholder(dtypes.float32, 1, name="x")
  x_tensor_info = utils.build_tensor_info(x)
  y = array_ops.placeholder(dtypes.float32, name="y")
  y_tensor_info = utils.build_tensor_info(y)

  foo_signature_def = signature_def_utils.build_signature_def({
      "foo-input": x_tensor_info
  }, {"foo-output": y_tensor_info}, "foo-method-name")
  bar_signature_def = signature_def_utils.build_signature_def({
      "bar-input": x_tensor_info
  }, {"bar-output": y_tensor_info}, "bar-method-name")
  meta_graph_def = meta_graph_pb2.MetaGraphDef()
  self._add_to_signature_def_map(
      meta_graph_def, {"foo": foo_signature_def, "bar": bar_signature_def})

  # Look up a key that does not exist in the SignatureDefMap.
  missing_key = "missing-key"
  with self.assertRaisesRegexp(
      ValueError,
      "No SignatureDef with key '%s' found in MetaGraphDef" % missing_key):
    signature_def_contrib_utils.get_signature_def_by_key(
        meta_graph_def, missing_key)

  # Look up the key, `foo` which exists in the SignatureDefMap.
  foo_signature_def = signature_def_contrib_utils.get_signature_def_by_key(
      meta_graph_def, "foo")
  # BUGFIX: was assertTrue("foo-method-name", ...), which always passes
  # because the second argument is only the failure message; assertEqual
  # actually compares the method names.
  self.assertEqual("foo-method-name", foo_signature_def.method_name)

  # Check inputs in signature def.
  self.assertEqual(1, len(foo_signature_def.inputs))
  self._check_tensor_info(foo_signature_def.inputs, "foo-input", "x:0")

  # Check outputs in signature def.
  self.assertEqual(1, len(foo_signature_def.outputs))
  self._check_tensor_info(foo_signature_def.outputs, "foo-output", "y:0")

  # Look up the key, `bar` which exists in the SignatureDefMap.
  bar_signature_def = signature_def_contrib_utils.get_signature_def_by_key(
      meta_graph_def, "bar")
  # BUGFIX: same assertTrue -> assertEqual fix as above.
  self.assertEqual("bar-method-name", bar_signature_def.method_name)

  # Check inputs in signature def.
  self.assertEqual(1, len(bar_signature_def.inputs))
  self._check_tensor_info(bar_signature_def.inputs, "bar-input", "x:0")

  # Check outputs in signature def.
  self.assertEqual(1, len(bar_signature_def.outputs))
  self._check_tensor_info(bar_signature_def.outputs, "bar-output", "y:0")
def build_graph_helper():
  """Builds a graph with variables x, y and foo/bar signature defs.

  Returns:
    A (graph, signature_def_map, y_variable) tuple.
  """
  graph = ops.Graph()
  with graph.as_default():
    var_x = variables.VariableV1(5, name="x")
    var_y = variables.VariableV1(11, name="y")
    total = var_x + var_y
    # "foo": x -> z; "bar": (x, y) -> z.
    foo_sig_def = signature_def_utils.build_signature_def(
        inputs={"foo_input": utils.build_tensor_info(var_x)},
        outputs={"foo_output": utils.build_tensor_info(total)})
    bar_sig_def = signature_def_utils.build_signature_def(
        inputs={
            "bar_x": utils.build_tensor_info(var_x),
            "bar_y": utils.build_tensor_info(var_y),
        },
        outputs={"bar_z": utils.build_tensor_info(total)})
  return graph, {"foo": foo_sig_def, "bar": bar_sig_def}, var_y
def export_model():
    """Restores the linear-regression checkpoint and exports a SavedModel."""
    trained_checkpoint_prefix = 'linear_regression'
    loaded_graph = tf.Graph()
    with tf.Session(graph=loaded_graph) as sess:
        # NOTE(review): the graph is still empty here, so this initializer
        # appears to be a no-op; values come from the restore below — confirm.
        sess.run(tf.global_variables_initializer())

        # Restore from checkpoint:
        loader = tf.train.import_meta_graph(trained_checkpoint_prefix + '.meta')
        loader.restore(sess, trained_checkpoint_prefix)

        # Add signature: expose graph tensors 'X:0' -> 'y_model:0' as the
        # default serving signature.
        graph = tf.get_default_graph()
        inputs = tf.saved_model.utils.build_tensor_info(graph.get_tensor_by_name('X:0'))
        outputs = tf.saved_model.utils.build_tensor_info(graph.get_tensor_by_name('y_model:0'))
        signature = signature_def_utils.build_signature_def(
            inputs={'X': inputs},
            outputs={'y_model': outputs},
            method_name=signature_constants.PREDICT_METHOD_NAME)
        signature_map = {signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                         signature}

        # Export model:
        builder = tf.saved_model.builder.SavedModelBuilder('./my_model')
        builder.add_meta_graph_and_variables(
            sess,
            signature_def_map=signature_map,
            tags=[tf.saved_model.tag_constants.SERVING])
        builder.save()
def main():
    # create instance of config
    config = Config()

    # build model and restore the trained weights
    model = NERModel(config)
    model.build()
    model.restore_session(config.dir_model)

    # Look up the serving input/output tensors by the names recorded on the
    # model (":0" selects the op's first output tensor).
    a_tensor = model.sess.graph.get_tensor_by_name(model.input_name + ':0')
    sum_tensor = model.sess.graph.get_tensor_by_name(model.output_name + ':0')
    model_input = build_tensor_info(a_tensor)
    model_output = build_tensor_info(sum_tensor)

    # Create a signature definition for tfserving
    signature_definition = signature_def_utils.build_signature_def(
        inputs={model.input_name: model_input},
        outputs={model.output_name: model_output},
        method_name=signature_constants.PREDICT_METHOD_NAME)

    # NOTE(review): `export_loc` is not defined in this function; presumably
    # a module-level constant — confirm it is set before main() runs.
    builder = saved_model_builder.SavedModelBuilder(export_loc)
    builder.add_meta_graph_and_variables(
        model.sess, [tag_constants.SERVING],
        signature_def_map={
            signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                signature_definition
        },
        legacy_init_op=tf.tables_initializer())

    # Save the model so we can serve it with a model server :)
    builder.save()
def classification_signature_def(examples, prob):
  """Builds a SignatureDef for a rank classifier.

  Args:
    examples: A string `Tensor` used as the CLASSIFY_INPUTS input.
    prob: A float `Tensor` of scores, exported under the 'prob' output key.

  Returns:
    A SignatureDef whose method name is PREDICT_METHOD_NAME.

  Raises:
    ValueError: If `examples` or `prob` is None or has the wrong dtype.
  """
  if examples is None:
    raise ValueError('RankClassifier examples cannot be None.')
  if not isinstance(examples, ops.Tensor):
    raise ValueError('RankClassifier examples must be a string Tensor.')
  if prob is None:
    raise ValueError(
        'RankClassifier classes and scores cannot both be None.')

  input_tensor_info = utils.build_tensor_info(examples)
  if input_tensor_info.dtype != types_pb2.DT_STRING:
    raise ValueError('RankClassifier examples must be a string Tensor.')
  signature_inputs = {signature_constants.CLASSIFY_INPUTS: input_tensor_info}

  # `prob` is guaranteed non-None here (checked above), so build the 'prob'
  # output unconditionally; the old `if prob is not None` guard was dead code.
  scores_tensor_info = utils.build_tensor_info(prob)
  if scores_tensor_info.dtype != types_pb2.DT_FLOAT:
    raise ValueError('RankClassifier scores must be a float Tensor.')
  signature_outputs = {'prob': scores_tensor_info}

  signature_def = signature_def_utils.build_signature_def(
      signature_inputs, signature_outputs,
      signature_constants.PREDICT_METHOD_NAME)
  return signature_def
def save_signature(self, sess, directory, params):
    """Exports the model as a serving SavedModel in `directory`.

    The serving signature maps every feature tensor from
    `self.get_features(params)` to every tensor in `self.predictions`.

    Args:
        sess: Active tf.Session holding the trained graph.
        directory: Destination directory for the SavedModel (must not exist).
        params: Forwarded to `self.get_features` to obtain the input tensors.
    """
    # (Removed a stale commented-out hard-coded input/output mapping; the
    # dict comprehensions below are the live implementation.)
    signature = signature_def_utils.build_signature_def(
        inputs={
            k: saved_model_utils.build_tensor_info(v)
            for k, v in self.get_features(params).items()
        },
        outputs={
            k: saved_model_utils.build_tensor_info(v)
            for k, v in self.predictions.items()
        },
        method_name=signature_constants.PREDICT_METHOD_NAME)
    signature_map = {
        signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature
    }
    model_builder = saved_model_builder.SavedModelBuilder(directory)
    model_builder.add_meta_graph_and_variables(
        sess,
        tags=[tag_constants.SERVING],
        signature_def_map=signature_map,
        clear_devices=True)
    model_builder.save(as_text=False)
def testBuildSignatureDef(self):
  """build_signature_def copies method name, inputs and outputs verbatim."""
  x = array_ops.placeholder(dtypes.float32, 1, name="x")
  y = array_ops.placeholder(dtypes.float32, name="y")
  inputs = {"foo-input": utils.build_tensor_info(x)}
  outputs = {"foo-output": utils.build_tensor_info(y)}

  signature_def = signature_def_utils.build_signature_def(
      inputs, outputs, "foo-method-name")

  self.assertEqual("foo-method-name", signature_def.method_name)

  # Exactly one input, matching placeholder x (shape [1], float32).
  self.assertEqual(1, len(signature_def.inputs))
  actual_input = signature_def.inputs["foo-input"]
  self.assertEqual("x:0", actual_input.name)
  self.assertEqual(types_pb2.DT_FLOAT, actual_input.dtype)
  self.assertEqual(1, len(actual_input.tensor_shape.dim))
  self.assertEqual(1, actual_input.tensor_shape.dim[0].size)

  # Exactly one output, matching placeholder y (no static shape, float32).
  self.assertEqual(1, len(signature_def.outputs))
  actual_output = signature_def.outputs["foo-output"]
  self.assertEqual("y:0", actual_output.name)
  self.assertEqual(types_pb2.DT_FLOAT, actual_output.dtype)
  self.assertEqual(0, len(actual_output.tensor_shape.dim))
def _predict_signature_def(inputs, outputs): """Creates prediction signature from given inputs and outputs. Args: inputs: dict of string to `Tensor`. outputs: dict of string to `Tensor`. Returns: A prediction-flavored signature_def. Raises: ValueError: If inputs or outputs is `None`. """ if inputs is None or not inputs: raise ValueError('inputs cannot be None or empty for prediction.') if outputs is None: raise ValueError('outputs cannot be None or empty for prediction.') signature_inputs = { key: saved_model_utils.build_tensor_info(tensor) for key, tensor in inputs.items() } signature_outputs = { key: saved_model_utils.build_tensor_info(tensor) for key, tensor in outputs.items() } return signature_def_utils.build_signature_def( signature_inputs, signature_outputs, signature_constants.PREDICT_METHOD_NAME)
def testBuildSignatureDef(self):
  """build_signature_def mirrors the given method name, inputs and outputs."""
  in_placeholder = array_ops.placeholder(dtypes.float32, 1, name="x")
  out_placeholder = array_ops.placeholder(dtypes.float32, name="y")
  sig = signature_def_utils.build_signature_def(
      {"foo-input": utils.build_tensor_info(in_placeholder)},
      {"foo-output": utils.build_tensor_info(out_placeholder)},
      "foo-method-name")

  self.assertEqual("foo-method-name", sig.method_name)

  # Single input entry carrying x's name, dtype and [1] shape.
  self.assertEqual(1, len(sig.inputs))
  in_info = sig.inputs["foo-input"]
  self.assertEqual("x:0", in_info.name)
  self.assertEqual(types_pb2.DT_FLOAT, in_info.dtype)
  self.assertEqual(1, len(in_info.tensor_shape.dim))
  self.assertEqual(1, in_info.tensor_shape.dim[0].size)

  # Single output entry carrying y's name and dtype; y was created without a
  # static shape, so no dims are recorded.
  self.assertEqual(1, len(sig.outputs))
  out_info = sig.outputs["foo-output"]
  self.assertEqual("y:0", out_info.name)
  self.assertEqual(types_pb2.DT_FLOAT, out_info.dtype)
  self.assertEqual(0, len(out_info.tensor_shape.dim))
def export_model():
    """Exports the current graph as a versioned SavedModel for TF Serving."""
    model_path = "model"
    model_version = 1
    print("Export the model file: {}, version: {}".format(
        model_path, model_version))

    # NOTE(review): relies on module-level globals `model_features_placeholder`,
    # `model_softmax`, `model_prediction` and `sess` — confirm they exist
    # before this function is called.
    model_signature = signature_def_utils.build_signature_def(
        inputs={
            "image": utils.build_tensor_info(model_features_placeholder)
        },
        outputs={
            "softmax": utils.build_tensor_info(model_softmax),
            "prediction": utils.build_tensor_info(model_prediction)
        },
        method_name=signature_constants.PREDICT_METHOD_NAME)

    # Directory layout expected by TF Serving: <model_path>/<version>.
    export_path = os.path.join(compat.as_bytes(model_path),
                               compat.as_bytes(str(model_version)))
    legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')
    builder = saved_model_builder.SavedModelBuilder(export_path)
    builder.add_meta_graph_and_variables(
        sess, [tag_constants.SERVING],
        clear_devices=True,
        signature_def_map={
            signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                model_signature,
        },
        legacy_init_op=legacy_init_op)
    builder.save()
def fit(self, train_data, val_data):
    """Trains the model on pickled batches and exports a SavedModel.

    Args:
        train_data: Sequence of pickled batch dicts with keys 'cont_feats',
            'vector_feats', 'cate_feats' and 'labels'.
        val_data: Validation data forwarded to `self.evaluate`.
    """
    self.model_optimizer()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        losses = []
        num_samples = 0
        for epoch in range(self.epochs):
            st = time.time()
            for i in range(len(train_data)):
                # SECURITY NOTE(review): pickle.loads on train_data — only
                # feed trusted, locally produced data here.
                data_batch = pickle.loads(train_data[i])
                feed_dict = {
                    self.cont_feats: data_batch["cont_feats"],
                    self.vector_feats: data_batch["vector_feats"],
                    self.cate_feats: data_batch["cate_feats"],
                    self.input_data_size: [len(data_batch["cont_feats"])],
                    self.label: data_batch["labels"]
                }
                self.loss_train, op = sess.run([self.loss, self.optimizer],
                                               feed_dict=feed_dict)
                # Weight each batch's loss by batch size for the average below.
                losses.append(self.loss_train * self.batch_size)
                num_samples += self.batch_size
            end_time = time.time()
            # NOTE(review): losses/num_samples are never reset, so total_loss
            # averages over all epochs so far — confirm a per-epoch reset
            # wasn't intended.
            total_loss = float(np.sum(losses) / num_samples)
            valid_metric = self.evaluate(sess, val_data)
            print('[%s] valid-%s=%.5f\tloss=%.5f [%.1f s]'
                  % (epoch + 1, self.metric_type, valid_metric, total_loss,
                     end_time - st))
            sys.stdout.flush()

        # Save the trained graph as a SavedModel (pb) for serving.
        model_signature = signature_def_utils.build_signature_def(
            inputs={
                "cont_feats": utils.build_tensor_info(self.cont_feats),
                "vector_feats": utils.build_tensor_info(self.vector_feats),
                "cate_feats": utils.build_tensor_info(self.cate_feats),
                "input_data_size": utils.build_tensor_info(self.input_data_size)
            },
            outputs={"output": utils.build_tensor_info(self.score)},
            method_name=signature_constants.PREDICT_METHOD_NAME)
        try:
            legacy_init_op = tf.group(tf.tables_initializer(),
                                      name='legacy_init_op')
            builder = saved_model_builder.SavedModelBuilder(self.model_pb)
            builder.add_meta_graph_and_variables(
                sess, [tag_constants.SERVING],
                clear_devices=True,
                signature_def_map={
                    signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                        model_signature,
                },
                legacy_init_op=legacy_init_op)
            builder.save()
        except Exception as e:
            # Best-effort export: a failure is reported, not raised.
            print("Fail to export saved model, exception: {}".format(e))
            sys.stdout.flush()
def export(self):
    """Restores the trained session and exports it as a serving SavedModel.

    Reads the checkpoint at `self.model_file` into `self.GRAPH`, then writes
    a SavedModel mapping `self.fbanks` -> `self.d_vector` under
    'Results/SavedModels/'.
    """
    with tf.Session(graph=self.GRAPH) as sess:
        # Restore the trained weights from the checkpoint.
        saver = tf.train.Saver()
        saver.restore(sess, self.model_file)

        export_path = 'Results/SavedModels/'
        print('Exporting trained model to ', export_path)
        builder = saved_model_builder.SavedModelBuilder(export_path)

        # (Removed stale commented-out tf.parse_example experiment; the
        # model is served directly from the fbanks tensor.)
        inputs_fbanks = utils.build_tensor_info(self.fbanks)
        outputs_d_vector = utils.build_tensor_info(self.d_vector)

        prediction_signature = signature_def_utils.build_signature_def(
            inputs={'fbanks': inputs_fbanks},
            outputs={'d_vector': outputs_d_vector},
            method_name=signature_constants.PREDICT_METHOD_NAME)

        legacy_init_op = tf.group(tf.tables_initializer(),
                                  name='legacy_init_op')
        builder.add_meta_graph_and_variables(
            sess, [tag_constants.SERVING],
            signature_def_map={
                signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                    prediction_signature,
            },
            legacy_init_op=legacy_init_op)
        builder.save()
        print('Done Exporting!')
def convert_model_to_tf(trained_checkpoint_prefix, version):
    """Converts a checkpointed model into a versioned SavedModel.

    Args:
        trained_checkpoint_prefix: Checkpoint prefix; a matching '.meta'
            file must exist next to it.
        version: Model version; the export lands in models/<version>.
    """
    export_dir = os.path.join('models', str(version))
    loaded_graph = tf.Graph()
    with tf.Session(graph=loaded_graph) as sess:
        loader = tf.train.import_meta_graph(trained_checkpoint_prefix + '.meta')
        loader.restore(sess, trained_checkpoint_prefix)

        # Serving input: serialized tf.Example protos parsed into a fixed
        # [1, 3, 416, 416] float feature 'x'.
        serialized_tf_yolo = tf.placeholder(tf.string, name='tf_yolo')
        feature_configs = {
            'x': tf.FixedLenFeature(shape=[1, 3, 416, 416], dtype=tf.float32),
        }
        tf_yolo = tf.parse_example(serialized_tf_yolo, feature_configs)
        x = tf_yolo['x']
        tensor_info_x = utils.build_tensor_info(x)
        # NOTE(review): the 'scores' output is a fresh placeholder, not a
        # tensor computed by the restored graph — confirm this is intended.
        tensor_info_y = utils.build_tensor_info(
            tf.placeholder(tf.float32, shape=(None, 8)))

        prediction_signature = signature_def_utils.build_signature_def(
            inputs={'image': tensor_info_x},
            outputs={'scores': tensor_info_y},
            method_name=signature_constants.PREDICT_METHOD_NAME)

        legacy_init_op = tf.group(tf.tables_initializer(),
                                  name='legacy_init_op')
        builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
        builder.add_meta_graph_and_variables(
            sess, [tf.saved_model.tag_constants.SERVING],
            signature_def_map={
                'predict_image': prediction_signature,
            },
            legacy_init_op=legacy_init_op)
        builder.save()
def main(argv):
  """Exports a trivial identity SavedModel to FLAGS.saved_model_path."""
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')

  # Start from a clean export directory; ignore_errors lets the first run
  # (no pre-existing directory) succeed instead of raising FileNotFoundError.
  shutil.rmtree(FLAGS.saved_model_path, ignore_errors=True)

  # Create the graph
  x = array_ops.placeholder(dtypes.int32, shape=(1, 3), name='input')
  r = array_ops.identity(x)

  sess = session.Session()

  sm_builder = builder.SavedModelBuilder(FLAGS.saved_model_path)
  tensor_info_r = utils.build_tensor_info(r)

  # Signature declares outputs only; 'input' is still reachable by name in
  # the graph.
  toy_signature = (
      signature_def_utils.build_signature_def(
          outputs={'r': tensor_info_r},
          method_name=signature_constants.PREDICT_METHOD_NAME))

  sm_builder.add_meta_graph_and_variables(
      sess, [tag_constants.SERVING],
      signature_def_map={
          signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
              toy_signature,
      },
      strip_default_attrs=True)
  sm_builder.save()
def save_tf_model(sess, model_name, model_version, input_tensor_dict,
                  out_tensor_dict, force_write=False):
    """Exports `sess` as a SavedModel under <model_name>/<model_version>.

    Args:
        sess: Active tf.Session holding the graph to export.
        model_name: Base export directory; also used as the signature key.
        model_version: Integer version; becomes the version subdirectory.
        input_tensor_dict: dict of signature input name -> Tensor.
        out_tensor_dict: dict of signature output name -> Tensor.
        force_write: If True, delete any existing export at the target path.
    """
    # isinstance() is the idiomatic type check (was: type(...) is not int).
    if not isinstance(model_version, int):
        print("Error! input model_version must be a int number! eg. 1")
        return
    export_path = os.path.join(compat.as_bytes(model_name),
                               compat.as_bytes(str(model_version)))
    if force_write and os.path.exists(export_path):
        shutil.rmtree(export_path)
    print('Exporting trained model to', export_path)

    builder = saved_model_builder.SavedModelBuilder(export_path)
    signature_inputs = {key: utils.build_tensor_info(tensor)
                        for key, tensor in input_tensor_dict.items()}
    signature_outputs = {key: utils.build_tensor_info(tensor)
                         for key, tensor in out_tensor_dict.items()}
    prediction_signature = signature_def_utils.build_signature_def(
        inputs=signature_inputs,
        outputs=signature_outputs,
        method_name=signature_constants.PREDICT_METHOD_NAME)
    builder.add_meta_graph_and_variables(
        sess, [tag_constants.SERVING],
        signature_def_map={model_name: prediction_signature},
        clear_devices=True)
    builder.save()
    print('Done exporting!')
def export(self, last_checkpoint, output_dir):
  """Builds a prediction graph and exports the model.

  Args:
    last_checkpoint: The latest checkpoint from training.
    output_dir: Path to the folder to be used to output the model.
  """
  logging.info('Exporting prediction graph to %s', output_dir)
  with tf.Session(graph=tf.Graph()) as sess:
    # Build and save prediction meta graph and trained variable values.
    input_signatures, output_signatures = self.build_prediction_graph()
    # Remove this if once Tensorflow 0.12 is standard.
    try:
      init_op = tf.global_variables_initializer()
    except AttributeError:
      # Older TF releases only have the deprecated name.
      init_op = tf.initialize_all_variables()
    sess.run(init_op)
    # Overwrite the freshly initialized values with the trained weights.
    trained_saver = tf.train.Saver()
    trained_saver.restore(sess, last_checkpoint)

    predict_signature_def = signature_def_utils.build_signature_def(
        input_signatures, output_signatures,
        signature_constants.PREDICT_METHOD_NAME)
    # Create a saver for writing SavedModel training checkpoints.
    build = builder.SavedModelBuilder(
        os.path.join(output_dir, 'saved_model'))
    build.add_meta_graph_and_variables(
        sess, [tag_constants.SERVING],
        signature_def_map={
            signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                predict_signature_def
        },
        assets_collection=tf.get_collection(tf.GraphKeys.ASSET_FILEPATHS))
    build.save()
def main(argv):
  """Exports a SavedModel whose result counts the input x up to 10."""
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')

  # Start from a clean export directory; ignore_errors lets the first run
  # (no pre-existing directory) succeed instead of raising FileNotFoundError.
  shutil.rmtree(FLAGS.saved_model_path, ignore_errors=True)

  # Create the graph: r = while (a < 10) a += 1, starting from input x.
  ten = constant_op.constant(10)
  one = constant_op.constant(1)
  x = array_ops.placeholder(dtypes.int32, shape=(), name='input')
  r = control_flow_ops.while_loop(lambda a: a < ten,
                                  lambda a: math_ops.add(a, one), [x])

  sess = session.Session()

  sm_builder = builder.SavedModelBuilder(FLAGS.saved_model_path)
  tensor_info_x = utils.build_tensor_info(x)
  tensor_info_r = utils.build_tensor_info(r)

  func_signature = (signature_def_utils.build_signature_def(
      inputs={'x': tensor_info_x},
      outputs={'r': tensor_info_r},
      method_name=signature_constants.PREDICT_METHOD_NAME))

  # The same signature is registered under an explicit key and the default
  # serving key.
  sm_builder.add_meta_graph_and_variables(
      sess, [tag_constants.SERVING],
      signature_def_map={
          'serving_default': func_signature,
          signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
              func_signature,
      },
      strip_default_attrs=True)
  sm_builder.save()
def freeze_pb_graph(self, folder, i_tensors, o_tensors):
    """Writes the current session's graph as a serving SavedModel in `folder`.

    Args:
        folder: Destination directory (wiped first if it already exists).
        i_tensors: Iterable of (name, tensor) pairs for signature inputs.
        o_tensors: Iterable of (name, tensor) pairs for signature outputs.
    """
    # SavedModelBuilder requires a non-existent target directory.
    if os.path.isdir(folder):
        shutil.rmtree(folder)

    def _tensor_info_map(pairs):
        # Each entry is (name, tensor); build the TensorInfo proto per tensor.
        return {
            pair[0]: tf.saved_model.build_tensor_info(pair[1])
            for pair in pairs
        }

    signature = build_signature_def(
        inputs=_tensor_info_map(i_tensors),
        outputs=_tensor_info_map(o_tensors),
        method_name=signature_constants.PREDICT_METHOD_NAME)

    model_builder = tf.saved_model.builder.SavedModelBuilder(folder)
    model_builder.add_meta_graph_and_variables(
        self.session, [tag_constants.SERVING],
        signature_def_map={
            signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature
        })
    model_builder.save()
def export(self, last_checkpoint, output_dir):
  """Builds a prediction graph and exports the model.

  Args:
    last_checkpoint: The latest checkpoint from training.
    output_dir: Path to the folder to be used to output the model.
  """
  logging.info('Exporting prediction graph to %s', output_dir)
  with tf.Session(graph=tf.Graph()) as sess:
    # Build and save prediction meta graph and trained variable values.
    input_signatures, output_signatures = self.build_prediction_graph()
    # Remove this if once Tensorflow 0.12 is standard.
    try:
      init_op = tf.global_variables_initializer()
    except AttributeError:
      # Older TF releases only have the deprecated name.
      init_op = tf.initialize_all_variables()
    sess.run(init_op)
    # Overwrite the freshly initialized values with the trained weights.
    trained_saver = tf.train.Saver()
    trained_saver.restore(sess, last_checkpoint)

    predict_signature_def = signature_def_utils.build_signature_def(
        input_signatures, output_signatures,
        signature_constants.PREDICT_METHOD_NAME)
    # Create a saver for writing SavedModel training checkpoints.
    build = builder.SavedModelBuilder(
        os.path.join(output_dir, 'saved_model'))
    build.add_meta_graph_and_variables(
        sess, [tag_constants.SERVING],
        signature_def_map={
            signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                predict_signature_def
        },
        assets_collection=tf.get_collection(
            tf.GraphKeys.ASSET_FILEPATHS))
    build.save()
def export_model(self, model_dir, global_step_val, last_checkpoint):
    """Exports the model so that it can used for batch predictions."""
    # NOTE: global_step_val is currently unused in this method.
    with self.graph.as_default():
        with tf.Session() as session:
            session.run(tf.global_variables_initializer())
            # Overwrite the fresh initialization with the trained weights.
            self.saver.restore(session, last_checkpoint)

            # NOTE(review): self.inputs / self.outputs are passed straight to
            # build_signature_def, so they are presumably name -> TensorInfo
            # dicts — confirm upstream.
            signature = signature_def_utils.build_signature_def(
                inputs=self.inputs,
                outputs=self.outputs,
                method_name=signature_constants.PREDICT_METHOD_NAME)

            signature_map = {
                signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                    signature
            }

            model_builder = saved_model_builder.SavedModelBuilder(model_dir)
            model_builder.add_meta_graph_and_variables(
                session,
                tags=[tag_constants.SERVING],
                signature_def_map=signature_map,
                clear_devices=True)
            model_builder.save()
def export_model(session, m):
    """Exports the graph in `session` as a SavedModel under ./pb_model.

    Only this signature section needs editing to define the inputs and
    outputs; the rest can stay at the defaults.
    """
    model_signature = signature_def_utils.build_signature_def(
        inputs={"input": utils.build_tensor_info(m.p)},
        outputs={"output": utils.build_tensor_info(m.y)},
        method_name=signature_constants.PREDICT_METHOD_NAME)

    export_path = "pb_model"
    # if os.path.exists(export_path):
    #     os.system("rm -rf "+ export_path)
    print("Export the model to {}".format(export_path))

    try:
        legacy_init_op = tf.group(tf.tables_initializer(),
                                  name='legacy_init_op')
        builder = saved_model_builder.SavedModelBuilder(export_path)
        builder.add_meta_graph_and_variables(
            session, [tag_constants.SERVING],
            clear_devices=True,
            signature_def_map={
                signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                    model_signature,
            },
            legacy_init_op=legacy_init_op)
        builder.save()
    except Exception as e:
        # Best-effort export: failures are reported, not raised.
        print("Fail to export saved model, exception: {}".format(e))
def main(argv):
  """Builds a tiny `sparse_read` graph and exports it as a SavedModel.

  Args:
    argv: Command-line arguments; no positional arguments are accepted.

  Raises:
    app.UsageError: If extra command-line arguments are supplied.
  """
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')

  # `ignore_errors` keeps this from crashing on a first run, when the
  # output directory does not exist yet.
  shutil.rmtree(FLAGS.saved_model_path, ignore_errors=True)

  # Create the graph
  x = array_ops.placeholder(dtypes.int32, shape=None, name='input')
  y = variables.Variable(
      name='y', dtype=dtypes.int32, initial_value=[[1, 2], [3, 4]])
  r = y.sparse_read(x, name='result')

  # Use a context manager so the session is always closed.
  with session.Session() as sess:
    sess.run(variables.global_variables_initializer())

    sm_builder = builder.SavedModelBuilder(FLAGS.saved_model_path)
    tensor_info_x = utils.build_tensor_info(x)
    tensor_info_r = utils.build_tensor_info(r)

    toy_signature = (
        signature_def_utils.build_signature_def(
            inputs={'x': tensor_info_x},
            outputs={'r': tensor_info_r},
            method_name=signature_constants.PREDICT_METHOD_NAME))

    sm_builder.add_meta_graph_and_variables(
        sess, [tag_constants.SERVING],
        signature_def_map={
            signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                toy_signature,
        },
        strip_default_attrs=True)
    sm_builder.save()
def build_signature(inputs, outputs):
  """Build the signature.

  Not using predic_signature_def in saved_model because it is replacing the
  tensor name, b/35900497.

  Args:
    inputs: a dictionary of tensor name to tensor
    outputs: a dictionary of tensor name to tensor

  Returns:
    The signature, a SignatureDef proto.
  """
  to_info = saved_model_utils.build_tensor_info
  signature_inputs = {name: to_info(t) for name, t in inputs.items()}
  signature_outputs = {name: to_info(t) for name, t in outputs.items()}
  return signature_def_utils.build_signature_def(
      signature_inputs, signature_outputs,
      signature_constants.PREDICT_METHOD_NAME)
def _SaveModel(self, model_dir, output_dir):
  """Builds the test graph, loads trained weights, and writes a SavedModel.

  Args:
    model_dir: Directory containing the trained weights to load.
    output_dir: Directory the SavedModel is written to.
  """
  saved_model_builder = builder.SavedModelBuilder(output_dir)
  graph = ops.Graph()
  with session.Session(graph=graph) as sess:
    with graph.device('/GPU:0'):
      x = array_ops.placeholder(
          shape=(None, 28, 28, 1), dtype=dtypes.float32,
          name=INPUT_NODE_NAME)
      self._BuildGraph(x)
      self._LoadWeights(model_dir, sess)
    input_tensor = graph.get_tensor_by_name(INPUT_NODE_NAME + ':0')
    output = graph.get_tensor_by_name(OUTPUT_NODE_NAME + ':0')
    # NOTE(review): DEFAULT_SERVING_SIGNATURE_DEF_KEY ("serving_default") is
    # used here as the *method_name*, which is normally one of the
    # tensorflow/serving/{predict,classify,regress} constants — confirm
    # this is intentional.
    signature_def = signature_def_utils.build_signature_def(
        inputs={
            'input': saved_model_utils.build_tensor_info(input_tensor)
        },
        outputs={
            'output': saved_model_utils.build_tensor_info(output)
        },
        method_name=signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)
    saved_model_builder.add_meta_graph_and_variables(
        sess, [tag_constants.SERVING],
        signature_def_map={
            signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                signature_def
        })
    saved_model_builder.save()
def _MakeSavedModelV1(self, run_params):
  """Write the saved model as an input for testing.

  Args:
    run_params: Test run parameters; provides the session config and the
      directory for the exported model.

  Returns:
    The directory the SavedModel was written to.
  """
  params = self._GetParamsCached()
  g = ops.Graph()
  with g.as_default():
    # Recreate the test graph from its declared input specs.
    inputs = []
    for spec in params.input_specs:
      inp = array_ops.placeholder(
          dtype=spec.dtype, shape=spec.shape, name=spec.name)
      inputs.append(inp)
    outputs = params.graph_fn(*inputs)
    # graph_fn may return a single tensor; normalize to a sequence.
    if not isinstance(outputs, list) and not isinstance(outputs, tuple):
      outputs = [outputs]
  # Key each tensor by its op name so the signature names are predictable.
  signature_def = signature_def_utils.build_signature_def(
      inputs={inp.op.name: utils.build_tensor_info(inp) for inp in inputs},
      outputs={out.op.name: utils.build_tensor_info(out) for out in outputs},
      method_name=signature_constants.PREDICT_METHOD_NAME)

  saved_model_dir = self._GetSavedModelDir(run_params, GraphState.ORIGINAL)
  saved_model_builder = builder.SavedModelBuilder(saved_model_dir)
  with self.session(
      graph=g, config=self._GetConfigProto(run_params,
                                           GraphState.ORIGINAL)) as sess:
    saved_model_builder.add_meta_graph_and_variables(
        sess, [tag_constants.SERVING],
        signature_def_map={
            signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                signature_def
        })
    saved_model_builder.save()
  return saved_model_dir
def _v1_multi_input_saved_model(self):
  """Exports a TF1 SavedModel whose serving signature takes two inputs."""
  export_graph = ops.Graph()
  with export_graph.as_default():
    input1 = array_ops.placeholder(
        shape=[None], dtype=dtypes.float32, name="input1")
    input2 = array_ops.placeholder(
        shape=[None], dtype=dtypes.float32, name="input2")
    v = resource_variable_ops.ResourceVariable(21.)
    output = array_ops.identity(input1 * v + input2, name="output")
    with session_lib.Session() as session:
      session.run(v.initializer)
      path = os.path.join(
          self.get_temp_dir(), "saved_model", str(ops.uid()))
      serving_signature = signature_def_utils.build_signature_def(
          {
              "input1": utils_impl.build_tensor_info(input1),
              "input2": utils_impl.build_tensor_info(input2)
          },
          {"output": utils_impl.build_tensor_info(output)})
      model_builder = builder_impl.SavedModelBuilder(path)
      model_builder.add_meta_graph_and_variables(
          session,
          tags=[tag_constants.SERVING],
          signature_def_map={"serving_default": serving_signature})
      model_builder.save()
  return path
def main():
  """Rebuilds the generator graph, restores the latest checkpoint, and
  exports a serving SavedModel with a prediction signature."""
  if not os.path.exists(CHECK_POINT_DIR):
    print("Wrong path to checkpoint:", CHECK_POINT_DIR)
    exit()

  # Serving-time inputs: a single-image batch and a dropout rate.
  final_input = tf.placeholder(
      tf.uint8, shape=[1, CROP_SIZE, CROP_SIZE, 3], name="final_input")
  final_dropout = tf.placeholder(tf.float32, name='final_dropout')

  with tf.variable_scope("generator") as scope:
    inputs = preprocess(
        tf.image.convert_image_dtype(final_input, dtype=tf.float32))
    outputs = create_generator(
        inputs,
        generator_outputs_channels=NUM_OF_CLASSESS,
        dropout_rate=final_dropout)

  # Per-pixel class prediction over the generator's channel axis.
  final_output = tf.argmax(outputs, axis=-1, name="final_output")
  init_op = tf.global_variables_initializer()
  restore_saver = tf.train.Saver()
  # NOTE(review): export_saver is created but never used below — confirm
  # whether it can be removed.
  export_saver = tf.train.Saver()

  with tf.Session() as sess:
    sess.run(init_op)
    print("loading model from checkpoint")
    checkpoint = tf.train.latest_checkpoint(CHECK_POINT_DIR)
    print("latest checkpoint:", checkpoint)
    restore_saver.restore(sess, checkpoint)

    print("exporting model")
    tensor_info_x = utils.build_tensor_info(final_input)
    tensor_info_dropout = utils.build_tensor_info(final_dropout)
    tensor_info_y = utils.build_tensor_info(final_output)
    prediction_signature = signature_def_utils.build_signature_def(
        inputs={
            'images': tensor_info_x,
            'dropout': tensor_info_dropout
        },
        outputs={'predictions': tensor_info_y},
        method_name=signature_constants.PREDICT_METHOD_NAME)

    # Runs table initializers when the model is loaded for serving.
    legacy_init_op = tf.group(tf.tables_initializer(),
                              name='legacy_init_op')
    exports_dir = os.path.abspath(
        os.path.join(CHECK_POINT_DIR, "exports_gen"))
    builder = saved_model_builder.SavedModelBuilder(exports_dir)
    builder.add_meta_graph_and_variables(
        sess, [tag_constants.SERVING],
        signature_def_map={
            'predict_images': prediction_signature,
        },
        legacy_init_op=legacy_init_op)
    builder.save()
def main(_):
  """Trains a softmax regression model on MNIST and exports a SavedModel."""
  # Import data
  mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)

  # Create the model
  x = tf.placeholder(tf.float32, [None, 784], name='x')
  W = tf.Variable(tf.zeros([784, 10]))
  b = tf.Variable(tf.zeros([10]))
  y = tf.matmul(x, W) + b
  prediction = tf.nn.softmax(y, name='prediction')

  # Ground truth labels
  y_ = tf.placeholder(tf.float32, [None, 10])

  # Define loss and optimizer
  #
  # The raw formulation of cross-entropy,
  #
  #   tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),
  #                                 reduction_indices=[1]))
  #
  # can be numerically unstable.
  #
  # So here we use tf.nn.softmax_cross_entropy_with_logits on the raw
  # outputs of 'y', and then average across the batch.
  cross_entropy = tf.reduce_mean(
      tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
  train_step = tf.train.GradientDescentOptimizer(0.5).minimize(
      cross_entropy, name='train')

  sess = tf.InteractiveSession()
  # Fix: `builder` was referenced below but its construction had been
  # commented out, raising NameError before the model could be saved.
  builder = SavedModelBuilder('saved_model')
  tf.global_variables_initializer().run()

  # Train
  for _ in range(2000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

  # Test trained model
  correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
  accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  print(
      sess.run(accuracy,
               feed_dict={
                   x: mnist.test.images,
                   y_: mnist.test.labels
               }))

  signature_inputs = {"x": build_tensor_info(x), "y": build_tensor_info(y_)}
  signature_outputs = {"prediction": build_tensor_info(prediction)}
  signature_def = build_signature_def(signature_inputs, signature_outputs,
                                      REGRESS_METHOD_NAME)
  builder.add_meta_graph_and_variables(
      sess, [TRAINING, SERVING],
      signature_def_map={REGRESS_METHOD_NAME: signature_def},
      assets_collection=tf.get_collection(tf.GraphKeys.ASSET_FILEPATHS))
  builder.save(as_text=False)
def _generate_signatures(signature_functions, resource_map):
  """Validates and calls `signature_functions` in the default graph.

  Args:
    signature_functions: A dictionary mapping string keys to concrete
      TensorFlow functions (e.g. from `_canonicalize_signatures`) which will
      be used to generate SignatureDefs.
    resource_map: A dictionary mapping from resource tensors in the eager
      context to resource tensors in the Graph being exported. This dictionary
      is used to re-bind resources captured by functions to tensors which will
      exist in the SavedModel.

  Returns:
    Each function in the `signature_functions` dictionary is called with
    placeholder Tensors, generating a function call operation and output
    Tensors. The placeholder Tensors, the function call operation, and the
    output Tensors from the function call are part of the default Graph.

    This function then returns a dictionary with the same structure as
    `signature_functions`, with the concrete functions replaced by
    SignatureDefs implicitly containing information about how to call each
    function from a TensorFlow 1.x Session / the C++ Loader API. These
    SignatureDefs reference the generated placeholders and Tensor outputs by
    name.

    The caller is expected to include the default Graph set while calling this
    function as a MetaGraph in a SavedModel, including the returned
    SignatureDefs as part of that MetaGraph.
  """
  signatures = {}
  for signature_key, func in sorted(signature_functions.items()):
    # Register the inference function for this signature in the exported
    # graph. There is no direct use for the gradient of this function, so we
    # don't generate/register a gradient function here (but may end up with
    # one if another function relies on it). Users can still take symbolic
    # gradients of the function on import, the gradient just won't be in the
    # saved graph. When exporting a signature which already computes
    # gradients, this stops us from taking needless second-order gradients.
    func.add_to_graph(register_gradient_functions=False)
    # Re-point each captured eager resource at its exported-graph twin.
    export_captures = _map_captured_resources_to_created_resources(
        func.graph.captures, resource_map)
    mapped_inputs, exterior_argument_placeholders = (
        _map_function_inputs_to_created_inputs(func.inputs, export_captures,
                                               signature_key, func.name))
    # Calls the function quite directly, since we have new captured resource
    # tensors we need to feed in which weren't part of the original function
    # definition.
    # pylint: disable=protected-access
    outputs = _normalize_outputs(
        func._build_call_outputs(
            func._inference_function.call(context.context(), mapped_inputs)),
        func.name, signature_key)
    # pylint: enable=protected-access
    signatures[signature_key] = signature_def_utils.build_signature_def(
        _tensor_dict_to_tensorinfo(exterior_argument_placeholders),
        _tensor_dict_to_tensorinfo(outputs))
  return signatures
def _validate_outputs_tensor_info_accept(self, builder, tensor_info):
  """Checks that a signature whose outputs use `tensor_info` is accepted."""
  with self.test_session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 42)
    outputs = {"foo_outputs": tensor_info}
    foo_signature = signature_def_utils.build_signature_def(
        {}, outputs, "foo")
    builder.add_meta_graph_and_variables(
        sess, ["foo"],
        signature_def_map={"foo_key": foo_signature})
def _generate_signatures(signature_functions, resource_map):
  """Validates and calls `signature_functions` in the default graph.

  Args:
    signature_functions: A dictionary mapping string keys to concrete
      TensorFlow functions (e.g. from `_canonicalize_signatures`) which will
      be used to generate SignatureDefs.
    resource_map: A dictionary mapping from resource tensors in the eager
      context to resource tensors in the Graph being exported. This dictionary
      is used to re-bind resources captured by functions to tensors which will
      exist in the SavedModel.

  Returns:
    Each function in the `signature_functions` dictionary is called with
    placeholder Tensors, generating a function call operation and output
    Tensors. The placeholder Tensors, the function call operation, and the
    output Tensors from the function call are part of the default Graph.

    This function then returns a dictionary with the same structure as
    `signature_functions`, with the concrete functions replaced by
    SignatureDefs implicitly containing information about how to call each
    function from a TensorFlow 1.x Session / the C++ Loader API. These
    SignatureDefs reference the generated placeholders and Tensor outputs by
    name.

    The caller is expected to include the default Graph set while calling this
    function as a MetaGraph in a SavedModel, including the returned
    SignatureDefs as part of that MetaGraph.
  """
  signatures = {}
  for signature_key, func in sorted(signature_functions.items()):
    # Register the inference function for this signature in the exported
    # graph. There is no direct use for the gradient of this function, so we
    # don't generate/register a gradient function here (but may end up with
    # one if another function relies on it). Users can still take symbolic
    # gradients of the function on import, the gradient just won't be in the
    # saved graph. When exporting a signature which already computes
    # gradients, this stops us from taking needless second-order gradients.
    func.add_to_graph(register_gradient_functions=False)
    # Re-point each captured eager resource at its exported-graph twin.
    export_captures = _map_captured_resources_to_created_resources(
        func.graph.captures, resource_map)
    mapped_inputs, exterior_argument_placeholders = (
        _map_function_inputs_to_created_inputs(
            func.inputs, export_captures, signature_key, func.name))
    # Calls the function quite directly, since we have new captured resource
    # tensors we need to feed in which weren't part of the original function
    # definition.
    # pylint: disable=protected-access
    outputs = _normalize_outputs(
        func._build_call_outputs(
            func._inference_function.call(context.context(), mapped_inputs)),
        func.name, signature_key)
    # pylint: enable=protected-access
    signatures[signature_key] = signature_def_utils.build_signature_def(
        _tensor_dict_to_tensorinfo(exterior_argument_placeholders),
        _tensor_dict_to_tensorinfo(outputs))
  return signatures
def export(model_version, model_dir, sess, x, y_op):
  """Exports a trained model as a SavedModel for TensorFlow Serving.

  SavedModel is a cross-language format for saving and restoring trained
  TensorFlow models. Method signatures define the graph's inputs and outputs
  so that higher-level systems can generate, call, or transform the model.
  SavedModelBuilder saves graphs, variables, and assets; each saved graph
  must be tagged with its intended use — here tag_constants.SERVING, since
  the model is exported for serving rather than training.

  A predict signature built with signature_def_utils.build_signature_def()
  needs at least:
  * inputs = {'x': tensor_info_x} — the input tensor info
  * outputs = {'y': tensor_info_y} — the output tensor info
  * method_name — one of tensorflow/serving/{predict,classify,regress}

  Args:
    model_version: Positive integer version number for the export directory.
    model_dir: Base directory the versioned model is written under.
    sess: Session holding the trained variables to export.
    x: Input tensor of the serving signature.
    y_op: Output tensor of the serving signature.
  """
  if model_version <= 0:
    logging.warning('Please specify a positive value for version number.')
    sys.exit()

  # Create the parent directory if it does not exist yet.
  path = os.path.dirname(os.path.abspath(model_dir))
  if not os.path.isdir(path):
    logging.warning('Path (%s) not exists, making directories...', path)
    os.makedirs(path)

  export_path = os.path.join(
      compat.as_bytes(model_dir), compat.as_bytes(str(model_version)))
  # Re-exporting the same version requires a clean directory.
  if os.path.isdir(export_path):
    logging.warning('Path (%s) exists, removing directories...', export_path)
    shutil.rmtree(export_path)

  builder = saved_model_builder.SavedModelBuilder(export_path)
  tensor_info_x = utils.build_tensor_info(x)
  tensor_info_y = utils.build_tensor_info(y_op)
  # Omitting method_name makes Serving reject the signature with:
  # "Expected prediction signature method_name to be one of
  # {tensorflow/serving/predict, tensorflow/serving/classify,
  # tensorflow/serving/regress}. Was: "
  prediction_signature = signature_def_utils.build_signature_def(
      inputs={'x': tensor_info_x},
      outputs={'y': tensor_info_y},
      method_name=signature_constants.PREDICT_METHOD_NAME)

  builder.add_meta_graph_and_variables(
      sess,
      # The "serve" tag is required; a train-only MetaGraph makes Serving
      # fail with "Could not find meta graph def matching supplied tags."
      [tag_constants.SERVING],
      signature_def_map={
          'predict_text': prediction_signature,
          # The default serving signature key is required; without it Serving
          # fails with "Default serving signature key not found."
          signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
              prediction_signature
      })
  builder.save()
def _validate_inputs_tensor_info_fail(self, builder, tensor_info):
  """Checks that a signature whose inputs use `tensor_info` is rejected."""
  with self.test_session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 42)
    bad_signature = signature_def_utils.build_signature_def(
        {"foo_inputs": tensor_info}, {}, "foo")
    # Adding a meta graph with an invalid input tensor_info must fail.
    self.assertRaises(
        AssertionError,
        builder.add_meta_graph_and_variables,
        sess, ["foo"],
        signature_def_map={"foo_key": bad_signature})
def _WriteInputSavedModel(self, input_saved_model_dir):
  """Write the saved model as an input for testing."""
  g, var, inp, out = self._GetGraph()
  predict_signature = signature_def_utils.build_signature_def(
      inputs={"myinput": utils.build_tensor_info(inp)},
      outputs={"myoutput": utils.build_tensor_info(out)},
      method_name=signature_constants.PREDICT_METHOD_NAME)
  model_builder = builder.SavedModelBuilder(input_saved_model_dir)
  with self.session(graph=g, config=self._GetConfigProto()) as sess:
    sess.run(var.initializer)
    model_builder.add_meta_graph_and_variables(
        sess, [tag_constants.SERVING],
        signature_def_map={"mypredict": predict_signature})
    model_builder.save()
def _signature_with_no_inputs(self):
  """Exports a SavedModel whose signature has outputs but no inputs."""
  export_graph = ops.Graph()
  with export_graph.as_default():
    array_ops.placeholder(name="x", shape=[], dtype=dtypes.float32)
    output = random_ops.random_normal([2])
    with session_lib.Session() as session:
      path = os.path.join(
          self.get_temp_dir(), "saved_model", str(ops.uid()))
      no_input_signature = signature_def_utils.build_signature_def(
          {}, {"value": utils_impl.build_tensor_info(output)})
      model_builder = builder_impl.SavedModelBuilder(path)
      model_builder.add_meta_graph_and_variables(
          session,
          tags=[tag_constants.SERVING],
          signature_def_map={"key": no_input_signature})
      model_builder.save()
  return path
def _generate_signatures(signature_functions, resource_map):
  """Validates and calls `signature_functions` in the default graph.

  Args:
    signature_functions: A dictionary mapping string keys to concrete
      TensorFlow functions (e.g. from `_canonicalize_signatures`) which will
      be used to generate SignatureDefs.
    resource_map: A dictionary mapping from resource tensors in the eager
      context to resource tensors in the Graph being exported. This dictionary
      is used to re-bind resources captured by functions to tensors which will
      exist in the SavedModel.

  Returns:
    Each function in the `signature_functions` dictionary is called with
    placeholder Tensors, generating a function call operation and output
    Tensors. The placeholder Tensors, the function call operation, and the
    output Tensors from the function call are part of the default Graph.

    This function then returns a dictionary with the same structure as
    `signature_functions`, with the concrete functions replaced by
    SignatureDefs implicitly containing information about how to call each
    function from a TensorFlow 1.x Session / the C++ Loader API. These
    SignatureDefs reference the generated placeholders and Tensor outputs by
    name.

    The caller is expected to include the default Graph set while calling this
    function as a MetaGraph in a SavedModel, including the returned
    SignatureDefs as part of that MetaGraph.
  """
  signatures = {}
  for signature_key, function in sorted(signature_functions.items()):
    # Captured inputs come after user arguments in `graph.inputs`; strip
    # them so only user-visible arguments get exterior placeholders.
    if function.graph.captures:
      argument_inputs = function.graph.inputs[:-len(function.graph.captures)]
    else:
      argument_inputs = function.graph.inputs
    mapped_inputs, exterior_argument_placeholders = (
        _map_function_arguments_to_created_inputs(
            argument_inputs, signature_key, function.name))
    outputs = _normalize_outputs(
        _call_function_with_mapped_captures(
            function, mapped_inputs, resource_map),
        function.name, signature_key)
    signatures[signature_key] = signature_def_utils.build_signature_def(
        _tensor_dict_to_tensorinfo(exterior_argument_placeholders),
        _tensor_dict_to_tensorinfo(outputs),
        method_name=signature_constants.PREDICT_METHOD_NAME)
  return signatures
def test_load_saved_model_with_no_variables(self, builder_cls):
  """Test that SavedModel runs saver when there appear to be no variables.

  When no variables are detected, this may mean that the variables were saved
  to different collections, or the collections weren't saved to the
  SavedModel. If the SavedModel MetaGraphDef contains a saver, it should still
  run in either of these cases.

  Args:
    builder_cls: SavedModelBuilder or _SavedModelBuilder class
  """
  # NOTE(review): builder_cls is accepted but saved_model_builder
  # .SavedModelBuilder is used directly below — confirm whether builder_cls
  # should be instantiated instead.
  path = _get_export_dir("no_variable_saved_model")
  with session.Session(graph=ops.Graph()) as sess:
    # Keep x and y out of GLOBAL_VARIABLES so the model appears
    # variable-free to the exporter.
    x = variables.VariableV1(
        5, name="x", collections=["not_global_variable"])
    y = variables.VariableV1(
        11, name="y", collections=["not_global_variable"])
    self.assertFalse(variables._all_saveable_objects())
    z = x + y
    self.evaluate(variables.variables_initializer([x, y]))

    foo_sig_def = signature_def_utils.build_signature_def(
        {"foo_input": utils.build_tensor_info(x)},
        {"foo_output": utils.build_tensor_info(z)})

    # Pass an explicit saver so the MetaGraphDef still contains a SaverDef.
    builder = saved_model_builder.SavedModelBuilder(path)
    builder.add_meta_graph_and_variables(
        sess, ["foo_graph"], {"foo": foo_sig_def},
        saver=tf_saver.Saver([x, y]))
    builder.save()

  loader = loader_impl.SavedModelLoader(path)
  with self.session(graph=ops.Graph()) as sess:
    saver, _ = loader.load_graph(sess.graph, ["foo_graph"])
    self.assertFalse(variables._all_saveable_objects())
    self.assertIsNotNone(saver)

  with self.session(graph=ops.Graph()) as sess:
    loader.load(sess, ["foo_graph"])
    self.assertEqual(5, sess.graph.get_tensor_by_name("x:0").eval())
    self.assertEqual(11, sess.graph.get_tensor_by_name("y:0").eval())
def _v1_output_shape_saved_model(self):
  """Exports a SavedModel whose output carries an informational static shape."""
  export_graph = ops.Graph()
  with export_graph.as_default():
    start = array_ops.placeholder(
        shape=[None], dtype=dtypes.float32, name="start")
    output = array_ops.identity(start, name="output")
    # Ok to use [1] because shape is only informational
    output.set_shape([1])
    with session_lib.Session() as session:
      path = os.path.join(
          self.get_temp_dir(), "saved_model", str(ops.uid()))
      serving_signature = signature_def_utils.build_signature_def(
          {"start": utils_impl.build_tensor_info(start)},
          {"output": utils_impl.build_tensor_info(output)})
      model_builder = builder_impl.SavedModelBuilder(path)
      model_builder.add_meta_graph_and_variables(
          session,
          tags=[tag_constants.SERVING],
          signature_def_map={"serving_default": serving_signature})
      model_builder.save()
  return path
def main():
  """Exports a pass-through SavedModel that also stores marshalled
  pre/post-processing function bytecode in graph collections."""
  # test_preprocess()
  # test_postprocess()

  # Store the processing functions' bytecode so a serving binary can
  # unmarshal and call them. `__code__` replaces the Python-2-only
  # `func_code` attribute and works on both Python 2.6+ and Python 3.
  preprocess_function_string = marshal.dumps(preprocess.__code__)
  tf.add_to_collection("preprocess_function", preprocess_function_string)
  postprocess_function_string = marshal.dumps(postprocess.__code__)
  tf.add_to_collection("postprocess_function", postprocess_function_string)

  model_path = "preprocess_model"
  model_version = 1
  keys_placeholder = tf.placeholder(tf.int32, shape=[None], name="keys")
  keys_identity = tf.identity(keys_placeholder, name="inference_keys")

  sess = tf.Session()
  sess.run(tf.global_variables_initializer())

  model_signature = signature_def_utils.build_signature_def(
      inputs={
          "keys": utils.build_tensor_info(keys_placeholder),
      },
      outputs={
          "keys": utils.build_tensor_info(keys_identity),
      },
      method_name=signature_constants.PREDICT_METHOD_NAME)

  export_path = os.path.join(
      compat.as_bytes(model_path), compat.as_bytes(str(model_version)))
  builder = saved_model_builder.SavedModelBuilder(export_path)
  builder.add_meta_graph_and_variables(
      sess,
      [tag_constants.SERVING],
      clear_devices=True,
      signature_def_map={
          signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
              model_signature,
      })
  builder.save()
def export_model(self, model_dir, global_step_val, last_checkpoint):
  """Exports the model so that it can used for batch predictions."""
  with self.graph.as_default(), tf.Session() as session:
    session.run(tf.global_variables_initializer())
    self.saver.restore(session, last_checkpoint)

    # Serving signature: model-defined input/output tensor maps.
    serving_signature = signature_def_utils.build_signature_def(
        inputs=self.inputs,
        outputs=self.outputs,
        method_name=signature_constants.PREDICT_METHOD_NAME)
    exporter = saved_model_builder.SavedModelBuilder(model_dir)
    exporter.add_meta_graph_and_variables(
        session,
        tags=[tag_constants.SERVING],
        signature_def_map={
            signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                serving_signature
        },
        clear_devices=True)
    exporter.save()
def export(model_version, model_dir, sess, inputs, y_op):
  """Exports a model for TensorFlow Serving via the SavedModel API
  (recommended).

  The prediction signature requires inputs, outputs, and method_name;
  omitting method_name fails with "Expected prediction signature method_name
  to be one of {tensorflow/serving/predict, tensorflow/serving/classify,
  tensorflow/serving/regress}. Was: ". Each SavedModel is associated with an
  independent checkpoint, and every MetaGraph is tagged "serve" and/or
  "train" to indicate how it is loaded; serving requires the "serve" tag,
  otherwise loading fails with "Could not find meta graph def matching
  supplied tags.". The signature_def_map must include the default serving
  signature key, or loading fails with "Default serving signature key not
  found.".

  Args:
    model_version: Positive integer version number for the export directory.
    model_dir: Base directory the versioned model is written under.
    sess: Session holding the trained variables to export.
    inputs: Input tensor of the serving signature.
    y_op: Output tensor of the serving signature.
  """
  if model_version <= 0:
    print('Please specify a positive value for version number.')
    sys.exit()

  # Create the parent directory if it does not exist yet.
  path = os.path.dirname(os.path.abspath(model_dir))
  if not os.path.isdir(path):
    logging.warning('Path (%s) not exists, making directories...', path)
    os.makedirs(path)

  export_path = os.path.join(
      compat.as_bytes(model_dir), compat.as_bytes(str(model_version)))
  # Re-exporting the same version requires a clean directory.
  if os.path.isdir(export_path):
    logging.warning('Path (%s) exists, removing directories...', export_path)
    shutil.rmtree(export_path)

  builder = saved_model_builder.SavedModelBuilder(export_path)
  tensor_info_x = utils.build_tensor_info(inputs)
  tensor_info_y = utils.build_tensor_info(y_op)
  prediction_signature = signature_def_utils.build_signature_def(
      inputs={'x': tensor_info_x},
      outputs={'y': tensor_info_y},
      method_name=signature_constants.PREDICT_METHOD_NAME)

  builder.add_meta_graph_and_variables(
      sess, [tag_constants.SERVING],
      signature_def_map={
          'predict_text': prediction_signature,
          signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
              prediction_signature
      })
  builder.save()
def build_signature(inputs, outputs):
  """Build the signature.

  Not using predic_signature_def in saved_model because it is replacing the
  tensor name, b/35900497.

  Args:
    inputs: a dictionary of tensor name to tensor
    outputs: a dictionary of tensor name to tensor

  Returns:
    The signature, a SignatureDef proto.
  """
  def _to_info_map(tensor_map):
    # Wrap each raw tensor in a TensorInfo proto, preserving its key.
    return {key: saved_model_utils.build_tensor_info(tensor)
            for key, tensor in tensor_map.items()}

  return signature_def_utils.build_signature_def(
      _to_info_map(inputs), _to_info_map(outputs),
      signature_constants.PREDICT_METHOD_NAME)
def _generate_signatures(signature_functions):
  """Validates and calls `signature_functions` in the default graph.

  Args:
    signature_functions: A dictionary mapping string keys to concrete
      TensorFlow functions (e.g. from `_canonicalize_signatures`) which will
      be used to generate SignatureDefs.

  Returns:
    Each function in the `signature_functions` dictionary is called with
    placeholder Tensors, generating a function call operation and output
    Tensors. The placeholder Tensors, the function call operation, and the
    output Tensors from the function call are part of the default Graph.

    This function then returns a dictionary with the same structure as
    `signature_functions`, with the concrete functions replaced by
    SignatureDefs implicitly containing information about how to call each
    function from a TensorFlow 1.x Session / the C++ Loader API. These
    SignatureDefs reference the generated placeholders and Tensor outputs by
    name.

    The caller is expected to include the default Graph set while calling this
    function as a MetaGraph in a SavedModel, including the returned
    SignatureDefs as part of that MetaGraph.
  """
  signatures = {}
  for signature_key, func in sorted(signature_functions.items()):
    # Ensure the concrete function is registered in the exported graph.
    function.register_concrete(func)
    # `exterior_placeholders` holds placeholders which are outside the
    # function body, directly contained in a MetaGraph of the SavedModel. The
    # function body itself contains nearly identical placeholders used when
    # running the function, but these exterior placeholders allow
    # Session-based APIs to call the function using feeds and fetches which
    # name Tensors in the MetaGraph.
    exterior_placeholders = {}
    args = []
    for placeholder in func.inputs:
      user_input_name = compat.as_str_any(
          placeholder.op.get_attr("_user_specified_name"))
      # If the internal placeholders for a function have names which were
      # uniquified by TensorFlow, then a single user-specified argument name
      # must refer to multiple Tensors. The resulting signatures would be
      # confusing to call. Instead, we throw an exception telling the user to
      # specify explicit names.
      if user_input_name != placeholder.op.name:
        raise ValueError(
            ("Got non-flat/non-unique argument names for SavedModel "
             "signature '{}': more than one argument to '{}' was named '{}'. "
             "Signatures have one Tensor per named input, so to have "
             "predictable names Python functions used to generate these "
             "signatures should avoid *args and Tensors in nested "
             "structures unless unique names are specified for each. Use "
             "tf.TensorSpec(..., name=...) to provide a name for a Tensor "
             "input.")
            .format(signature_key, compat.as_str_any(func.name),
                    user_input_name))
      arg_placeholder = array_ops.placeholder(
          shape=placeholder.shape,
          dtype=placeholder.dtype,
          name="{}_{}".format(signature_key, user_input_name))
      exterior_placeholders[user_input_name] = arg_placeholder
      args.append(arg_placeholder)
    outputs = _normalize_outputs(
        func(*args), func.name, signature_key)
    signatures[signature_key] = signature_def_utils.build_signature_def(
        _tensor_dict_to_tensorinfo(exterior_placeholders),
        _tensor_dict_to_tensorinfo(outputs))
  return signatures
def _generate_saved_model_for_half_plus_two(export_dir, as_text=False):
  """Generates SavedModel for half plus two (y = a*x + b with a=0.5, b=2).

  The exported model parses serialized tf.Example protos for a float feature
  "x", exercises variable loading, an assets file, and a legacy init op, and
  provides both a regression and a prediction signature.

  Args:
    export_dir: The directory to which the SavedModel should be written.
    as_text: Writes the SavedModel protocol buffer in text format to disk.
  """
  builder = saved_model_builder.SavedModelBuilder(export_dir)
  with tf.Session(graph=tf.Graph()) as sess:
    # Set up the model parameters as variables to exercise variable loading
    # functionality upon restore.
    a = tf.Variable(0.5, name="a")
    b = tf.Variable(2.0, name="b")

    # Create a placeholder for serialized tensorflow.Example messages to be
    # fed, then parse a single float feature "x" out of each Example.
    serialized_tf_example = tf.placeholder(tf.string, name="tf_example")
    feature_configs = {"x": tf.FixedLenFeature([1], dtype=tf.float32)}
    tf_example = tf.parse_example(serialized_tf_example, feature_configs)
    # Use tf.identity() to assign a stable name to the parsed feature.
    x = tf.identity(tf_example["x"], name="x")
    # NOTE: tf.mul was removed in TF 1.0; tf.multiply is the supported name
    # and is available alongside tf.global_variables_initializer (used below).
    y = tf.add(tf.multiply(a, x), b, name="y")

    # Create an assets file that can be saved and restored as part of the
    # SavedModel.
    original_assets_directory = "/tmp/original/export/assets"
    original_assets_filename = "foo.txt"
    original_assets_filepath = _write_assets(original_assets_directory,
                                             original_assets_filename)

    # Set up the assets collection.
    assets_filepath = tf.constant(original_assets_filepath)
    tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, assets_filepath)
    # `collections=[]` keeps the filename variable out of the global
    # variables collection; it is re-assigned via the legacy init op instead.
    filename_tensor = tf.Variable(original_assets_filename,
                                  name="filename_tensor",
                                  trainable=False,
                                  collections=[])
    assign_filename_op = filename_tensor.assign(original_assets_filename)

    # Set up the signature for regression with input and output tensor
    # specification.
    input_tensor = meta_graph_pb2.TensorInfo()
    input_tensor.name = serialized_tf_example.name
    signature_inputs = {signature_constants.REGRESS_INPUTS: input_tensor}

    output_tensor = meta_graph_pb2.TensorInfo()
    output_tensor.name = tf.identity(y).name
    signature_outputs = {signature_constants.REGRESS_OUTPUTS: output_tensor}
    signature_def = signature_def_utils.build_signature_def(
        signature_inputs, signature_outputs,
        signature_constants.REGRESS_METHOD_NAME)

    # Set up the signature for Predict with input and output tensor
    # specification.
    predict_input_tensor = meta_graph_pb2.TensorInfo()
    predict_input_tensor.name = x.name
    predict_signature_def = signature_def_utils.build_signature_def(
        {"x": predict_input_tensor}, {"y": output_tensor},
        signature_constants.PREDICT_METHOD_NAME)

    # Initialize all variables and then save the SavedModel.
    sess.run(tf.global_variables_initializer())
    builder.add_meta_graph_and_variables(
        sess, [tag_constants.SERVING],
        signature_def_map={
            signature_constants.REGRESS_METHOD_NAME: signature_def,
            signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                predict_signature_def,
        },
        assets_collection=tf.get_collection(tf.GraphKeys.ASSET_FILEPATHS),
        legacy_init_op=tf.group(assign_filename_op),
    )
    builder.save(as_text)
def main(_):
  """Restores a trained MNIST graph from a checkpoint and exports it as a
  SavedModel with a 'predict_images' signature.

  Usage: mnist_dist_export.py [--model_version=y]
         [--checkpoint_path=checkpoint_store_path] export_dir
  """
  # Validate command line and flags before touching the filesystem.
  if len(sys.argv) < 2 or sys.argv[-1].startswith('-'):
    print('Usage: mnist_dist_export.py '
          '[--model_version=y] [--checkpoint_path=checkpoint_store_path] '
          'export_dir')
    sys.exit(-1)
  if FLAGS.model_version <= 0:
    # print() form keeps this script importable/runnable under Python 3; the
    # original mixed Python 2 print statements with print() calls.
    print('Please specify a positive value for exported serveable version '
          'number.')
    sys.exit(-1)
  if not FLAGS.checkpoint_path:
    print('Please specify the correct path where checkpoints stored locally '
          'or in OSS.')
    sys.exit(-1)

  # Checkpoint files are assumed to be named model.ckpt-0[.meta].
  checkpoint_basename = "model.ckpt"
  default_meta_graph_suffix = '.meta'
  ckpt_path = os.path.join(FLAGS.checkpoint_path, checkpoint_basename + '-0')
  meta_graph_file = ckpt_path + default_meta_graph_suffix

  with tf.Session() as new_sess:
    # Rebuild the graph from the exported meta graph, then restore weights.
    # clear_devices drops any device placements recorded during training.
    new_saver = tf.train.import_meta_graph(meta_graph_file, clear_devices=True)
    new_saver.restore(new_sess, ckpt_path)
    new_graph = tf.get_default_graph()
    # Tensor names are fixed by the training graph; assumes the checkpoint
    # was produced by a graph using these scopes — TODO confirm with trainer.
    new_x = new_graph.get_tensor_by_name('input/x-input:0')
    print(new_x)
    new_y = new_graph.get_tensor_by_name('cross_entropy/logits:0')
    print(new_y)

    # Export model
    # WARNING(break-tutorial-inline-code): The following code snippet is
    # in-lined in tutorials, please update tutorial documents accordingly
    # whenever code changes.
    export_path_base = sys.argv[-1]
    export_path = os.path.join(
        compat.as_bytes(export_path_base),
        compat.as_bytes(str(FLAGS.model_version)))
    print('Exporting trained model to %s' % export_path)
    builder = saved_model_builder.SavedModelBuilder(export_path)

    # Build the signature_def_map.
    tensor_info_x = utils.build_tensor_info(new_x)
    tensor_info_y = utils.build_tensor_info(new_y)
    prediction_signature = signature_def_utils.build_signature_def(
        inputs={'images': tensor_info_x},
        outputs={'scores': tensor_info_y},
        method_name=signature_constants.PREDICT_METHOD_NAME)

    # legacy_init_op initializes any lookup tables when the model is loaded.
    legacy_init_op = tf.group(tf.initialize_all_tables(),
                              name='legacy_init_op')
    builder.add_meta_graph_and_variables(
        new_sess, [tag_constants.SERVING],
        signature_def_map={
            'predict_images': prediction_signature,
        },
        legacy_init_op=legacy_init_op,
        clear_devices=True)
    builder.save()
    print('Done exporting!')
def main():
  """Trains/exports/evaluates a dense-feature classifier driven by FLAGS.mode.

  Modes: "train" (train with queue input pipelines, periodically validate and
  checkpoint), "export" (legacy session-bundle export), "savedmodel"
  (SavedModel export), "inference" (batch CSV inference with accuracy/AUC).
  """
  # Get hyperparameters
  if FLAGS.enable_colored_log:
    import coloredlogs
    coloredlogs.install()
  logging.basicConfig(level=logging.INFO)
  INPUT_FILE_FORMAT = FLAGS.input_file_format
  if INPUT_FILE_FORMAT not in ["tfrecord", "csv"]:
    logging.error("Unknown input file format: {}".format(INPUT_FILE_FORMAT))
    exit(1)
  FEATURE_SIZE = FLAGS.feature_size
  LABEL_SIZE = FLAGS.label_size
  EPOCH_NUMBER = FLAGS.epoch_number
  if EPOCH_NUMBER <= 0:
    # string_input_producer treats num_epochs=None as "repeat forever".
    EPOCH_NUMBER = None
  BATCH_THREAD_NUMBER = FLAGS.batch_thread_number
  MIN_AFTER_DEQUEUE = FLAGS.min_after_dequeue
  BATCH_CAPACITY = BATCH_THREAD_NUMBER * FLAGS.batch_size + MIN_AFTER_DEQUEUE
  MODE = FLAGS.mode
  MODEL = FLAGS.model
  CHECKPOINT_PATH = FLAGS.checkpoint_path
  # "fds://" paths are remote; only create local directories.
  if not CHECKPOINT_PATH.startswith("fds://") and not os.path.exists(
      CHECKPOINT_PATH):
    os.makedirs(CHECKPOINT_PATH)
  CHECKPOINT_FILE = CHECKPOINT_PATH + "/checkpoint.ckpt"
  LATEST_CHECKPOINT = tf.train.latest_checkpoint(CHECKPOINT_PATH)
  OUTPUT_PATH = FLAGS.output_path
  if not OUTPUT_PATH.startswith("fds://") and not os.path.exists(OUTPUT_PATH):
    os.makedirs(OUTPUT_PATH)
  pprint.PrettyPrinter().pprint(FLAGS.__flags)

  # Process TFRecords files
  def read_and_decode_tfrecord(filename_queue):
    """Reads one serialized Example and returns (label, features) tensors."""
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        features={
            "label": tf.FixedLenFeature([], tf.float32),
            "features": tf.FixedLenFeature([FEATURE_SIZE], tf.float32),
        })
    label = features["label"]
    features = features["features"]
    return label, features

  def read_and_decode_csv(filename_queue):
    """Reads one CSV line: integer label followed by four float features."""
    # TODO: Not generic for all datasets
    reader = tf.TextLineReader()
    key, value = reader.read(filename_queue)
    # Default values, in case of empty columns. Also specifies the type of
    # the decoded result.
    record_defaults = [[1], [1.0], [1.0], [1.0], [1.0]]
    col1, col2, col3, col4, col5 = tf.decode_csv(
        value, record_defaults=record_defaults)
    label = col1
    # BUGFIX: was tf.stack([col2, col3, col4, col4]) — col4 duplicated and
    # col5 (the fourth feature column) silently dropped.
    features = tf.stack([col2, col3, col4, col5])
    return label, features

  # Read TFRecords files for training
  filename_queue = tf.train.string_input_producer(
      tf.train.match_filenames_once(FLAGS.train_file),
      num_epochs=EPOCH_NUMBER)
  if INPUT_FILE_FORMAT == "tfrecord":
    label, features = read_and_decode_tfrecord(filename_queue)
  elif INPUT_FILE_FORMAT == "csv":
    label, features = read_and_decode_csv(filename_queue)
  batch_labels, batch_features = tf.train.shuffle_batch(
      [label, features],
      batch_size=FLAGS.batch_size,
      num_threads=BATCH_THREAD_NUMBER,
      capacity=BATCH_CAPACITY,
      min_after_dequeue=MIN_AFTER_DEQUEUE)

  # Read TFRecords file for validation
  validate_filename_queue = tf.train.string_input_producer(
      tf.train.match_filenames_once(FLAGS.validate_file),
      num_epochs=EPOCH_NUMBER)
  if INPUT_FILE_FORMAT == "tfrecord":
    validate_label, validate_features = read_and_decode_tfrecord(
        validate_filename_queue)
  elif INPUT_FILE_FORMAT == "csv":
    validate_label, validate_features = read_and_decode_csv(
        validate_filename_queue)
  validate_batch_labels, validate_batch_features = tf.train.shuffle_batch(
      [validate_label, validate_features],
      batch_size=FLAGS.validate_batch_size,
      num_threads=BATCH_THREAD_NUMBER,
      capacity=BATCH_CAPACITY,
      min_after_dequeue=MIN_AFTER_DEQUEUE)

  # Define the model
  input_units = FEATURE_SIZE
  output_units = LABEL_SIZE
  model_network_hidden_units = [int(i) for i in FLAGS.model_network.split()]

  def full_connect(inputs, weights_shape, biases_shape, is_train=True):
    """Fully-connected layer, optionally batch-normalized during training."""
    weights = tf.get_variable("weights",
                              weights_shape,
                              initializer=tf.random_normal_initializer())
    biases = tf.get_variable("biases",
                             biases_shape,
                             initializer=tf.random_normal_initializer())
    layer = tf.matmul(inputs, weights) + biases
    if FLAGS.enable_bn and is_train:
      mean, var = tf.nn.moments(layer, axes=[0])
      scale = tf.get_variable("scale",
                              biases_shape,
                              initializer=tf.random_normal_initializer())
      shift = tf.get_variable("shift",
                              biases_shape,
                              initializer=tf.random_normal_initializer())
      layer = tf.nn.batch_normalization(layer, mean, var, shift, scale,
                                        FLAGS.bn_epsilon)
    return layer

  def full_connect_relu(inputs, weights_shape, biases_shape, is_train=True):
    """Fully-connected layer followed by ReLU."""
    layer = full_connect(inputs, weights_shape, biases_shape, is_train)
    layer = tf.nn.relu(layer)
    return layer

  def customized_inference(inputs, is_train=True):
    """Fixed 3-hidden-layer MLP (128/32/8 units) with optional dropout."""
    hidden1_units = 128
    hidden2_units = 32
    hidden3_units = 8
    with tf.variable_scope("input"):
      layer = full_connect_relu(inputs, [input_units, hidden1_units],
                                [hidden1_units], is_train)
    with tf.variable_scope("layer0"):
      layer = full_connect_relu(layer, [hidden1_units, hidden2_units],
                                [hidden2_units], is_train)
    with tf.variable_scope("layer1"):
      layer = full_connect_relu(layer, [hidden2_units, hidden3_units],
                                [hidden3_units], is_train)
    if FLAGS.enable_dropout and is_train:
      layer = tf.nn.dropout(layer, FLAGS.dropout_keep_prob)
    with tf.variable_scope("output"):
      layer = full_connect(layer, [hidden3_units, output_units],
                           [output_units], is_train)
    return layer

  def dnn_inference(inputs, is_train=True):
    """MLP whose hidden layer sizes come from FLAGS.model_network."""
    with tf.variable_scope("input"):
      layer = full_connect_relu(inputs,
                                [input_units, model_network_hidden_units[0]],
                                [model_network_hidden_units[0]], is_train)
    for i in range(len(model_network_hidden_units) - 1):
      with tf.variable_scope("layer{}".format(i)):
        layer = full_connect_relu(
            layer,
            [model_network_hidden_units[i], model_network_hidden_units[i + 1]],
            [model_network_hidden_units[i + 1]], is_train)
    with tf.variable_scope("output"):
      layer = full_connect(layer,
                           [model_network_hidden_units[-1], output_units],
                           [output_units], is_train)
    return layer

  def lr_inference(inputs, is_train=True):
    """Plain logistic-regression layer (no hidden layers)."""
    with tf.variable_scope("lr"):
      layer = full_connect(inputs, [input_units, output_units],
                           [output_units])
    return layer

  def wide_and_deep_inference(inputs, is_train=True):
    """Sum of the wide (lr) and deep (dnn) logits."""
    return lr_inference(inputs, is_train) + dnn_inference(inputs, is_train)

  def cnn_inference(inputs, is_train=True):
    """3-conv-layer CNN; assumes flat 512*512*1 inputs — TODO confirm."""
    # TODO: Change if validate_batch_size is different
    # [BATCH_SIZE, 512 * 512 * 1] -> [BATCH_SIZE, 512, 512, 1]
    inputs = tf.reshape(inputs, [FLAGS.batch_size, 512, 512, 1])
    # [BATCH_SIZE, 512, 512, 1] -> [BATCH_SIZE, 128, 128, 8]
    with tf.variable_scope("conv0"):
      weights = tf.get_variable("weights", [3, 3, 1, 8],
                                initializer=tf.random_normal_initializer())
      bias = tf.get_variable("bias", [8],
                             initializer=tf.random_normal_initializer())
      layer = tf.nn.conv2d(inputs, weights, strides=[1, 1, 1, 1],
                           padding="SAME")
      layer = tf.nn.bias_add(layer, bias)
      layer = tf.nn.relu(layer)
      layer = tf.nn.max_pool(layer,
                             ksize=[1, 4, 4, 1],
                             strides=[1, 4, 4, 1],
                             padding="SAME")
    # [BATCH_SIZE, 128, 128, 8] -> [BATCH_SIZE, 32, 32, 8]
    with tf.variable_scope("conv1"):
      weights = tf.get_variable("weights", [3, 3, 8, 8],
                                initializer=tf.random_normal_initializer())
      bias = tf.get_variable("bias", [8],
                             initializer=tf.random_normal_initializer())
      layer = tf.nn.conv2d(layer, weights, strides=[1, 1, 1, 1],
                           padding="SAME")
      layer = tf.nn.bias_add(layer, bias)
      layer = tf.nn.relu(layer)
      layer = tf.nn.max_pool(layer,
                             ksize=[1, 4, 4, 1],
                             strides=[1, 4, 4, 1],
                             padding="SAME")
    # [BATCH_SIZE, 32, 32, 8] -> [BATCH_SIZE, 8, 8, 8]
    with tf.variable_scope("conv2"):
      weights = tf.get_variable("weights", [3, 3, 8, 8],
                                initializer=tf.random_normal_initializer())
      bias = tf.get_variable("bias", [8],
                             initializer=tf.random_normal_initializer())
      layer = tf.nn.conv2d(layer, weights, strides=[1, 1, 1, 1],
                           padding="SAME")
      layer = tf.nn.bias_add(layer, bias)
      layer = tf.nn.relu(layer)
      layer = tf.nn.max_pool(layer,
                             ksize=[1, 4, 4, 1],
                             strides=[1, 4, 4, 1],
                             padding="SAME")
    # [BATCH_SIZE, 8, 8, 8] -> [BATCH_SIZE, 8 * 8 * 8]
    layer = tf.reshape(layer, [-1, 8 * 8 * 8])
    # [BATCH_SIZE, 8 * 8 * 8] -> [BATCH_SIZE, LABEL_SIZE]
    with tf.variable_scope("output"):
      weights = tf.get_variable("weights", [8 * 8 * 8, LABEL_SIZE],
                                initializer=tf.random_normal_initializer())
      bias = tf.get_variable("bias", [LABEL_SIZE],
                             initializer=tf.random_normal_initializer())
      layer = tf.add(tf.matmul(layer, weights), bias)
    return layer

  def inference(inputs, is_train=True):
    """Dispatches to the network selected by FLAGS.model."""
    if MODEL == "dnn":
      return dnn_inference(inputs, is_train)
    elif MODEL == "lr":
      return lr_inference(inputs, is_train)
    elif MODEL == "wide_and_deep":
      return wide_and_deep_inference(inputs, is_train)
    elif MODEL == "customized":
      return customized_inference(inputs, is_train)
    elif MODEL == "cnn":
      return cnn_inference(inputs, is_train)
    else:
      logging.error("Unknown model, exit now")
      exit(1)

  logging.info("Use the model: {}, model network: {}".format(
      MODEL, FLAGS.model_network))
  logits = inference(batch_features, True)
  batch_labels = tf.to_int64(batch_labels)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=logits, labels=batch_labels)
  loss = tf.reduce_mean(cross_entropy, name="loss")
  global_step = tf.Variable(0, name="global_step", trainable=False)
  if FLAGS.enable_lr_decay:
    logging.info("Enable learning rate decay rate: {}".format(
        FLAGS.lr_decay_rate))
    starter_learning_rate = FLAGS.learning_rate
    learning_rate = tf.train.exponential_decay(starter_learning_rate,
                                               global_step,
                                               100000,
                                               FLAGS.lr_decay_rate,
                                               staircase=True)
  else:
    learning_rate = FLAGS.learning_rate
  optimizer = get_optimizer(FLAGS.optimizer, learning_rate)
  train_op = optimizer.minimize(loss, global_step=global_step)
  # Share variables between the train and eval copies of the network below.
  tf.get_variable_scope().reuse_variables()

  # Define accuracy op for train data
  train_accuracy_logits = inference(batch_features, False)
  train_softmax = tf.nn.softmax(train_accuracy_logits)
  train_correct_prediction = tf.equal(
      tf.argmax(train_softmax, 1), batch_labels)
  train_accuracy = tf.reduce_mean(
      tf.cast(train_correct_prediction, tf.float32))

  # Define auc op for train data: build dense one-hot labels for streaming_auc.
  batch_labels = tf.cast(batch_labels, tf.int32)
  sparse_labels = tf.reshape(batch_labels, [-1, 1])
  derived_size = tf.shape(batch_labels)[0]
  indices = tf.reshape(tf.range(0, derived_size, 1), [-1, 1])
  concated = tf.concat(axis=1, values=[indices, sparse_labels])
  outshape = tf.stack([derived_size, LABEL_SIZE])
  new_batch_labels = tf.sparse_to_dense(concated, outshape, 1.0, 0.0)
  _, train_auc = tf.contrib.metrics.streaming_auc(train_softmax,
                                                  new_batch_labels)

  # Define accuracy op for validate data
  validate_accuracy_logits = inference(validate_batch_features, False)
  validate_softmax = tf.nn.softmax(validate_accuracy_logits)
  validate_batch_labels = tf.to_int64(validate_batch_labels)
  validate_correct_prediction = tf.equal(
      tf.argmax(validate_softmax, 1), validate_batch_labels)
  validate_accuracy = tf.reduce_mean(
      tf.cast(validate_correct_prediction, tf.float32))

  # Define auc op for validate data
  validate_batch_labels = tf.cast(validate_batch_labels, tf.int32)
  sparse_labels = tf.reshape(validate_batch_labels, [-1, 1])
  derived_size = tf.shape(validate_batch_labels)[0]
  indices = tf.reshape(tf.range(0, derived_size, 1), [-1, 1])
  concated = tf.concat(axis=1, values=[indices, sparse_labels])
  outshape = tf.stack([derived_size, LABEL_SIZE])
  new_validate_batch_labels = tf.sparse_to_dense(concated, outshape, 1.0, 0.0)
  _, validate_auc = tf.contrib.metrics.streaming_auc(
      validate_softmax, new_validate_batch_labels)

  # Define inference op (fed via placeholders at serving/inference time)
  inference_features = tf.placeholder("float", [None, FEATURE_SIZE])
  inference_logits = inference(inference_features, False)
  inference_softmax = tf.nn.softmax(inference_logits)
  inference_op = tf.argmax(inference_softmax, 1)
  keys_placeholder = tf.placeholder(tf.int32, shape=[None, 1])
  keys = tf.identity(keys_placeholder)
  model_signature = {
      "inputs":
          exporter.generic_signature({
              "keys": keys_placeholder,
              "features": inference_features
          }),
      "outputs":
          exporter.generic_signature({
              "keys": keys,
              "softmax": inference_softmax,
              "prediction": inference_op
          })
  }

  # Initialize saver and summary
  saver = tf.train.Saver()
  tf.summary.scalar("loss", loss)
  tf.summary.scalar("train_accuracy", train_accuracy)
  tf.summary.scalar("train_auc", train_auc)
  tf.summary.scalar("validate_accuracy", validate_accuracy)
  tf.summary.scalar("validate_auc", validate_auc)
  summary_op = tf.summary.merge_all()
  init_op = [
      tf.global_variables_initializer(),
      tf.local_variables_initializer()
  ]

  # Create session to run
  with tf.Session() as sess:
    logging.info("Start to run with mode: {}".format(MODE))
    writer = tf.summary.FileWriter(OUTPUT_PATH, sess.graph)
    sess.run(init_op)

    if MODE == "train":
      # Restore session and start queue runner
      restore_session_from_checkpoint(sess, saver, LATEST_CHECKPOINT)
      coord = tf.train.Coordinator()
      threads = tf.train.start_queue_runners(coord=coord, sess=sess)
      start_time = datetime.datetime.now()
      try:
        while not coord.should_stop():
          _, loss_value, step = sess.run([train_op, loss, global_step])
          # Print state while training
          if step % FLAGS.steps_to_validate == 0:
            train_accuracy_value, train_auc_value, validate_accuracy_value, validate_auc_value, summary_value = sess.run(
                [train_accuracy, train_auc, validate_accuracy, validate_auc,
                 summary_op])
            end_time = datetime.datetime.now()
            logging.info(
                "[{}] Step: {}, loss: {}, train_acc: {}, train_auc: {}, valid_acc: {}, valid_auc: {}".format(
                    end_time - start_time, step, loss_value,
                    train_accuracy_value, train_auc_value,
                    validate_accuracy_value, validate_auc_value))
            writer.add_summary(summary_value, step)
            saver.save(sess, CHECKPOINT_FILE, global_step=step)
            start_time = end_time
      except tf.errors.OutOfRangeError:
        # Export the model after training
        export_model(sess, saver, model_signature, FLAGS.model_path,
                     FLAGS.model_version)
      finally:
        coord.request_stop()
        coord.join(threads)

    elif MODE == "export":
      if not restore_session_from_checkpoint(sess, saver, LATEST_CHECKPOINT):
        logging.error("No checkpoint found, exit now")
        exit(1)
      # Export the model
      export_model(sess, saver, model_signature, FLAGS.model_path,
                   FLAGS.model_version)

    elif MODE == "savedmodel":
      if not restore_session_from_checkpoint(sess, saver, LATEST_CHECKPOINT):
        logging.error("No checkpoint found, exit now")
        exit(1)
      logging.info("Export the saved model to {}".format(
          FLAGS.saved_model_path))
      export_path_base = FLAGS.saved_model_path
      export_path = os.path.join(
          compat.as_bytes(export_path_base),
          compat.as_bytes(str(FLAGS.model_version)))
      model_signature = signature_def_utils.build_signature_def(
          inputs={
              "keys": utils.build_tensor_info(keys_placeholder),
              "features": utils.build_tensor_info(inference_features)
          },
          outputs={
              "keys": utils.build_tensor_info(keys),
              "softmax": utils.build_tensor_info(inference_softmax),
              "prediction": utils.build_tensor_info(inference_op)
          },
          method_name=signature_constants.PREDICT_METHOD_NAME)
      try:
        builder = saved_model_builder.SavedModelBuilder(export_path)
        builder.add_meta_graph_and_variables(
            sess, [tag_constants.SERVING],
            clear_devices=True,
            signature_def_map={
                signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                    model_signature,
            },
            legacy_init_op=tf.group(tf.initialize_all_tables(),
                                    name="legacy_init_op"))
        builder.save()
      except Exception as e:
        # Best-effort export: log and continue rather than crash the job.
        logging.error("Fail to export saved model, exception: {}".format(e))

    elif MODE == "inference":
      if not restore_session_from_checkpoint(sess, saver, LATEST_CHECKPOINT):
        logging.error("No checkpoint found, exit now")
        exit(1)
      # Load inference test data; assumes 9 feature columns + 1 label column
      # in the CSV — TODO confirm against the dataset.
      inference_result_file_name = FLAGS.inference_result_file
      inference_test_file_name = FLAGS.inference_test_file
      inference_data = np.genfromtxt(inference_test_file_name, delimiter=",")
      inference_data_features = inference_data[:, 0:9]
      inference_data_labels = inference_data[:, 9]

      # Run inference
      start_time = datetime.datetime.now()
      prediction, prediction_softmax = sess.run(
          [inference_op, inference_softmax],
          feed_dict={inference_features: inference_data_features})
      end_time = datetime.datetime.now()

      # Compute accuracy
      label_number = len(inference_data_labels)
      correct_label_number = 0
      for i in range(label_number):
        if inference_data_labels[i] == prediction[i]:
          correct_label_number += 1
      accuracy = float(correct_label_number) / label_number

      # Compute auc
      y_true = np.array(inference_data_labels)
      y_score = prediction_softmax[:, 1]
      fpr, tpr, thresholds = metrics.roc_curve(y_true, y_score, pos_label=1)
      auc = metrics.auc(fpr, tpr)
      logging.info("[{}] Inference accuracy: {}, auc: {}".format(
          end_time - start_time, accuracy, auc))

      # Save result into the file
      np.savetxt(inference_result_file_name, prediction_softmax,
                 delimiter=",")
      logging.info("Save result to file: {}".format(
          inference_result_file_name))
def save_model():
  """Restores the latest checkpoint and exports it as a versioned SavedModel
  with classification and prediction signatures."""
  with tf.Graph().as_default():
    # Placeholder for a batch of input images (height x width x 3 channels).
    _images = tf.placeholder(tf.float32,
                             shape=[None, FLAGS.image_height,
                                    FLAGS.image_width, 3])

    # Inference.
    logits = reconobook_modelo.inference(_images)

    # Top-10 scores and their class indices; indices are mapped to string
    # class names ("0".."9") for the classification signature.
    values, indices = tf.nn.top_k(logits, 10)
    prediction_classes = tf.contrib.lookup.index_to_string(
        tf.to_int64(indices),
        mapping=tf.constant([str(i) for i in range(10)]))

    with tf.Session() as sess:
      # Load the trained model: restore the shadow (moving-average) copies of
      # the variables from the latest checkpoint.
      ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
      variable_averages = tf.train.ExponentialMovingAverage(
          FLAGS.moving_average_decay)
      variables_to_restore = variable_averages.variables_to_restore()
      saver = tf.train.Saver(variables_to_restore)
      saver.restore(sess, ckpt.model_checkpoint_path)

      # Destination path: <export_model_dir>/<model_version>.
      export_path = os.path.join(
          compat.as_bytes(FLAGS.export_model_dir),
          compat.as_bytes(str(FLAGS.model_version)))

      # If the export directory already exists, remove it so the builder can
      # create it fresh (SavedModelBuilder refuses to write into an existing
      # directory).
      if os.path.exists(export_path):
        shutil.rmtree(export_path)

      print('Exportando modelo a %s' % export_path)

      # Create the SavedModel builder.
      builder = saved_model_builder.SavedModelBuilder(export_path)

      # Build the signature_def_map.
      classification_inputs = utils.build_tensor_info(_images)
      classification_outputs_classes = utils.build_tensor_info(
          prediction_classes)
      classification_outputs_scores = utils.build_tensor_info(values)
      classification_signature = signature_def_utils.build_signature_def(
          inputs={signature_constants.CLASSIFY_INPUTS: classification_inputs},
          outputs={
              signature_constants.CLASSIFY_OUTPUT_CLASSES:
                  classification_outputs_classes,
              signature_constants.CLASSIFY_OUTPUT_SCORES:
                  classification_outputs_scores
          },
          method_name=signature_constants.CLASSIFY_METHOD_NAME)

      # Prediction signature: raw logits keyed as 'scores'.
      tensor_info_x = utils.build_tensor_info(_images)
      tensor_info_y = utils.build_tensor_info(logits)
      prediction_signature = signature_def_utils.build_signature_def(
          inputs={'images': tensor_info_x},
          outputs={'scores': tensor_info_y},
          method_name=signature_constants.PREDICT_METHOD_NAME)

      # legacy_init_op initializes lookup tables (index_to_string above) when
      # the SavedModel is loaded.
      legacy_init_op = tf.group(tf.initialize_all_tables(),
                                name='legacy_init_op')
      builder.add_meta_graph_and_variables(
          sess, [tag_constants.SERVING],
          signature_def_map={
              'predict_images': prediction_signature,
              signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                  classification_signature,
          },
          legacy_init_op=legacy_init_op)

      builder.save()
      print('Modelo exportado')
def main():
  """Trains/exports/evaluates a sparse (libsvm-style) classifier driven by
  FLAGS.mode.

  Modes: "train", "save_model", "inference" (libsvm text input) and
  "inference_with_tfrecords" (TFRecord input).
  """
  # Idiomatic existence checks (was `== False`).
  if not os.path.exists(FLAGS.checkpoint_path):
    os.makedirs(FLAGS.checkpoint_path)
  checkpoint_file_path = FLAGS.checkpoint_path + "/checkpoint.ckpt"
  latest_checkpoint_file_path = tf.train.latest_checkpoint(
      FLAGS.checkpoint_path)
  if not os.path.exists(FLAGS.output_path):
    os.makedirs(FLAGS.output_path)

  # Step 1: Construct the dataset op
  epoch_number = FLAGS.epoch_number
  if epoch_number <= 0:
    # Dataset.repeat(-1) repeats indefinitely.
    epoch_number = -1
  train_buffer_size = FLAGS.train_batch_size * 3
  validation_buffer_size = FLAGS.train_batch_size * 3

  train_filename_list = [
      filename for filename in FLAGS.train_files.split(",")
  ]
  train_filename_placeholder = tf.placeholder(tf.string, shape=[None])
  train_dataset = tf.data.TFRecordDataset(train_filename_placeholder)
  # NOTE(review): shuffle() after batch() shuffles whole batches, not
  # individual examples — confirm this ordering is intentional.
  train_dataset = train_dataset.map(parse_tfrecords_function).repeat(
      epoch_number).batch(FLAGS.train_batch_size).shuffle(
          buffer_size=train_buffer_size)
  train_dataset_iterator = train_dataset.make_initializable_iterator()
  batch_labels, batch_ids, batch_values = train_dataset_iterator.get_next()

  validation_filename_list = [
      filename for filename in FLAGS.validation_files.split(",")
  ]
  validation_filename_placeholder = tf.placeholder(tf.string, shape=[None])
  validation_dataset = tf.data.TFRecordDataset(
      validation_filename_placeholder)
  validation_dataset = validation_dataset.map(
      parse_tfrecords_function).repeat().batch(
          FLAGS.validation_batch_size).shuffle(
              buffer_size=validation_buffer_size)
  validation_dataset_iterator = validation_dataset.make_initializable_iterator(
  )
  validation_labels, validation_ids, validation_values = validation_dataset_iterator.get_next(
  )

  # Define the model
  logits = inference(batch_ids, batch_values, True)
  batch_labels = tf.to_int64(batch_labels)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=logits, labels=batch_labels)
  loss = tf.reduce_mean(cross_entropy, name="loss")
  global_step = tf.Variable(0, name="global_step", trainable=False)
  if FLAGS.enable_lr_decay:
    logging.info(
        "Enable learning rate decay rate: {}".format(FLAGS.lr_decay_rate))
    starter_learning_rate = FLAGS.learning_rate
    learning_rate = tf.train.exponential_decay(
        starter_learning_rate,
        global_step,
        100000,
        FLAGS.lr_decay_rate,
        staircase=True)
  else:
    learning_rate = FLAGS.learning_rate
  optimizer = util.get_optimizer_by_name(FLAGS.optimizer, learning_rate)
  train_op = optimizer.minimize(loss, global_step=global_step)
  # Share variables between the train and eval copies of the network below.
  tf.get_variable_scope().reuse_variables()

  # Define accuracy op for train data
  train_accuracy_logits = inference(batch_ids, batch_values, False)
  train_softmax = tf.nn.softmax(train_accuracy_logits)
  train_correct_prediction = tf.equal(
      tf.argmax(train_softmax, 1), batch_labels)
  train_accuracy = tf.reduce_mean(
      tf.cast(train_correct_prediction, tf.float32))

  # Define auc op for train data: dense one-hot labels for streaming_auc.
  batch_labels = tf.cast(batch_labels, tf.int32)
  sparse_labels = tf.reshape(batch_labels, [-1, 1])
  derived_size = tf.shape(batch_labels)[0]
  indices = tf.reshape(tf.range(0, derived_size, 1), [-1, 1])
  concated = tf.concat(axis=1, values=[indices, sparse_labels])
  outshape = tf.stack([derived_size, FLAGS.label_size])
  new_train_batch_labels = tf.sparse_to_dense(concated, outshape, 1.0, 0.0)
  _, train_auc = tf.contrib.metrics.streaming_auc(train_softmax,
                                                  new_train_batch_labels)

  # Define accuracy op for validate data
  validate_accuracy_logits = inference(validation_ids, validation_values,
                                       False)
  validate_softmax = tf.nn.softmax(validate_accuracy_logits)
  validate_batch_labels = tf.to_int64(validation_labels)
  validate_correct_prediction = tf.equal(
      tf.argmax(validate_softmax, 1), validate_batch_labels)
  validate_accuracy = tf.reduce_mean(
      tf.cast(validate_correct_prediction, tf.float32))

  # Define auc op for validate data
  validate_batch_labels = tf.cast(validate_batch_labels, tf.int32)
  sparse_labels = tf.reshape(validate_batch_labels, [-1, 1])
  derived_size = tf.shape(validate_batch_labels)[0]
  indices = tf.reshape(tf.range(0, derived_size, 1), [-1, 1])
  concated = tf.concat(axis=1, values=[indices, sparse_labels])
  outshape = tf.stack([derived_size, FLAGS.label_size])
  new_validate_batch_labels = tf.sparse_to_dense(concated, outshape, 1.0, 0.0)
  _, validate_auc = tf.contrib.metrics.streaming_auc(
      validate_softmax, new_validate_batch_labels)

  # Define inference op: sparse features are fed as (index, id, value, shape)
  # placeholders and assembled into SparseTensors.
  sparse_index = tf.placeholder(tf.int64, [None, 2])
  sparse_ids = tf.placeholder(tf.int64, [None])
  sparse_values = tf.placeholder(tf.float32, [None])
  sparse_shape = tf.placeholder(tf.int64, [2])
  inference_ids = tf.SparseTensor(sparse_index, sparse_ids, sparse_shape)
  inference_values = tf.SparseTensor(sparse_index, sparse_values,
                                     sparse_shape)
  inference_logits = inference(inference_ids, inference_values, False)
  inference_softmax = tf.nn.softmax(inference_logits)
  inference_op = tf.argmax(inference_softmax, 1)
  keys_placeholder = tf.placeholder(tf.int32, shape=[None, 1])
  keys = tf.identity(keys_placeholder)

  signature_def_map = {
      signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
          signature_def_utils.build_signature_def(
              inputs={
                  "keys": utils.build_tensor_info(keys_placeholder),
                  "indexs": utils.build_tensor_info(sparse_index),
                  "ids": utils.build_tensor_info(sparse_ids),
                  "values": utils.build_tensor_info(sparse_values),
                  "shape": utils.build_tensor_info(sparse_shape)
              },
              outputs={
                  "keys": utils.build_tensor_info(keys),
                  "softmax": utils.build_tensor_info(inference_softmax),
                  "prediction": utils.build_tensor_info(inference_op)
              },
              method_name=signature_constants.PREDICT_METHOD_NAME)
  }

  # Initialize saver and summary
  saver = tf.train.Saver()
  tf.summary.scalar("loss", loss)
  tf.summary.scalar("train_accuracy", train_accuracy)
  tf.summary.scalar("train_auc", train_auc)
  tf.summary.scalar("validate_accuracy", validate_accuracy)
  tf.summary.scalar("validate_auc", validate_auc)
  summary_op = tf.summary.merge_all()
  init_op = [
      tf.global_variables_initializer(),
      tf.local_variables_initializer()
  ]

  # Create session to run
  with tf.Session() as sess:
    writer = tf.summary.FileWriter(FLAGS.output_path, sess.graph)
    sess.run(init_op)
    sess.run(
        train_dataset_iterator.initializer,
        feed_dict={train_filename_placeholder: train_filename_list})
    sess.run(
        validation_dataset_iterator.initializer,
        feed_dict={validation_filename_placeholder: validation_filename_list})

    if FLAGS.mode == "train":
      # Restore session and start queue runner
      util.restore_from_checkpoint(sess, saver, latest_checkpoint_file_path)
      coord = tf.train.Coordinator()
      threads = tf.train.start_queue_runners(coord=coord, sess=sess)
      start_time = datetime.datetime.now()
      try:
        while not coord.should_stop():
          if FLAGS.benchmark_mode:
            sess.run(train_op)
          else:
            _, step = sess.run([train_op, global_step])
            # Print state while training
            if step % FLAGS.steps_to_validate == 0:
              loss_value, train_accuracy_value, train_auc_value, validate_accuracy_value, auc_value, summary_value = sess.run(
                  [
                      loss, train_accuracy, train_auc, validate_accuracy,
                      validate_auc, summary_op
                  ])
              end_time = datetime.datetime.now()
              logging.info(
                  "[{}] Step: {}, loss: {}, train_acc: {}, train_auc: {}, valid_acc: {}, valid_auc: {}".
                  format(end_time - start_time, step, loss_value,
                         train_accuracy_value, train_auc_value,
                         validate_accuracy_value, auc_value))
              writer.add_summary(summary_value, step)
              saver.save(sess, checkpoint_file_path, global_step=step)
              start_time = end_time
      except tf.errors.OutOfRangeError:
        if FLAGS.benchmark_mode:
          print("Finish training for benchmark")
          exit(0)
        else:
          # Export the model after training
          util.save_model(
              FLAGS.model_path,
              FLAGS.model_version,
              sess,
              signature_def_map,
              is_save_graph=False)
      finally:
        coord.request_stop()
        coord.join(threads)

    elif FLAGS.mode == "save_model":
      if not util.restore_from_checkpoint(sess, saver,
                                          latest_checkpoint_file_path):
        logging.error("No checkpoint found, exit now")
        exit(1)
      util.save_model(
          FLAGS.model_path,
          FLAGS.model_version,
          sess,
          signature_def_map,
          is_save_graph=False)

    elif FLAGS.mode == "inference":
      if not util.restore_from_checkpoint(sess, saver,
                                          latest_checkpoint_file_path):
        logging.error("No checkpoint found, exit now")
        exit(1)

      # Load inference test data in libsvm format: "label id:value id:value".
      inference_result_file_name = "./inference_result.txt"
      inference_test_file_name = "./data/a8a_test.libsvm"
      labels = []
      feature_ids = []
      feature_values = []
      feature_index = []
      ins_num = 0
      # `with` ensures the file handle is closed (original leaked it).
      with open(inference_test_file_name, "r") as inference_test_file:
        for line in inference_test_file:
          tokens = line.split(" ")
          labels.append(int(tokens[0]))
          feature_num = 0
          for feature in tokens[1:]:
            feature_id, feature_value = feature.split(":")
            feature_ids.append(int(feature_id))
            feature_values.append(float(feature_value))
            feature_index.append([ins_num, feature_num])
            feature_num += 1
          ins_num += 1

      # Run inference
      start_time = datetime.datetime.now()
      prediction, prediction_softmax = sess.run(
          [inference_op, inference_softmax],
          feed_dict={
              sparse_index: feature_index,
              sparse_ids: feature_ids,
              sparse_values: feature_values,
              sparse_shape: [ins_num, FLAGS.feature_size]
          })
      end_time = datetime.datetime.now()

      # Compute accuracy
      label_number = len(labels)
      correct_label_number = 0
      for i in range(label_number):
        if labels[i] == prediction[i]:
          correct_label_number += 1
      accuracy = float(correct_label_number) / label_number

      # Compute auc
      expected_labels = np.array(labels)
      predict_labels = prediction_softmax[:, 0]
      fpr, tpr, thresholds = metrics.roc_curve(
          expected_labels, predict_labels, pos_label=0)
      auc = metrics.auc(fpr, tpr)
      logging.info("[{}] Inference accuracy: {}, auc: {}".format(
          end_time - start_time, accuracy, auc))

      # Save result into the file
      np.savetxt(inference_result_file_name, prediction_softmax,
                 delimiter=",")
      logging.info(
          "Save result to file: {}".format(inference_result_file_name))

    elif FLAGS.mode == "inference_with_tfrecords":
      if not util.restore_from_checkpoint(sess, saver,
                                          latest_checkpoint_file_path):
        logging.error("No checkpoint found, exit now")
        exit(1)

      # Load inference test data
      inference_result_file_name = "./inference_result.txt"
      inference_test_file_name = "./data/a8a/a8a_test.libsvm.tfrecords"
      batch_feature_index = []
      batch_labels = []
      batch_ids = []
      batch_values = []
      ins_num = 0
      # Read from TFRecords files
      for serialized_example in tf.python_io.tf_record_iterator(
          inference_test_file_name):
        # Get serialized example from file
        example = tf.train.Example()
        example.ParseFromString(serialized_example)
        label = example.features.feature["label"].float_list.value
        ids = example.features.feature["ids"].int64_list.value
        values = example.features.feature["values"].float_list.value
        batch_labels.append(label)
        # Notice that using extend() instead of append() to flatten the values
        batch_ids.extend(ids)
        batch_values.extend(values)
        # range() instead of Python-2-only xrange(); identical semantics here.
        for i in range(len(ids)):
          batch_feature_index.append([ins_num, i])
        ins_num += 1

      # Run inference
      start_time = datetime.datetime.now()
      prediction, prediction_softmax = sess.run(
          [inference_op, inference_softmax],
          feed_dict={
              sparse_index: batch_feature_index,
              sparse_ids: batch_ids,
              sparse_values: batch_values,
              sparse_shape: [ins_num, FLAGS.feature_size]
          })
      end_time = datetime.datetime.now()

      # Compute accuracy
      label_number = len(batch_labels)
      correct_label_number = 0
      for i in range(label_number):
        if batch_labels[i] == prediction[i]:
          correct_label_number += 1
      accuracy = float(correct_label_number) / label_number

      # Compute auc
      expected_labels = np.array(batch_labels)
      predict_labels = prediction_softmax[:, 0]
      fpr, tpr, thresholds = metrics.roc_curve(
          expected_labels, predict_labels, pos_label=0)
      auc = metrics.auc(fpr, tpr)
      logging.info("[{}] Inference accuracy: {}, auc: {}".format(
          end_time - start_time, accuracy, auc))

      # Save result into the file
      np.savetxt(inference_result_file_name, prediction_softmax,
                 delimiter=",")
      logging.info(
          "Save result to file: {}".format(inference_result_file_name))
def main(_):
  """Train a softmax-regression MNIST classifier and export it as a SavedModel.

  Command line: mnist_export.py [--training_iteration=x] [--model_version=y]
  export_dir.  The final positional argument is the export base directory;
  the model is written under export_dir/<model_version>.

  NOTE(review): this is Python 2 code (bare `print` statements, `xrange`) and
  uses long-deprecated TF1 APIs (`tf.initialize_all_variables`,
  `tf.initialize_all_tables`, `tf.contrib.lookup.index_to_string`).
  """
  # Validate command line: need at least one positional arg (the export dir),
  # and it must not look like a flag.
  if len(sys.argv) < 2 or sys.argv[-1].startswith('-'):
    print('Usage: mnist_export.py [--training_iteration=x] '
          '[--model_version=y] export_dir')
    sys.exit(-1)
  if FLAGS.training_iteration <= 0:
    print 'Please specify a positive value for training iteration.'
    sys.exit(-1)
  if FLAGS.model_version <= 0:
    print 'Please specify a positive value for version number.'
    sys.exit(-1)

  # Train model
  print 'Training model...'
  mnist = mnist_input_data.read_data_sets(FLAGS.work_dir, one_hot=True)
  sess = tf.InteractiveSession()
  # Serving input: a serialized tf.Example proto carrying a 784-float "x".
  serialized_tf_example = tf.placeholder(tf.string, name='tf_example')
  feature_configs = {'x': tf.FixedLenFeature(shape=[784], dtype=tf.float32),}
  tf_example = tf.parse_example(serialized_tf_example, feature_configs)
  x = tf.identity(tf_example['x'], name='x')  # use tf.identity() to assign name
  y_ = tf.placeholder('float', shape=[None, 10])
  # Single-layer softmax regression: y = softmax(x*w + b).
  w = tf.Variable(tf.zeros([784, 10]))
  b = tf.Variable(tf.zeros([10]))
  sess.run(tf.initialize_all_variables())
  y = tf.nn.softmax(tf.matmul(x, w) + b, name='y')
  cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
  train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
  # Top-10 scores plus a lookup that maps class indices to string labels
  # ("0".."9") for the classification signature below.
  values, indices = tf.nn.top_k(y, 10)
  prediction_classes = tf.contrib.lookup.index_to_string(
      tf.to_int64(indices), mapping=tf.constant([str(i) for i in xrange(10)]))
  # Minibatch SGD for the requested number of iterations.
  for _ in range(FLAGS.training_iteration):
    batch = mnist.train.next_batch(50)
    train_step.run(feed_dict={x: batch[0], y_: batch[1]})
  correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
  accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
  print 'training accuracy %g' % sess.run(
      accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})
  print 'Done training!'

  # Export model
  # WARNING(break-tutorial-inline-code): The following code snippet is
  # in-lined in tutorials, please update tutorial documents accordingly
  # whenever code changes.
  export_path_base = sys.argv[-1]
  # Version subdirectory convention expected by TF Serving:
  # <export_path_base>/<model_version>.
  export_path = os.path.join(
      compat.as_bytes(export_path_base),
      compat.as_bytes(str(FLAGS.model_version)))
  print 'Exporting trained model to', export_path
  builder = saved_model_builder.SavedModelBuilder(export_path)

  # Build the signature_def_map.
  # Classification signature: serialized tf.Example in, (classes, scores) out.
  classification_inputs = utils.build_tensor_info(serialized_tf_example)
  classification_outputs_classes = utils.build_tensor_info(prediction_classes)
  classification_outputs_scores = utils.build_tensor_info(values)
  classification_signature = signature_def_utils.build_signature_def(
      inputs={signature_constants.CLASSIFY_INPUTS: classification_inputs},
      outputs={
          signature_constants.CLASSIFY_OUTPUT_CLASSES:
              classification_outputs_classes,
          signature_constants.CLASSIFY_OUTPUT_SCORES:
              classification_outputs_scores
      },
      method_name=signature_constants.CLASSIFY_METHOD_NAME)

  # Prediction signature: raw image tensor in, softmax scores out.
  tensor_info_x = utils.build_tensor_info(x)
  tensor_info_y = utils.build_tensor_info(y)
  prediction_signature = signature_def_utils.build_signature_def(
      inputs={'images': tensor_info_x},
      outputs={'scores': tensor_info_y},
      method_name=signature_constants.PREDICT_METHOD_NAME)

  # Table init must run at load time so index_to_string works in serving.
  legacy_init_op = tf.group(tf.initialize_all_tables(), name='legacy_init_op')
  builder.add_meta_graph_and_variables(
      sess, [tag_constants.SERVING],
      signature_def_map={
          'predict_images':
              prediction_signature,
          signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
              classification_signature,
      },
      legacy_init_op=legacy_init_op)
  builder.save()
  print 'Done exporting!'
def export_fn(estimator, export_dir_base, checkpoint_path=None, eval_result=None):
  """Export the estimator's serving graph as a timestamped SavedModel.

  Builds a fresh inference graph, restores the (latest or given) checkpoint
  into it, writes a SavedModel under a timestamped subdirectory of
  `export_dir_base`, then copies the result to a stable "model" (or
  "evaluation_model") folder under `args.job_dir`.

  NOTE(review): this function is a closure — `args`, `features`, `schema`,
  `stats`, `keep_target`, `assets_extra`, `make_prediction_output_tensors`
  and `recursive_copy` come from the enclosing scope, which is not visible
  here.  `eval_result` is accepted (export-strategy interface) but unused.

  Args:
    estimator: trained Estimator; its `_call_model_fn`/`_model_dir`
      private members are used directly.
    export_dir_base: base directory for timestamped exports.
    checkpoint_path: specific checkpoint to export; latest if None.
    eval_result: ignored.

  Returns:
    The timestamped export directory that was written.
  """
  with ops.Graph().as_default() as g:
    contrib_variables.create_global_step(g)

    # Serving-time input pipeline: CSV parsing + feature transforms.
    input_ops = feature_transforms.build_csv_serving_tensors_for_training_step(
        args.analysis, features, schema, stats, keep_target)
    # Run the model function in inference mode on the serving inputs.
    model_fn_ops = estimator._call_model_fn(input_ops.features,
                                            None,
                                            model_fn_lib.ModeKeys.INFER)
    output_fetch_tensors = make_prediction_output_tensors(
        args=args,
        features=features,
        input_ops=input_ops,
        model_fn_ops=model_fn_ops,
        keep_target=keep_target)

    # Don't use signature_def_utils.predict_signature_def as that renames
    # tensor names if there is only 1 input/output tensor!
    signature_inputs = {key: tf.saved_model.utils.build_tensor_info(tensor)
                        for key, tensor in six.iteritems(input_ops.default_inputs)}
    signature_outputs = {key: tf.saved_model.utils.build_tensor_info(tensor)
                         for key, tensor in six.iteritems(output_fetch_tensors)}

    signature_def_map = {
        'serving_default':
            signature_def_utils.build_signature_def(
                signature_inputs,
                signature_outputs,
                tf.saved_model.signature_constants.PREDICT_METHOD_NAME)}

    if not checkpoint_path:
      # Locate the latest checkpoint
      checkpoint_path = saver.latest_checkpoint(estimator._model_dir)
    if not checkpoint_path:
      raise ValueError("Couldn't find trained model at %s."
                       % estimator._model_dir)

    export_dir = saved_model_export_utils.get_timestamped_export_dir(
        export_dir_base)

    # Prefer the model_fn's own saver (it may exclude/rename variables);
    # otherwise fall back to a default sharded Saver.
    if (model_fn_ops.scaffold is not None and
        model_fn_ops.scaffold.saver is not None):
      saver_for_restore = model_fn_ops.scaffold.saver
    else:
      saver_for_restore = saver.Saver(sharded=True)

    with tf_session.Session('') as session:
      saver_for_restore.restore(session, checkpoint_path)
      # Runs at SavedModel load time: local vars, resources, lookup tables.
      init_op = control_flow_ops.group(
          variables.local_variables_initializer(),
          resources.initialize_resources(resources.shared_resources()),
          tf.tables_initializer())

      # Perform the export
      builder = saved_model_builder.SavedModelBuilder(export_dir)
      builder.add_meta_graph_and_variables(
          session, [tag_constants.SERVING],
          signature_def_map=signature_def_map,
          assets_collection=ops.get_collection(
              ops.GraphKeys.ASSET_FILEPATHS),
          legacy_init_op=init_op)
      builder.save(False)  # False => write binary (not text) proto

      # Add the extra assets
      if assets_extra:
        assets_extra_path = os.path.join(compat.as_bytes(export_dir),
                                         compat.as_bytes('assets.extra'))
        for dest_relative, source in assets_extra.items():
          dest_absolute = os.path.join(compat.as_bytes(assets_extra_path),
                                       compat.as_bytes(dest_relative))
          dest_path = os.path.dirname(dest_absolute)
          file_io.recursive_create_dir(dest_path)
          file_io.copy(source, dest_absolute)

  # only keep the last 3 models
  saved_model_export_utils.garbage_collect_exports(
      export_dir_base,
      exports_to_keep=3)

  # save the last model to the model folder.
  # export_dir_base = A/B/intermediate_models/
  if keep_target:
    final_dir = os.path.join(args.job_dir, 'evaluation_model')
  else:
    final_dir = os.path.join(args.job_dir, 'model')
  # Replace any previous final model wholesale before copying the new one.
  if file_io.is_directory(final_dir):
    file_io.delete_recursively(final_dir)
  file_io.recursive_create_dir(final_dir)
  recursive_copy(export_dir, final_dir)

  return export_dir
def main():
  """Train, export, or run inference with the dense classifier.

  Behavior is selected by FLAGS.mode:
    * "train": run the training loop until the dataset is exhausted, saving
      checkpoints and summaries, then export a SavedModel.
    * "savedmodel": restore the latest checkpoint and export a SavedModel.
    * "inference": restore the latest checkpoint and score a CSV file.

  NOTE(review): relies on module-level helpers not visible here —
  `inference`, `parse_tfrecords_function`, `parse_csv_function`, `util.*`,
  `model.*` — and on many FLAGS; claims below are limited to what this
  function itself does.
  """

  # Get hyper-parameters
  # Ensure checkpoint/output directories exist before the Saver/FileWriter
  # touch them.
  if os.path.exists(FLAGS.checkpoint_path) == False:
    os.makedirs(FLAGS.checkpoint_path)
  checkpoint_file_path = FLAGS.checkpoint_path + "/checkpoint.ckpt"
  latest_checkpoint_file_path = tf.train.latest_checkpoint(
      FLAGS.checkpoint_path)

  if os.path.exists(FLAGS.output_path) == False:
    os.makedirs(FLAGS.output_path)

  # Step 1: Construct the dataset op
  # epoch_number <= 0 means repeat indefinitely (Dataset.repeat(-1)).
  epoch_number = FLAGS.epoch_number
  if epoch_number <= 0:
    epoch_number = -1
  train_buffer_size = FLAGS.train_batch_size * 3
  # NOTE(review): validation buffer is sized from train_batch_size, not
  # validation_batch_size — possibly intentional, worth confirming.
  validation_buffer_size = FLAGS.train_batch_size * 3

  # Training input pipeline; filenames are fed at session time through a
  # placeholder so the same graph works for any file list.
  train_filename_list = [filename for filename in FLAGS.train_files.split(",")]
  train_filename_placeholder = tf.placeholder(tf.string, shape=[None])
  if FLAGS.file_format == "tfrecords":
    train_dataset = tf.data.TFRecordDataset(train_filename_placeholder)
    train_dataset = train_dataset.map(parse_tfrecords_function).repeat(
        epoch_number).batch(FLAGS.train_batch_size).shuffle(
            buffer_size=train_buffer_size)
  elif FLAGS.file_format == "csv":
    # Skip the header or not
    train_dataset = tf.data.TextLineDataset(train_filename_placeholder)
    train_dataset = train_dataset.map(parse_csv_function).repeat(
        epoch_number).batch(FLAGS.train_batch_size).shuffle(
            buffer_size=train_buffer_size)
  train_dataset_iterator = train_dataset.make_initializable_iterator()
  train_features_op, train_label_op = train_dataset_iterator.get_next()

  # Validation input pipeline (same structure as training).
  validation_filename_list = [
      filename for filename in FLAGS.validation_files.split(",")
  ]
  validation_filename_placeholder = tf.placeholder(tf.string, shape=[None])
  if FLAGS.file_format == "tfrecords":
    validation_dataset = tf.data.TFRecordDataset(
        validation_filename_placeholder)
    validation_dataset = validation_dataset.map(
        parse_tfrecords_function).repeat(epoch_number).batch(
            FLAGS.validation_batch_size).shuffle(
                buffer_size=validation_buffer_size)
  elif FLAGS.file_format == "csv":
    validation_dataset = tf.data.TextLineDataset(
        validation_filename_placeholder)
    validation_dataset = validation_dataset.map(parse_csv_function).repeat(
        epoch_number).batch(FLAGS.validation_batch_size).shuffle(
            buffer_size=validation_buffer_size)
  validation_dataset_iterator = validation_dataset.make_initializable_iterator(
  )
  validation_features_op, validation_label_op = validation_dataset_iterator.get_next(
  )

  # Step 2: Define the model
  input_units = FLAGS.feature_size
  output_units = FLAGS.label_size

  # Training logits (is_train=True per the call pattern used below).
  logits = inference(train_features_op, input_units, output_units, True)

  if FLAGS.loss == "sparse_cross_entropy":
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=train_label_op)
    loss = tf.reduce_mean(cross_entropy, name="loss")
  elif FLAGS.loss == "cross_entropy":
    # NOTE(review): tf.nn.cross_entropy_with_logits does not appear in the
    # public TF API (softmax_cross_entropy_with_logits is the usual name);
    # this branch likely raises AttributeError if selected — confirm.
    cross_entropy = tf.nn.cross_entropy_with_logits(
        logits=logits, labels=train_label_op)
    loss = tf.reduce_mean(cross_entropy, name="loss")
  elif FLAGS.loss == "mean_square":
    msl = tf.square(logits - train_label_op, name="msl")
    loss = tf.reduce_mean(msl, name="loss")

  global_step = tf.Variable(0, name="global_step", trainable=False)
  learning_rate = FLAGS.learning_rate
  if FLAGS.enable_lr_decay:
    logging.info(
        "Enable learning rate decay rate: {}".format(FLAGS.lr_decay_rate))
    starter_learning_rate = FLAGS.learning_rate
    # Staircase decay by lr_decay_rate every 100000 global steps.
    learning_rate = tf.train.exponential_decay(
        starter_learning_rate,
        global_step,
        100000,
        FLAGS.lr_decay_rate,
        staircase=True)
  optimizer = util.get_optimizer_by_name(FLAGS.optimizer, learning_rate)
  train_op = optimizer.minimize(loss, global_step=global_step)

  # Need to re-use the Variables for training and validation
  tf.get_variable_scope().reuse_variables()

  # Define accuracy op and auc op for train
  train_accuracy_logits = inference(train_features_op, input_units,
                                    output_units, False)
  train_softmax_op, train_accuracy_op = model.compute_softmax_and_accuracy(
      train_accuracy_logits, train_label_op)
  train_auc_op = model.compute_auc(train_softmax_op, train_label_op,
                                   FLAGS.label_size)

  # Define accuracy op and auc op for validation
  validation_accuracy_logits = inference(validation_features_op, input_units,
                                         output_units, False)
  validation_softmax_op, validation_accuracy_op = model.compute_softmax_and_accuracy(
      validation_accuracy_logits, validation_label_op)
  validation_auc_op = model.compute_auc(validation_softmax_op,
                                        validation_label_op, FLAGS.label_size)

  # Define inference op
  # Placeholder-fed inference path, reusing the trained variables.
  inference_features = tf.placeholder(
      "float", [None, FLAGS.feature_size], name="features")
  inference_logits = inference(inference_features, input_units, output_units,
                               False)
  inference_softmax_op = tf.nn.softmax(
      inference_logits, name="inference_softmax")
  inference_prediction_op = tf.argmax(
      inference_softmax_op, 1, name="inference_prediction")
  # "keys" passthrough lets serving clients correlate requests and responses.
  keys_placeholder = tf.placeholder(tf.int32, shape=[None, 1], name="keys")
  keys_identity = tf.identity(keys_placeholder, name="inference_keys")

  # SavedModel signatures: default (keys+prediction) and a detailed variant
  # that also returns the softmax scores.
  signature_def_map = {
      signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
          signature_def_utils.build_signature_def(
              inputs={
                  "keys": utils.build_tensor_info(keys_placeholder),
                  "features": utils.build_tensor_info(inference_features)
              },
              outputs={
                  "keys": utils.build_tensor_info(keys_identity),
                  "prediction": utils.build_tensor_info(
                      inference_prediction_op),
              },
              # NOTE(review): looks like a typo for
              # "tensorflow/serving/predict" — confirm before relying on
              # method_name-based dispatch.
              method_name="tensorflow/serving/predictss"),
      "serving_detail":
          signature_def_utils.build_signature_def(
              inputs={
                  "keys": utils.build_tensor_info(keys_placeholder),
                  "features": utils.build_tensor_info(inference_features)
              },
              outputs={
                  "keys": utils.build_tensor_info(keys_identity),
                  "prediction": utils.build_tensor_info(
                      inference_prediction_op),
                  "softmax": utils.build_tensor_info(inference_softmax_op),
              },
              # NOTE(review): placeholder-looking method_name — confirm.
              method_name="sdfas")
  }

  # Initialize saver and summary
  saver = tf.train.Saver()
  tf.summary.scalar("loss", loss)
  if FLAGS.scenario == "classification":
    tf.summary.scalar("train_accuracy", train_accuracy_op)
    tf.summary.scalar("train_auc", train_auc_op)
    tf.summary.scalar("validate_accuracy", validation_accuracy_op)
    tf.summary.scalar("validate_auc", validation_auc_op)
  summary_op = tf.summary.merge_all()
  init_op = [
      tf.global_variables_initializer(),
      tf.local_variables_initializer()
  ]

  # Step 3: Create session to run
  with tf.Session() as sess:
    writer = tf.summary.FileWriter(FLAGS.output_path, sess.graph)
    sess.run(init_op)
    # Feed the actual file lists into both iterator initializers.
    sess.run(
        [
            train_dataset_iterator.initializer,
            validation_dataset_iterator.initializer
        ],
        feed_dict={
            train_filename_placeholder: train_filename_list,
            validation_filename_placeholder: validation_filename_list
        })

    if FLAGS.mode == "train":
      if FLAGS.resume_from_checkpoint:
        util.restore_from_checkpoint(sess, saver, latest_checkpoint_file_path)

      try:
        start_time = datetime.datetime.now()

        # Loop until the dataset iterator raises OutOfRangeError (i.e. the
        # configured number of epochs has been consumed).
        while True:
          if FLAGS.enable_benchmark:
            # Benchmark mode: train only, no metrics/checkpoints.
            sess.run(train_op)
          else:
            _, global_step_value = sess.run([train_op, global_step])

            # Step 4: Display training metrics after steps
            if global_step_value % FLAGS.steps_to_validate == 0:
              if FLAGS.scenario == "classification":
                loss_value, train_accuracy_value, train_auc_value, validate_accuracy_value, validate_auc_value, summary_value = sess.run(
                    [
                        loss, train_accuracy_op, train_auc_op,
                        validation_accuracy_op, validation_auc_op, summary_op
                    ])
                end_time = datetime.datetime.now()

                logging.info(
                    "[{}] Step: {}, loss: {}, train_acc: {}, train_auc: {}, valid_acc: {}, valid_auc: {}".
                    format(end_time - start_time, global_step_value,
                           loss_value, train_accuracy_value, train_auc_value,
                           validate_accuracy_value, validate_auc_value))
              elif FLAGS.scenario == "regression":
                loss_value, summary_value = sess.run([loss, summary_op])
                end_time = datetime.datetime.now()
                logging.info("[{}] Step: {}, loss: {}".format(
                    end_time - start_time, global_step_value, loss_value))

              writer.add_summary(summary_value, global_step_value)
              saver.save(
                  sess, checkpoint_file_path, global_step=global_step_value)
              start_time = end_time

      except tf.errors.OutOfRangeError:
        if FLAGS.enable_benchmark:
          logging.info("Finish training for benchmark")
        else:
          # Step 5: Export the model after training
          util.save_model(
              FLAGS.model_path,
              FLAGS.model_version,
              sess,
              signature_def_map,
              is_save_graph=False)

    elif FLAGS.mode == "savedmodel":
      # Export-only mode: requires an existing checkpoint.
      if util.restore_from_checkpoint(sess, saver,
                                      latest_checkpoint_file_path) == False:
        logging.error("No checkpoint for exporting model, exit now")
        return

      util.save_model(
          FLAGS.model_path,
          FLAGS.model_version,
          sess,
          signature_def_map,
          is_save_graph=False)

    elif FLAGS.mode == "inference":
      if util.restore_from_checkpoint(sess, saver,
                                      latest_checkpoint_file_path) == False:
        logging.error("No checkpoint for inference, exit now")
        return

      # Load test data
      inference_result_file_name = FLAGS.inference_result_file
      inference_test_file_name = FLAGS.inference_data_file
      inference_data = np.genfromtxt(inference_test_file_name, delimiter=",")
      # NOTE(review): hard-coded column split — first 9 columns are features,
      # column 9 is the label; assumes a 10-column CSV. Confirm against the
      # actual inference data schema.
      inference_data_features = inference_data[:, 0:9]
      inference_data_labels = inference_data[:, 9]

      # Run inference
      start_time = datetime.datetime.now()
      prediction, prediction_softmax = sess.run(
          [inference_prediction_op, inference_softmax_op],
          feed_dict={inference_features: inference_data_features})
      end_time = datetime.datetime.now()

      # Compute accuracy
      label_number = len(inference_data_labels)
      correct_label_number = 0
      for i in range(label_number):
        if inference_data_labels[i] == prediction[i]:
          correct_label_number += 1
      accuracy = float(correct_label_number) / label_number

      # Compute auc
      # Uses the positive-class (column 1) softmax score as the ranking score.
      y_true = np.array(inference_data_labels)
      y_score = prediction_softmax[:, 1]
      fpr, tpr, thresholds = metrics.roc_curve(y_true, y_score, pos_label=1)
      auc = metrics.auc(fpr, tpr)
      logging.info("[{}] Inference accuracy: {}, auc: {}".format(
          end_time - start_time, accuracy, auc))

      # Save result into the file
      np.savetxt(inference_result_file_name, prediction_softmax, delimiter=",")
      logging.info(
          "Save result to file: {}".format(inference_result_file_name))
signature_constants, signature_def_utils, tag_constants, utils) from tensorflow.python.util import compat model_path = "model" model_version = 1 keys_placeholder = tf.placeholder(tf.int32, shape=[None, 1], name="keys") keys_identity = tf.identity(keys_placeholder, name="inference_keys") sess = tf.Session() sess.run(tf.global_variables_initializer()) model_signature = signature_def_utils.build_signature_def( inputs={ "keys": utils.build_tensor_info(keys_placeholder), }, outputs={ "keys": utils.build_tensor_info(keys_identity), }, method_name=signature_constants.PREDICT_METHOD_NAME) export_path = os.path.join( compat.as_bytes(model_path), compat.as_bytes(str(model_version))) builder = saved_model_builder.SavedModelBuilder(export_path) builder.add_meta_graph_and_variables( sess, [tag_constants.SERVING], clear_devices=True, signature_def_map={ signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: model_signature, })