def test_classify_scores_must_be_float(self):
  """A string `scores` tensor is rejected by ClassificationOutput."""
  scores = constant_op.constant("1", dtype=dtypes.string,
                                name="output-tensor-1")
  # assertRaisesRegex: the assertRaisesRegexp alias was deprecated in
  # Python 3.2 and removed in Python 3.12.
  with self.assertRaisesRegex(
      ValueError, "Classification scores must be a float32 Tensor;"):
    export_output_lib.ClassificationOutput(scores=scores)
def _model_fn_with_x_y(features, labels, mode):
  """Test model_fn keyed off `features['x']`/`features['y']`.

  PREDICT mode exports a classification head; TRAIN/EVAL modes build a
  multiply op, a mean metric, and extra variables. The specific variable
  names (`name_collision`, `later_var`) and the mode-dependent op-name
  prefix look deliberate for the surrounding tests — do not rename.
  """
  _ = labels
  variables.Variable(1., name='weight')
  scores = constant_op.constant([3.])
  classes = constant_op.constant(['wumpus'])
  if mode == model_fn_lib.ModeKeys.PREDICT:
    # PREDICT creates a variable whose name collides with the one created
    # in the TRAIN/EVAL branch below.
    variables.Variable(36., name='name_collision')
    return model_fn_lib.EstimatorSpec(
        mode, predictions=constant_op.constant(10.),
        export_outputs={
            'test': export_output.ClassificationOutput(scores, classes)})
  else:
    # EVAL op names get an 'eval_' prefix so they are distinguishable from
    # the TRAIN graph's ops.
    prefix = 'eval_' if mode == model_fn_lib.ModeKeys.EVAL else ''
    multiplied = math_ops.multiply(
        features['x'], features['y'], name='{}multiplied'.format(prefix))
    metrics = {'mean': metrics_lib.mean(features['x'] - features['y'],
                                        name='{}mean'.format(prefix))}
    variables.Variable(1., name='later_var')
    variables.Variable(3., name='name_collision')
    return model_fn_lib.EstimatorSpec(
        mode, predictions=multiplied, loss=constant_op.constant(1.),
        train_op=state_ops.assign_add(training.get_global_step(), 1),
        eval_metric_ops=metrics)
def test_classify_classes_must_be_strings(self):
  """A float `classes` tensor is rejected by ClassificationOutput."""
  classes = constant_op.constant(1.0, dtype=dtypes.float32,
                                 name="output-tensor-1")
  # assertRaisesRegex: the assertRaisesRegexp alias was deprecated in
  # Python 3.2 and removed in Python 3.12.
  with self.assertRaisesRegex(
      ValueError, "Classification classes must be a string Tensor;"):
    export_output_lib.ClassificationOutput(classes=classes)
def testAllArgumentsSet(self):
  """Tests that no errors are raised when all arguments are set."""
  with ops.Graph().as_default(), self.cached_session():
    loss = constant_op.constant(1.)
    classes = constant_op.constant('hello')

    mean_metric = metrics.Mean()
    mean_metric.update_state(loss)

    eval_metric_ops = {
        'loss': (control_flow_ops.no_op(), loss),
        'mean': mean_metric,
    }
    export_outputs = {
        'head_name': export_output.ClassificationOutput(classes=classes)
    }

    # Constructing the spec with every argument populated must not raise.
    model_fn.EstimatorSpec(
        mode=ModeKeys.TRAIN,
        predictions={'loss': loss},
        loss=loss,
        train_op=control_flow_ops.no_op(),
        eval_metric_ops=eval_metric_ops,
        export_outputs=export_outputs,
        training_chief_hooks=[_FakeHook()],
        training_hooks=[_FakeHook()],
        scaffold=monitored_session.Scaffold(),
        evaluation_hooks=[_FakeHook()],
        prediction_hooks=[_FakeHook()])
def test_build_all_signature_defs_without_receiver_alternatives(self):
  """build_all_signature_defs yields one SignatureDef per export head."""
  receiver = array_ops.placeholder(dtypes.string)
  regression_value = constant_op.constant([1.])
  class_tensor = constant_op.constant(["2"])
  predict_tensor = constant_op.constant(["3"])

  heads = {}
  heads[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = (
      export_output.RegressionOutput(value=regression_value))
  heads["head-2"] = export_output.ClassificationOutput(classes=class_tensor)
  heads["head-3"] = export_output.PredictOutput(
      outputs={"some_output_3": predict_tensor})

  actual = export.build_all_signature_defs(receiver, heads)

  expected = {
      "serving_default":
          signature_def_utils.regression_signature_def(
              receiver, regression_value),
      "head-2":
          signature_def_utils.classification_signature_def(
              receiver, class_tensor, None),
      "head-3":
          signature_def_utils.predict_signature_def(
              {"input": receiver}, {"some_output_3": predict_tensor}),
  }
  self.assertDictEqual(expected, actual)
def test_build_standardized_signature_def_classify_scores_only(self):
  """Tests classification without classes tensor."""
  input_tensors = {
      "input-1":
          array_ops.placeholder(dtypes.string, 1, name="input-tensor-1")
  }
  scores = array_ops.placeholder(dtypes.float32, 1,
                                 name="output-tensor-scores")
  # Local renamed from `export_output` to avoid shadowing the module alias
  # used elsewhere in this file.
  output = export_output_lib.ClassificationOutput(scores=scores)
  actual = output.as_signature_def(input_tensors)

  # Build the expected proto field by field.
  shape = tensor_shape_pb2.TensorShapeProto(
      dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
  dtype_float = types_pb2.DataType.Value("DT_FLOAT")
  dtype_string = types_pb2.DataType.Value("DT_STRING")

  expected = meta_graph_pb2.SignatureDef()
  expected.inputs[signature_constants.CLASSIFY_INPUTS].CopyFrom(
      meta_graph_pb2.TensorInfo(
          name="input-tensor-1:0", dtype=dtype_string, tensor_shape=shape))
  expected.outputs[signature_constants.CLASSIFY_OUTPUT_SCORES].CopyFrom(
      meta_graph_pb2.TensorInfo(
          name="output-tensor-scores:0", dtype=dtype_float,
          tensor_shape=shape))
  expected.method_name = signature_constants.CLASSIFY_METHOD_NAME

  self.assertEqual(actual, expected)
def model_fn(features, labels, mode):
  """Minimal model_fn with constant loss/predictions and one export head."""
  del features, labels  # unused
  export_heads = {
      'test': export_output.ClassificationOutput(
          constant_op.constant([[32.]]))
  }
  return model_fn_lib.EstimatorSpec(
      mode,
      loss=constant_op.constant([103]),
      train_op=state_ops.assign_add(training.get_global_step(), 1),
      predictions=constant_op.constant([502]),
      export_outputs=export_heads)
def testExportOutputsNoDict(self):
  """A bare ExportOutput (not a dict) is rejected with TypeError."""
  with tf.Graph().as_default(), self.cached_session():
    predictions = {'loss': tf.constant(1.)}
    classes = tf.constant('hello')
    # assertRaisesRegex: the assertRaisesRegexp alias was deprecated in
    # Python 3.2 and removed in Python 3.12.
    with self.assertRaisesRegex(TypeError, 'export_outputs must be dict'):
      model_fn.EstimatorSpec(
          mode=ModeKeys.PREDICT,
          predictions=predictions,
          export_outputs=export_output.ClassificationOutput(classes=classes))
def test_classify_classes_must_be_strings(self):
  """A float classes tensor is rejected with a descriptive message."""
  bad_classes = array_ops.placeholder(dtypes.float32, 1,
                                      name="output-tensor-1")
  with self.assertRaises(ValueError) as raised:
    export_output_lib.ClassificationOutput(classes=bad_classes)
  expected_message = (
      'Classification classes must be a string Tensor; got '
      'Tensor("output-tensor-1:0", shape=(1,), dtype=float32)')
  self.assertEqual(expected_message, str(raised.exception))
def test_classify_scores_must_be_float(self):
  """A string scores tensor is rejected with a descriptive message."""
  bad_scores = array_ops.placeholder(dtypes.string, 1,
                                     name="output-tensor-1")
  with self.assertRaises(ValueError) as raised:
    export_output_lib.ClassificationOutput(scores=bad_scores)
  expected_message = (
      'Classification scores must be a float32 Tensor; got '
      'Tensor("output-tensor-1:0", shape=(1,), dtype=string)')
  self.assertEqual(expected_message, str(raised.exception))
def test_build_all_signature_defs_with_single_alternatives(self):
  """Single-Tensor receiver alternatives get wrapped as {"input": ...}."""
  receiver_tensor = array_ops.placeholder(dtypes.string)
  alt_dense = array_ops.placeholder(dtypes.int64)
  alt_sparse = array_ops.sparse_placeholder(dtypes.float32)
  # Note we are passing single Tensors as values of
  # receiver_tensors_alternatives, where normally that is a dict.
  # In this case a dict will be created using the default receiver tensor
  # name "input".
  alternatives = {"other1": alt_dense, "other2": alt_sparse}

  regression_value = constant_op.constant([1.])
  class_tensor = constant_op.constant(["2"])
  predict_tensor = constant_op.constant(["3"])
  heads = {
      signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
          export_output.RegressionOutput(value=regression_value),
      "head-2":
          export_output.ClassificationOutput(classes=class_tensor),
      "head-3":
          export_output.PredictOutput(
              outputs={"some_output_3": predict_tensor}),
  }

  actual = export.build_all_signature_defs(
      receiver_tensor, heads, alternatives)

  expected = {
      "serving_default":
          signature_def_utils.regression_signature_def(
              receiver_tensor, regression_value),
      "head-2":
          signature_def_utils.classification_signature_def(
              receiver_tensor, class_tensor, None),
      "head-3":
          signature_def_utils.predict_signature_def(
              {"input": receiver_tensor}, {"some_output_3": predict_tensor}),
      "other1:head-3":
          signature_def_utils.predict_signature_def(
              {"input": alt_dense}, {"some_output_3": predict_tensor}),
      "other2:head-3":
          signature_def_utils.predict_signature_def(
              {"input": alt_sparse}, {"some_output_3": predict_tensor}),
      # Note that the alternatives 'other:serving_default' and 'other:head-2'
      # are invalid, because regession and classification signatures must take
      # a single string input. Here we verify that these invalid signatures
      # are not included in the export.
  }
  self.assertDictEqual(expected, actual)
def _predict_spec(tower_specs, aggregation_device):
  """Populate replicated EstimatorSpec for `GraphKeys.PREDICT`.

  Merges per-tower predictions and export_outputs into a single
  EstimatorSpec, placing the aggregation ops on `aggregation_device`.
  The first tower's spec is used as the template for all other fields.
  """
  estimator_spec = _asdict(tower_specs[0])
  estimator_spec['mode'] = model_fn_lib.ModeKeys.PREDICT
  with ops_lib.device(aggregation_device):
    # Merge per-tower prediction dicts key-by-key.
    estimator_spec['predictions'] = _concat_tensor_dicts(
        *[tower_spec.predictions for tower_spec in tower_specs])
    # export_outputs_dict maps head name -> list of that head's
    # ExportOutput, one per tower; each head type merges differently.
    export_outputs_dict = _dict_concat(
        *[tower_spec.export_outputs for tower_spec in tower_specs])
    export_outputs = {}
    for name, export_output_list in six.iteritems(export_outputs_dict):
      if isinstance(export_output_list[0], export_output_lib.PredictOutput):
        export_outputs[name] = export_output_lib.PredictOutput(
            outputs=_concat_tensor_dicts(*[
                export_output.outputs for export_output in export_output_list
            ]))
      elif isinstance(export_output_list[0],
                      export_output_lib.RegressionOutput):
        export_outputs[name] = export_output_lib.RegressionOutput(
            value=array_ops.concat([
                export_output.value for export_output in export_output_list
            ], axis=0))
      elif isinstance(export_output_list[0],
                      export_output_lib.ClassificationOutput):
        # scores/classes may individually be None; only merge what is set.
        scores = None
        if export_output_list[0].scores is not None:
          scores = array_ops.concat([
              export_output.scores for export_output in export_output_list
          ], axis=0)
        classes = None
        if export_output_list[0].classes is not None:
          # NOTE(review): classes uses stack (adds a leading tower axis)
          # while scores uses concat (extends the batch axis) — confirm
          # this asymmetry is intentional rather than a merge bug.
          classes = array_ops.stack([
              export_output.classes for export_output in export_output_list
          ], axis=0)
        export_outputs[name] = export_output_lib.ClassificationOutput(
            scores=scores, classes=classes)
  estimator_spec['export_outputs'] = export_outputs
  return model_fn_lib.EstimatorSpec(**estimator_spec)
def classification_output(scores, n_classes, label_vocabulary=None):
  """Build a ClassificationOutput pairing `scores` with per-row class names."""
  batch_size = array_ops.shape(scores)[0]
  # Use the vocabulary when given, otherwise stringified class indices.
  class_list = (label_vocabulary if label_vocabulary
                else string_ops.as_string(math_ops.range(n_classes)))
  # Broadcast the single class list to one row per batch example.
  tiled_classes = array_ops.tile(
      input=array_ops.expand_dims(input=class_list, axis=0),
      multiples=[batch_size, 1])
  return export_output.ClassificationOutput(
      scores=scores,
      # `ClassificationOutput` requires string classes.
      classes=tiled_classes)
def _model_fn_for_export_tests(features, labels, mode):
  """Model function producing a classification export head for export tests."""
  del features, labels  # unused
  variables.Variable(1., name='weight')
  scores = constant_op.constant([3.])
  classes = constant_op.constant(['wumpus'])
  bump_step = state_ops.assign_add(training.get_global_step(), 1)
  # Tie the (constant) train op to the global-step increment.
  with ops.control_dependencies([bump_step]):
    train_op = constant_op.constant(2.)
  return model_fn_lib.EstimatorSpec(
      mode,
      predictions=constant_op.constant(10.),
      loss=constant_op.constant(1.),
      train_op=train_op,
      export_outputs={
          'test': export_output.ClassificationOutput(scores, classes)})
def model_fn(features, mode, params):
    """Network architecture and post-processing (TextCNN model_fn).

    Builds a TextCNNGraph and returns the EstimatorSpec appropriate for
    the mode: a classification export head for PREDICT, metrics for EVAL,
    and an Adam train op for TRAIN.
    """
    graph = TextCNNGraph(params, features)
    graph.initial_params()
    graph.build_epoch_increment()
    graph.build_forward()
    predict_id = graph.predict
    y = graph.y
    loss = graph.loss
    num_classes = graph.class_num
    # NOTE(review): debug prints left in — consider removing or switching
    # to tf.logging.
    print(graph.i2l.shape)
    print(graph.probs.shape)
    if mode == tf.estimator.ModeKeys.PREDICT:
        # Repeat the class-label row once per batch example so serving
        # responses pair each probability row with its labels.
        batch_labels = tf.tile(tf.reshape(graph.classes, (1, -1)),
                               [tf.shape(graph.x)[0], 1])
        predictions = {
            "labels": batch_labels,
            "probs": graph.probs
        }
        classification_output = export_output.ClassificationOutput(
            scores=graph.probs, classes=batch_labels)
        return tf.estimator.EstimatorSpec(mode,
                                          predictions=predictions,
                                          export_outputs={
                                              "classification":
                                                  classification_output
                                          })
    else:
        # Streaming metrics shared by EVAL and the TRAIN summaries below.
        metrics = {
            "precision": precision(y, predict_id, num_classes),
            "recall": recall(y, predict_id, num_classes),
            "f1": f1(y, predict_id, num_classes)
        }
        for metric_name, op in metrics.items():
            # op is a (value, update_op) pair; summarize the update op.
            tf.summary.scalar(metric_name, op[1])
        graph.build_summary()
        if mode == tf.estimator.ModeKeys.EVAL:
            return tf.estimator.EstimatorSpec(mode, loss=loss,
                                              eval_metric_ops=metrics)
        elif mode == tf.estimator.ModeKeys.TRAIN:
            graph.build_optimize()
            train_op = tf.train.AdamOptimizer().minimize(
                loss, global_step=tf.train.get_or_create_global_step())
            return tf.estimator.EstimatorSpec(mode, loss=loss,
                                              train_op=train_op)
def testExportOutputsMultiheadWithDefault(self):
  """Multiple heads pass validation when a default serving key is present."""
  with ops.Graph().as_default(), self.cached_session():
    predictions = {'loss': constant_op.constant(1.)}
    regression_head = export_output.RegressionOutput(
        value=constant_op.constant([1.]))
    classification_head = export_output.ClassificationOutput(
        classes=constant_op.constant(['2']))
    predict_head = export_output.PredictOutput(
        outputs={'some_output_3': constant_op.constant(['3'])})
    export_outputs = {
        signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
            regression_head,
        'head-2': classification_head,
        'head-3': predict_head,
    }
    spec = model_fn.EstimatorSpec(
        mode=model_fn.ModeKeys.PREDICT,
        predictions=predictions,
        export_outputs=export_outputs)
    # The dict must be carried through unchanged.
    self.assertEqual(export_outputs, spec.export_outputs)
def testExportOutputsMultiheadMissingDefault(self):
  """Multiple heads without a default serving signature raise ValueError."""
  with ops.Graph().as_default(), self.cached_session():
    predictions = {'loss': constant_op.constant(1.)}
    output_1 = constant_op.constant([1.])
    output_2 = constant_op.constant(['2'])
    output_3 = constant_op.constant(['3'])
    export_outputs = {
        'head-1': export_output.RegressionOutput(value=output_1),
        'head-2': export_output.ClassificationOutput(classes=output_2),
        'head-3': export_output.PredictOutput(outputs={
            'some_output_3': output_3
        })}
    # assertRaisesRegex: the assertRaisesRegexp alias was deprecated in
    # Python 3.2 and removed in Python 3.12.
    with self.assertRaisesRegex(
        ValueError,
        'Multiple export_outputs were provided, but none of them is '
        'specified as the default. Do this by naming one of them with '
        'signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY.'):
      model_fn.EstimatorSpec(
          mode=model_fn.ModeKeys.PREDICT,
          predictions=predictions,
          export_outputs=export_outputs)
def testExportOutputsMultiheadMissingDefault(self):
  """Multiple heads without a default serving signature raise ValueError."""
  with tf.Graph().as_default(), self.cached_session():
    predictions = {'loss': tf.constant(1.)}
    output_1 = tf.constant([1.])
    output_2 = tf.constant(['2'])
    output_3 = tf.constant(['3'])
    export_outputs = {
        'head-1': export_output.RegressionOutput(value=output_1),
        'head-2': export_output.ClassificationOutput(classes=output_2),
        'head-3': export_output.PredictOutput(
            outputs={'some_output_3': output_3})
    }
    # assertRaisesRegex: the assertRaisesRegexp alias was deprecated in
    # Python 3.2 and removed in Python 3.12.
    with self.assertRaisesRegex(
        ValueError, 'Multiple [`]*export_outputs[`]* were provided'):
      model_fn.EstimatorSpec(mode=ModeKeys.PREDICT,
                             predictions=predictions,
                             export_outputs=export_outputs)
def classification_output(scores, n_classes, label_vocabulary=None):
  """Wrap `scores` in a `ClassificationOutput` export head.

  Args:
    scores: float Tensor of per-class scores.
    n_classes: number of classes, forwarded to `all_classes`.
    label_vocabulary: optional class-name strings; when absent,
      `all_classes` presumably falls back to stringified class indices —
      confirm against its definition (not visible in this chunk).

  Returns:
    An `export_output.ClassificationOutput` with string class names.
  """
  return export_output.ClassificationOutput(
      scores=scores,
      # `ClassificationOutput` requires string classes.
      classes=all_classes(scores, n_classes, label_vocabulary))
def test_classify_requires_classes_or_scores(self):
  """Constructing with neither scores nor classes raises ValueError."""
  with self.assertRaises(ValueError) as raised:
    export_output_lib.ClassificationOutput()
  self.assertEqual("At least one of scores and classes must be set.",
                   str(raised.exception))
def host_call(predictions):
  # Converts the predictions tensor to string class labels and registers
  # them as a classification export head.
  # NOTE(review): mutates `export_outputs` from the enclosing scope rather
  # than returning a value — callers rely on that side effect; confirm the
  # closure variable is defined before this runs.
  classes = tf.as_string(predictions, name='classes')
  classification_output = export_output_lib.ClassificationOutput(
      classes=classes)
  export_outputs['classification'] = classification_output
def test_classify_requires_classes_or_scores(self):
  """Constructing with neither scores nor classes raises ValueError."""
  # assertRaisesRegex: the assertRaisesRegexp alias was deprecated in
  # Python 3.2 and removed in Python 3.12.
  with self.assertRaisesRegex(
      ValueError, "At least one of scores and classes must be set."):
    export_output_lib.ClassificationOutput()