def _get_eval_estimator_spec(gan_model,
                             gan_loss,
                             get_eval_metric_ops_fn=None,
                             name=None):
  """Return an EstimatorSpec for the eval case."""
  scalar_loss = gan_loss.generator_loss + gan_loss.discriminator_loss
  with ops.name_scope(None, 'metrics',
                      [gan_loss.generator_loss, gan_loss.discriminator_loss]):

    def _summary_key(head_name, val):
      return '%s/%s' % (val, head_name) if head_name else val

    eval_metric_ops = {
        _summary_key(name, 'generator_loss'):
            metrics_lib.mean(gan_loss.generator_loss),
        _summary_key(name, 'discriminator_loss'):
            metrics_lib.mean(gan_loss.discriminator_loss)
    }
    if get_eval_metric_ops_fn is not None:
      custom_eval_metric_ops = get_eval_metric_ops_fn(gan_model)
      if not isinstance(custom_eval_metric_ops, dict):
        raise TypeError('get_eval_metric_ops_fn must return a dict, '
                        'received: {}'.format(custom_eval_metric_ops))
      eval_metric_ops.update(custom_eval_metric_ops)
  return model_fn_lib.EstimatorSpec(
      mode=model_fn_lib.ModeKeys.EVAL,
      predictions=gan_model.generated_data,
      loss=scalar_loss,
      eval_metric_ops=eval_metric_ops)
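For context, here is a minimal sketch (not from the snippet above) of the kind of `get_eval_metric_ops_fn` this helper accepts: it must return a dict mapping metric names to (value_op, update_op) pairs, and the `real_data`/`generated_data` attribute names assume the standard TFGAN `GANModel` tuple.

import tensorflow as tf

def my_get_eval_metric_ops_fn(gan_model):
  # Each entry is a (value_op, update_op) pair, as returned by tf.metrics.*.
  return {
      'real_data_mean': tf.metrics.mean(gan_model.real_data),
      'generated_data_mean': tf.metrics.mean(gan_model.generated_data),
  }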
Example #2
def _convert_keras_metrics_to_estimator(model):
  """Convert metrics from a Keras model to ops used by the Estimator framework.

  Args:
    model: A `tf.keras.Model` object.

  Returns:
    Dictionary mapping metric names to tuples of (value, update) ops. May return
    `None` if the model does not contain any metrics.
  """
  if not getattr(model, 'metrics', None):
    return None

  # TODO(psv/fchollet): support stateful metrics
  eval_metric_ops = {}
  # When each metric maps to an output
  if isinstance(model.metrics, dict):
    for i, output_name in enumerate(model.metrics.keys()):
      metric_name = model.metrics[output_name]
      if callable(metric_name):
        metric_name = metric_name.__name__
      # When some outputs use the same metric
      if list(model.metrics.values()).count(metric_name) > 1:
        metric_name += '_' + output_name
      eval_metric_ops[metric_name] = metrics_module.mean(
          model.metrics_tensors[i - len(model.metrics)])
  else:
    for i, metric_name in enumerate(model.metrics):
      if callable(metric_name):
        metric_name = metric_name.__name__
      eval_metric_ops[metric_name] = metrics_module.mean(
          model.metrics_tensors[i])
  return eval_metric_ops
Example #3
  def model_fn(features, labels, mode):
    """model_fn for keras Estimator."""
    model = _clone_and_build_model(mode, keras_model, custom_objects, features,
                                   labels)
    # Get inputs to EstimatorSpec
    predictions = dict(zip(model.output_names, model.outputs))

    loss = None
    train_op = None
    eval_metric_ops = None

    # Set loss and metric only during train and evaluate.
    if mode is not model_fn_lib.ModeKeys.PREDICT:
      if mode is model_fn_lib.ModeKeys.TRAIN:
        model._make_train_function()  # pylint: disable=protected-access
      else:
        model._make_test_function()  # pylint: disable=protected-access
      loss = model.total_loss

      if model.metrics:
        # TODO(fchollet): support stateful metrics
        eval_metric_ops = {}
        # When each metric maps to an output
        if isinstance(model.metrics, dict):
          for i, output_name in enumerate(model.metrics.keys()):
            metric_name = model.metrics[output_name]
            if callable(metric_name):
              metric_name = metric_name.__name__
            # When some outputs use the same metric
            if list(model.metrics.values()).count(metric_name) > 1:
              metric_name += '_' + output_name
            eval_metric_ops[metric_name] = metrics_module.mean(
                model.metrics_tensors[i - len(model.metrics)])
        else:
          for i, metric_name in enumerate(model.metrics):
            if callable(metric_name):
              metric_name = metric_name.__name__
            eval_metric_ops[metric_name] = metrics_module.mean(
                model.metrics_tensors[i])

    # Set train_op only during train.
    if mode is model_fn_lib.ModeKeys.TRAIN:
      train_op = model.train_function.updates_op

    if not model._is_graph_network:
      # Reset model state to original state,
      # to avoid `model_fn` being destructive for the initial model argument.
      _in_place_subclassed_model_state_restoration(keras_model)
    return model_fn_lib.EstimatorSpec(
        mode=mode,
        predictions=predictions,
        loss=loss,
        train_op=train_op,
        eval_metric_ops=eval_metric_ops,
        export_outputs={
            _DEFAULT_SERVING_KEY:
            export_lib.export_output.PredictOutput(predictions)
        })
Example #4
 def _eval_metric_ops(
     self, labels, probabilities, weights, unreduced_loss,
     regularization_loss):
   """Returns a dict of metrics for eval_metric_ops."""
   with ops.name_scope(
       None, 'metrics',
       [labels, probabilities, weights, unreduced_loss, regularization_loss]):
     keys = metric_keys.MetricKeys
     metric_ops = {
         # Estimator already adds a metric for loss.
         head_lib._summary_key(self._name, keys.LOSS_MEAN):  # pylint:disable=protected-access
             metrics_lib.mean(
                 values=unreduced_loss,
                 weights=weights,
                 name=keys.LOSS_MEAN),
         head_lib._summary_key(self._name, keys.AUC):  # pylint:disable=protected-access
             metrics_lib.auc(labels=labels, predictions=probabilities,
                             weights=weights, name=keys.AUC),
         head_lib._summary_key(self._name, keys.AUC_PR):  # pylint:disable=protected-access
             metrics_lib.auc(labels=labels, predictions=probabilities,
                             weights=weights, curve='PR',
                             name=keys.AUC_PR),
     }
     if regularization_loss is not None:
       loss_regularization_key = head_lib._summary_key(  # pylint:disable=protected-access
           self._name, keys.LOSS_REGULARIZATION)
       metric_ops[loss_regularization_key] = (
           metrics_lib.mean(
               values=regularization_loss,
               name=keys.LOSS_REGULARIZATION))
     for threshold in self._thresholds:
       accuracy_key = keys.ACCURACY_AT_THRESHOLD % threshold
       metric_ops[head_lib._summary_key(self._name, accuracy_key)] = (  # pylint:disable=protected-access
           head_lib._accuracy_at_threshold(  # pylint:disable=protected-access
               labels=labels,
               predictions=probabilities,
               weights=weights,
               threshold=threshold,
               name=accuracy_key))
       # Precision for positive examples.
       precision_key = keys.PRECISION_AT_THRESHOLD % threshold
       metric_ops[head_lib._summary_key(self._name, precision_key)] = (  # pylint:disable=protected-access
           head_lib._precision_at_threshold(  # pylint:disable=protected-access
               labels=labels,
               predictions=probabilities,
               weights=weights,
               threshold=threshold,
               name=precision_key))
       # Recall for positive examples.
       recall_key = keys.RECALL_AT_THRESHOLD % threshold
       metric_ops[head_lib._summary_key(self._name, recall_key)] = (  # pylint:disable=protected-access
           head_lib._recall_at_threshold(  # pylint:disable=protected-access
               labels=labels,
               predictions=probabilities,
               weights=weights,
               threshold=threshold,
               name=recall_key))
   return metric_ops
Example #5
def _convert_keras_metrics_to_estimator(model):
  """Convert metrics from a Keras model to ops used by the Estimator framework.

  Args:
    model: A `tf.keras.Model` object.

  Returns:
    Dictionary mapping metric names to tuples of (value, update) ops. May return
    `None` if the model does not contain any metrics.
  """
  if not getattr(model, 'metrics', None):
    return None

  eval_metric_ops = {}

  def get_metric_name(metric):
    if isinstance(metric, metrics.Metric):
      return metric.name
    if callable(metric):
      return metric.__name__
    assert isinstance(metric, six.string_types)
    return metric

  # When each metric maps to an output
  if isinstance(model.metrics, dict):
    for i, output_name in enumerate(model.metrics.keys()):
      # `metric` is the user given metric value in `compile`. This can be
      # metric name (`acc`), metric function (binary_accuracy) or a metric
      # object (BinaryAccuracy()).
      metric = model.metrics[output_name]
      metric_name = get_metric_name(metric)
      # When some outputs use the same metric
      if list(model.metrics.values()).count(metric_name) > 1:
        metric_name += '_' + output_name
      if isinstance(metric, metrics.Metric):
        eval_metric_ops[metric_name] = metric
      else:
        eval_metric_ops[metric_name] = metrics_module.mean(
            model.metrics_tensors[i - len(model.metrics)])
  else:
    for i, metric in enumerate(model.metrics):
      metric_name = get_metric_name(metric)
      if isinstance(metric, metrics.Metric):
        eval_metric_ops[metric_name] = metric
      else:
        eval_metric_ops[metric_name] = metrics_module.mean(
            model.metrics_tensors[i])
  return eval_metric_ops
Example #6
  def _merge_eval(self, all_estimator_spec):
    """Merges list of `EstimatorSpec` for eval.

    Args:
      all_estimator_spec: list of `EstimatorSpec` for the individual heads.

    Returns:
      `EstimatorSpec` that merges all heads for EVAL.
    """
    predictions = {}
    metrics = {}
    losses = []
    with ops.name_scope('merge_eval'):
      for head, spec in zip(self._heads, all_estimator_spec):
        losses.append(spec.loss)
        head_name = head.name
        # Loss metric is not added by default.
        loss_name = head_lib._summary_key(  # pylint:disable=protected-access
            head_name, metric_keys.MetricKeys.LOSS)
        metrics[loss_name] = metrics_lib.mean(spec.loss, name=loss_name)
        # Metric keys already contain head.name.
        metrics.update(spec.eval_metric_ops or {})
        for k, v in six.iteritems(spec.predictions):
          predictions[(head_name, k)] = v
      loss = _merge_losses(losses, self._head_weights)

    return model_fn.EstimatorSpec(
        mode=model_fn.ModeKeys.EVAL,
        predictions=predictions,
        loss=loss,
        eval_metric_ops=metrics)
Example #7
 def model_fn(features, labels, mode):
   _ = labels
   step = training.get_global_step()
   w = variable_scope.get_variable(
       'w',
       shape=[],
       initializer=init_ops.zeros_initializer(),
       dtype=dtypes.int64)
   if estimator_lib.ModeKeys.TRAIN == mode:
     # to consume features, we have control dependency
     with ops.control_dependencies([features]):
       step_inc = state_ops.assign_add(training.get_global_step(), 1)
     with ops.control_dependencies([step_inc]):
       assign_w_to_step_plus_2 = w.assign(step + 2)
     return estimator_lib.EstimatorSpec(
         mode,
         loss=constant_op.constant(3.),
         train_op=assign_w_to_step_plus_2)
   if estimator_lib.ModeKeys.EVAL == mode:
     # to consume features, we have control dependency
     with ops.control_dependencies([features]):
       loss = constant_op.constant(5.)
     return estimator_lib.EstimatorSpec(
         mode,
         loss=loss,
         # w is constant in each step, so the mean.
         # w = 0 if step==0 else step+2
         eval_metric_ops={'mean_of_const': metrics_lib.mean(w)})
Example #8
 def metric_fn(
     generator_inputs, generated_data, real_data, discriminator_real_outputs,
     discriminator_gen_outputs, generator_loss, discriminator_loss):
   """`metric_fn` used in TPUEstimator to calculate metrics."""
   eval_metric_ops = {
       'generator_loss': metrics_lib.mean(generator_loss),
       'discriminator_loss': metrics_lib.mean(discriminator_loss),
   }
   custom_eval_metric_ops = get_eval_metric_ops_fn(
       generator_inputs, generated_data, real_data,
       discriminator_real_outputs, discriminator_gen_outputs)
   if not isinstance(custom_eval_metric_ops, dict):
     raise TypeError('`get_eval_metric_ops_fn` must return a dict, '
                     'received: {}'.format(custom_eval_metric_ops))
   eval_metric_ops.update(custom_eval_metric_ops)
   return eval_metric_ops
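A hedged sketch of how a `metric_fn` like the one above is usually handed to a TPU estimator: `eval_metrics` in `TPUEstimatorSpec` is a (fn, tensors) pair, and when the tensors are given as a dict its keys must match the `metric_fn` argument names. The `gan_model` and `gan_loss` objects are assumed to be in scope inside the surrounding model_fn.

tpu_spec = tf.contrib.tpu.TPUEstimatorSpec(
    mode=tf.estimator.ModeKeys.EVAL,
    loss=gan_loss.generator_loss + gan_loss.discriminator_loss,
    eval_metrics=(metric_fn, {
        'generator_inputs': gan_model.generator_inputs,
        'generated_data': gan_model.generated_data,
        'real_data': gan_model.real_data,
        'discriminator_real_outputs': gan_model.discriminator_real_outputs,
        'discriminator_gen_outputs': gan_model.discriminator_gen_outputs,
        'generator_loss': gan_loss.generator_loss,
        'discriminator_loss': gan_loss.discriminator_loss,
    }))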
Example #9
def _sigmoid_entropy(probabilities, targets, weights=None):
  return metrics.mean(
      losses.sigmoid_cross_entropy(probabilities,
                                   _squeeze_and_onehot(
                                       targets,
                                       array_ops.shape(probabilities)[1])),
      weights=weights)
Example #10
def _predictions_mean(predictions, weights=None, name=None):
  with ops.name_scope(
      name, 'predictions_mean', (predictions, weights)) as scope:
    predictions = math_ops.to_float(predictions, name='predictions')
    if weights is not None:
      weights = weights_broadcast_ops.broadcast_weights(weights, predictions)
    return metrics_lib.mean(predictions, weights=weights, name=scope)
Example #11
 def _eval_metric_ops(self, labels, class_ids, weights, weighted_sum_loss,
                      example_weight_sum):
   """Returns the Eval metric ops."""
   with ops.name_scope(
       None, 'metrics',
       (labels, class_ids, weights, weighted_sum_loss, example_weight_sum)):
     keys = metric_keys.MetricKeys
     metric_ops = {
         # Estimator already adds a metric for loss.
         # TODO(xiejw): Any other metrics?
         _summary_key(self._name, keys.LOSS_MEAN):
             metrics_lib.mean(
                 # Both values and weights here are reduced, scalar Tensors.
                 # values is the actual mean we want -- weights represents the
                 # total weight of the batch and is needed to calculate
                 # update_op over many batches.
                 values=(weighted_sum_loss / example_weight_sum),
                 weights=example_weight_sum,
                 name=keys.LOSS_MEAN),
         _summary_key(self._name, keys.ACCURACY):
             metrics_lib.accuracy(
                 labels=labels,
                 predictions=class_ids,
                 weights=weights,
                 name=keys.ACCURACY),
     }
   return metric_ops
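The comment inside the metric dict above is worth making explicit: `metrics_lib.mean` computes a weighted mean across batches, so passing the already-reduced per-batch mean as the value and the batch's total example weight as the weight makes the streamed result equal the overall weighted mean loss,

$$\frac{\sum_b (L_b / W_b)\, W_b}{\sum_b W_b} = \frac{\sum_b L_b}{\sum_b W_b},$$

where $L_b$ is `weighted_sum_loss` and $W_b$ is `example_weight_sum` for batch $b$.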
Example #12
  def _evaluate_model(self,
                      input_fn,
                      hooks=None,
                      checkpoint_path=None,
                      name=''):
    """Evaluates the model using the training.evaluation library."""
    # Check that model has been trained (if nothing has been set explicitly).
    if not checkpoint_path:
      latest_path = saver.latest_checkpoint(self._model_dir)
      if not latest_path:
        raise ValueError('Could not find trained model in model_dir: {}.'.
                         format(self._model_dir))
      checkpoint_path = latest_path

    # Setup output directory.
    eval_dir = os.path.join(self._model_dir, 'eval' if not name else
                            'eval_' + name)

    with ops.Graph().as_default() as g:
      random_seed.set_random_seed(self._config.tf_random_seed)
      global_step_tensor = self._create_and_assert_global_step(g)
      features, labels = self._get_features_and_labels_from_input_fn(
          input_fn, model_fn_lib.ModeKeys.EVAL)
      estimator_spec = self._call_model_fn(
          features, labels, model_fn_lib.ModeKeys.EVAL)

      if model_fn_lib.LOSS_METRIC_KEY in estimator_spec.eval_metric_ops:
        raise ValueError(
            'Metric with name "%s" is not allowed, because Estimator ' % (
                model_fn_lib.LOSS_METRIC_KEY) +
            'already defines a default metric with the same name.')
      estimator_spec.eval_metric_ops[
          model_fn_lib.LOSS_METRIC_KEY] = metrics_lib.mean(estimator_spec.loss)

      update_op, eval_dict = _extract_metric_update_ops(
          estimator_spec.eval_metric_ops)

      if ops.GraphKeys.GLOBAL_STEP in eval_dict:
        raise ValueError(
            'Metric with name `global_step` is not allowed, because Estimator '
            'already defines a default metric with the same name.')
      eval_dict[ops.GraphKeys.GLOBAL_STEP] = global_step_tensor

      eval_results = evaluation._evaluate_once(  # pylint: disable=protected-access
          checkpoint_path=checkpoint_path,
          master=self._config.evaluation_master,
          scaffold=estimator_spec.scaffold,
          eval_ops=update_op,
          final_ops=eval_dict,
          hooks=hooks,
          config=self._session_config)

      _write_dict_to_summary(
          output_dir=eval_dir,
          dictionary=eval_results,
          current_global_step=eval_results[ops.GraphKeys.GLOBAL_STEP])

    return eval_results
Example #13
def _r2(probabilities, targets, weights=None):
  targets = math_ops.cast(targets, dtypes.float32)
  y_mean = math_ops.reduce_mean(targets, 0)
  squares_total = math_ops.reduce_sum(
      math_ops.squared_difference(targets, y_mean), 0)
  squares_residuals = math_ops.reduce_sum(
      math_ops.squared_difference(targets, probabilities), 0)
  score = 1 - math_ops.reduce_sum(squares_residuals / squares_total)
  return metrics.mean(score, weights=weights)
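For reference, the quantity `_r2` streams is a coefficient-of-determination score in which the residual-to-total ratio is summed over output dimensions before being subtracted from one:

$$R^2 = 1 - \sum_{j} \frac{\sum_i (y_{ij} - \hat{y}_{ij})^2}{\sum_i (y_{ij} - \bar{y}_j)^2},$$

where $i$ indexes examples, $j$ indexes output dimensions, $\hat{y}$ are the predicted `probabilities`, and $\bar{y}_j$ is the per-dimension mean of `targets`.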
Example #14
 def model_fn(features, labels, mode):
   _, _ = features, labels
   return estimator_lib.EstimatorSpec(
       mode,
       loss=constant_op.constant(3.),
       scaffold=training.Scaffold(saver=training.Saver()),
       train_op=constant_op.constant(5.),
       eval_metric_ops={
           'mean_of_features': metrics_lib.mean(constant_op.constant(2.))
       })
Example #15
 def _eval_metric_ops(self, labels, probabilities, weights, weighted_sum_loss,
                      example_weight_sum):
   """Returns a dict of metrics for eval_metric_ops."""
   with ops.name_scope(
       None, 'metrics',
       [labels, probabilities, weights, weighted_sum_loss, example_weight_sum
       ]):
     keys = metric_keys.MetricKeys
     metric_ops = {
         # Estimator already adds a metric for loss.
         head_lib._summary_key(self._name, keys.LOSS_MEAN):  # pylint:disable=protected-access
             metrics_lib.mean(
                 # Both values and weights here are reduced, scalar Tensors.
                 # values is the actual mean we want, but we pass the scalar
                 # example_weight_sum in order to return the correct update_op
                 # alongside the value_op for streaming metrics.
                 values=(weighted_sum_loss / example_weight_sum),
                 weights=example_weight_sum,
                 name=keys.LOSS_MEAN),
         head_lib._summary_key(self._name, keys.AUC):  # pylint:disable=protected-access
             metrics_lib.auc(labels=labels, predictions=probabilities,
                             weights=weights, name=keys.AUC),
         head_lib._summary_key(self._name, keys.AUC_PR):  # pylint:disable=protected-access
             metrics_lib.auc(labels=labels, predictions=probabilities,
                             weights=weights, curve='PR',
                             name=keys.AUC_PR),
     }
     for threshold in self._thresholds:
       accuracy_key = keys.ACCURACY_AT_THRESHOLD % threshold
       metric_ops[head_lib._summary_key(self._name, accuracy_key)] = (  # pylint:disable=protected-access
           head_lib._accuracy_at_threshold(  # pylint:disable=protected-access
               labels=labels,
               predictions=probabilities,
               weights=weights,
               threshold=threshold,
               name=accuracy_key))
       # Precision for positive examples.
       precision_key = keys.PRECISION_AT_THRESHOLD % threshold
       metric_ops[head_lib._summary_key(self._name, precision_key)] = (  # pylint:disable=protected-access
           head_lib._precision_at_threshold(  # pylint:disable=protected-access
               labels=labels,
               predictions=probabilities,
               weights=weights,
               threshold=threshold,
               name=precision_key))
       # Recall for positive examples.
       recall_key = keys.RECALL_AT_THRESHOLD % threshold
       metric_ops[head_lib._summary_key(self._name, recall_key)] = (  # pylint:disable=protected-access
           head_lib._recall_at_threshold(  # pylint:disable=protected-access
               labels=labels,
               predictions=probabilities,
               weights=weights,
               threshold=threshold,
               name=recall_key))
   return metric_ops
Example #16
  def create_estimator_spec(
      self, features, mode, logits, labels=None, train_op_fn=None):
    """See `Head`."""
    with variable_scope.variable_scope(
        None,
        default_name='regression_head',
        values=(tuple(six.itervalues(features)) + (labels, logits))):

      # Predict.
      logits = _check_logits(logits, self._logits_dimension)
      predictions = {prediction_keys.PredictionKeys.PREDICTIONS: logits}
      if mode == model_fn.ModeKeys.PREDICT:
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.PREDICT,
            predictions=predictions,
            export_outputs={'': export_output.RegressionOutput(value=logits)})

      # Eval.
      labels = _check_labels(_maybe_expand_dim(math_ops.to_float(labels)),
                             self._logits_dimension)
      unweighted_loss = losses.mean_squared_error(
          labels=labels, predictions=logits, reduction=losses.Reduction.NONE)
      weights = (
          1. if (self._weight_feature_key is None) else
          features[self._weight_feature_key])
      weights = _maybe_expand_dim(math_ops.to_float(weights, name='weights'))
      training_loss = losses.compute_weighted_loss(
          unweighted_loss, weights=weights, reduction=losses.Reduction.SUM)
      if mode == model_fn.ModeKeys.EVAL:
        # Estimator already adds a metric for loss.
        eval_metric_ops = {
            metric_keys.MetricKeys.LOSS_MEAN: metrics_lib.mean(
                unweighted_loss, weights=weights)
        }
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.EVAL,
            predictions=predictions,
            loss=training_loss,
            eval_metric_ops=eval_metric_ops)

      # Train.
      if train_op_fn is None:
        raise ValueError('train_op_fn can not be None.')
      logging_ops.scalar_summary(metric_keys.MetricKeys.LOSS, training_loss)
      logging_ops.scalar_summary(
          metric_keys.MetricKeys.LOSS_MEAN,
          losses.compute_weighted_loss(
              unweighted_loss, weights=weights,
              reduction=losses.Reduction.MEAN))
      return model_fn.EstimatorSpec(
          mode=model_fn.ModeKeys.TRAIN,
          predictions=predictions,
          loss=training_loss,
          train_op=train_op_fn(training_loss))
Example #17
  def testMetricsCollection(self):

    def _enqueue_vector(sess, queue, values, shape=None):
      if not shape:
        shape = (1, len(values))
      dtype = queue.dtypes[0]
      sess.run(
          queue.enqueue(constant_op.constant(
              values, dtype=dtype, shape=shape)))

    meta_graph_filename = os.path.join(
        _TestDir("metrics_export"), "meta_graph.pb")

    graph = ops.Graph()
    with self.session(graph=graph) as sess:
      values_queue = data_flow_ops.FIFOQueue(
          4, dtypes.float32, shapes=(1, 2))
      _enqueue_vector(sess, values_queue, [0, 1])
      _enqueue_vector(sess, values_queue, [-4.2, 9.1])
      _enqueue_vector(sess, values_queue, [6.5, 0])
      _enqueue_vector(sess, values_queue, [-3.2, 4.0])
      values = values_queue.dequeue()

      _, update_op = metrics.mean(values)

      initializer = variables.local_variables_initializer()
      self.evaluate(initializer)
      self.evaluate(update_op)

    meta_graph.export_scoped_meta_graph(
        filename=meta_graph_filename, graph=graph)

    # Verifies that importing a meta_graph with LOCAL_VARIABLES collection
    # works correctly.
    graph = ops.Graph()
    with self.session(graph=graph) as sess:
      meta_graph.import_scoped_meta_graph(meta_graph_filename)
      initializer = variables.local_variables_initializer()
      self.evaluate(initializer)

    # Verifies that importing an old meta_graph where "local_variables"
    # collection is of node_list type works, but cannot build initializer
    # with the collection.
    graph = ops.Graph()
    with self.session(graph=graph) as sess:
      meta_graph.import_scoped_meta_graph(
          test.test_src_dir_path(
              "python/framework/testdata/metrics_export_meta_graph.pb"))
      self.assertEqual(len(ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES)),
                       2)
      with self.assertRaisesRegexp(
          AttributeError, "'Tensor' object has no attribute 'initializer'"):
        initializer = variables.local_variables_initializer()
Example #18
 def model_fn(features, labels, mode):
   _ = labels
   if estimator_lib.ModeKeys.TRAIN == mode:
     with ops.control_dependencies([features]):
       train_op = state_ops.assign_add(training.get_global_step(), 1)
     return estimator_lib.EstimatorSpec(
         mode, loss=constant_op.constant(3.), train_op=train_op)
   if estimator_lib.ModeKeys.EVAL == mode:
     return estimator_lib.EstimatorSpec(
         mode,
         loss=constant_op.constant(5.),
         eval_metric_ops={'mean_of_features': metrics_lib.mean(features)})
Example #19
  def create_estimator_spec(
      self, features, mode, logits, labels=None, train_op_fn=None):
    """See `Head`."""
    # Predict.
    with ops.name_scope('head'):
      logits = _check_logits(logits, self._logits_dimension)
      predictions = {prediction_keys.PredictionKeys.PREDICTIONS: logits}
      if mode == model_fn.ModeKeys.PREDICT:
        regression_output = export_output.RegressionOutput(value=logits)
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.PREDICT,
            predictions=predictions,
            export_outputs={
                _DEFAULT_SERVING_KEY: regression_output,
                _REGRESS_SERVING_KEY: regression_output,
                _PREDICT_SERVING_KEY: export_output.PredictOutput(predictions)
            })

      # Eval.
      unweighted_loss, _ = self.create_loss(
          features=features, mode=mode, logits=logits, labels=labels)
      weights = _weights(features, self._weight_column)
      training_loss = losses.compute_weighted_loss(
          unweighted_loss, weights=weights, reduction=losses.Reduction.SUM)
      if mode == model_fn.ModeKeys.EVAL:
        # Estimator already adds a metric for loss.
        eval_metric_ops = {
            metric_keys.MetricKeys.LOSS_MEAN: metrics_lib.mean(
                unweighted_loss, weights=weights)
        }
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.EVAL,
            predictions=predictions,
            loss=training_loss,
            eval_metric_ops=eval_metric_ops)

      # Train.
      if train_op_fn is None:
        raise ValueError('train_op_fn can not be None.')
    with ops.name_scope(''):
      summary.scalar(
          _summary_key(self._name, metric_keys.MetricKeys.LOSS),
          training_loss)
      summary.scalar(
          _summary_key(self._name, metric_keys.MetricKeys.LOSS_MEAN),
          losses.compute_weighted_loss(
              unweighted_loss, weights=weights,
              reduction=losses.Reduction.MEAN))
    return model_fn.EstimatorSpec(
        mode=model_fn.ModeKeys.TRAIN,
        predictions=predictions,
        loss=training_loss,
        train_op=train_op_fn(training_loss))
Example #20
  def _evaluate_model(self,
                      input_fn,
                      hooks=None,
                      checkpoint_path=None,
                      name=''):
    """Evaluates the model using the training.evaluation library."""
    # Check that model has been trained (if nothing has been set explicitly).
    if not checkpoint_path:
      latest_path = saver.latest_checkpoint(self._model_dir)
      if not latest_path:
        raise ValueError('Could not find trained model in model_dir: {}.'.
                         format(self._model_dir))
      checkpoint_path = latest_path

    # Setup output directory.
    eval_dir = os.path.join(self._model_dir, 'eval' if not name else
                            'eval_' + name)

    with ops.Graph().as_default() as g:
      random_seed.set_random_seed(self._config.tf_random_seed)
      global_step_tensor = training.create_global_step(g)
      features, labels = input_fn()
      estimator_spec = self._call_model_fn(
          features, labels, model_fn_lib.ModeKeys.EVAL)

      self._verify_default_metric_key(model_fn_lib.MetricKeys.LOSS,
                                      estimator_spec.eval_metric_ops)
      estimator_spec.eval_metric_ops[
          model_fn_lib.MetricKeys.LOSS] = metrics_lib.mean(estimator_spec.loss)

      update_op, eval_dict = _extract_metric_update_ops(
          estimator_spec.eval_metric_ops)

      self._verify_default_metric_key(ops.GraphKeys.GLOBAL_STEP, eval_dict)
      eval_dict[ops.GraphKeys.GLOBAL_STEP] = global_step_tensor

      eval_results = evaluation._evaluate_once(  # pylint: disable=protected-access
          checkpoint_path=checkpoint_path,
          master=self._config.evaluation_master,
          scaffold=estimator_spec.scaffold,
          eval_ops=update_op,
          final_ops=eval_dict,
          hooks=hooks,
          config=config_pb2.ConfigProto(allow_soft_placement=True))

      _write_dict_to_summary(
          output_dir=eval_dir,
          dictionary=eval_results,
          current_global_step=eval_results[ops.GraphKeys.GLOBAL_STEP])

    return eval_results
Example #21
    def model_fn(features, labels, mode):
      _, _ = features, labels

      def init_fn(scaffold, session):
        _, _ = scaffold, session

      return estimator_lib.EstimatorSpec(
          mode,
          loss=constant_op.constant(3.),
          scaffold=training.Scaffold(init_fn=init_fn),
          train_op=constant_op.constant(5.),
          eval_metric_ops={
              'mean_of_features': metrics_lib.mean(constant_op.constant(2.))
          })
Example #22
  def create_estimator_spec(
      self, features, mode, logits, labels=None, train_op_fn=None):
    """See `Head`."""
    # Predict.
    with ops.name_scope('head'):
      logits = _check_logits(logits, self._logits_dimension)
      predictions = {prediction_keys.PredictionKeys.PREDICTIONS: logits}
      if mode == model_fn.ModeKeys.PREDICT:
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.PREDICT,
            predictions=predictions,
            export_outputs={'': export_output.RegressionOutput(value=logits)})

      # Eval.
      labels = _check_labels(_maybe_expand_dim(math_ops.to_float(labels)),
                             self._logits_dimension)
      unweighted_loss = losses.mean_squared_error(
          labels=labels, predictions=logits, reduction=losses.Reduction.NONE)
      weights = _weights(features, self._weight_column)
      training_loss = losses.compute_weighted_loss(
          unweighted_loss, weights=weights, reduction=losses.Reduction.SUM)
      if mode == model_fn.ModeKeys.EVAL:
        # Estimator already adds a metric for loss.
        eval_metric_ops = {
            metric_keys.MetricKeys.LOSS_MEAN: metrics_lib.mean(
                unweighted_loss, weights=weights)
        }
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.EVAL,
            predictions=predictions,
            loss=training_loss,
            eval_metric_ops=eval_metric_ops)

      # Train.
      if train_op_fn is None:
        raise ValueError('train_op_fn can not be None.')
    with ops.name_scope(''):
      summary.scalar(metric_keys.MetricKeys.LOSS, training_loss)
      summary.scalar(metric_keys.MetricKeys.LOSS_MEAN,
                     losses.compute_weighted_loss(
                         unweighted_loss,
                         weights=weights,
                         reduction=losses.Reduction.MEAN))
    return model_fn.EstimatorSpec(
        mode=model_fn.ModeKeys.TRAIN,
        predictions=predictions,
        loss=training_loss,
        train_op=train_op_fn(training_loss))
Example #23
 def model_fn(features, labels, mode):
   _, _ = features, labels
   w = variables.Variable(
       initial_value=[0.],
       trainable=False,
       collections=[ops.GraphKeys.SAVEABLE_OBJECTS])
   init_op = control_flow_ops.group(
       [w.initializer, training.get_global_step().initializer])
   return estimator_lib.EstimatorSpec(
       mode,
       loss=constant_op.constant(3.),
       scaffold=training.Scaffold(init_op=init_op),
       train_op=constant_op.constant(5.),
       eval_metric_ops={
           'mean_of_features': metrics_lib.mean(constant_op.constant(2.))
       })
Example #24
 def test_metrics_consistent(self):
   # Tests that the identity metrics used to report in-sample predictions match
   # the behavior of standard metrics.
   g = ops.Graph()
   with g.as_default():
     features = {
         feature_keys.TrainEvalFeatures.TIMES:
             array_ops.zeros((1, 1)),
         feature_keys.TrainEvalFeatures.VALUES:
             array_ops.zeros((1, 1, 1)),
         "ticker":
             array_ops.reshape(
                 math_ops.cast(
                     variables.VariableV1(
                         name="ticker",
                         initial_value=0,
                         dtype=dtypes.int64,
                         collections=[ops.GraphKeys.LOCAL_VARIABLES])
                     .count_up_to(10),
                     dtype=dtypes.float32), (1, 1, 1))
     }
     model_fn = ts_head_lib.TimeSeriesRegressionHead(
         model=_TickerModel(),
         state_manager=state_management.PassthroughStateManager(),
         optimizer=train.GradientDescentOptimizer(0.001)).create_estimator_spec
     outputs = model_fn(
         features=features, labels=None, mode=estimator_lib.ModeKeys.EVAL)
     metric_update_ops = [
         metric[1] for metric in outputs.eval_metric_ops.values()]
     loss_mean, loss_update = metrics.mean(outputs.loss)
     metric_update_ops.append(loss_update)
     with self.cached_session() as sess:
       coordinator = coordinator_lib.Coordinator()
       queue_runner_impl.start_queue_runners(sess, coord=coordinator)
       variables.local_variables_initializer().run()
       sess.run(metric_update_ops)
       loss_evaled, metric_evaled, nested_metric_evaled = sess.run(
           (loss_mean, outputs.eval_metric_ops["ticker"][0],
            outputs.eval_metric_ops[feature_keys.FilteringResults.STATE_TUPLE][
                0][0]))
       # The custom model_utils metrics for in-sample predictions should be in
       # sync with the Estimator's mean metric for model loss.
       self.assertAllClose(0., loss_evaled)
       self.assertAllClose((((0.,),),), metric_evaled)
       self.assertAllClose((((0.,),),), nested_metric_evaled)
       coordinator.request_stop()
       coordinator.join()
Example #25
 def _eval_metric_ops(self, labels, probabilities, logits,
                      class_ids, weights, unweighted_loss):
   """Returns the Eval metric ops."""
   with ops.name_scope(
       None, 'metrics',
       (labels, probabilities, logits, class_ids, weights, unweighted_loss)):
     keys = metric_keys.MetricKeys
     metric_ops = {
         # Estimator already adds a metric for loss.
         # TODO(xiejw): Any other metrics?
         keys.LOSS_MEAN: metrics_lib.mean(
             unweighted_loss, weights=weights, name=keys.LOSS_MEAN),
         keys.ACCURACY: metrics_lib.accuracy(
             labels=labels, predictions=class_ids, weights=weights,
             name=keys.ACCURACY),
     }
   return metric_ops
Example #26
 def _eval_metric_ops(
     self, labels, logits, logistic, scores, classes, unweighted_loss,
     weights=None):
   with ops.name_scope(
       None, 'metrics',
       (labels, logits, logistic, scores, classes, unweighted_loss, weights)):
     keys = metric_keys.MetricKeys
     labels_mean = _indicator_labels_mean(
         labels=labels, weights=weights, name=keys.LABEL_MEAN)
     metric_ops = {
         # Estimator already adds a metric for loss.
         keys.LOSS_MEAN: metrics_lib.mean(
             unweighted_loss, weights=weights, name=keys.LOSS_MEAN),
         keys.ACCURACY: metrics_lib.accuracy(
             labels=labels, predictions=classes, weights=weights,
             name=keys.ACCURACY),
         keys.PREDICTION_MEAN: _predictions_mean(
             predictions=logistic, weights=weights, name=keys.PREDICTION_MEAN),
         keys.LABEL_MEAN: labels_mean,
         keys.ACCURACY_BASELINE: _accuracy_baseline(labels_mean),
         keys.AUC: _auc(
             labels=labels, predictions=logistic, weights=weights,
             name=keys.AUC),
         keys.AUC_PR: _auc(
             labels=labels, predictions=logistic, weights=weights, curve='PR',
             name=keys.AUC_PR)
     }
     for threshold in self._thresholds:
       accuracy_key = keys.ACCURACY_AT_THRESHOLD % threshold
       metric_ops[accuracy_key] = _accuracy_at_threshold(
           labels=labels, predictions=logistic, weights=weights,
           threshold=threshold, name=accuracy_key)
       # Precision for positive examples.
       precision_key = keys.PRECISION_AT_THRESHOLD % threshold
       metric_ops[precision_key] = _precision_at_threshold(
           labels=labels, predictions=logistic, weights=weights,
           threshold=threshold, name=precision_key)
       # Recall for positive examples.
       recall_key = keys.RECALL_AT_THRESHOLD % threshold
       metric_ops[recall_key] = _recall_at_threshold(
           labels=labels, predictions=logistic, weights=weights,
           threshold=threshold, name=recall_key)
     return metric_ops
Example #27
    def create_estimator_spec(self,
                              features,
                              mode,
                              logits,
                              labels=None,
                              train_op_fn=None):
        """See `Head`."""
        # Predict.
        with ops.name_scope(self._name, 'head'):
            logits = _check_logits(logits, self._logits_dimension)
            predictions = {prediction_keys.PredictionKeys.PREDICTIONS: logits}
            if mode == model_fn.ModeKeys.PREDICT:
                regression_output = export_output.RegressionOutput(
                    value=logits)
                return model_fn.EstimatorSpec(
                    mode=model_fn.ModeKeys.PREDICT,
                    predictions=predictions,
                    export_outputs={
                        _DEFAULT_SERVING_KEY:
                        regression_output,
                        _REGRESS_SERVING_KEY:
                        regression_output,
                        _PREDICT_SERVING_KEY:
                        export_output.PredictOutput(predictions)
                    })

            # Eval.
            unweighted_loss, _ = self.create_loss(features=features,
                                                  mode=mode,
                                                  logits=logits,
                                                  labels=labels)
            weights = _weights(features, self._weight_column)
            training_loss = losses.compute_weighted_loss(
                unweighted_loss,
                weights=weights,
                reduction=losses.Reduction.SUM)
            if mode == model_fn.ModeKeys.EVAL:
                # Estimator already adds a metric for loss.
                eval_metric_ops = {
                    metric_keys.MetricKeys.LOSS_MEAN:
                    metrics_lib.mean(unweighted_loss, weights=weights)
                }
                return model_fn.EstimatorSpec(mode=model_fn.ModeKeys.EVAL,
                                              predictions=predictions,
                                              loss=training_loss,
                                              eval_metric_ops=eval_metric_ops)

            # Train.
            if train_op_fn is None:
                raise ValueError('train_op_fn can not be None.')
        with ops.name_scope(''):
            summary.scalar(
                _summary_key(self._name, metric_keys.MetricKeys.LOSS),
                training_loss)
            summary.scalar(
                _summary_key(self._name, metric_keys.MetricKeys.LOSS_MEAN),
                losses.compute_weighted_loss(unweighted_loss,
                                             weights=weights,
                                             reduction=losses.Reduction.MEAN))
        return model_fn.EstimatorSpec(mode=model_fn.ModeKeys.TRAIN,
                                      predictions=predictions,
                                      loss=training_loss,
                                      train_op=train_op_fn(training_loss))
Example #28
 def metric_fn():
   return {'auc': metrics_lib.mean(constant_op.constant([2.]))}
Example #29
    def _evaluate_model(self,
                        input_fn,
                        hooks=None,
                        checkpoint_path=None,
                        name=''):
        """Evaluates the model using the training.evaluation library."""
        # Check that model has been trained (if nothing has been set explicitly).
        if not checkpoint_path:
            latest_path = saver.latest_checkpoint(self._model_dir)
            if not latest_path:
                raise ValueError(
                    'Could not find trained model in model_dir: {}.'.format(
                        self._model_dir))
            checkpoint_path = latest_path

        # Setup output directory.
        eval_dir = os.path.join(self._model_dir,
                                'eval' if not name else 'eval_' + name)

        with ops.Graph().as_default() as g:
            random_seed.set_random_seed(self._config.tf_random_seed)
            global_step_tensor = self._create_and_assert_global_step(g)
            features, labels, input_hooks = (
                self._get_features_and_labels_from_input_fn(
                    input_fn, model_fn_lib.ModeKeys.EVAL))
            estimator_spec = self._call_model_fn(features, labels,
                                                 model_fn_lib.ModeKeys.EVAL,
                                                 self.config)

            if model_fn_lib.LOSS_METRIC_KEY in estimator_spec.eval_metric_ops:
                raise ValueError(
                    'Metric with name "%s" is not allowed, because Estimator '
                    % (model_fn_lib.LOSS_METRIC_KEY) +
                    'already defines a default metric with the same name.')
            estimator_spec.eval_metric_ops[
                model_fn_lib.LOSS_METRIC_KEY] = metrics_lib.mean(
                    estimator_spec.loss)

            update_op, eval_dict = _extract_metric_update_ops(
                estimator_spec.eval_metric_ops)

            if ops.GraphKeys.GLOBAL_STEP in eval_dict:
                raise ValueError(
                    'Metric with name `global_step` is not allowed, because Estimator '
                    'already defines a default metric with the same name.')
            eval_dict[ops.GraphKeys.GLOBAL_STEP] = global_step_tensor

            all_hooks = list(input_hooks)
            all_hooks.extend(hooks)
            all_hooks.extend(list(estimator_spec.evaluation_hooks or []))

            eval_results = evaluation._evaluate_once(  # pylint: disable=protected-access
                checkpoint_path=checkpoint_path,
                master=self._config.evaluation_master,
                scaffold=estimator_spec.scaffold,
                eval_ops=update_op,
                final_ops=eval_dict,
                hooks=all_hooks,
                config=self._session_config)

            _write_dict_to_summary(
                output_dir=eval_dir,
                dictionary=eval_results,
                current_global_step=eval_results[ops.GraphKeys.GLOBAL_STEP])

        return eval_results
Example #30
def dfm_model_fn(features, labels, mode, params):
    net = tf.feature_column.input_layer(
        features, params['feature_columns']
    )  # shape(batch_size, column_num * embedding_size)
    last_deep_layer = build_deep_layers(net, params)

    column_num, dimension = _check_fm_columns(params['feature_columns'])
    feature_embeddings = tf.reshape(
        net,
        (-1, column_num, dimension))  #(batch_size,column_num, embedding_size)

    # sum_square part
    summed_feature_embeddings = tf.reduce_sum(feature_embeddings,
                                              1)  # (batch_size,embedding_size)
    summed_square_feature_embeddings = tf.square(summed_feature_embeddings)

    # squre-sum part
    squared_feature_embeddings = tf.square(feature_embeddings)
    squared_sum_feature_embeddings = tf.reduce_sum(squared_feature_embeddings,
                                                   1)

    fm_second_order = 0.5 * tf.subtract(summed_square_feature_embeddings,
                                        squared_sum_feature_embeddings)
    #print(tf.shape(fm_second_order))
    #print(fm_second_order.get_shape())

    if params['use_fm']:
        print('--use fm--')
        last_layer = tf.concat([fm_second_order, last_deep_layer], 1)
    else:
        last_layer = last_deep_layer
    #head = tf.contrib.estimator.binary_classification_head(loss_reduction=losses.Reduction.SUM)
    head = head_lib._binary_logistic_or_multi_class_head(  # pylint: disable=protected-access
        n_classes=2,
        weight_column=None,
        label_vocabulary=None,
        loss_reduction=losses.Reduction.SUM)
    logits = tf.layers.dense(
        last_layer,
        units=head.logits_dimension,
        kernel_initializer=tf.glorot_uniform_initializer())
    optimizer = tf.train.AdagradOptimizer(
        learning_rate=params['learning_rate'])

    preds = tf.sigmoid(logits)
    #print(tf.shape(preds))
    #print(preds.get_shape())
    user_id = features['user_id']
    label = features['label']
    if mode == tf.estimator.ModeKeys.EVAL:
        accuracy = tf.metrics.accuracy(labels=labels['class'],
                                       predictions=tf.to_float(
                                           tf.greater_equal(preds, 0.5)))
        auc = tf.metrics.auc(labels['class'], preds)
        label_mean = metrics_lib.mean(labels['class'])
        prediction_mean = metrics_lib.mean(preds)

        prediction_squared_difference = tf.math.squared_difference(
            preds, prediction_mean[0])
        prediction_squared_sum = tf.reduce_sum(prediction_squared_difference)
        num_predictions = tf.to_float(tf.size(preds))
        s_deviation = tf.sqrt(prediction_squared_sum /
                              num_predictions), accuracy[0]  # standard deviation

        c_variation = tf.to_float(s_deviation[0] /
                                  prediction_mean[0]), accuracy[0]  # coefficient of variation

        #group_auc = tf.to_float(cal_group_auc(labels['class'], preds, labels['user_id'])), accuracy[0] # group auc

        metrics = {
            'accuracy': accuracy,
            'auc': auc,
            'label/mean': label_mean,
            'prediction/mean': prediction_mean,
            'standard deviation': s_deviation,
            'coefficient of variation': c_variation
        }
        #          'group auc': group_auc}
        tf.summary.scalar('accuracy', accuracy[1])
        tf.summary.scalar('auc', auc[1])
        tf.summary.scalar('label/mean', label_mean[1])
        tf.summary.scalar('prediction/mean', prediction_mean[1])
        tf.summary.scalar('s_deviation', s_deviation[1])
        tf.summary.scalar('c_variation', c_variation[1])
        #tf.summary.scalar('group_auc', group_auc[1])

        loss = tf.reduce_sum(
            tf.nn.sigmoid_cross_entropy_with_logits(labels=labels['class'],
                                                    logits=logits))
        #print(tf.shape(loss))
        #print(loss.get_shape())
        return tf.estimator.EstimatorSpec(mode,
                                          loss=loss,
                                          eval_metric_ops=metrics)

    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {
            'probabilities': preds,
            'user_id': user_id,
            'label': label
        }
        export_outputs = {
            'prediction': tf.estimator.export.PredictOutput(predictions)
        }
        return tf.estimator.EstimatorSpec(mode,
                                          predictions=predictions,
                                          export_outputs=export_outputs)

    return head.create_estimator_spec(
        features=features,
        mode=mode,
        labels=labels,
        logits=logits,
        train_op_fn=lambda loss: optimizer.minimize(
            loss, global_step=tf.train.get_global_step()))
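A minimal sketch of constructing an Estimator around `dfm_model_fn`; the concrete feature columns, hyperparameter values, and the 'hidden_units' key (presumably read by `build_deep_layers`) are illustrative assumptions rather than part of the snippet.

import tensorflow as tf

feature_columns = [
    tf.feature_column.embedding_column(
        tf.feature_column.categorical_column_with_hash_bucket(
            'user_id', hash_bucket_size=1000),
        dimension=8),
    tf.feature_column.embedding_column(
        tf.feature_column.categorical_column_with_hash_bucket(
            'item_id', hash_bucket_size=10000),
        dimension=8),
]

estimator = tf.estimator.Estimator(
    model_fn=dfm_model_fn,
    model_dir='/tmp/dfm_model',
    params={
        'feature_columns': feature_columns,
        'learning_rate': 0.01,
        'use_fm': True,
        'hidden_units': [128, 64],  # assumed key consumed by build_deep_layers
    })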
Example #31
  def model_fn(self, features, mode, config):
    """Model function for the estimator.

    Note that this does not take a `labels` arg. This works, but `input_fn` must
    return either `features` or, equivalently, `(features, None)`.

    Args:
      features: The input points. See @{tf.estimator.Estimator}.
      mode: See @{tf.estimator.Estimator}.
      config: See @{tf.estimator.Estimator}.

    Returns:
      A @{tf.estimator.EstimatorSpec} (see @{tf.estimator.Estimator}) specifying
      this behavior:
        * `train_op`: Execute one mini-batch or full-batch run of Lloyd's
             algorithm.
        * `loss`: The sum of the squared distances from each input point to its
             closest center.
        * `eval_metric_ops`: Maps `SCORE` to `loss`.
        * `predictions`: Maps `ALL_DISTANCES` to the distance from each input
             point to each cluster center; maps `CLUSTER_INDEX` to the index of
             the closest cluster center for each input point.
    """
    # input_points is a single Tensor. Therefore, the sharding functionality
    # in clustering_ops is unused, and some of the values below are lists of a
    # single item.
    input_points = _parse_tensor_or_dict(features)

    # Let N = the number of input_points.
    # all_distances: A list of one matrix of shape (N, num_clusters). Each value
    #   is the distance from an input point to a cluster center.
    # model_predictions: A list of one vector of shape (N). Each value is the
    #   cluster id of an input point.
    # losses: Similar to cluster_idx but provides the distance to the cluster
    #   center.
    # is_initialized: scalar indicating whether the initial cluster centers
    #   have been chosen; see init_op.
    # cluster_centers_var: a Variable containing the cluster centers.
    # init_op: an op to choose the initial cluster centers. A single worker
    #   repeatedly executes init_op until is_initialized becomes True.
    # training_op: an op that runs an iteration of training, either an entire
    #   Lloyd iteration or a mini-batch of a Lloyd iteration. Multiple workers
    #   may execute this op, but only after is_initialized becomes True.
    (all_distances, model_predictions, losses, is_initialized, init_op,
     training_op) = clustering_ops.KMeans(
         inputs=input_points,
         num_clusters=self._num_clusters,
         initial_clusters=self._initial_clusters,
         distance_metric=self._distance_metric,
         use_mini_batch=self._use_mini_batch,
         mini_batch_steps_per_iteration=self._mini_batch_steps_per_iteration,
         random_seed=self._random_seed,
         kmeans_plus_plus_num_retries=self._kmeans_plus_plus_num_retries
     ).training_graph()

    loss = math_ops.reduce_sum(losses)
    summary.scalar('loss/raw', loss)

    incr_step = state_ops.assign_add(training_util.get_global_step(), 1)
    training_op = control_flow_ops.with_dependencies([training_op, incr_step],
                                                     loss)

    training_hooks = [
        _InitializeClustersHook(init_op, is_initialized, config.is_chief)
    ]
    if self._relative_tolerance is not None:
      training_hooks.append(
          _LossRelativeChangeHook(loss, self._relative_tolerance))

    return model_fn_lib.EstimatorSpec(
        mode=mode,
        predictions={
            KMeansClustering.ALL_DISTANCES: all_distances[0],
            KMeansClustering.CLUSTER_INDEX: model_predictions[0],
        },
        loss=loss,
        train_op=training_op,
        eval_metric_ops={KMeansClustering.SCORE: metrics.mean(loss)},
        training_hooks=training_hooks)
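A hedged sketch of exercising this model_fn through the `tf.contrib.factorization.KMeansClustering` estimator; the input points and `num_clusters` are arbitrary illustration values.

import numpy as np
import tensorflow as tf

points = np.random.rand(100, 2).astype(np.float32)
input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'points': points}, batch_size=100, num_epochs=1, shuffle=False)

kmeans = tf.contrib.factorization.KMeansClustering(num_clusters=3)
kmeans.train(input_fn)
# evaluate() reports the SCORE metric defined by the model_fn above.
print(kmeans.evaluate(input_fn))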
Example #32
    def create_estimator_spec(self,
                              features,
                              mode,
                              logits,
                              labels=None,
                              train_op_fn=tfgan_train.gan_train_ops):
        """Returns `EstimatorSpec` that a model_fn can return.

    See `Head` for more details.

    Args:
      features: Must be `None`.
      mode: Estimator's `ModeKeys`.
      logits: A GANModel tuple.
      labels: Must be `None`.
      train_op_fn: Function that takes a GANModel, GANLoss, generator optimizer,
        and discriminator optimizer, and returns a `GANTrainOps` tuple. For
        example, this function can come from TFGAN's `train.py` library, or can
        be custom.

    Returns:
      `EstimatorSpec`.

    Raises:
      ValueError: If `features` isn't `None`.
      ValueError: If `train_op_fn` isn't provided in train mode.
    """
        _validate_logits_and_labels(logits, labels)
        if features is not None:
            raise ValueError(
                '`features` should be `None`. Instead, found: %s' % features)
        gan_model = logits  # rename variable for clarity
        with ops.name_scope('GANHead'):
            if mode == model_fn_lib.ModeKeys.PREDICT:
                return model_fn_lib.EstimatorSpec(
                    mode=model_fn_lib.ModeKeys.PREDICT,
                    predictions=gan_model.generated_data)
            elif mode == model_fn_lib.ModeKeys.EVAL:
                gan_loss = self.create_loss(features=None,
                                            mode=mode,
                                            logits=gan_model,
                                            labels=None)
                scalar_loss = gan_loss.generator_loss + gan_loss.discriminator_loss
                with ops.name_scope(
                        None, 'metrics',
                    [gan_loss.generator_loss, gan_loss.discriminator_loss]):
                    eval_metric_ops = {
                        _summary_key(self._name, 'generator_loss'):
                        metrics_lib.mean(gan_loss.generator_loss),
                        _summary_key(self._name, 'discriminator_loss'):
                        metrics_lib.mean(gan_loss.discriminator_loss)
                    }
                return model_fn_lib.EstimatorSpec(
                    mode=model_fn_lib.ModeKeys.EVAL,
                    predictions=gan_model.generated_data,
                    loss=scalar_loss,
                    eval_metric_ops=eval_metric_ops)
            elif mode == model_fn_lib.ModeKeys.TRAIN:
                if train_op_fn is None:
                    raise ValueError('train_op_fn can not be None.')
                gan_loss = self.create_loss(None, mode, gan_model, None)
                scalar_loss = gan_loss.generator_loss + gan_loss.discriminator_loss
                train_ops = train_op_fn(gan_model, gan_loss,
                                        self._generator_optimizer,
                                        self._discriminator_optimizer)
                training_hooks = self._get_hooks_fn(train_ops)
                return model_fn_lib.EstimatorSpec(
                    loss=scalar_loss,
                    mode=model_fn_lib.ModeKeys.TRAIN,
                    train_op=train_ops.global_step_inc_op,
                    training_hooks=training_hooks)
            else:
                raise ValueError('Mode not recognized: %s' % mode)
Example #33
    def model_fn(features, labels, mode):
        """model_fn for keras Estimator."""
        model = _clone_and_build_model(mode, keras_model, custom_objects,
                                       features, labels)
        model_output_names = []
        # We need to make sure that the output names of the last layer in the
        # model are the same for each of the cloned models. This is required for
        # mirrored strategy when we call regroup.
        if distribute_lib.has_distribution_strategy():
            for name in model.output_names:
                name = re.compile(r'_\d$').sub('', name)
                model_output_names.append(name)
        else:
            model_output_names = model.output_names

        # Get inputs to EstimatorSpec
        predictions = dict(zip(model_output_names, model.outputs))

        loss = None
        train_op = None
        eval_metric_ops = None

        # Set loss and metric only during train and evaluate.
        if mode is not model_fn_lib.ModeKeys.PREDICT:
            if mode is model_fn_lib.ModeKeys.TRAIN:
                model._make_train_function()  # pylint: disable=protected-access
            else:
                model._make_test_function()  # pylint: disable=protected-access
            loss = model.total_loss

            if model.metrics:
                # TODO(fchollet): support stateful metrics
                eval_metric_ops = {}
                # When each metric maps to an output
                if isinstance(model.metrics, dict):
                    for i, output_name in enumerate(model.metrics.keys()):
                        metric_name = model.metrics[output_name]
                        if callable(metric_name):
                            metric_name = metric_name.__name__
                        # When some outputs use the same metric
                        if list(model.metrics.values()).count(metric_name) > 1:
                            metric_name += '_' + output_name
                        eval_metric_ops[metric_name] = metrics_module.mean(
                            model.metrics_tensors[i - len(model.metrics)])
                else:
                    for i, metric_name in enumerate(model.metrics):
                        if callable(metric_name):
                            metric_name = metric_name.__name__
                        eval_metric_ops[metric_name] = metrics_module.mean(
                            model.metrics_tensors[i])

        # Set train_op only during train.
        if mode is model_fn_lib.ModeKeys.TRAIN:
            train_op = model.train_function.updates_op

        if not model._is_graph_network:
            # Reset model state to original state,
            # to avoid `model_fn` being destructive for the initial model argument.
            _in_place_subclassed_model_state_restoration(keras_model)
        return model_fn_lib.EstimatorSpec(
            mode=mode,
            predictions=predictions,
            loss=loss,
            train_op=train_op,
            eval_metric_ops=eval_metric_ops,
            export_outputs={
                _DEFAULT_SERVING_KEY:
                export_lib.export_output.PredictOutput(predictions)
            })
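This model_fn is the kind of function that `tf.keras.estimator.model_to_estimator` generates from a compiled Keras model. A minimal usage sketch (the toy model below is illustrative, not taken from the example):

# Hedged usage sketch: converting a compiled Keras model into an Estimator,
# whose generated model_fn follows the pattern above.
import tensorflow as tf

keras_model = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation='relu', input_shape=(4,)),
    tf.keras.layers.Dense(1, activation='sigmoid'),
])
keras_model.compile(optimizer='adam',
                    loss='binary_crossentropy',
                    metrics=['accuracy'])

# train/evaluate/predict on the returned Estimator run through a model_fn
# that clones the Keras model per mode, much like the one above.
estimator = tf.keras.estimator.model_to_estimator(keras_model=keras_model)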
Example #34
 def _eval_metric_ops(self, labels, probabilities, weights, unreduced_loss,
                      regularization_loss):
     """Returns a dict of metrics for eval_metric_ops."""
     with ops.name_scope(None, 'metrics', [
             labels, probabilities, weights, unreduced_loss,
             regularization_loss
     ]):
         keys = metric_keys.MetricKeys
         metric_ops = {
             # Estimator already adds a metric for loss.
             head_lib._summary_key(self._name, keys.LOSS_MEAN):  # pylint:disable=protected-access
                 metrics_lib.mean(
                     values=unreduced_loss,
                     weights=weights,
                     name=keys.LOSS_MEAN),
             head_lib._summary_key(self._name, keys.AUC):  # pylint:disable=protected-access
                 metrics_lib.auc(labels=labels, predictions=probabilities,
                                 weights=weights, name=keys.AUC),
             head_lib._summary_key(self._name, keys.AUC_PR):  # pylint:disable=protected-access
                 metrics_lib.auc(labels=labels, predictions=probabilities,
                                 weights=weights, curve='PR',
                                 name=keys.AUC_PR),
         }
         if regularization_loss is not None:
             loss_regularization_key = head_lib._summary_key(  # pylint:disable=protected-access
                 self._name, keys.LOSS_REGULARIZATION)
             metric_ops[loss_regularization_key] = (metrics_lib.mean(
                 values=regularization_loss, name=keys.LOSS_REGULARIZATION))
         for threshold in self._thresholds:
             accuracy_key = keys.ACCURACY_AT_THRESHOLD % threshold
             metric_ops[head_lib._summary_key(self._name, accuracy_key)] = (  # pylint:disable=protected-access
                 head_lib._accuracy_at_threshold(  # pylint:disable=protected-access
                     labels=labels,
                     predictions=probabilities,
                     weights=weights,
                     threshold=threshold,
                     name=accuracy_key))
             # Precision for positive examples.
             precision_key = keys.PRECISION_AT_THRESHOLD % threshold
             metric_ops[head_lib._summary_key(
                 self._name, precision_key)] = (  # pylint:disable=protected-access
                     head_lib._precision_at_threshold(  # pylint:disable=protected-access
                         labels=labels,
                         predictions=probabilities,
                         weights=weights,
                         threshold=threshold,
                         name=precision_key))
             # Recall for positive examples.
             recall_key = keys.RECALL_AT_THRESHOLD % threshold
             metric_ops[head_lib._summary_key(self._name, recall_key)] = (  # pylint:disable=protected-access
                 head_lib._recall_at_threshold(  # pylint:disable=protected-access
                     labels=labels,
                     predictions=probabilities,
                     weights=weights,
                     threshold=threshold,
                     name=recall_key))
         for class_id in self._classes_for_class_based_metrics:
             batch_rank = array_ops.rank(probabilities) - 1
              begin = array_ops.concat(
                  [array_ops.zeros([batch_rank], dtype=dtypes.int32),
                   [class_id]], axis=0)
              size = array_ops.concat(
                  [-1 * array_ops.ones([batch_rank], dtype=dtypes.int32), [1]],
                  axis=0)
              class_probabilities = array_ops.slice(
                  probabilities, begin=begin, size=size)
             class_labels = array_ops.slice(labels, begin=begin, size=size)
             if self._label_vocabulary is None:
                 prob_key = keys.PROBABILITY_MEAN_AT_CLASS % class_id
             else:
                 prob_key = (keys.PROBABILITY_MEAN_AT_NAME %
                             self._label_vocabulary[class_id])
             metric_ops[head_lib._summary_key(self._name, prob_key)] = (  # pylint:disable=protected-access
                 head_lib._predictions_mean(  # pylint:disable=protected-access
                     predictions=class_probabilities,
                     weights=weights,
                     name=prob_key))
             if self._label_vocabulary is None:
                 auc_key = keys.AUC_AT_CLASS % class_id
             else:
                 auc_key = keys.AUC_AT_NAME % self._label_vocabulary[
                     class_id]
             metric_ops[head_lib._summary_key(self._name, auc_key)] = (  # pylint:disable=protected-access
                 head_lib._auc(  # pylint:disable=protected-access
                     labels=class_labels,
                     predictions=class_probabilities,
                     weights=weights,
                     name=auc_key))
             if self._label_vocabulary is None:
                 auc_pr_key = keys.AUC_PR_AT_CLASS % class_id
             else:
                 auc_pr_key = keys.AUC_PR_AT_NAME % self._label_vocabulary[
                     class_id]
             metric_ops[head_lib._summary_key(self._name, auc_pr_key)] = (  # pylint:disable=protected-access
                 head_lib._auc(  # pylint:disable=protected-access
                     labels=class_labels,
                     predictions=class_probabilities,
                     weights=weights,
                     curve='PR',
                     name=auc_pr_key))
     return metric_ops
Example #35
  def model_fn(self, features, mode, config):
    """Model function for the estimator.

    Note that this does not take a `labels` arg. This works, but `input_fn` must
    return either `features` or, equivalently, `(features, None)`.

    Args:
      features: The input points. See @{tf.estimator.Estimator}.
      mode: See @{tf.estimator.Estimator}.
      config: See @{tf.estimator.Estimator}.

    Returns:
      A @{tf.estimator.EstimatorSpec} (see @{tf.estimator.Estimator}) specifying
      this behavior:
        * `train_op`: Execute one mini-batch or full-batch run of Lloyd's
             algorithm.
        * `loss`: The sum of the squared distances from each input point to its
             closest center.
        * `eval_metric_ops`: Maps `SCORE` to `loss`.
        * `predictions`: Maps `ALL_DISTANCES` to the distance from each input
             point to each cluster center; maps `CLUSTER_INDEX` to the index of
             the closest cluster center for each input point.
    """
    # input_points is a single Tensor. Therefore, the sharding functionality
    # in clustering_ops is unused, and some of the values below are lists of a
    # single item.
    input_points = _parse_tensor_or_dict(features)

    # Let N = the number of input_points.
    # all_distances: A list of one matrix of shape (N, num_clusters). Each value
    #   is the distance from an input point to a cluster center.
    # model_predictions: A list of one vector of shape (N). Each value is the
    #   cluster id of an input point.
    # losses: Similar to model_predictions but provides the distance to the
    #   closest cluster center.
    # is_initialized: scalar indicating whether the initial cluster centers
    #   have been chosen; see init_op.
    # cluster_centers_var: a Variable containing the cluster centers.
    # init_op: an op to choose the initial cluster centers. A single worker
    #   repeatedly executes init_op until is_initialized becomes True.
    # training_op: an op that runs an iteration of training, either an entire
    #   Lloyd iteration or a mini-batch of a Lloyd iteration. Multiple workers
    #   may execute this op, but only after is_initialized becomes True.
    (all_distances, model_predictions, losses, is_initialized, init_op,
     training_op) = clustering_ops.KMeans(
         inputs=input_points,
         num_clusters=self._num_clusters,
         initial_clusters=self._initial_clusters,
         distance_metric=self._distance_metric,
         use_mini_batch=self._use_mini_batch,
         mini_batch_steps_per_iteration=self._mini_batch_steps_per_iteration,
         random_seed=self._random_seed,
         kmeans_plus_plus_num_retries=self._kmeans_plus_plus_num_retries
     ).training_graph()

    loss = math_ops.reduce_sum(losses)
    summary.scalar('loss/raw', loss)

    incr_step = state_ops.assign_add(training_util.get_global_step(), 1)
    training_op = control_flow_ops.with_dependencies([training_op, incr_step],
                                                     loss)

    training_hooks = [
        _InitializeClustersHook(init_op, is_initialized, config.is_chief)
    ]
    if self._relative_tolerance is not None:
      training_hooks.append(
          _LossRelativeChangeHook(loss, self._relative_tolerance))

    export_outputs = {
        KMeansClustering.ALL_DISTANCES:
            export_output.PredictOutput(all_distances[0]),
        KMeansClustering.CLUSTER_INDEX:
            export_output.PredictOutput(model_predictions[0]),
        signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
            export_output.PredictOutput(model_predictions[0])
    }

    return model_fn_lib.EstimatorSpec(
        mode=mode,
        predictions={
            KMeansClustering.ALL_DISTANCES: all_distances[0],
            KMeansClustering.CLUSTER_INDEX: model_predictions[0],
        },
        loss=loss,
        train_op=training_op,
        eval_metric_ops={KMeansClustering.SCORE: metrics.mean(loss)},
        training_hooks=training_hooks,
        export_outputs=export_outputs)
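A hedged usage sketch for the estimator that owns this model_fn, assuming the TF 1.x `tf.contrib.factorization.KMeansClustering` class. As the docstring notes, `input_fn` returns features only (equivalently `(features, None)`):

# Illustrative only: random points, five clusters, full-batch Lloyd updates.
import numpy as np
import tensorflow as tf

points = np.random.rand(1000, 2).astype(np.float32)


def input_fn():
  # Features only; no labels.
  return tf.train.limit_epochs(tf.convert_to_tensor(points), num_epochs=1), None


kmeans = tf.contrib.factorization.KMeansClustering(
    num_clusters=5, use_mini_batch=False)
kmeans.train(input_fn)
cluster_indices = list(kmeans.predict_cluster_index(input_fn))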
Example #36
 def metric_fn(generator_loss, discriminator_loss):
     return {
         'generator_loss': metrics_lib.mean(generator_loss),
         'discriminator_loss': metrics_lib.mean(discriminator_loss),
     }
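Each value in a dict like this is a `(value, update_op)` pair as returned by the `tf.metrics.*` helpers; the Estimator runs `update_op` on every eval batch and reports the final `value`. A small standalone illustration (TF 1.x graph mode):

# Metric state lives in local variables, hence local_variables_initializer.
import tensorflow as tf

losses = tf.constant([1.0, 0.0, 2.0])
value, update_op = tf.metrics.mean(losses)
with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)
  print(sess.run(value))  # 1.0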
Example #37
 def _eval_metric_ops(self,
                      labels,
                      logits,
                      logistic,
                      scores,
                      class_ids,
                      unweighted_loss,
                      weights=None):
   with ops.name_scope(None, 'metrics', (labels, logits, logistic, scores,
                                         class_ids, unweighted_loss, weights)):
     keys = metric_keys.MetricKeys
     labels_mean = _indicator_labels_mean(
         labels=labels, weights=weights, name=keys.LABEL_MEAN)
     metric_ops = {
         # Estimator already adds a metric for loss.
         keys.LOSS_MEAN:
             metrics_lib.mean(
                 unweighted_loss, weights=weights, name=keys.LOSS_MEAN),
         keys.ACCURACY:
             metrics_lib.accuracy(
                 labels=labels,
                 predictions=class_ids,
                 weights=weights,
                 name=keys.ACCURACY),
         keys.PREDICTION_MEAN:
             _predictions_mean(
                 predictions=logistic,
                 weights=weights,
                 name=keys.PREDICTION_MEAN),
         keys.LABEL_MEAN:
             labels_mean,
         keys.ACCURACY_BASELINE:
             _accuracy_baseline(labels_mean),
         keys.AUC:
             _auc(
                 labels=labels,
                 predictions=logistic,
                 weights=weights,
                 name=keys.AUC),
         keys.AUC_PR:
             _auc(
                 labels=labels,
                 predictions=logistic,
                 weights=weights,
                 curve='PR',
                 name=keys.AUC_PR)
     }
     for threshold in self._thresholds:
       accuracy_key = keys.ACCURACY_AT_THRESHOLD % threshold
       metric_ops[accuracy_key] = _accuracy_at_threshold(
           labels=labels, predictions=logistic, weights=weights,
           threshold=threshold, name=accuracy_key)
       # Precision for positive examples.
       precision_key = keys.PRECISION_AT_THRESHOLD % threshold
       metric_ops[precision_key] = _precision_at_threshold(
           labels=labels, predictions=logistic, weights=weights,
           threshold=threshold, name=precision_key)
       # Recall for positive examples.
       recall_key = keys.RECALL_AT_THRESHOLD % threshold
       metric_ops[recall_key] = _recall_at_threshold(
           labels=labels, predictions=logistic, weights=weights,
           threshold=threshold, name=recall_key)
     return metric_ops
Example #38
 def _top_k(probabilities, targets):
     targets = math_ops.to_int32(targets)
     if targets.get_shape().ndims > 1:
         targets = array_ops.squeeze(targets, axis=[1])
     return metrics.mean(nn.in_top_k(probabilities, targets, k))
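Note that `k` above is captured from the enclosing scope. A hedged standalone variant with an explicit `k` argument, using the public TF 1.x symbols instead of the internal modules:

import tensorflow as tf


def top_k_metric(probabilities, targets, k):
  # Mean of a boolean indicating whether the target is in the top-k classes.
  targets = tf.to_int32(targets)
  if targets.get_shape().ndims > 1:
    targets = tf.squeeze(targets, axis=[1])
  return tf.metrics.mean(tf.nn.in_top_k(probabilities, targets, k))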
Example #39
 def _eval_metric_ops(self, labels, logits, logistic, class_ids, weights,
                      weighted_sum_loss, example_weight_sum):
   with ops.name_scope(None, 'metrics',
                       (labels, logits, logistic, class_ids, weights,
                        weighted_sum_loss, example_weight_sum)):
     keys = metric_keys.MetricKeys
     labels_mean = _indicator_labels_mean(
         labels=labels, weights=weights, name=keys.LABEL_MEAN)
     metric_ops = {
         # Estimator already adds a metric for loss.
         _summary_key(self._name, keys.LOSS_MEAN):
             metrics_lib.mean(
                 # Both values and weights here are reduced, scalar Tensors.
                 # values is the actual mean we want -- weights represents the
                 # total weight of the batch and is needed to calculate
                 # update_op over many batches.
                 values=(weighted_sum_loss / example_weight_sum),
                 weights=example_weight_sum,
                 name=keys.LOSS_MEAN),
         _summary_key(self._name, keys.ACCURACY):
             metrics_lib.accuracy(
                 labels=labels,
                 predictions=class_ids,
                 weights=weights,
                 name=keys.ACCURACY),
         _summary_key(self._name, keys.PREDICTION_MEAN):
             _predictions_mean(
                 predictions=logistic,
                 weights=weights,
                 name=keys.PREDICTION_MEAN),
         _summary_key(self._name, keys.LABEL_MEAN):
             labels_mean,
         _summary_key(self._name, keys.ACCURACY_BASELINE):
             _accuracy_baseline(labels_mean),
         _summary_key(self._name, keys.AUC):
             _auc(
                 labels=labels,
                 predictions=logistic,
                 weights=weights,
                 name=keys.AUC),
         _summary_key(self._name, keys.AUC_PR):
             _auc(
                 labels=labels,
                 predictions=logistic,
                 weights=weights,
                 curve='PR',
                 name=keys.AUC_PR)
     }
     for threshold in self._thresholds:
       accuracy_key = keys.ACCURACY_AT_THRESHOLD % threshold
       metric_ops[_summary_key(self._name,
                               accuracy_key)] = _accuracy_at_threshold(
                                   labels=labels,
                                   predictions=logistic,
                                   weights=weights,
                                   threshold=threshold,
                                   name=accuracy_key)
       # Precision for positive examples.
       precision_key = keys.PRECISION_AT_THRESHOLD % threshold
       metric_ops[_summary_key(self._name,
                               precision_key)] = _precision_at_threshold(
                                   labels=labels,
                                   predictions=logistic,
                                   weights=weights,
                                   threshold=threshold,
                                   name=precision_key)
       # Recall for positive examples.
       recall_key = keys.RECALL_AT_THRESHOLD % threshold
       metric_ops[_summary_key(self._name,
                               recall_key)] = _recall_at_threshold(
                                   labels=labels,
                                   predictions=logistic,
                                   weights=weights,
                                   threshold=threshold,
                                   name=recall_key)
     return metric_ops
Example #40
  def create_estimator_spec(
      self, features, mode, logits, labels=None, train_op_fn=None):
    """Returns an `EstimatorSpec`.

    Please note that,
    + All args must be passed via name.

    Args:
      features: Input `dict` of `Tensor` or `SparseTensor` objects.
      mode: Estimator's `ModeKeys`.
      logits: logits `Tensor` with shape `[D0, D1, ... DN, logits_dimension]`.
        For many applications, the shape is `[batch_size, logits_dimension]`.
      labels: Labels `Tensor` with shape matching `logits`, namely
        `[D0, D1, ... DN, logits_dimension]`. When `logits_dimension=1`, shape
        `[D0, D1, ... DN]` is also supported. `labels` is required argument when
        `mode` equals `TRAIN` or `EVAL`.
      train_op_fn: Function that takes a scalar loss `Tensor` and returns
        `train_op`. Required in TRAIN mode.
    Returns:
      `EstimatorSpec`.
    Raises:
      ValueError: If `train_op_fn` is `None` in TRAIN mode.
    """
    # Predict.
    with ops.name_scope(self._name, 'head'):
      logits = _check_logits_final_dim(logits, self._logits_dimension)
      predictions = {prediction_keys.PredictionKeys.PREDICTIONS: logits}
      if mode == model_fn.ModeKeys.PREDICT:
        regression_output = export_output.RegressionOutput(value=logits)
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.PREDICT,
            predictions=predictions,
            export_outputs={
                _DEFAULT_SERVING_KEY: regression_output,
                _REGRESS_SERVING_KEY: regression_output,
                _PREDICT_SERVING_KEY: export_output.PredictOutput(predictions)
            })

      weighted_sum_loss, example_weight_sum, _ = self.create_loss(
          features=features, mode=mode, logits=logits, labels=labels)

      # Eval.
      if mode == model_fn.ModeKeys.EVAL:
        # Estimator already adds a metric for loss.
        eval_metric_ops = {
            metric_keys.MetricKeys.LOSS_MEAN:
                metrics_lib.mean(
                    # Both values and weights here are reduced, scalar Tensors.
                    # values is the actual mean we want -- weights represents
                    # the total weight of the batch and is needed to calculate
                    # update_op over many batches.
                    values=(weighted_sum_loss / example_weight_sum),
                    weights=example_weight_sum)
        }
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.EVAL,
            predictions=predictions,
            loss=weighted_sum_loss,
            eval_metric_ops=eval_metric_ops)

      # Train.
      if train_op_fn is None:
        raise ValueError('train_op_fn can not be None.')
    with ops.name_scope(''):
      summary.scalar(
          _summary_key(self._name, metric_keys.MetricKeys.LOSS),
          weighted_sum_loss)
      summary.scalar(
          _summary_key(self._name, metric_keys.MetricKeys.LOSS_MEAN),
          weighted_sum_loss / example_weight_sum)
    return model_fn.EstimatorSpec(
        mode=model_fn.ModeKeys.TRAIN,
        predictions=predictions,
        loss=weighted_sum_loss,
        train_op=train_op_fn(weighted_sum_loss))
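As the docstring above states, `train_op_fn` receives the scalar loss and returns a train op. A minimal sketch of such a function (the optimizer choice is illustrative):

import tensorflow as tf


def train_op_fn(loss):
  # Apply gradients and advance the global step so training progresses.
  optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
  return optimizer.minimize(loss, global_step=tf.train.get_global_step())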
Example #41
def _indicator_labels_mean(labels, weights=None, name=None):
    with ops.name_scope(name, 'labels_mean', (labels, weights)) as scope:
        labels = math_ops.to_float(labels, name='labels')
        if weights is not None:
            weights = weights_broadcast_ops.broadcast_weights(weights, labels)
        return metrics_lib.mean(labels, weights=weights, name=scope)
Example #42
 def metric_fn_1():
     return {'two': metrics_lib.mean(constant_op.constant([2.]))}
Example #43
def _class_log_loss(probabilities, targets, weights=None):
    return metrics.mean(
        losses.log_loss(
            probabilities,
            _squeeze_and_onehot(targets, array_ops.shape(probabilities)[1])),
        weights=weights)
Example #44
def _indicator_labels_mean(labels, weights=None, name=None):
  with ops.name_scope(name, 'labels_mean', (labels, weights)) as scope:
    labels = math_ops.to_float(labels, name='labels')
    if weights is not None:
      weights = weights_broadcast_ops.broadcast_weights(weights, labels)
    return metrics_lib.mean(labels, weights=weights, name=scope)
Example #45
 def metric_fn(features):
   return {'mean_x': metrics_lib.mean(features['x'])}
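A metric_fn with this signature can be attached to an existing estimator; the sketch below assumes the TF 1.x `tf.contrib.estimator.add_metrics` helper, whose metric_fn may accept any subset of `features`, `labels`, `predictions`, and `config`:

import tensorflow as tf


def metric_fn(features):
  return {'mean_x': tf.metrics.mean(features['x'])}

# `estimator` is any pre-built tf.estimator.Estimator (illustrative):
# estimator = tf.contrib.estimator.add_metrics(estimator, metric_fn)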
Example #47
def _sigmoid_entropy(probabilities, targets, weights=None):
    return metrics.mean(
        losses.sigmoid_cross_entropy(
            probabilities,
            _squeeze_and_onehot(targets, array_ops.shape(probabilities)[1])),
        weights=weights)
Example #48
def _softmax_entropy(probabilities, targets, weights=None):
    return metrics.mean(
        losses.sparse_softmax_cross_entropy(
            probabilities, math_ops.to_int32(targets)),
        weights=weights)
Example #49
 def _eval_metric_ops(
     self, labels, probabilities, weights, unreduced_loss,
     regularization_loss):
   """Returns a dict of metrics for eval_metric_ops."""
   with ops.name_scope(
       None, 'metrics',
       [labels, probabilities, weights, unreduced_loss, regularization_loss]):
     keys = metric_keys.MetricKeys
     metric_ops = {
         # Estimator already adds a metric for loss.
         head_lib._summary_key(self._name, keys.LOSS_MEAN):  # pylint:disable=protected-access
             metrics_lib.mean(
                 values=unreduced_loss,
                 weights=weights,
                 name=keys.LOSS_MEAN),
         head_lib._summary_key(self._name, keys.AUC):  # pylint:disable=protected-access
             metrics_lib.auc(labels=labels, predictions=probabilities,
                             weights=weights, name=keys.AUC),
         head_lib._summary_key(self._name, keys.AUC_PR):  # pylint:disable=protected-access
             metrics_lib.auc(labels=labels, predictions=probabilities,
                             weights=weights, curve='PR',
                             name=keys.AUC_PR),
     }
     if regularization_loss is not None:
       loss_regularization_key = head_lib._summary_key(  # pylint:disable=protected-access
           self._name, keys.LOSS_REGULARIZATION)
       metric_ops[loss_regularization_key] = (
           metrics_lib.mean(
               values=regularization_loss,
               name=keys.LOSS_REGULARIZATION))
     for threshold in self._thresholds:
       accuracy_key = keys.ACCURACY_AT_THRESHOLD % threshold
       metric_ops[head_lib._summary_key(self._name, accuracy_key)] = (  # pylint:disable=protected-access
           head_lib._accuracy_at_threshold(  # pylint:disable=protected-access
               labels=labels,
               predictions=probabilities,
               weights=weights,
               threshold=threshold,
               name=accuracy_key))
       # Precision for positive examples.
       precision_key = keys.PRECISION_AT_THRESHOLD % threshold
       metric_ops[head_lib._summary_key(self._name, precision_key)] = (  # pylint:disable=protected-access
           head_lib._precision_at_threshold(  # pylint:disable=protected-access
               labels=labels,
               predictions=probabilities,
               weights=weights,
               threshold=threshold,
               name=precision_key))
       # Recall for positive examples.
       recall_key = keys.RECALL_AT_THRESHOLD % threshold
       metric_ops[head_lib._summary_key(self._name, recall_key)] = (  # pylint:disable=protected-access
           head_lib._recall_at_threshold(  # pylint:disable=protected-access
               labels=labels,
               predictions=probabilities,
               weights=weights,
               threshold=threshold,
               name=recall_key))
     for class_id in self._classes_for_class_based_metrics:
       batch_rank = array_ops.rank(probabilities) - 1
       begin = array_ops.concat(
           [array_ops.zeros([batch_rank], dtype=dtypes.int32), [class_id]],
           axis=0)
       size = array_ops.concat(
           [-1 * array_ops.ones([batch_rank], dtype=dtypes.int32), [1]],
           axis=0)
       class_probabilities = array_ops.slice(
           probabilities, begin=begin, size=size)
       class_labels = array_ops.slice(labels, begin=begin, size=size)
       prob_key = keys.PROBABILITY_MEAN_AT_CLASS % class_id
       metric_ops[head_lib._summary_key(self._name, prob_key)] = (  # pylint:disable=protected-access
           head_lib._predictions_mean(  # pylint:disable=protected-access
               predictions=class_probabilities,
               weights=weights,
               name=prob_key))
       auc_key = keys.AUC_AT_CLASS % class_id
       metric_ops[head_lib._summary_key(self._name, auc_key)] = (  # pylint:disable=protected-access
           head_lib._auc(  # pylint:disable=protected-access
               labels=class_labels,
               predictions=class_probabilities,
               weights=weights,
               name=auc_key))
       auc_pr_key = keys.AUC_PR_AT_CLASS % class_id
       metric_ops[head_lib._summary_key(self._name, auc_pr_key)] = (  # pylint:disable=protected-access
           head_lib._auc(  # pylint:disable=protected-access
               labels=class_labels,
               predictions=class_probabilities,
               weights=weights,
               curve='PR',
               name=auc_pr_key))
   return metric_ops
Example #50
    def create_estimator_spec(self,
                              features,
                              mode,
                              logits,
                              labels=None,
                              train_op_fn=None):
        """See `Head`."""
        with variable_scope.variable_scope(
                None,
                default_name='regression_head',
                values=(tuple(six.itervalues(features)) + (labels, logits))):

            # Predict.
            logits = _check_logits(logits, self._logits_dimension)
            predictions = {prediction_keys.PredictionKeys.PREDICTIONS: logits}
            if mode == model_fn.ModeKeys.PREDICT:
                return model_fn.EstimatorSpec(
                    mode=model_fn.ModeKeys.PREDICT,
                    predictions=predictions,
                    export_outputs={
                        '': export_output.RegressionOutput(value=logits)
                    })

            # Eval.
            labels = _check_labels(math_ops.to_float(labels),
                                   self._logits_dimension)
            unweighted_loss = losses.mean_squared_error(
                labels=labels,
                predictions=logits,
                reduction=losses.Reduction.NONE)
            weights = (1. if (self._weight_feature_key is None) else
                       features[self._weight_feature_key])
            weights = math_ops.to_float(weights, name='weights')
            training_loss = losses.compute_weighted_loss(
                unweighted_loss,
                weights=weights,
                reduction=losses.Reduction.SUM)
            if mode == model_fn.ModeKeys.EVAL:
                # Estimator already adds a metric for loss.
                eval_metric_ops = {
                    metric_keys.MetricKeys.LOSS_MEAN:
                    metrics_lib.mean(unweighted_loss, weights=weights)
                }
                return model_fn.EstimatorSpec(mode=model_fn.ModeKeys.EVAL,
                                              predictions=predictions,
                                              loss=training_loss,
                                              eval_metric_ops=eval_metric_ops)

            # Train.
            if train_op_fn is None:
                raise ValueError('train_op_fn can not be None.')
            logging_ops.scalar_summary(metric_keys.MetricKeys.LOSS,
                                       training_loss)
            logging_ops.scalar_summary(
                metric_keys.MetricKeys.LOSS_MEAN,
                losses.compute_weighted_loss(unweighted_loss,
                                             weights=weights,
                                             reduction=losses.Reduction.MEAN))
            return model_fn.EstimatorSpec(mode=model_fn.ModeKeys.TRAIN,
                                          predictions=predictions,
                                          loss=training_loss,
                                          train_op=train_op_fn(training_loss))
Example #51
    def create_estimator_spec(
            self, features, logits, mode, labels=None, train_op_fn=None):
        """See `Head`."""

        # split logits into mu, sigma and alpha
        components = array_ops.reshape(logits, [-1, 3, self._m])
        mus = components[:, 0, :]
        sigmas = components[:, 1, :]
        alphas = components[:, 2, :]
        alphas = nn_ops.softmax(clip_ops.clip_by_value(alphas, 1e-2, 1.))

        # Predict.
        with ops.name_scope('head'):
            #logits = head_lib._check_logits(logits, self._logits_dimension)
            means = math_ops.reduce_sum(alphas*mus, axis=1, keepdims=True)

            uncertainty = math_ops.reduce_sum(
                alphas*sigmas, axis=1, keepdims=True)
            
            predicted_value = array_ops.concat([means, uncertainty], 1)
            predictions = {prediction_keys.PredictionKeys.PREDICTIONS:
                           predicted_value}
            if mode == model_fn.ModeKeys.PREDICT:
                regression_output = export_output.RegressionOutput(
                    value=predicted_value)
                return model_fn.EstimatorSpec(
                    mode=model_fn.ModeKeys.PREDICT,
                    predictions=predictions,
                    export_outputs={
                        head_lib._DEFAULT_SERVING_KEY: regression_output,
                        head_lib._REGRESS_SERVING_KEY: regression_output,
                        head_lib._PREDICT_SERVING_KEY:
                        export_output.PredictOutput(predictions)
                    })
            
            # Eval.
            if mode == model_fn.ModeKeys.EVAL:
                # Estimator already adds a metric for loss.
                mus = math_ops.reduce_sum(alphas*mus, axis=1, keepdims=True)
                #mus = utils.tf_print(mus, "mus:")
                #labels = utils.tf_print(labels, "labels:")
                training_loss, unweighted_loss, _ = self.create_loss2(
                    features=features, mode=mode, logits=mus, labels=labels)
                keys = metric_keys.MetricKeys

                eval_metric_ops = {
                    head_lib._summary_key(self._name, keys.LOSS_MEAN):
                        metrics_lib.mean(unweighted_loss, weights=None)
                }
                return model_fn.EstimatorSpec(
                    mode=model_fn.ModeKeys.EVAL,
                    predictions=predictions,
                    loss=training_loss,
                    eval_metric_ops=eval_metric_ops)

            # Train.
            if train_op_fn is None:
                raise ValueError('train_op_fn can not be None.')

            training_loss, unweighted_loss, _ = self.create_loss(
                features=features, mode=mode, mus=mus,
                sigmas=sigmas, alphas=alphas, labels=labels)

        with ops.name_scope(''):
            summary.scalar(
                head_lib._summary_key(self._name,
                                      metric_keys.MetricKeys.LOSS_MEAN),
                losses.compute_weighted_loss(
                    unweighted_loss,
                    reduction=losses.Reduction.MEAN))
            return model_fn.EstimatorSpec(
                mode=model_fn.ModeKeys.TRAIN,
                predictions=predictions,
                loss=training_loss,
                train_op=train_op_fn(training_loss))
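As a quick numeric check of the aggregation above: the predicted value is the alpha-weighted sum of the component means, and the reported uncertainty is the alpha-weighted sum of the component sigmas (the values below are made up):

import numpy as np

mus = np.array([[1.0, 3.0]])
sigmas = np.array([[0.5, 1.5]])
alphas = np.array([[0.25, 0.75]])

mean = np.sum(alphas * mus, axis=1, keepdims=True)            # [[2.5]]
uncertainty = np.sum(alphas * sigmas, axis=1, keepdims=True)  # [[1.25]]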