Example #1
  def create_estimator_spec(
      self, features, mode, logits, labels=None, train_op_fn=None):
    """See `Head`."""
    with ops.name_scope('head'):
      logits = head_lib._check_logits(logits, self.logits_dimension)  # pylint:disable=protected-access

      # Predict.
      pred_keys = prediction_keys.PredictionKeys
      with ops.name_scope(None, 'predictions', (logits,)):
        probabilities = math_ops.sigmoid(logits, name=pred_keys.PROBABILITIES)
        predictions = {
            pred_keys.LOGITS: logits,
            pred_keys.PROBABILITIES: probabilities,
        }
      if mode == model_fn.ModeKeys.PREDICT:
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.PREDICT,
            predictions=predictions,
            export_outputs={
                '': export_output.ClassificationOutput(scores=probabilities)
            })

      # Eval.
      unweighted_loss, processed_labels = self.create_loss(
          features=features, mode=mode, logits=logits, labels=labels)
      # Averages loss over classes.
      per_example_loss = math_ops.reduce_mean(
          unweighted_loss, axis=-1, keep_dims=True)
      weights = head_lib._weights(features, self._weight_column)  # pylint:disable=protected-access
      training_loss = losses.compute_weighted_loss(
          per_example_loss, weights=weights, reduction=losses.Reduction.SUM)
      if mode == model_fn.ModeKeys.EVAL:
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.EVAL,
            predictions=predictions,
            loss=training_loss,
            eval_metric_ops=self._eval_metric_ops(
                labels=processed_labels,
                probabilities=probabilities,
                weights=weights,
                per_example_loss=per_example_loss))

      # Train.
      if train_op_fn is None:
        raise ValueError('train_op_fn can not be None.')
    with ops.name_scope(''):
      summary.scalar(
          head_lib._summary_key(self._name, metric_keys.MetricKeys.LOSS),  # pylint:disable=protected-access
          training_loss)
      summary.scalar(
          head_lib._summary_key(  # pylint:disable=protected-access
              self._name, metric_keys.MetricKeys.LOSS_MEAN),
          losses.compute_weighted_loss(
              unweighted_loss, weights=weights,
              reduction=losses.Reduction.MEAN))
    return model_fn.EstimatorSpec(
        mode=model_fn.ModeKeys.TRAIN,
        predictions=predictions,
        loss=training_loss,
        train_op=train_op_fn(training_loss))
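
A minimal usage sketch (TF 1.x), assuming this `create_estimator_spec` comes from a multi-label head such as `tf.contrib.estimator.multi_label_head`: the head owns the loss, metrics, and export signatures, while the model_fn only supplies logits and a `train_op_fn`. The feature column and layer sizes below are illustrative only.

import tensorflow as tf

def model_fn(features, labels, mode):
  head = tf.contrib.estimator.multi_label_head(n_classes=3)
  # Hypothetical input pipeline; adapt the column to your data.
  columns = [tf.feature_column.numeric_column('x', shape=[10])]
  net = tf.feature_column.input_layer(features, columns)
  logits = tf.layers.dense(net, units=head.logits_dimension)
  optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
  return head.create_estimator_spec(
      features=features, mode=mode, logits=logits, labels=labels,
      train_op_fn=lambda loss: optimizer.minimize(
          loss, global_step=tf.train.get_global_step()))

estimator = tf.estimator.Estimator(model_fn=model_fn)
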
Example #2
  def _merge_eval(self, all_estimator_spec):
    """Merges list of `EstimatorSpec` for eval.

    Args:
      all_estimator_spec: list of `EstimatorSpec` for the individual heads.

    Returns:
      `EstimatorSpec` that merges all heads for EVAL.
    """
    predictions = {}
    metrics = {}
    losses = []
    with ops.name_scope('merge_eval'):
      for head, spec in zip(self._heads, all_estimator_spec):
        losses.append(spec.loss)
        head_name = head.name
        # Loss metric is not added by default.
        loss_name = head_lib._summary_key(  # pylint:disable=protected-access
            head_name, metric_keys.MetricKeys.LOSS)
        metrics[loss_name] = metrics_lib.mean(spec.loss, name=loss_name)
        # Metric keys already contain head.name.
        metrics.update(spec.eval_metric_ops or {})
        for k, v in six.iteritems(spec.predictions):
          predictions[(head_name, k)] = v
      loss = _merge_losses(losses, self._head_weights)

    return model_fn.EstimatorSpec(
        mode=model_fn.ModeKeys.EVAL,
        predictions=predictions,
        loss=loss,
        eval_metric_ops=metrics)
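
For context, a hedged sketch of where `_merge_eval` fits, assuming the `tf.contrib.estimator.multi_head` API: each per-head spec is computed first, then merged so metric keys carry the head names and losses combine by the optional head weights.

import tensorflow as tf

head_a = tf.contrib.estimator.multi_label_head(n_classes=2, name='task_a')
head_b = tf.contrib.estimator.multi_label_head(n_classes=5, name='task_b')
# During EVAL, create_estimator_spec on the merged head invokes _merge_eval
# internally; the merged loss is 1.0 * loss_a + 0.5 * loss_b.
multi = tf.contrib.estimator.multi_head([head_a, head_b],
                                        head_weights=[1.0, 0.5])
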
Example #3
 def create_loss(self, features, mode, logits=None, labels=None):
   """See `_Head`."""
   model_outputs = self.state_manager.define_loss(
       self.model, features, mode)
   summary.scalar(
       head_lib._summary_key(self._name, metric_keys.MetricKeys.LOSS),
       model_outputs.loss)
   return model_outputs
Example #4
 def _eval_metric_ops(
     self, labels, probabilities, weights, unreduced_loss,
     regularization_loss):
   """Returns a dict of metrics for eval_metric_ops."""
   with ops.name_scope(
       None, 'metrics',
       [labels, probabilities, weights, unreduced_loss, regularization_loss]):
     keys = metric_keys.MetricKeys
     metric_ops = {
         # Estimator already adds a metric for loss.
         head_lib._summary_key(self._name, keys.LOSS_MEAN):  # pylint:disable=protected-access
             metrics_lib.mean(
                 values=unreduced_loss,
                 weights=weights,
                 name=keys.LOSS_MEAN),
         head_lib._summary_key(self._name, keys.AUC):  # pylint:disable=protected-access
             metrics_lib.auc(labels=labels, predictions=probabilities,
                             weights=weights, name=keys.AUC),
         head_lib._summary_key(self._name, keys.AUC_PR):  # pylint:disable=protected-access
             metrics_lib.auc(labels=labels, predictions=probabilities,
                             weights=weights, curve='PR',
                             name=keys.AUC_PR),
     }
     if regularization_loss is not None:
       loss_regularization_key = head_lib._summary_key(  # pylint:disable=protected-access
           self._name, keys.LOSS_REGULARIZATION)
       metric_ops[loss_regularization_key] = (
           metrics_lib.mean(
               values=regularization_loss,
               name=keys.LOSS_REGULARIZATION))
     for threshold in self._thresholds:
       accuracy_key = keys.ACCURACY_AT_THRESHOLD % threshold
       metric_ops[head_lib._summary_key(self._name, accuracy_key)] = (  # pylint:disable=protected-access
           head_lib._accuracy_at_threshold(  # pylint:disable=protected-access
               labels=labels,
               predictions=probabilities,
               weights=weights,
               threshold=threshold,
               name=accuracy_key))
       # Precision for positive examples.
       precision_key = keys.PRECISION_AT_THRESHOLD % threshold
       metric_ops[head_lib._summary_key(self._name, precision_key)] = (  # pylint:disable=protected-access
           head_lib._precision_at_threshold(  # pylint:disable=protected-access
               labels=labels,
               predictions=probabilities,
               weights=weights,
               threshold=threshold,
               name=precision_key))
       # Recall for positive examples.
       recall_key = keys.RECALL_AT_THRESHOLD % threshold
       metric_ops[head_lib._summary_key(self._name, recall_key)] = (  # pylint:disable=protected-access
           head_lib._recall_at_threshold(  # pylint:disable=protected-access
               labels=labels,
               predictions=probabilities,
               weights=weights,
               threshold=threshold,
               name=recall_key))
   return metric_ops
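
The `*_at_threshold` helpers above are private to `head_lib`; a hedged sketch of the assumed behavior of `_accuracy_at_threshold` (binarize the probabilities at the threshold, then stream ordinary accuracy):

import tensorflow as tf

def accuracy_at_threshold(labels, predictions, weights, threshold, name=None):
  # Binarize probabilities at the threshold, then reuse the standard
  # streaming accuracy metric.
  threshold_predictions = tf.to_float(tf.greater_equal(predictions, threshold))
  return tf.metrics.accuracy(labels=labels,
                             predictions=threshold_predictions,
                             weights=weights,
                             name=name)
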
Example #5
 def _eval_metric_ops(self, labels, probabilities, weights, weighted_sum_loss,
                      example_weight_sum):
   """Returns a dict of metrics for eval_metric_ops."""
   with ops.name_scope(
       None, 'metrics',
       [labels, probabilities, weights, weighted_sum_loss, example_weight_sum
       ]):
     keys = metric_keys.MetricKeys
     metric_ops = {
         # Estimator already adds a metric for loss.
         head_lib._summary_key(self._name, keys.LOSS_MEAN):  # pylint:disable=protected-access
             metrics_lib.mean(
                 # Both values and weights here are reduced, scalar Tensors.
                 # values is the actual mean we want, but we pass the scalar
                 # example_weight_sum in order to return the correct update_op
                 # alongside the value_op for streaming metrics.
                 values=(weighted_sum_loss / example_weight_sum),
                 weights=example_weight_sum,
                 name=keys.LOSS_MEAN),
         head_lib._summary_key(self._name, keys.AUC):  # pylint:disable=protected-access
             metrics_lib.auc(labels=labels, predictions=probabilities,
                             weights=weights, name=keys.AUC),
         head_lib._summary_key(self._name, keys.AUC_PR):  # pylint:disable=protected-access
             metrics_lib.auc(labels=labels, predictions=probabilities,
                             weights=weights, curve='PR',
                             name=keys.AUC_PR),
     }
     for threshold in self._thresholds:
       accuracy_key = keys.ACCURACY_AT_THRESHOLD % threshold
       metric_ops[head_lib._summary_key(self._name, accuracy_key)] = (  # pylint:disable=protected-access
           head_lib._accuracy_at_threshold(  # pylint:disable=protected-access
               labels=labels,
               predictions=probabilities,
               weights=weights,
               threshold=threshold,
               name=accuracy_key))
       # Precision for positive examples.
       precision_key = keys.PRECISION_AT_THRESHOLD % threshold
       metric_ops[head_lib._summary_key(self._name, precision_key)] = (  # pylint:disable=protected-access
           head_lib._precision_at_threshold(  # pylint:disable=protected-access
               labels=labels,
               predictions=probabilities,
               weights=weights,
               threshold=threshold,
               name=precision_key))
       # Recall for positive examples.
       recall_key = keys.RECALL_AT_THRESHOLD % threshold
       metric_ops[head_lib._summary_key(self._name, recall_key)] = (  # pylint:disable=protected-access
           head_lib._recall_at_threshold(  # pylint:disable=protected-access
               labels=labels,
               predictions=probabilities,
               weights=weights,
               threshold=threshold,
               name=recall_key))
   return metric_ops
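
Why the LOSS_MEAN construction above streams correctly: `tf.metrics.mean` accumulates `total += values * weights` and `count += weights`, so passing the scalar `example_weight_sum` as the weight makes the final value `sum(weighted_sum_loss) / sum(example_weight_sum)` across all eval batches. A toy check (assumed values, TF 1.x):

import tensorflow as tf

value_ph = tf.placeholder(tf.float32, [])
weight_ph = tf.placeholder(tf.float32, [])
mean, update_op = tf.metrics.mean(values=value_ph / weight_ph,
                                  weights=weight_ph)
with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  # Two batches of (weighted_sum_loss, example_weight_sum).
  for loss_sum, weight_sum in [(6.0, 2.0), (2.0, 4.0)]:
    sess.run(update_op, {value_ph: loss_sum, weight_ph: weight_sum})
  # (6 + 2) / (2 + 4) = 1.33..., the weighted mean across batches,
  # not the unweighted average of per-batch means ((3.0 + 0.5) / 2 = 1.75).
  print(sess.run(mean))
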
Example #6
 def _eval_metric_ops(self, labels, probabilities, weights,
                      unweighted_loss):
     """Returns a dict of metrics for eval_metric_ops."""
     with ops.name_scope(None, 'metrics',
                         [labels, probabilities, weights, unweighted_loss]):
         keys = metric_keys.MetricKeys
         metric_ops = {
             # Estimator already adds a metric for loss.
             head_lib._summary_key(self._name, keys.LOSS_MEAN):  # pylint:disable=protected-access
                 metrics_lib.mean(
                     unweighted_loss, weights=weights, name=keys.LOSS_MEAN),
             head_lib._summary_key(self._name, keys.AUC):  # pylint:disable=protected-access
                 metrics_lib.auc(
                     labels=labels, predictions=probabilities, weights=weights,
                     name=keys.AUC),
             head_lib._summary_key(self._name, keys.AUC_PR):  # pylint:disable=protected-access
                 metrics_lib.auc(
                     labels=labels, predictions=probabilities, weights=weights,
                     curve='PR', name=keys.AUC_PR),
         }
         for threshold in self._thresholds:
             accuracy_key = keys.ACCURACY_AT_THRESHOLD % threshold
             metric_ops[head_lib._summary_key(self._name, accuracy_key)] = (  # pylint:disable=protected-access
                 head_lib._accuracy_at_threshold(  # pylint:disable=protected-access
                     labels=labels,
                     predictions=probabilities,
                     weights=weights,
                     threshold=threshold,
                     name=accuracy_key))
             # Precision for positive examples.
             precision_key = keys.PRECISION_AT_THRESHOLD % threshold
             metric_ops[head_lib._summary_key(
                 self._name, precision_key)] = (  # pylint:disable=protected-access
                     head_lib._precision_at_threshold(  # pylint:disable=protected-access
                         labels=labels,
                         predictions=probabilities,
                         weights=weights,
                         threshold=threshold,
                         name=precision_key))
             # Recall for positive examples.
             recall_key = keys.RECALL_AT_THRESHOLD % threshold
             metric_ops[head_lib._summary_key(self._name, recall_key)] = (  # pylint:disable=protected-access
                 head_lib._recall_at_threshold(  # pylint:disable=protected-access
                     labels=labels,
                     predictions=probabilities,
                     weights=weights,
                     threshold=threshold,
                     name=recall_key))
     return metric_ops
Example #7
    def _merge_eval(self, all_estimator_spec, use_tpu=False):
        """Merges list of `EstimatorSpec` for eval.

    Args:
      all_estimator_spec: list of `EstimatorSpec` for the individual heads.
      use_tpu: If `True`, will raise `NotImplementedError`, because TPU is not
        yet supported for eval.

    Returns:
      `EstimatorSpec` that merges all heads for EVAL.
    Raises:
      NotImplementedError: If `use_tpu` is `True`.
    """
        if use_tpu:
            raise NotImplementedError(
                'TPU evaluation is not implemented for multi_head.')
        predictions = {}
        metrics = {}
        losses = []
        with ops.name_scope('merge_eval'):
            for head, spec in zip(self._heads, all_estimator_spec):
                losses.append(spec.loss)
                head_name = head.name
                # Loss metric is not added by default.
                loss_name = head_lib._summary_key(  # pylint:disable=protected-access
                    head_name, metric_keys.MetricKeys.LOSS)
                metrics[loss_name] = metrics_lib.mean(spec.loss,
                                                      name=loss_name)
                # Metric keys already contain head.name.
                metrics.update(spec.eval_metric_ops or {})
                for k, v in six.iteritems(spec.predictions):
                    predictions[(head_name, k)] = v
            loss = _merge_losses(losses, self._head_weights)

        return model_fn.EstimatorSpec(mode=model_fn.ModeKeys.EVAL,
                                      predictions=predictions,
                                      loss=loss,
                                      eval_metric_ops=metrics)
Example #8
  def create_estimator_spec(
      self, features, mode, logits, labels=None, train_op_fn=None):
    """See `Head`."""
    with ops.name_scope(self._name, 'head'):
      logits = head_lib._check_logits_final_dim(logits, self.logits_dimension)  # pylint:disable=protected-access

      # Predict.
      pred_keys = prediction_keys.PredictionKeys
      with ops.name_scope(None, 'predictions', (logits,)):
        probabilities = math_ops.sigmoid(logits, name=pred_keys.PROBABILITIES)
        predictions = {
            pred_keys.LOGITS: logits,
            pred_keys.PROBABILITIES: probabilities,
        }
      if mode == model_fn.ModeKeys.PREDICT:
        classifier_output = head_lib._classification_output(  # pylint:disable=protected-access
            scores=probabilities, n_classes=self._n_classes,
            label_vocabulary=self._label_vocabulary)
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.PREDICT,
            predictions=predictions,
            export_outputs={
                _DEFAULT_SERVING_KEY: classifier_output,
                head_lib._CLASSIFY_SERVING_KEY: classifier_output,  # pylint:disable=protected-access
                head_lib._PREDICT_SERVING_KEY: (  # pylint:disable=protected-access
                    export_output.PredictOutput(predictions))
            })

      (weighted_sum_loss, example_weight_sum,
       processed_labels) = self.create_loss(
           features=features, mode=mode, logits=logits, labels=labels)

      # Eval.
      if mode == model_fn.ModeKeys.EVAL:
        weights = head_lib._get_weights_and_check_match_logits(  # pylint:disable=protected-access
            features=features, weight_column=self._weight_column, logits=logits)
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.EVAL,
            predictions=predictions,
            loss=weighted_sum_loss,
            eval_metric_ops=self._eval_metric_ops(
                labels=processed_labels,
                probabilities=probabilities,
                weights=weights,
                weighted_sum_loss=weighted_sum_loss,
                example_weight_sum=example_weight_sum))

      # Train.
      if train_op_fn is None:
        raise ValueError('train_op_fn can not be None.')
    with ops.name_scope(''):
      summary.scalar(
          head_lib._summary_key(self._name, metric_keys.MetricKeys.LOSS),  # pylint:disable=protected-access
          weighted_sum_loss)
      summary.scalar(
          head_lib._summary_key(  # pylint:disable=protected-access
              self._name, metric_keys.MetricKeys.LOSS_MEAN),
          weighted_sum_loss / example_weight_sum)
    return model_fn.EstimatorSpec(
        mode=model_fn.ModeKeys.TRAIN,
        predictions=predictions,
        loss=weighted_sum_loss,
        train_op=train_op_fn(weighted_sum_loss))
Example #9
    def create_estimator_spec(self,
                              features,
                              mode,
                              logits,
                              labels=None,
                              optimizer=None,
                              train_op_fn=None,
                              regularization_losses=None):
        """Returns an `EstimatorSpec`.

        Args:
          features: Input `dict` of `Tensor` or `SparseTensor` objects.
          mode: Estimator's `ModeKeys`.
          logits: A list of two `Tensor`s, one holding the start-position
            logits and one the end-position logits; each has shape
            `[batch_size, logits_dimension]`.
          labels: A `Tensor` of shape `[batch_size, 2]` holding the true start
            and end positions within the document.
          optimizer: `Optimizer` instance to optimize the loss in TRAIN mode.
            Namely, sets `train_op = optimizer.minimize(loss, global_step)`, which
            updates variables and increments `global_step`.
          train_op_fn: Function that takes a scalar loss `Tensor` and returns
            `train_op`. Used if `optimizer` is `None`.
          regularization_losses: A list of additional scalar losses to be added to
            the training loss, such as regularization losses. These losses are
            usually expressed as a batch average, so for best results users need to
            set `loss_reduction=SUM_OVER_BATCH_SIZE` or
            `loss_reduction=SUM_OVER_NONZERO_WEIGHTS` when creating the head to
            avoid scaling errors.
        Returns:
          `EstimatorSpec`.
        Raises:
          ValueError: If both `train_op_fn` and `optimizer` are `None` in TRAIN
            mode, or if both are set.
        """
        with tf.name_scope(self._name, 'head'):
            # Predict. This variant returns an empty predictions dict; the
            # span-extraction variant in a later example populates it.
            pred_keys = prediction_keys.PredictionKeys  # unused in this variant
            predictions = {}

            if mode == tf.estimator.ModeKeys.PREDICT:
                output = export_output.PredictOutput(predictions)
                return tf.estimator.EstimatorSpec(
                    mode=tf.estimator.ModeKeys.PREDICT,
                    predictions=predictions,
                    export_outputs={
                        _DEFAULT_SERVING_KEY: output,
                        _PREDICT_SERVING_KEY: output
                    })

            training_loss, unreduced_loss, weights, label_ids = self.create_loss(
                features=features, mode=mode, logits=logits, labels=labels)

            if regularization_losses:
                regularization_loss = tf.add_n(regularization_losses)
                regularized_training_loss = tf.add_n(
                    [training_loss, regularization_loss])
            else:
                regularization_loss = None
                regularized_training_loss = training_loss

            # Eval.
            if mode == tf.estimator.ModeKeys.EVAL:
                return tf.estimator.EstimatorSpec(
                    mode=tf.estimator.ModeKeys.EVAL,
                    predictions=predictions,
                    loss=regularized_training_loss,
                    eval_metric_ops=self._eval_metric_ops(
                        labels=label_ids,
                        # Span tensors are stubbed out in this variant; the
                        # full computation appears in a later example.
                        predict=None,  # tf.sign(tf.abs(tf.reshape(only_max, [d1, d2 * d3 * d3])))
                        location=None,  # loc
                        pro=max,  # NOTE: no local `max` is defined here; this passes the Python builtin.
                        unreduced_loss=unreduced_loss,
                        regularization_loss=regularization_loss))

            # Train.
            if optimizer is not None:
                if train_op_fn is not None:
                    raise ValueError(
                        'train_op_fn and optimizer cannot both be set.')
                train_op = optimizer.minimize(
                    regularized_training_loss,
                    global_step=tf.train.get_global_step())
            elif train_op_fn is not None:
                train_op = train_op_fn(regularized_training_loss)
            else:
                raise ValueError(
                    'train_op_fn and optimizer cannot both be None.')
        with tf.name_scope(''):
            keys = metric_keys.MetricKeys
            tf.summary.scalar(_summary_key(self._name, keys.LOSS),
                              regularized_training_loss)

            if regularization_loss is not None:
                tf.summary.scalar(
                    _summary_key(self._name, keys.LOSS_REGULARIZATION),
                    regularization_loss)
        return tf.estimator.EstimatorSpec(mode=tf.estimator.ModeKeys.TRAIN,
                                          predictions=predictions,
                                          loss=regularized_training_loss,
                                          train_op=train_op)
Example #10
    def create_estimator_spec(self,
                              features,
                              mode,
                              logits,
                              labels=None,
                              train_op_fn=None,
                              regularization_losses=None):
        """Returns an `EstimatorSpec`.

        Args:
          features: Input `dict` of `Tensor` or `SparseTensor` objects.
          mode: Estimator's `ModeKeys`.
          logits: logits `Tensor` with shape `[D0, D1, ... DN, n_classes]`.
            For many applications, the shape is `[batch_size, n_classes]`.
          labels: Labels with shape matching `logits`. Can be multi-hot `Tensor`
            with shape `[D0, D1, ... DN, n_classes]` or `SparseTensor` with
            `dense_shape` `[D0, D1, ... DN, ?]`. `labels` is a required argument
            when `mode` equals `TRAIN` or `EVAL`.
          train_op_fn: Function that takes a scalar loss `Tensor` and returns
            `train_op`. Required in TRAIN mode.
          regularization_losses: A list of additional scalar losses to be added to
            the training loss, such as regularization losses. These losses are
            usually expressed as a batch average, so for best results users need to
            set `loss_reduction=SUM_OVER_BATCH_SIZE` or
            `loss_reduction=SUM_OVER_NONZERO_WEIGHTS` when creating the head to
            avoid scaling errors.

        Returns:
          `EstimatorSpec`.

        Raises:
          ValueError: If `train_op_fn` is `None` in TRAIN mode.
        """
        with ops.name_scope(self._name, 'head'):
            logits = head_lib._check_logits_final_dim(logits,
                                                      self.logits_dimension)  # pylint:disable=protected-access

            # Predict.
            pred_keys = prediction_keys.PredictionKeys
            with ops.name_scope(None, 'predictions', (logits, )):
                probabilities = math_ops.sigmoid(logits,
                                                 name=pred_keys.PROBABILITIES)
                predictions = {
                    pred_keys.LOGITS: logits,
                    pred_keys.PROBABILITIES: probabilities,
                }
            if mode == model_fn.ModeKeys.PREDICT:
                classifier_output = head_lib._classification_output(  # pylint:disable=protected-access
                    scores=probabilities,
                    n_classes=self._n_classes,
                    label_vocabulary=self._label_vocabulary)
                return model_fn.EstimatorSpec(
                    mode=model_fn.ModeKeys.PREDICT,
                    predictions=predictions,
                    export_outputs={
                        _DEFAULT_SERVING_KEY:
                        classifier_output,
                        head_lib._CLASSIFY_SERVING_KEY:
                        classifier_output,  # pylint:disable=protected-access
                        head_lib._PREDICT_SERVING_KEY: (  # pylint:disable=protected-access
                            export_output.PredictOutput(predictions))
                    })

            (training_loss, unreduced_loss, weights,
             processed_labels) = self.create_loss(features=features,
                                                  mode=mode,
                                                  logits=logits,
                                                  labels=labels)
            if regularization_losses:
                regularization_loss = math_ops.add_n(regularization_losses)
                regularized_training_loss = math_ops.add_n(
                    [training_loss, regularization_loss])
            else:
                regularization_loss = None
                regularized_training_loss = training_loss

            # Eval.
            if mode == model_fn.ModeKeys.EVAL:
                return model_fn.EstimatorSpec(
                    mode=model_fn.ModeKeys.EVAL,
                    predictions=predictions,
                    loss=regularized_training_loss,
                    eval_metric_ops=self._eval_metric_ops(
                        labels=processed_labels,
                        probabilities=probabilities,
                        weights=weights,
                        unreduced_loss=unreduced_loss,
                        regularization_loss=regularization_loss))

            # Train.
            if train_op_fn is None:
                raise ValueError('train_op_fn can not be None.')
            # Only summarize mean_loss for SUM reduction to preserve backwards
            # compatibility. Otherwise skip it to avoid unnecessary computation.
            if self._loss_reduction == losses.Reduction.SUM:
                example_weight_sum = math_ops.reduce_sum(
                    weights * array_ops.ones_like(unreduced_loss))
                mean_loss = training_loss / example_weight_sum
            else:
                mean_loss = None
        with ops.name_scope(''):
            keys = metric_keys.MetricKeys
            summary.scalar(
                head_lib._summary_key(self._name, keys.LOSS),  # pylint:disable=protected-access
                regularized_training_loss)
            if mean_loss is not None:
                summary.scalar(
                    head_lib._summary_key(self._name, keys.LOSS_MEAN),  # pylint:disable=protected-access
                    mean_loss)
            if regularization_loss is not None:
                summary.scalar(
                    head_lib._summary_key(self._name,
                                          keys.LOSS_REGULARIZATION),  # pylint:disable=protected-access
                    regularization_loss)
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.TRAIN,
            predictions=predictions,
            loss=regularized_training_loss,
            train_op=train_op_fn(regularized_training_loss))
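
A hedged sketch of supplying `regularization_losses` as the docstring above describes: collect the graph's regularization terms and hand them to the head, which adds them to the training loss and summarizes them separately. The model_fn, feature column, and regularizer strength are hypothetical.

import tensorflow as tf

def model_fn(features, labels, mode, params):
  head = params['head']  # a head exposing the create_estimator_spec above
  columns = [tf.feature_column.numeric_column('x', shape=[4])]
  net = tf.feature_column.input_layer(features, columns)
  logits = tf.layers.dense(
      net, units=head.logits_dimension,
      kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-4))
  reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
  optimizer = tf.train.AdamOptimizer()
  return head.create_estimator_spec(
      features=features, mode=mode, logits=logits, labels=labels,
      train_op_fn=lambda loss: optimizer.minimize(
          loss, global_step=tf.train.get_global_step()),
      regularization_losses=reg_losses)
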
Example #11
    def create_estimator_spec(self,
                              features,
                              mode,
                              logits,
                              labels=None,
                              train_op_fn=None):
        """See `Head`."""
        with ops.name_scope(self._name, 'head'):
            logits = head_lib._check_logits_final_dim(logits,
                                                      self.logits_dimension)  # pylint:disable=protected-access

            # Predict.
            pred_keys = prediction_keys.PredictionKeys
            with ops.name_scope(None, 'predictions', (logits, )):
                probabilities = math_ops.sigmoid(logits,
                                                 name=pred_keys.PROBABILITIES)
                predictions = {
                    pred_keys.LOGITS: logits,
                    pred_keys.PROBABILITIES: probabilities,
                }
            if mode == model_fn.ModeKeys.PREDICT:
                classifier_output = head_lib._classification_output(  # pylint:disable=protected-access
                    scores=probabilities,
                    n_classes=self._n_classes,
                    label_vocabulary=self._label_vocabulary)
                return model_fn.EstimatorSpec(
                    mode=model_fn.ModeKeys.PREDICT,
                    predictions=predictions,
                    export_outputs={
                        _DEFAULT_SERVING_KEY:
                        classifier_output,
                        head_lib._CLASSIFY_SERVING_KEY:
                        classifier_output,  # pylint:disable=protected-access
                        head_lib._PREDICT_SERVING_KEY: (  # pylint:disable=protected-access
                            export_output.PredictOutput(predictions))
                    })

            (weighted_sum_loss, example_weight_sum,
             processed_labels) = self.create_loss(features=features,
                                                  mode=mode,
                                                  logits=logits,
                                                  labels=labels)

            # Eval.
            if mode == model_fn.ModeKeys.EVAL:
                weights = head_lib._get_weights_and_check_match_logits(  # pylint:disable=protected-access
                    features=features,
                    weight_column=self._weight_column,
                    logits=logits)
                return model_fn.EstimatorSpec(
                    mode=model_fn.ModeKeys.EVAL,
                    predictions=predictions,
                    loss=weighted_sum_loss,
                    eval_metric_ops=self._eval_metric_ops(
                        labels=processed_labels,
                        probabilities=probabilities,
                        weights=weights,
                        weighted_sum_loss=weighted_sum_loss,
                        example_weight_sum=example_weight_sum))

            # Train.
            if train_op_fn is None:
                raise ValueError('train_op_fn can not be None.')
        with ops.name_scope(''):
            summary.scalar(
                head_lib._summary_key(self._name, metric_keys.MetricKeys.LOSS),  # pylint:disable=protected-access
                weighted_sum_loss)
            summary.scalar(
                head_lib._summary_key(  # pylint:disable=protected-access
                    self._name, metric_keys.MetricKeys.LOSS_MEAN),
                weighted_sum_loss / example_weight_sum)
        return model_fn.EstimatorSpec(mode=model_fn.ModeKeys.TRAIN,
                                      predictions=predictions,
                                      loss=weighted_sum_loss,
                                      train_op=train_op_fn(weighted_sum_loss))
Example #12
    def create_estimator_spec(self,
                              features,
                              mode,
                              logits,
                              labels=None,
                              train_op_fn=None):
        """See `Head`."""
        with ops.name_scope(self._name, 'head'):
            logits = head_lib._check_logits(logits, self.logits_dimension)  # pylint:disable=protected-access

            # Predict.
            pred_keys = prediction_keys.PredictionKeys
            with ops.name_scope(None, 'predictions', (logits, )):
                probabilities = math_ops.sigmoid(logits,
                                                 name=pred_keys.PROBABILITIES)
                predictions = {
                    pred_keys.LOGITS: logits,
                    pred_keys.PROBABILITIES: probabilities,
                }
            if mode == model_fn.ModeKeys.PREDICT:
                return model_fn.EstimatorSpec(
                    mode=model_fn.ModeKeys.PREDICT,
                    predictions=predictions,
                    export_outputs={
                        '':
                        export_output.ClassificationOutput(
                            scores=probabilities)
                    })

            # Eval.
            unweighted_loss, processed_labels = self.create_loss(
                features=features, mode=mode, logits=logits, labels=labels)
            # Averages loss over classes.
            per_example_loss = math_ops.reduce_mean(unweighted_loss,
                                                    axis=-1,
                                                    keep_dims=True)
            weights = head_lib._weights(features, self._weight_column)  # pylint:disable=protected-access
            training_loss = losses.compute_weighted_loss(
                per_example_loss,
                weights=weights,
                reduction=losses.Reduction.SUM)
            if mode == model_fn.ModeKeys.EVAL:
                return model_fn.EstimatorSpec(
                    mode=model_fn.ModeKeys.EVAL,
                    predictions=predictions,
                    loss=training_loss,
                    eval_metric_ops=self._eval_metric_ops(
                        labels=processed_labels,
                        probabilities=probabilities,
                        weights=weights,
                        per_example_loss=per_example_loss))

            # Train.
            if train_op_fn is None:
                raise ValueError('train_op_fn can not be None.')
        with ops.name_scope(''):
            summary.scalar(
                head_lib._summary_key(self._name, metric_keys.MetricKeys.LOSS),  # pylint:disable=protected-access
                training_loss)
            summary.scalar(
                head_lib._summary_key(  # pylint:disable=protected-access
                    self._name, metric_keys.MetricKeys.LOSS_MEAN),
                losses.compute_weighted_loss(unweighted_loss,
                                             weights=weights,
                                             reduction=losses.Reduction.MEAN))
        return model_fn.EstimatorSpec(mode=model_fn.ModeKeys.TRAIN,
                                      predictions=predictions,
                                      loss=training_loss,
                                      train_op=train_op_fn(training_loss))
Example #13
  def _create_tpu_estimator_spec(
      self, features, mode, logits, labels=None, optimizer=None,
      train_op_fn=None, regularization_losses=None):
    """Returns an `model_fn._TPUEstimatorSpec`.

    Args:
      features: Input `dict` of `Tensor` or `SparseTensor` objects.
      mode: Estimator's `ModeKeys`.
      logits: logits `Tensor` with shape `[D0, D1, ... DN, n_classes]`.
        For many applications, the shape is `[batch_size, n_classes]`.
      labels: Labels with shape matching `logits`. Can be multi-hot `Tensor`
        with shape `[D0, D1, ... DN, n_classes]` or `SparseTensor` with
        `dense_shape` `[D0, D1, ... DN, ?]`. `labels` is required argument when
        `mode` equals `TRAIN` or `EVAL`.
      optimizer: `Optimizer` instance to optimize the loss in TRAIN mode.
        Namely, sets `train_op = optimizer.minimize(loss, global_step)`, which
        updates variables and increments `global_step`.
      train_op_fn: Function that takes a scalar loss `Tensor` and returns
        `train_op`. Used if `optimizer` is `None`.
      regularization_losses: A list of additional scalar losses to be added to
        the training loss, such as regularization losses. These losses are
        usually expressed as a batch average, so for best results users need to
        set `loss_reduction=SUM_OVER_BATCH_SIZE` or
        `loss_reduction=SUM_OVER_NONZERO_WEIGHTS` when creating the head to
        avoid scaling errors.
    Returns:
      `model_fn._TPUEstimatorSpec`.
    Raises:
      ValueError: If both `train_op_fn` and `optimizer` are `None` in TRAIN
        mode, or if both are set.
    """
    with ops.name_scope(self._name, 'head'):
      logits = head_lib._check_logits_final_dim(logits, self.logits_dimension)  # pylint:disable=protected-access

      # Predict.
      pred_keys = prediction_keys.PredictionKeys
      with ops.name_scope(None, 'predictions', (logits,)):
        probabilities = math_ops.sigmoid(logits, name=pred_keys.PROBABILITIES)
        predictions = {
            pred_keys.LOGITS: logits,
            pred_keys.PROBABILITIES: probabilities,
        }
      if mode == model_fn.ModeKeys.PREDICT:
        classifier_output = head_lib._classification_output(  # pylint:disable=protected-access
            scores=probabilities, n_classes=self._n_classes,
            label_vocabulary=self._label_vocabulary)
        return model_fn._TPUEstimatorSpec(  # pylint:disable=protected-access
            mode=model_fn.ModeKeys.PREDICT,
            predictions=predictions,
            export_outputs={
                _DEFAULT_SERVING_KEY: classifier_output,
                head_lib._CLASSIFY_SERVING_KEY: classifier_output,  # pylint:disable=protected-access
                head_lib._PREDICT_SERVING_KEY: (  # pylint:disable=protected-access
                    export_output.PredictOutput(predictions))
            })

      (training_loss, unreduced_loss, weights,
       processed_labels) = self.create_loss(
           features=features, mode=mode, logits=logits, labels=labels)
      if regularization_losses:
        regularization_loss = math_ops.add_n(regularization_losses)
        regularized_training_loss = math_ops.add_n(
            [training_loss, regularization_loss])
      else:
        regularization_loss = None
        regularized_training_loss = training_loss

      # Eval.
      if mode == model_fn.ModeKeys.EVAL:
        return model_fn._TPUEstimatorSpec(  # pylint:disable=protected-access
            mode=model_fn.ModeKeys.EVAL,
            predictions=predictions,
            loss=regularized_training_loss,
            eval_metrics=head_lib._create_eval_metrics_tuple(  # pylint:disable=protected-access
                self._eval_metric_ops, {
                    'labels': processed_labels,
                    'probabilities': probabilities,
                    'weights': weights,
                    'unreduced_loss': unreduced_loss,
                    'regularization_loss': regularization_loss,
                }))

      # Train.
      if optimizer is not None:
        if train_op_fn is not None:
          raise ValueError('train_op_fn and optimizer cannot both be set.')
        train_op = optimizer.minimize(
            regularized_training_loss,
            global_step=training_util.get_global_step())
      elif train_op_fn is not None:
        train_op = train_op_fn(regularized_training_loss)
      else:
        raise ValueError('train_op_fn and optimizer cannot both be None.')
      # Only summarize mean_loss for SUM reduction to preserve backwards
      # compatibility. Otherwise skip it to avoid unnecessary computation.
      if self._loss_reduction == losses.Reduction.SUM:
        example_weight_sum = math_ops.reduce_sum(
            weights * array_ops.ones_like(unreduced_loss))
        mean_loss = training_loss / example_weight_sum
      else:
        mean_loss = None
    with ops.name_scope(''):
      keys = metric_keys.MetricKeys
      summary.scalar(
          head_lib._summary_key(self._name, keys.LOSS),  # pylint:disable=protected-access
          regularized_training_loss)
      if mean_loss is not None:
        summary.scalar(
            head_lib._summary_key(self._name, keys.LOSS_MEAN),  # pylint:disable=protected-access
            mean_loss)
      if regularization_loss is not None:
        summary.scalar(
            head_lib._summary_key(self._name, keys.LOSS_REGULARIZATION),  # pylint:disable=protected-access
            regularization_loss)
    return model_fn._TPUEstimatorSpec(  # pylint:disable=protected-access
        mode=model_fn.ModeKeys.TRAIN,
        predictions=predictions,
        loss=regularized_training_loss,
        train_op=train_op)
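
On TPU, eval metrics travel as a `(metric_fn, tensors)` pair rather than as precomputed ops, which is what `head_lib._create_eval_metrics_tuple` packs into `eval_metrics` above. A hedged sketch of the shape of that pairing, with toy tensors standing in for the head's processed labels and probabilities:

import tensorflow as tf

def metric_fn(labels, probabilities):
  # Rebuilt on the host from the tensors gathered off the TPU.
  return {'auc': tf.metrics.auc(labels=labels, predictions=probabilities)}

labels = tf.constant([[1.0, 0.0, 1.0]])
probabilities = tf.constant([[0.9, 0.2, 0.7]])
eval_metrics = (metric_fn, {'labels': labels,
                            'probabilities': probabilities})
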
Example #14
    def create_estimator_spec(self,
                              features,
                              mode,
                              logits,
                              labels=None,
                              optimizer=None,
                              train_op_fn=None,
                              regularization_losses=None,
                              params=None):
        """Returns an `EstimatorSpec`.

        Args:
          features: Input `dict` of `Tensor` or `SparseTensor` objects.
          mode: Estimator's `ModeKeys`.
          logits: A list of two `Tensor`s, one holding the start-position
            logits and one the end-position logits; each has shape
            `[batch_size, logits_dimension]`.
          labels: A `Tensor` of shape `[batch_size, 2]` holding the true start
            and end positions within the document.
          optimizer: `Optimizer` instance to optimize the loss in TRAIN mode.
            Namely, sets `train_op = optimizer.minimize(loss, global_step)`, which
            updates variables and increments `global_step`.
          train_op_fn: Function that takes a scalar loss `Tensor` and returns
            `train_op`. Used if `optimizer` is `None`.
          regularization_losses: A list of additional scalar losses to be added to
            the training loss, such as regularization losses. These losses are
            usually expressed as a batch average, so for best results users need to
            set `loss_reduction=SUM_OVER_BATCH_SIZE` or
            `loss_reduction=SUM_OVER_NONZERO_WEIGHTS` when creating the head to
            avoid scaling errors.
        Returns:
          `EstimatorSpec`.
        Raises:
          ValueError: If both `train_op_fn` and `optimizer` are `None` in TRAIN
            mode, or if both are set.
        """
        with tf.name_scope(self._name, 'head'):
            # Predict.
            pred_keys = prediction_keys.PredictionKeys
            with tf.name_scope(None, 'predictions', (logits, )):

                [start_logits, end_logits] = logits
                shape = tf.shape(start_logits)
                d1 = shape[0]
                d2 = shape[1]
                d3 = shape[2]
                start_logits = tf.reshape(
                    tf.nn.softmax(tf.reshape(start_logits, [-1, d2 * d3]), -1),
                    [d1, d2, d3, 1])
                end_logits = tf.reshape(
                    tf.nn.softmax(tf.reshape(end_logits, [-1, d2 * d3]), -1),
                    [d1, d2, 1, d3])
                #[batch_size, sent_number, seq_len,seq_len]
                mul = tf.multiply(start_logits, end_logits)
                x_tensor_band = tf.matrix_band_part(mul, 0, params.ans_limit)
                x_tensor_reshape = tf.reshape(x_tensor_band,
                                              [d1, d2 * d3 * d3])
                #[batch_size]
                max = tf.reduce_max(x_tensor_reshape, -1)
                #[batch_size,1]
                # The comparison is always true (probabilities < 3.1), so this
                # simply enumerates the row index of every batch element.
                row_index = tf.where(tf.not_equal(max,
                                                  tf.ones_like(max) + 2.1))
                indice = tf.expand_dims(tf.argmax(x_tensor_reshape, -1), -1)
                indice = tf.concat([row_index, indice], axis=-1)
                only_max = tf.sparse_to_dense(
                    indice, tf.shape(x_tensor_reshape, out_type=tf.int64), max)
                only_max = tf.reshape(only_max, [d1, d2, d3, d3])
                loc = tf.where(tf.cast(only_max, tf.bool))
                loc = tf.slice(
                    loc, [0, 1], [-1, -1], name="locations"
                )  # [batch_size, positions=3]; positions = (sent_no, start_pos, end_pos)
                predictions = {
                    "s_logits": start_logits,
                    "e_logits": end_logits,
                    pred_keys.PROBABILITIES: max,
                    "locations": loc
                }
            if mode == model_fn.ModeKeys.PREDICT:
                output = export_output.PredictOutput(predictions)
                return model_fn.EstimatorSpec(mode=model_fn.ModeKeys.PREDICT,
                                              predictions=predictions,
                                              export_outputs={
                                                  _DEFAULT_SERVING_KEY: output,
                                                  _PREDICT_SERVING_KEY: output
                                              })

            training_loss, unreduced_loss, weights, label_ids = self.create_loss(
                features=None, mode=mode, logits=logits, labels=labels)

            if regularization_losses:
                regularization_loss = tf.add_n(regularization_losses)
                regularized_training_loss = tf.add_n(
                    [training_loss, regularization_loss])
            else:
                regularization_loss = None
                regularized_training_loss = training_loss
            # Eval.
            if mode == model_fn.ModeKeys.EVAL:
                return model_fn.EstimatorSpec(
                    mode=model_fn.ModeKeys.EVAL,
                    predictions=predictions,
                    loss=regularized_training_loss,
                    eval_metric_ops=self._eval_metric_ops(
                        labels=label_ids,
                        predict=tf.sign(
                            tf.abs(tf.reshape(only_max, [d1, d2 * d3 * d3]))),
                        location=loc,
                        pro=max,
                        unreduced_loss=unreduced_loss,
                        regularization_loss=regularization_loss))

            # Train.
            if optimizer is not None:
                if train_op_fn is not None:
                    raise ValueError(
                        'train_op_fn and optimizer cannot both be set.')
                train_op = optimizer.minimize(
                    regularized_training_loss,
                    global_step=training_util.get_global_step())
            elif train_op_fn is not None:
                train_op = train_op_fn(regularized_training_loss)
            else:
                raise ValueError(
                    'train_op_fn and optimizer cannot both be None.')
        with tf.name_scope(''):
            keys = metric_keys.MetricKeys
            tf.summary.scalar(_summary_key(self._name, keys.LOSS),
                              regularized_training_loss)

            if regularization_loss is not None:
                tf.summary.scalar(
                    _summary_key(self._name, keys.LOSS_REGULARIZATION),
                    regularization_loss)
        return model_fn.EstimatorSpec(mode=model_fn.ModeKeys.TRAIN,
                                      predictions=predictions,
                                      loss=regularized_training_loss,
                                      train_op=train_op)
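
The PREDICT branch above scores every candidate answer span at once: an outer product of the start and end distributions, with `tf.matrix_band_part` zeroing spans that end before they start or run past `params.ans_limit`. A hedged, single-sentence toy version of the same trick:

import tensorflow as tf

start_probs = tf.constant([0.1, 0.6, 0.3])  # P(start = i), toy values
end_probs = tf.constant([0.2, 0.2, 0.6])    # P(end = j), toy values
ans_limit = 2
scores = start_probs[:, None] * end_probs[None, :]  # [seq_len, seq_len]
scores = tf.matrix_band_part(scores, 0, ans_limit)  # keep start <= end <= start + ans_limit
best_flat = tf.argmax(tf.reshape(scores, [-1]))     # flat index i * seq_len + j
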
Example #15
    def _eval_metric_ops(self, labels, predict, location, pro, unreduced_loss,
                         regularization_loss):
        """Returns the Eval metric ops."""
        with tf.name_scope(
                None, 'metrics',
            (labels, unreduced_loss, predict, location, regularization_loss)):
            keys = metric_keys.MetricKeys
            [label_start, label_end] = labels
            shape = tf.shape(label_start)
            d1 = shape[0]
            d2 = shape[1]
            d3 = shape[2]
            if isinstance(label_start, tf.SparseTensor):
                label_start = tf.sparse_tensor_to_dense(label_start)
                label_end = tf.sparse_tensor_to_dense(label_end)
            label_start = tf.expand_dims(label_start, -1)
            label_end = tf.expand_dims(label_end, -2)

            mul = tf.multiply(label_start, label_end)
            mul = tf.reshape(mul, [d1, d2 * d3 * d3])

            start_pos = tf.argmax(tf.reshape(label_start, [-1, d2 * d3]),
                                  -1,
                                  output_type=tf.int32)
            end_pos = tf.argmax(tf.reshape(label_end, [-1, d2 * d3]),
                                -1,
                                output_type=tf.int32)

            # Split predicted locations into (sentence_no, start, end) columns.
            positions = tf.split(tf.cast(location, tf.int32), 3, axis=-1)
            sentNo = tf.squeeze(positions[0], -1)
            start = tf.squeeze(positions[1], -1)
            end = tf.squeeze(positions[2], -1)
            # Flatten sentence-local offsets into document-level positions.
            start = start + sentNo * d2
            end = end + sentNo * d2

            # Length of the overlap between the predicted and the true span.
            overlap = tf.minimum(end_pos + 1 - start, end + 1 - start_pos)
            overlap = tf.minimum(overlap, end + 1 - start)
            overlap = tf.minimum(overlap, end_pos + 1 - start_pos)

            true_positive = tf.nn.relu(overlap)
            predict_scan = tf.nn.relu(end + 1 - start)
            predict_scan = tf.where(tf.cast(predict_scan, tf.bool),
                                    predict_scan,
                                    tf.ones_like(predict_scan) * (-1))
            label_scan = tf.nn.relu(end_pos + 1 - start_pos)
            label_scan = tf.where(tf.cast(label_scan, tf.bool), label_scan,
                                  tf.ones_like(label_scan) * (-1))
            labels = tf.argmax(mul, -1)
            predictions = tf.argmax(predict, -1)
            precision, precision_op = tf.metrics.mean(
                tf.nn.relu(true_positive / predict_scan))
            recall, recall_op = tf.metrics.mean(
                tf.nn.relu(true_positive / label_scan))

            def f1(precision, recall):
                return 2 * precision * recall / (precision + recall)

            f1_value = f1(precision, recall)
            f1_op = f1(precision_op, recall_op)

            metric_ops = {
                # Estimator already adds a metric for loss.
                # TODO(xiejw): Any other metrics?
                _summary_key(self._name, keys.LOSS_MEAN):
                tf.metrics.mean(values=unreduced_loss,
                                weights=None,
                                name=keys.LOSS_MEAN),
                _summary_key(self._name, "EM"):
                tf.metrics.accuracy(labels=labels,
                                    predictions=predictions,
                                    weights=None,
                                    name="EM"),
                _summary_key(self.name, keys.PRECISION):
                (precision, precision_op),
                _summary_key(self.name, keys.RECALL): (recall, recall_op),
                _summary_key(self.name, "f1_score"): (f1_value, f1_op),
                _summary_key(self.name, "raw_pro"):
                tf.metrics.mean(pro)
            }

            if regularization_loss is not None:
                metric_ops[_summary_key(
                    self._name, keys.LOSS_REGULARIZATION)] = (tf.metrics.mean(
                        values=regularization_loss,
                        name=keys.LOSS_REGULARIZATION))
        return metric_ops
Example #16
 def _eval_metric_ops(self, labels, probabilities, weights, unreduced_loss,
                      regularization_loss):
     """Returns a dict of metrics for eval_metric_ops."""
     with ops.name_scope(None, 'metrics', [
             labels, probabilities, weights, unreduced_loss,
             regularization_loss
     ]):
         keys = metric_keys.MetricKeys
         metric_ops = {
             # Estimator already adds a metric for loss.
             head_lib._summary_key(self._name, keys.LOSS_MEAN):  # pylint:disable=protected-access
                 metrics_lib.mean(
                     values=unreduced_loss,
                     weights=weights,
                     name=keys.LOSS_MEAN),
             head_lib._summary_key(self._name, keys.AUC):  # pylint:disable=protected-access
                 metrics_lib.auc(labels=labels, predictions=probabilities,
                                 weights=weights, name=keys.AUC),
             head_lib._summary_key(self._name, keys.AUC_PR):  # pylint:disable=protected-access
                 metrics_lib.auc(labels=labels, predictions=probabilities,
                                 weights=weights, curve='PR',
                                 name=keys.AUC_PR),
         }
         if regularization_loss is not None:
             loss_regularization_key = head_lib._summary_key(  # pylint:disable=protected-access
                 self._name, keys.LOSS_REGULARIZATION)
             metric_ops[loss_regularization_key] = (metrics_lib.mean(
                 values=regularization_loss, name=keys.LOSS_REGULARIZATION))
         for threshold in self._thresholds:
             accuracy_key = keys.ACCURACY_AT_THRESHOLD % threshold
             metric_ops[head_lib._summary_key(self._name, accuracy_key)] = (  # pylint:disable=protected-access
                 head_lib._accuracy_at_threshold(  # pylint:disable=protected-access
                     labels=labels,
                     predictions=probabilities,
                     weights=weights,
                     threshold=threshold,
                     name=accuracy_key))
             # Precision for positive examples.
             precision_key = keys.PRECISION_AT_THRESHOLD % threshold
             metric_ops[head_lib._summary_key(
                 self._name, precision_key)] = (  # pylint:disable=protected-access
                     head_lib._precision_at_threshold(  # pylint:disable=protected-access
                         labels=labels,
                         predictions=probabilities,
                         weights=weights,
                         threshold=threshold,
                         name=precision_key))
             # Recall for positive examples.
             recall_key = keys.RECALL_AT_THRESHOLD % threshold
             metric_ops[head_lib._summary_key(self._name, recall_key)] = (  # pylint:disable=protected-access
                 head_lib._recall_at_threshold(  # pylint:disable=protected-access
                     labels=labels,
                     predictions=probabilities,
                     weights=weights,
                     threshold=threshold,
                     name=recall_key))
         for class_id in self._classes_for_class_based_metrics:
             batch_rank = array_ops.rank(probabilities) - 1
              begin = array_ops.concat(
                  [array_ops.zeros([batch_rank], dtype=dtypes.int32),
                   [class_id]], axis=0)
              size = array_ops.concat(
                  [-1 * array_ops.ones([batch_rank], dtype=dtypes.int32), [1]],
                  axis=0)
             class_probabilities = array_ops.slice(probabilities,
                                                   begin=begin,
                                                   size=size)
             class_labels = array_ops.slice(labels, begin=begin, size=size)
             prob_key = keys.PROBABILITY_MEAN_AT_CLASS % class_id
             metric_ops[head_lib._summary_key(self._name, prob_key)] = (  # pylint:disable=protected-access
                 head_lib._predictions_mean(  # pylint:disable=protected-access
                     predictions=class_probabilities,
                     weights=weights,
                     name=prob_key))
             auc_key = keys.AUC_AT_CLASS % class_id
             metric_ops[head_lib._summary_key(self._name, auc_key)] = (  # pylint:disable=protected-access
                 head_lib._auc(  # pylint:disable=protected-access
                     labels=class_labels,
                     predictions=class_probabilities,
                     weights=weights,
                     name=auc_key))
             auc_pr_key = keys.AUC_PR_AT_CLASS % class_id
             metric_ops[head_lib._summary_key(self._name, auc_pr_key)] = (  # pylint:disable=protected-access
                 head_lib._auc(  # pylint:disable=protected-access
                     labels=class_labels,
                     predictions=class_probabilities,
                     weights=weights,
                     curve='PR',
                     name=auc_pr_key))
     return metric_ops
Example #17
0
    def create_estimator_spec(self,
                              features,
                              mode,
                              logits,
                              labels=None,
                              train_op_fn=None):
        """See `Head`."""
        with ops.name_scope(self._name, 'head'):
            logits = head_lib._check_logits_final_dim(logits,
                                                      self.logits_dimension)  # pylint:disable=protected-access

            # Predict.
            pred_keys = prediction_keys.PredictionKeys
            with ops.name_scope(None, 'predictions', (logits,)):
                probabilities = math_ops.sigmoid(logits,
                                                 name=pred_keys.PROBABILITIES)
                predictions = {
                    pred_keys.LOGITS: logits,
                    pred_keys.PROBABILITIES: probabilities,
                }
            if mode == model_fn.ModeKeys.PREDICT:
                classifier_output = head_lib._classification_output(  # pylint:disable=protected-access
                    scores=probabilities,
                    n_classes=self._n_classes,
                    label_vocabulary=self._label_vocabulary)
                return model_fn.EstimatorSpec(
                    mode=model_fn.ModeKeys.PREDICT,
                    predictions=predictions,
                    export_outputs={
                        _DEFAULT_SERVING_KEY: classifier_output,
                        head_lib._CLASSIFY_SERVING_KEY: classifier_output,  # pylint:disable=protected-access
                        head_lib._PREDICT_SERVING_KEY: (  # pylint:disable=protected-access
                            export_output.PredictOutput(predictions))
                    })

            (training_loss, unreduced_loss, weights,
             processed_labels) = self.create_loss(features=features,
                                                  mode=mode,
                                                  logits=logits,
                                                  labels=labels)

            # Eval.
            if mode == model_fn.ModeKeys.EVAL:
                return model_fn.EstimatorSpec(
                    mode=model_fn.ModeKeys.EVAL,
                    predictions=predictions,
                    loss=training_loss,
                    eval_metric_ops=self._eval_metric_ops(
                        labels=processed_labels,
                        probabilities=probabilities,
                        weights=weights,
                        unreduced_loss=unreduced_loss))

            # Train.
            if train_op_fn is None:
                raise ValueError('train_op_fn can not be None.')
            # Only summarize mean_loss for SUM reduction to preserve backwards
            # compatibility. Otherwise skip it to avoid unnecessary computation.
            if self._loss_reduction == losses.Reduction.SUM:
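                # Broadcast the weights to the shape of the unreduced loss so
                # the normalizer counts one weight per loss element.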
                example_weight_sum = math_ops.reduce_sum(
                    weights * array_ops.ones_like(unreduced_loss))
                mean_loss = training_loss / example_weight_sum
            else:
                mean_loss = None
        with ops.name_scope(''):
            summary.scalar(
                head_lib._summary_key(self._name, metric_keys.MetricKeys.LOSS),  # pylint:disable=protected-access
                training_loss)
            if mean_loss is not None:
                summary.scalar(
                    head_lib._summary_key(  # pylint:disable=protected-access
                        self._name, metric_keys.MetricKeys.LOSS_MEAN),
                    mean_loss)
        return model_fn.EstimatorSpec(mode=model_fn.ModeKeys.TRAIN,
                                      predictions=predictions,
                                      loss=training_loss,
                                      train_op=train_op_fn(training_loss))

    def create_estimator_spec(
            self, features, logits, mode, labels=None, train_op_fn=None):
        """See `Head`."""

        # Split the logits into mixture parameters: means (mu), scales
        # (sigma), and mixture weights (alpha); self._m is the number of
        # mixture components.
        components = array_ops.reshape(logits, [-1, 3, self._m])
        mus = components[:, 0, :]
        sigmas = components[:, 1, :]
        alphas = components[:, 2, :]
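        # Clip the raw mixture-weight logits before the softmax so no
        # component's weight can fully vanish or saturate.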
        alphas = nn_ops.softmax(clip_ops.clip_by_value(alphas, 1e-2, 1.))

        # Predict.
        with ops.name_scope('head'):
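            # Summarize the mixture as a point estimate (weighted mean) plus a
            # mixture-weighted scale that serves as an uncertainty measure.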
            means = math_ops.reduce_sum(alphas * mus, axis=1, keepdims=True)
            uncertainty = math_ops.reduce_sum(
                alphas * sigmas, axis=1, keepdims=True)

            predicted_value = array_ops.concat([means, uncertainty], 1)
            predictions = {prediction_keys.PredictionKeys.PREDICTIONS:
                           predicted_value}
            if mode == model_fn.ModeKeys.PREDICT:
                regression_output = export_output.RegressionOutput(
                    value=predicted_value)
                return model_fn.EstimatorSpec(
                    mode=model_fn.ModeKeys.PREDICT,
                    predictions=predictions,
                    export_outputs={
                        head_lib._DEFAULT_SERVING_KEY: regression_output,
                        head_lib._REGRESS_SERVING_KEY: regression_output,
                        head_lib._PREDICT_SERVING_KEY:
                        export_output.PredictOutput(predictions)
                    })
            
            # Eval.
            if mode == model_fn.ModeKeys.EVAL:
                # Estimator already adds a metric for loss.
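                # Collapse the mixture to its mean and score that point
                # prediction with create_loss2.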
                mus = math_ops.reduce_sum(alphas * mus, axis=1, keepdims=True)
                training_loss, unweighted_loss, _ = self.create_loss2(
                    features=features, mode=mode, logits=mus, labels=labels)
                keys = metric_keys.MetricKeys

                eval_metric_ops = {
                    head_lib._summary_key(self._name, keys.LOSS_MEAN):
                        metrics_lib.mean(unweighted_loss, weights=None)
                }
                return model_fn.EstimatorSpec(
                    mode=model_fn.ModeKeys.EVAL,
                    predictions=predictions,
                    loss=training_loss,
                    eval_metric_ops=eval_metric_ops)

            # Train.
            if train_op_fn is None:
                raise ValueError('train_op_fn can not be None.')

            training_loss, unweighted_loss, _ = self.create_loss(
                features=features, mode=mode, mus=mus,
                sigmas=sigmas, alphas=alphas, labels=labels)

        with ops.name_scope(''):
            summary.scalar(
                head_lib._summary_key(self._name,
                                      metric_keys.MetricKeys.LOSS_MEAN),
                losses.compute_weighted_loss(
                    unweighted_loss,
                    reduction=losses.Reduction.MEAN))
            return model_fn.EstimatorSpec(
                mode=model_fn.ModeKeys.TRAIN,
                predictions=predictions,
                loss=training_loss,
                train_op=train_op_fn(training_loss))
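The mixture head above packs three parameter groups into a single logits
tensor and summarizes them into a point estimate plus an uncertainty. A
minimal NumPy sketch of that decomposition (an illustration only, not part of
the original snippet; `m` is the number of mixture components):

import numpy as np

def mixture_summary(logits, m):
  """Returns (mean, uncertainty) for logits of shape [batch, 3 * m]."""
  components = logits.reshape([-1, 3, m])
  mus = components[:, 0, :]
  sigmas = components[:, 1, :]
  # Mirror the head's clipping before normalizing the mixture weights.
  alphas = np.clip(components[:, 2, :], 1e-2, 1.)
  alphas = np.exp(alphas) / np.exp(alphas).sum(axis=1, keepdims=True)
  mean = (alphas * mus).sum(axis=1, keepdims=True)
  uncertainty = (alphas * sigmas).sum(axis=1, keepdims=True)
  return mean, uncertainty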
Example #19
0
 def _eval_metric_ops(
     self, labels, probabilities, weights, unreduced_loss,
     regularization_loss):
   """Returns a dict of metrics for eval_metric_ops."""
   with ops.name_scope(
       None, 'metrics',
       [labels, probabilities, weights, unreduced_loss, regularization_loss]):
     keys = metric_keys.MetricKeys
     metric_ops = {
         # Estimator already adds a metric for loss.
         head_lib._summary_key(self._name, keys.LOSS_MEAN):  # pylint:disable=protected-access
             metrics_lib.mean(
                 values=unreduced_loss,
                 weights=weights,
                 name=keys.LOSS_MEAN),
         head_lib._summary_key(self._name, keys.AUC):  # pylint:disable=protected-access
             metrics_lib.auc(labels=labels, predictions=probabilities,
                             weights=weights, name=keys.AUC),
         head_lib._summary_key(self._name, keys.AUC_PR):  # pylint:disable=protected-access
             metrics_lib.auc(labels=labels, predictions=probabilities,
                             weights=weights, curve='PR',
                             name=keys.AUC_PR),
     }
     if regularization_loss is not None:
       loss_regularization_key = head_lib._summary_key(  # pylint:disable=protected-access
           self._name, keys.LOSS_REGULARIZATION)
       metric_ops[loss_regularization_key] = (
           metrics_lib.mean(
               values=regularization_loss,
               name=keys.LOSS_REGULARIZATION))
     for threshold in self._thresholds:
       accuracy_key = keys.ACCURACY_AT_THRESHOLD % threshold
       metric_ops[head_lib._summary_key(self._name, accuracy_key)] = (  # pylint:disable=protected-access
           head_lib._accuracy_at_threshold(  # pylint:disable=protected-access
               labels=labels,
               predictions=probabilities,
               weights=weights,
               threshold=threshold,
               name=accuracy_key))
       # Precision for positive examples.
       precision_key = keys.PRECISION_AT_THRESHOLD % threshold
       metric_ops[head_lib._summary_key(self._name, precision_key)] = (  # pylint:disable=protected-access
           head_lib._precision_at_threshold(  # pylint:disable=protected-access
               labels=labels,
               predictions=probabilities,
               weights=weights,
               threshold=threshold,
               name=precision_key))
       # Recall for positive examples.
       recall_key = keys.RECALL_AT_THRESHOLD % threshold
       metric_ops[head_lib._summary_key(self._name, recall_key)] = (  # pylint:disable=protected-access
           head_lib._recall_at_threshold(  # pylint:disable=protected-access
               labels=labels,
               predictions=probabilities,
               weights=weights,
               threshold=threshold,
               name=recall_key))
     for class_id in self._classes_for_class_based_metrics:
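        # Same slicing trick as in the earlier example: select the class_id-th
        # column of the last dimension while keeping all batch dimensions.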
       batch_rank = array_ops.rank(probabilities) - 1
       begin = array_ops.concat(
           [array_ops.zeros([batch_rank], dtype=dtypes.int32), [class_id]],
           axis=0)
       size = array_ops.concat(
           [-1 * array_ops.ones([batch_rank], dtype=dtypes.int32), [1]],
           axis=0)
       class_probabilities = array_ops.slice(
           probabilities, begin=begin, size=size)
       class_labels = array_ops.slice(labels, begin=begin, size=size)
       prob_key = keys.PROBABILITY_MEAN_AT_CLASS % class_id
       metric_ops[head_lib._summary_key(self._name, prob_key)] = (  # pylint:disable=protected-access
           head_lib._predictions_mean(  # pylint:disable=protected-access
               predictions=class_probabilities,
               weights=weights,
               name=prob_key))
       auc_key = keys.AUC_AT_CLASS % class_id
       metric_ops[head_lib._summary_key(self._name, auc_key)] = (  # pylint:disable=protected-access
           head_lib._auc(  # pylint:disable=protected-access
               labels=class_labels,
               predictions=class_probabilities,
               weights=weights,
               name=auc_key))
       auc_pr_key = keys.AUC_PR_AT_CLASS % class_id
       metric_ops[head_lib._summary_key(self._name, auc_pr_key)] = (  # pylint:disable=protected-access
           head_lib._auc(  # pylint:disable=protected-access
               labels=class_labels,
               predictions=class_probabilities,
               weights=weights,
               curve='PR',
               name=auc_pr_key))
   return metric_ops
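The thresholded metrics above rely on private helpers; conceptually they
binarize the probabilities at the given threshold and feed the result to the
standard streaming metrics. A rough sketch of that idea (an approximation,
not the private helpers' actual implementation):

import tensorflow as tf

def accuracy_at_threshold(labels, probabilities, threshold, weights=None):
  # Binarize at the threshold, then defer to the stock streaming metric.
  predicted = tf.cast(probabilities > threshold, tf.float32)
  return tf.metrics.accuracy(
      labels=labels, predictions=predicted, weights=weights)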
Example #20
0
  def create_estimator_spec(
      self, features, mode, logits, labels=None, train_op_fn=None):
    """See `Head`."""
    with ops.name_scope(self._name, 'head'):
      logits = head_lib._check_logits_final_dim(logits, self.logits_dimension)  # pylint:disable=protected-access

      # Predict.
      pred_keys = prediction_keys.PredictionKeys
      with ops.name_scope(None, 'predictions', (logits,)):
        probabilities = math_ops.sigmoid(logits, name=pred_keys.PROBABILITIES)
        predictions = {
            pred_keys.LOGITS: logits,
            pred_keys.PROBABILITIES: probabilities,
        }
      if mode == model_fn.ModeKeys.PREDICT:
        classifier_output = head_lib._classification_output(  # pylint:disable=protected-access
            scores=probabilities, n_classes=self._n_classes,
            label_vocabulary=self._label_vocabulary)
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.PREDICT,
            predictions=predictions,
            export_outputs={
                _DEFAULT_SERVING_KEY: classifier_output,
                head_lib._CLASSIFY_SERVING_KEY: classifier_output,  # pylint:disable=protected-access
                head_lib._PREDICT_SERVING_KEY: (  # pylint:disable=protected-access
                    export_output.PredictOutput(predictions))
            })

      (training_loss, unreduced_loss, weights,
       processed_labels) = self.create_loss(
           features=features, mode=mode, logits=logits, labels=labels)

      # Eval.
      if mode == model_fn.ModeKeys.EVAL:
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.EVAL,
            predictions=predictions,
            loss=training_loss,
            eval_metric_ops=self._eval_metric_ops(
                labels=processed_labels,
                probabilities=probabilities,
                weights=weights,
                unreduced_loss=unreduced_loss))

      # Train.
      if train_op_fn is None:
        raise ValueError('train_op_fn can not be None.')
      # Only summarize mean_loss for SUM reduction to preserve backwards
      # compatibility. Otherwise skip it to avoid unnecessary computation.
      if self._loss_reduction == losses.Reduction.SUM:
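        # Broadcast the weights to the shape of the unreduced loss so the
        # normalizer counts one weight per loss element.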
        example_weight_sum = math_ops.reduce_sum(
            weights * array_ops.ones_like(unreduced_loss))
        mean_loss = training_loss / example_weight_sum
      else:
        mean_loss = None
    with ops.name_scope(''):
      summary.scalar(
          head_lib._summary_key(self._name, metric_keys.MetricKeys.LOSS),  # pylint:disable=protected-access
          training_loss)
      if mean_loss is not None:
        summary.scalar(
            head_lib._summary_key(  # pylint:disable=protected-access
                self._name, metric_keys.MetricKeys.LOSS_MEAN),
            mean_loss)
    return model_fn.EstimatorSpec(
        mode=model_fn.ModeKeys.TRAIN,
        predictions=predictions,
        loss=training_loss,
        train_op=train_op_fn(training_loss))
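All of these heads are consumed the same way from an Estimator's model_fn:
the model computes logits and hands them to create_estimator_spec along with
a train_op_fn. A hypothetical wiring sketch (`my_head`, the feature key 'x',
and the optimizer choice are assumptions, not part of the snippets above):

import tensorflow as tf

def model_fn(features, labels, mode, params):
  # `my_head` stands in for one of the head objects defined above.
  logits = tf.layers.dense(features['x'], units=my_head.logits_dimension)
  optimizer = tf.train.AdagradOptimizer(learning_rate=0.05)
  return my_head.create_estimator_spec(
      features=features, mode=mode, logits=logits, labels=labels,
      train_op_fn=lambda loss: optimizer.minimize(
          loss, global_step=tf.train.get_global_step()))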