Example #1
  def _log_prob(self, event):
    if self.validate_args:
      event = distribution_util.embed_check_integer_casting_closed(
          event, target_dtype=dtypes.bool)

    # TODO(jaana): The current sigmoid_cross_entropy_with_logits has
    # inconsistent behavior for logits = inf/-inf.
    event = math_ops.cast(event, self.logits.dtype)
    logits = self.logits
    # sigmoid_cross_entropy_with_logits doesn't broadcast shape,
    # so we do this here.

    def _broadcast(logits, event):
      return (array_ops.ones_like(event) * logits,
              array_ops.ones_like(logits) * event)

    # First check static shape.
    if (event.get_shape().is_fully_defined() and
        logits.get_shape().is_fully_defined()):
      if event.get_shape() != logits.get_shape():
        logits, event = _broadcast(logits, event)
    else:
      logits, event = control_flow_ops.cond(
          distribution_util.same_dynamic_shape(logits, event),
          lambda: (logits, event),
          lambda: _broadcast(logits, event))
    return -nn.sigmoid_cross_entropy_with_logits(labels=event, logits=logits)
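The `ones_like` multiplications in `_broadcast` are a manual broadcast: since `sigmoid_cross_entropy_with_logits` requires its two arguments to have identical shapes, each tensor is multiplied by ones shaped like the other, which pushes both onto their common broadcast shape. A minimal NumPy sketch of the trick (an illustrative aside, not part of the source):

import numpy as np

logits = np.array([[0.5, -1.0]])    # shape (1, 2)
event = np.array([[1.0], [0.0]])    # shape (2, 1)

# Multiplying by ones_like of the other operand broadcasts both to (2, 2).
b_logits = np.ones_like(event) * logits
b_event = np.ones_like(logits) * event
assert b_logits.shape == b_event.shape == (2, 2)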
Example #2
def _log_loss_with_two_classes(logits, labels):
  # sigmoid_cross_entropy_with_logits requires [batch_size, 1] labels.
  if len(labels.get_shape()) == 1:
    labels = array_ops.expand_dims(labels, dim=[1])
  loss_vec = nn.sigmoid_cross_entropy_with_logits(logits,
                                                  math_ops.to_float(labels))
  return loss_vec
Example #3
  def log_prob(self, event, name="log_prob"):
    """Log of the probability mass function.

    Args:
      event: `int32` or `int64` binary Tensor.
      name: A name for this operation (optional).

    Returns:
      The log-probabilities of the events.
    """
    # TODO(jaana): The current sigmoid_cross_entropy_with_logits has
    # inconsistent behavior for logits = inf/-inf.
    with ops.name_scope(self.name):
      with ops.name_scope(name, values=[self.logits, event]):
        event = ops.convert_to_tensor(event, name="event")
        event = math_ops.cast(event, self.logits.dtype)
        logits = self.logits
        # sigmoid_cross_entropy_with_logits doesn't broadcast shape,
        # so we do this here.
        # TODO(b/30637701): Check dynamic shape, and don't broadcast if the
        # dynamic shapes are the same.
        if (not event.get_shape().is_fully_defined() or
            not logits.get_shape().is_fully_defined() or
            event.get_shape() != logits.get_shape()):
          logits = array_ops.ones_like(event) * logits
          event = array_ops.ones_like(logits) * event
        return -nn.sigmoid_cross_entropy_with_logits(logits, event)
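Negating the cross entropy works because, for a Bernoulli distribution with p = sigmoid(logits), the sigmoid cross entropy of an event y is exactly -log pmf(y). A quick numeric check of that identity (illustrative values, not from the source):

import numpy as np

logit, y = 1.5, 1.0
p = 1.0 / (1.0 + np.exp(-logit))                    # sigmoid(logit)
log_pmf = y * np.log(p) + (1 - y) * np.log(1 - p)   # Bernoulli log pmf
# Stable sigmoid cross entropy: max(x, 0) - x*z + log(1 + exp(-|x|)).
xent = max(logit, 0) - logit * y + np.log1p(np.exp(-abs(logit)))
assert np.isclose(log_pmf, -xent)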
Example #4
def _log_loss_with_two_classes(logits, target):
  # sigmoid_cross_entropy_with_logits requires [batch_size, 1] target.
  if len(target.get_shape()) == 1:
    target = array_ops.expand_dims(target, axis=1)
  loss_vec = nn.sigmoid_cross_entropy_with_logits(
      labels=math_ops.cast(target, dtypes.float32), logits=logits)
  return loss_vec
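The reshape above matters because `sigmoid_cross_entropy_with_logits` computes an elementwise loss and therefore expects labels with the same `[batch_size, 1]` shape as the logits. A small NumPy sketch of the equivalent shape change (illustrative only, not from the source):

import numpy as np

labels = np.array([0., 1., 1.])  # shape (3,)
labels = labels[:, None]         # shape (3, 1), like expand_dims(labels, axis=1)
assert labels.shape == (3, 1)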
Example #5
def logistic(logit, target, name=None):
  """Calculates the logistic cross-entropy loss, averaged across batches.

  **WARNING:** `logit` must be unscaled, while the `target` should be a
  normalized probability prediction. See
  `tf.nn.sigmoid_cross_entropy_with_logits` for more details.

  Args:
    logit: A `Tensor` of shape `[batch_size, dim_1, ..., dim_n]`
      of predicted logit values.
    target: A `Tensor` of shape `[batch_size, dim_1, ..., dim_n]` of
      target values. The shape of the target tensor should match the
      `logit` tensor.
    name: A name for the operation (optional).

  Returns:
    A scalar `Tensor` of the logistic cross-entropy loss, averaged across
    batches.

  Raises:
    ValueError: If `logit` and `target` shapes do not match.
  """
  with ops.op_scope([logit, target], name, "logistic_loss") as scope:
    return _reduce_to_scalar(
        nn.sigmoid_cross_entropy_with_logits(logit, target), name=scope)
Example #6
def _log_loss_with_two_classes(logits, target):
  # sigmoid_cross_entropy_with_logits requires [batch_size, 1] target.
  if len(target.get_shape()) == 1:
    target = array_ops.expand_dims(target, dim=[1])
  loss_vec = nn.sigmoid_cross_entropy_with_logits(logits,
                                                  math_ops.to_float(target))
  return loss_vec
Example #7
  def unregularized_loss(self, examples):
    """Add operations to compute the loss (without the regularization loss).

        Args:
          examples: Examples to compute unregularized loss on.

        Returns:
          An Operation that computes mean (unregularized) loss for given set of
          examples.
        Raises:
          ValueError: if examples are not well defined.
        """
    self._assertSpecified(
        ['example_labels', 'example_weights', 'sparse_features',
         'dense_features'], examples)
    self._assertList(['sparse_features', 'dense_features'], examples)
    with name_scope('sdca/unregularized_loss'):
      predictions = self._linear_predictions(examples)
      labels = convert_to_tensor(examples['example_labels'])
      weights = convert_to_tensor(examples['example_weights'])

      if self._options['loss_type'] == 'logistic_loss':
        return math_ops.reduce_sum(math_ops.mul(
            sigmoid_cross_entropy_with_logits(
                predictions, labels), weights)) / math_ops.reduce_sum(weights)

      # squared loss
      err = math_ops.sub(labels, predictions)

      weighted_squared_err = math_ops.mul(math_ops.square(err), weights)
      return (math_ops.reduce_sum(weighted_squared_err) /
              math_ops.reduce_sum(weights))
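The logistic branch above is a weighted mean: reduce_sum(loss * w) / reduce_sum(w). As a worked instance (illustrative numbers, not from the source), per-example losses [0.2, 0.8] with weights [1, 3] give (0.2*1 + 0.8*3) / (1 + 3) = 2.6 / 4 = 0.65:

import numpy as np

loss = np.array([0.2, 0.8])
w = np.array([1.0, 3.0])
assert np.isclose((loss * w).sum() / w.sum(), 0.65)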
Example #8
 def create_loss(self, features, mode, logits, labels):
   """See `Head`."""
   del mode  # Unused for this head.
   logits = ops.convert_to_tensor(logits)
   labels = _check_dense_labels_match_logits_and_reshape(
       labels=labels, logits=logits, expected_labels_dimension=1)
   if self._label_vocabulary is not None:
     labels = lookup_ops.index_table_from_tensor(
         vocabulary_list=tuple(self._label_vocabulary),
         name='class_id_lookup').lookup(labels)
   labels = math_ops.to_float(labels)
   labels = _assert_range(labels, 2)
   unweighted_loss = nn.sigmoid_cross_entropy_with_logits(
       labels=labels, logits=logits)
   weights = _get_weights_and_check_match_logits(
       features=features, weight_column=self._weight_column, logits=logits)
   weighted_sum_loss = losses.compute_weighted_loss(
       unweighted_loss, weights=weights, reduction=losses.Reduction.SUM)
   # _weights() can return 1.
   example_weight_sum = math_ops.reduce_sum(
       weights * array_ops.ones_like(unweighted_loss))
   return LossSpec(
       weighted_sum_loss=weighted_sum_loss,
       example_weight_sum=example_weight_sum,
       processed_labels=labels)
Example #9
def sigmoid_cross_entropy(
    multi_class_labels, logits, weights=1.0, label_smoothing=0, scope=None,
    loss_collection=ops.GraphKeys.LOSSES,
    reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
  """Creates a cross-entropy loss using tf.nn.sigmoid_cross_entropy_with_logits.

  `weights` acts as a coefficient for the loss. If a scalar is provided,
  then the loss is simply scaled by the given value. If `weights` is a
  tensor of shape `[batch_size]`, then the loss weights apply to each
  corresponding sample.

  If `label_smoothing` is nonzero, smooth the labels towards 1/2:

      new_multiclass_labels = multiclass_labels * (1 - label_smoothing)
                              + 0.5 * label_smoothing

  Args:
    multi_class_labels: `[batch_size, num_classes]` target integer labels in
      `{0, 1}`.
    logits: Float `[batch_size, num_classes]` logits outputs of the network.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `losses` dimension).
    label_smoothing: If greater than `0` then smooth the labels.
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.
    reduction: Type of reduction to apply to loss.

  Returns:
    Weighted loss `Tensor` of the same type as `logits`. If `reduction` is
    `NONE`, this has the same shape as `logits`; otherwise, it is scalar.

  Raises:
    ValueError: If the shape of `logits` doesn't match that of
      `multi_class_labels` or if the shape of `weights` is invalid, or if
      `weights` is None.  Also if `multi_class_labels` or `logits` is None.
  """
  if multi_class_labels is None:
    raise ValueError("multi_class_labels must not be None.")
  if logits is None:
    raise ValueError("logits must not be None.")
  with ops.name_scope(scope, "sigmoid_cross_entropy_loss",
                      (logits, multi_class_labels, weights)) as scope:
    logits = ops.convert_to_tensor(logits)
    logging.info("logits.dtype=%s.", logits.dtype)
    multi_class_labels = math_ops.cast(multi_class_labels, logits.dtype)
    logging.info("multi_class_labels.dtype=%s.", multi_class_labels.dtype)
    logits.get_shape().assert_is_compatible_with(multi_class_labels.get_shape())

    if label_smoothing > 0:
      multi_class_labels = (multi_class_labels * (1 - label_smoothing) +
                            0.5 * label_smoothing)

    losses = nn.sigmoid_cross_entropy_with_logits(labels=multi_class_labels,
                                                  logits=logits,
                                                  name="xentropy")
    logging.info("losses.dtype=%s.", losses.dtype)
    return compute_weighted_loss(
        losses, weights, scope, loss_collection, reduction=reduction)
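As a worked instance of the smoothing formula in the docstring (illustrative, not from the source): with label_smoothing = 0.1, a hard 1 becomes 1 * 0.9 + 0.5 * 0.1 = 0.95 and a hard 0 becomes 0.05, pulling both targets towards 1/2:

label_smoothing = 0.1
for label in (0.0, 1.0):
    smoothed = label * (1 - label_smoothing) + 0.5 * label_smoothing
    print(label, "->", smoothed)  # 0.0 -> 0.05, 1.0 -> 0.95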
Example #10
  def unregularized_loss(self, examples):
    """Add operations to compute the loss (without the regularization loss).

    Args:
      examples: Examples to compute unregularized loss on.

    Returns:
      An Operation that computes mean (unregularized) loss for given set of
      examples.

    Raises:
      ValueError: if examples are not well defined.
    """
    self._assertSpecified([
        'example_labels', 'example_weights', 'sparse_features', 'dense_features'
    ], examples)
    self._assertList(['sparse_features', 'dense_features'], examples)
    with name_scope('sdca/unregularized_loss'):
      predictions = math_ops.cast(
          self._linear_predictions(examples), dtypes.float64)
      labels = math_ops.cast(
          internal_convert_to_tensor(examples['example_labels']),
          dtypes.float64)
      weights = math_ops.cast(
          internal_convert_to_tensor(examples['example_weights']),
          dtypes.float64)

      if self._options['loss_type'] == 'logistic_loss':
        return math_ops.reduce_sum(math_ops.multiply(
            sigmoid_cross_entropy_with_logits(labels=labels,
                                              logits=predictions),
            weights)) / math_ops.reduce_sum(weights)

      if self._options['loss_type'] == 'poisson_loss':
        return math_ops.reduce_sum(math_ops.multiply(
            log_poisson_loss(targets=labels, log_input=predictions),
            weights)) / math_ops.reduce_sum(weights)

      if self._options['loss_type'] in ['hinge_loss', 'smooth_hinge_loss']:
        # hinge_loss = max{0, 1 - y_i w*x} where y_i \in {-1, 1}. So, we need to
        # first convert 0/1 labels into -1/1 labels.
        all_ones = array_ops.ones_like(predictions)
        adjusted_labels = math_ops.subtract(2 * labels, all_ones)
        # Tensor that contains (unweighted) error (hinge loss) per
        # example.
        error = nn_ops.relu(
            math_ops.subtract(all_ones,
                              math_ops.multiply(adjusted_labels, predictions)))
        weighted_error = math_ops.multiply(error, weights)
        return math_ops.reduce_sum(weighted_error) / math_ops.reduce_sum(
            weights)

      # squared loss
      err = math_ops.subtract(labels, predictions)

      weighted_squared_err = math_ops.multiply(math_ops.square(err), weights)
      # SDCA squared loss function is sum(err^2) / (2*sum(weights))
      return (math_ops.reduce_sum(weighted_squared_err) /
              (2.0 * math_ops.reduce_sum(weights)))
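The hinge branch above relies on the mapping 2*y - 1, which sends 0/1 labels to -1/+1 before forming max(0, 1 - y_adj * prediction). A short NumPy sketch of both steps (illustrative values, not from the source):

import numpy as np

labels = np.array([0.0, 1.0, 1.0, 0.0])
predictions = np.array([-0.5, 2.0, -0.1, 0.3])
adjusted = 2 * labels - np.ones_like(labels)           # [-1.  1.  1. -1.]
hinge = np.maximum(0.0, 1.0 - adjusted * predictions)  # [0.5 0.  1.1 1.3]
print(adjusted, hinge)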
Example #11
def _log_loss_with_two_classes(logits, target):
  check_shape_op = control_flow_ops.Assert(
      math_ops.less_equal(array_ops.rank(target), 2),
      ["target's shape should be either [batch_size, 1] or [batch_size]"])
  with ops.control_dependencies([check_shape_op]):
    target = array_ops.reshape(target, shape=[array_ops.shape(target)[0], 1])
  return nn.sigmoid_cross_entropy_with_logits(
      logits, math_ops.to_float(target))
Example #12
 def testGradient(self):
   sizes = [4, 2]
   with self.test_session():
     logits, targets, _ = self._Inputs(sizes=sizes)
     loss = nn.sigmoid_cross_entropy_with_logits(logits, targets)
     err = gc.ComputeGradientError(logits, sizes, loss, sizes)
   print "logistic loss gradient err = ", err
   self.assertLess(err, 1e-7)
Example #13
def _log_loss_with_two_classes(logits, labels):
  with ops.name_scope(
      None, "log_loss_with_two_classes", (logits, labels)) as name:
    # sigmoid_cross_entropy_with_logits requires [batch_size, 1] labels.
    if len(labels.get_shape()) == 1:
      labels = array_ops.expand_dims(labels, dim=(1,))
    return nn.sigmoid_cross_entropy_with_logits(
        logits, math_ops.to_float(labels), name=name)
Example #14
 def testLogisticOutput(self):
   for use_gpu in [True, False]:
     with self.test_session(use_gpu=use_gpu):
       logits, targets, losses = self._Inputs(dtype=types.float32)
       loss = nn.sigmoid_cross_entropy_with_logits(logits, targets)
       np_loss = np.array(losses).astype(np.float32)
       tf_loss = loss.eval()
     self.assertAllClose(np_loss, tf_loss, atol=0.001)
Example #15
def sigmoid_cross_entropy(
    multi_class_labels, logits, weights=1.0, label_smoothing=0, scope=None,
    loss_collection=ops.GraphKeys.LOSSES):
  """Creates a cross-entropy loss using tf.nn.sigmoid_cross_entropy_with_logits.

  `weights` acts as a coefficient for the loss. If a scalar is provided,
  then the loss is simply scaled by the given value. If `weights` is a
  tensor of shape `[batch_size]`, then the loss weights apply to each
  corresponding sample.

  WARNING: `weights` also supports dimensions of 1, but the broadcasting does
  not work as advertised; you'll wind up with a weighted sum instead of a
  weighted mean for any but the last dimension. This will be cleaned up soon,
  so please do not rely on the current behavior for anything but the shapes
  documented for `weights` below.

  If `label_smoothing` is nonzero, smooth the labels towards 1/2:

      new_multiclass_labels = multiclass_labels * (1 - label_smoothing)
                              + 0.5 * label_smoothing

  Args:
    multi_class_labels: `[batch_size, num_classes]` target integer labels in
      `{0, 1}`.
    logits: `[batch_size, num_classes]` logits outputs of the network.
    weights: Coefficients for the loss. This must be of shape `[]`,
      `[batch_size]` or `[batch_size, num_classes]`.
    label_smoothing: If greater than `0` then smooth the labels.
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If the shape of `logits` doesn't match that of
      `multi_class_labels` or if the shape of `weights` is invalid, or if
      `weights` is None.
  """
  with ops.name_scope(scope, "sigmoid_cross_entropy_loss",
                      (logits, multi_class_labels, weights)) as scope:
    logits = ops.convert_to_tensor(logits)
    multi_class_labels = math_ops.cast(multi_class_labels, logits.dtype)
    logits.get_shape().assert_is_compatible_with(multi_class_labels.get_shape())

    if label_smoothing > 0:
      multi_class_labels = (multi_class_labels * (1 - label_smoothing) +
                            0.5 * label_smoothing)

    losses = nn.sigmoid_cross_entropy_with_logits(labels=multi_class_labels,
                                                  logits=logits,
                                                  name="xentropy")
    return compute_weighted_loss(losses, weights, scope, loss_collection)
Example #16
 def create_loss(self, features, mode, logits, labels):
   """See `Head`."""
   del mode, features  # Unused for this head.
   labels = _check_and_reshape_dense_labels(labels, self.logits_dimension)
   if self._label_vocabulary is not None:
     labels = lookup_ops.index_table_from_tensor(
         vocabulary_list=tuple(self._label_vocabulary),
         name='class_id_lookup').lookup(labels)
   labels = math_ops.to_float(labels)
   labels = _assert_range(labels, 2)
   return LossAndLabels(
       unweighted_loss=nn.sigmoid_cross_entropy_with_logits(
           labels=labels, logits=logits),
       processed_labels=labels)
Example #17
def deprecated_flipped_sigmoid_cross_entropy_with_logits(logits,
                                                         targets,
                                                         name=None):
  """Computes sigmoid cross entropy given `logits`.

  This function differs from tf.nn.sigmoid_cross_entropy_with_logits only in the
  argument order.

  Measures the probability error in discrete classification tasks in which each
  class is independent and not mutually exclusive.  For instance, one could
  perform multilabel classification where a picture can contain both an elephant
  and a dog at the same time.

  For brevity, let `x = logits`, `z = targets`.  The logistic loss is

        z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
      = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
      = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)))
      = (1 - z) * x + log(1 + exp(-x))
      = x - x * z + log(1 + exp(-x))

  For x < 0, to avoid overflow in exp(-x), we reformulate the above

        x - x * z + log(1 + exp(-x))
      = log(exp(x)) - x * z + log(1 + exp(-x))
      = - x * z + log(1 + exp(x))

  Hence, to ensure stability and avoid overflow, the implementation uses this
  equivalent formulation

      max(x, 0) - x * z + log(1 + exp(-abs(x)))

  `logits` and `targets` must have the same type and shape.

  Args:
    logits: A `Tensor` of type `float32` or `float64`.
    targets: A `Tensor` of the same type and shape as `logits`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of the same shape as `logits` with the componentwise
    logistic losses.

  Raises:
    ValueError: If `logits` and `targets` do not have the same shape.
  """
  return nn.sigmoid_cross_entropy_with_logits(
      labels=targets, logits=logits, name=name)
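The equivalence claimed in the docstring is easy to spot-check numerically. A small sketch (an aside, not from the source) comparing the naive logistic loss against the stable formulation max(x, 0) - x*z + log(1 + exp(-|x|)):

import numpy as np

x = np.array([-3.0, -0.5, 0.0, 2.0])  # logits
z = np.array([0.0, 1.0, 1.0, 0.0])    # targets
sigmoid = 1 / (1 + np.exp(-x))
naive = -z * np.log(sigmoid) - (1 - z) * np.log(1 - sigmoid)
stable = np.maximum(x, 0) - x * z + np.log1p(np.exp(-np.abs(x)))
assert np.allclose(naive, stable)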
Example #18
    def _loss(self, logits, target, weight_tensor):
        if self._n_classes < 2:
            loss_vec = math_ops.square(logits - math_ops.to_float(target))
        elif self._n_classes == 2:
            loss_vec = nn.sigmoid_cross_entropy_with_logits(logits, math_ops.to_float(target))
        else:
            loss_vec = nn.sparse_softmax_cross_entropy_with_logits(logits, array_ops.reshape(target, [-1]))

        if weight_tensor is None:
            return math_ops.reduce_mean(loss_vec, name="loss")
        else:
            loss_vec = array_ops.reshape(loss_vec, shape=(-1,))
            loss_vec = math_ops.mul(loss_vec, array_ops.reshape(weight_tensor, shape=(-1,)))
            return math_ops.div(
                math_ops.reduce_sum(loss_vec), math_ops.to_float(math_ops.reduce_sum(weight_tensor)), name="loss"
            )
Example #19
 def _log_prob(self, event):
   # TODO(jaana): The current sigmoid_cross_entropy_with_logits has
   # inconsistent behavior for logits = inf/-inf.
   event = ops.convert_to_tensor(event, name="event")
   event = math_ops.cast(event, self.logits.dtype)
   logits = self.logits
   # sigmoid_cross_entropy_with_logits doesn't broadcast shape,
   # so we do this here.
   # TODO(b/30637701): Check dynamic shape, and don't broadcast if the
   # dynamic shapes are the same.
   if (not event.get_shape().is_fully_defined() or
       not logits.get_shape().is_fully_defined() or
       event.get_shape() != logits.get_shape()):
     logits = array_ops.ones_like(event) * logits
     event = array_ops.ones_like(logits) * event
   return -nn.sigmoid_cross_entropy_with_logits(logits, event)
Example #20
def per_example_logistic_loss(labels, weights, predictions):
  """Logistic loss given labels, example weights and predictions.

  Args:
    labels: Rank 2 (N, 1) tensor of per-example labels.
    weights: Rank 2 (N, 1) tensor of per-example weights.
    predictions: Rank 2 (N, 1) tensor of per-example predictions.

  Returns:
    loss: A Rank 2 (N, 1) tensor of per-example logistic loss.
    update_op: An update operation to update the loss's internal state.
  """
  labels = math_ops.to_float(labels)
  unweighted_loss = nn.sigmoid_cross_entropy_with_logits(
      labels=labels, logits=predictions)
  return unweighted_loss * weights, control_flow_ops.no_op()
Example #21
  def unregularized_loss(self, examples):
    """Add operations to compute the loss (without the regularization loss).

    Args:
      examples: Examples to compute unregularized loss on.

    Returns:
      An Operation that computes mean (unregularized) loss for given set of
      examples.

    Raises:
      ValueError: if examples are not well defined.
    """
    self._assertSpecified(
        ['example_labels', 'example_weights', 'sparse_features',
         'dense_features'], examples)
    self._assertList(['sparse_features', 'dense_features'], examples)
    with name_scope('sdca/unregularized_loss'):
      predictions = self._linear_predictions(examples)
      labels = convert_to_tensor(examples['example_labels'])
      weights = convert_to_tensor(examples['example_weights'])

      if self._options['loss_type'] == 'logistic_loss':
        return math_ops.reduce_sum(math_ops.mul(
            sigmoid_cross_entropy_with_logits(
                predictions, labels), weights)) / math_ops.reduce_sum(weights)

      if self._options['loss_type'] == 'hinge_loss':
        # hinge_loss = max{0, 1 - y_i w*x} where y_i \in {-1, 1}. So, we need to
        # first convert 0/1 labels into -1/1 labels.
        all_ones = array_ops.ones_like(predictions)
        adjusted_labels = math_ops.sub(2 * labels, all_ones)
        all_zeros = array_ops.zeros_like(predictions)
        # Tensor that contains (unweighted) error (hinge loss) per
        # example.
        error = math_ops.maximum(all_zeros, math_ops.sub(
            all_ones, math_ops.mul(adjusted_labels, predictions)))
        weighted_error = math_ops.mul(error, weights)
        return math_ops.reduce_sum(weighted_error) / math_ops.reduce_sum(
            weights)

      # squared loss
      err = math_ops.sub(labels, predictions)

      weighted_squared_err = math_ops.mul(math_ops.square(err), weights)
      return (math_ops.reduce_sum(weighted_squared_err) /
              math_ops.reduce_sum(weights))
Example #22
def sigmoid_cross_entropy(logits,
                          multi_class_labels,
                          weights=1.0,
                          label_smoothing=0,
                          scope=None):
  """Creates a cross-entropy loss using tf.nn.sigmoid_cross_entropy_with_logits.

  `weights` acts as a coefficient for the loss. If a scalar is provided,
  then the loss is simply scaled by the given value. If `weights` is a
  tensor of size `[batch_size]`, then the loss weights apply to each
  corresponding sample.

  If `label_smoothing` is nonzero, smooth the labels towards 1/2:

      new_multiclass_labels = multiclass_labels * (1 - label_smoothing)
                              + 0.5 * label_smoothing

  Args:
    logits: [batch_size, num_classes] logits outputs of the network.
    multi_class_labels: [batch_size, num_classes] labels in {0, 1}.
    weights: Coefficients for the loss. The tensor must be a scalar, a tensor of
      shape [batch_size] or shape [batch_size, num_classes].
    label_smoothing: If greater than 0 then smooth the labels.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If the shape of `logits` doesn't match that of
      `multi_class_labels` or if the shape of `weights` is invalid, or if
      `weights` is None.
  """
  with ops.name_scope(scope, "sigmoid_cross_entropy_loss",
                      [logits, multi_class_labels, weights]) as scope:
    logits.get_shape().assert_is_compatible_with(multi_class_labels.get_shape())

    multi_class_labels = math_ops.cast(multi_class_labels, logits.dtype)

    if label_smoothing > 0:
      multi_class_labels = (
          multi_class_labels * (1 - label_smoothing) + 0.5 * label_smoothing)

    losses = nn.sigmoid_cross_entropy_with_logits(
        labels=multi_class_labels, logits=logits, name="xentropy")
    return compute_weighted_loss(losses, weights, scope=scope)
Example #23
 def _loss_vec(self, logits, target):
   if self._n_classes == 2:
     # sigmoid_cross_entropy_with_logits requires [batch_size, 1] target.
     if len(target.get_shape()) == 1:
       target = array_ops.expand_dims(target, dim=[1])
     loss_vec = nn.sigmoid_cross_entropy_with_logits(
         logits, math_ops.to_float(target))
   else:
     # Check that we got int32/int64 for classification.
     if (not target.dtype.is_compatible_with(dtypes.int64) and
         not target.dtype.is_compatible_with(dtypes.int32)):
       raise ValueError("Target's dtype should be int32, int64 or compatible. "
                        "Instead got %s." % target.dtype)
     # sparse_softmax_cross_entropy_with_logits requires [batch_size] target.
     if len(target.get_shape()) == 2:
       target = array_ops.squeeze(target, squeeze_dims=[1])
     loss_vec = nn.sparse_softmax_cross_entropy_with_logits(
         logits, target)
   return loss_vec
Example #24
  def log_prob(self, event, name="log_prob"):
    """Log of the probability mass function.

    Args:
      event: `int32` or `int64` binary Tensor.
      name: A name for this operation (optional).

    Returns:
      The log-probabilities of the events.
    """
    # TODO(jaana): The current sigmoid_cross_entropy_with_logits has
    # inconsistent behavior for logits = inf/-inf.
    with ops.name_scope(self.name):
      with ops.op_scope([self.logits, event], name):
        event = ops.convert_to_tensor(event, name="event")
        event = math_ops.cast(event, self.logits.dtype)
        logits = array_ops.ones_like(event) * self.logits
        event = array_ops.ones_like(self.logits) * event
        return -nn.sigmoid_cross_entropy_with_logits(logits, event)
Example #25
def logistic_loss(logit, target, name=None):
  """Calculates the logistic cross-entropy loss.

  **WARNING:** `logit` must be unscaled, while the `target` should be a
  normalized probability prediction. See
  `tf.nn.sigmoid_cross_entropy_with_logits` for more details.

  Args:
    logit: A `Tensor` of shape `[batch_size, dim_1, ..., dim_n]`
      of predicted logit values.
    target: A `Tensor` of shape `[batch_size, dim_1, ..., dim_n]` of
      target values. The shape of the target tensor should match the
      `logit` tensor.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of the logistic cross-entropy loss.
  """
  return nn.sigmoid_cross_entropy_with_logits(logit, target, name=name)
Example #26
    def unregularized_loss(self, examples):
        """Add operations to compute the loss (without the regularization loss).

    Args:
      examples: Examples to compute unregularized loss on.

    Returns:
      An Operation that computes mean (unregularized) loss for given set of
      examples.

    Raises:
      ValueError: if examples are not well defined.
    """
        self._assertSpecified(["example_labels", "example_weights", "sparse_features", "dense_features"], examples)
        self._assertList(["sparse_features", "dense_features"], examples)
        with name_scope("sdca/unregularized_loss"):
            predictions = math_ops.cast(self._linear_predictions(examples), dtypes.float64)
            labels = math_ops.cast(convert_to_tensor(examples["example_labels"]), dtypes.float64)
            weights = math_ops.cast(convert_to_tensor(examples["example_weights"]), dtypes.float64)

            if self._options["loss_type"] == "logistic_loss":
                return math_ops.reduce_sum(
                    math_ops.mul(sigmoid_cross_entropy_with_logits(predictions, labels), weights)
                ) / math_ops.reduce_sum(weights)

            if self._options["loss_type"] == "hinge_loss":
                # hinge_loss = max{0, 1 - y_i w*x} where y_i \in {-1, 1}. So, we need to
                # first convert 0/1 labels into -1/1 labels.
                all_ones = array_ops.ones_like(predictions)
                adjusted_labels = math_ops.sub(2 * labels, all_ones)
                # Tensor that contains (unweighted) error (hinge loss) per
                # example.
                error = nn_ops.relu(math_ops.sub(all_ones, math_ops.mul(adjusted_labels, predictions)))
                weighted_error = math_ops.mul(error, weights)
                return math_ops.reduce_sum(weighted_error) / math_ops.reduce_sum(weights)

            # squared loss
            err = math_ops.sub(labels, predictions)

            weighted_squared_err = math_ops.mul(math_ops.square(err), weights)
            # SDCA squared loss function is sum(err^2) / (2*sum(weights))
            return math_ops.reduce_sum(weighted_squared_err) / (2.0 * math_ops.reduce_sum(weights))
Example #27
  def _log_prob(self, event):
    if self.validate_args:
      event = distribution_util.embed_check_integer_casting_closed(
          event, target_dtype=dtypes.bool)

    # TODO(jaana): The current sigmoid_cross_entropy_with_logits has
    # inconsistent behavior for logits = inf/-inf.
    event = math_ops.cast(event, self.logits.dtype)
    logits = self.logits
    # sigmoid_cross_entropy_with_logits doesn't broadcast shape,
    # so we do this here.

    def _broadcast(logits, event):
      return (array_ops.ones_like(event) * logits,
              array_ops.ones_like(logits) * event)

    if not (event.get_shape().is_fully_defined() and
            logits.get_shape().is_fully_defined() and
            event.get_shape() == logits.get_shape()):
      logits, event = _broadcast(logits, event)
    return -nn.sigmoid_cross_entropy_with_logits(labels=event, logits=logits)
Example #28
 def create_loss(self, features, mode, logits, labels):
   """See `Head`."""
   del mode  # Unused for this head.
   labels = _check_and_reshape_dense_labels(labels, self.logits_dimension)
   if self._label_vocabulary is not None:
     labels = lookup_ops.index_table_from_tensor(
         vocabulary_list=tuple(self._label_vocabulary),
         name='class_id_lookup').lookup(labels)
   labels = math_ops.to_float(labels)
   labels = _assert_range(labels, 2)
   unweighted_loss = nn.sigmoid_cross_entropy_with_logits(
       labels=labels, logits=logits)
   weights = _weights(features, self._weight_column)
   weighted_sum_loss = losses.compute_weighted_loss(
       unweighted_loss, weights=weights, reduction=losses.Reduction.SUM)
   # _weights() can return 1.
   example_weight_sum = math_ops.reduce_sum(
       weights * array_ops.ones_like(unweighted_loss))
   return LossSpec(
       weighted_sum_loss=weighted_sum_loss,
       example_weight_sum=example_weight_sum,
       processed_labels=labels)
Example #29
    def _log_prob(self, event):
        # TODO(jaana): The current sigmoid_cross_entropy_with_logits has
        # inconsistent behavior for logits = inf/-inf.
        event = ops.convert_to_tensor(event, name="event")
        event = math_ops.cast(event, self.logits.dtype)
        logits = self.logits
        # sigmoid_cross_entropy_with_logits doesn't broadcast shape,
        # so we do this here.

        broadcast = lambda logits, event: (array_ops.ones_like(event) * logits,
                                           array_ops.ones_like(logits) * event)

        # First check static shape.
        if (event.get_shape().is_fully_defined()
                and logits.get_shape().is_fully_defined()):
            if event.get_shape() != logits.get_shape():
                logits, event = broadcast(logits, event)
        else:
            logits, event = control_flow_ops.cond(
                distribution_util.same_dynamic_shape(logits, event), lambda:
                (logits, event), lambda: broadcast(logits, event))
        return -nn.sigmoid_cross_entropy_with_logits(logits, event)
Example #30
    def log_prob(self, event, name="log_prob"):
        """Log of the probability mass function.

    Args:
      event: `int32` or `int64` binary Tensor.
      name: A name for this operation (optional).

    Returns:
      The log-probabilities of the events.
    """
        # TODO(jaana): The current sigmoid_cross_entropy_with_logits has
        # inconsistent behavior for logits = inf/-inf.
        with ops.name_scope(self.name):
            with ops.op_scope([self.logits, event], name):
                event = ops.convert_to_tensor(event, name="event")
                event = math_ops.cast(event, self.logits.dtype)
                logits = self.logits
                if ((event.get_shape().ndims is None)
                        or (logits.get_shape().ndims is None)
                        or event.get_shape() != logits.get_shape()):
                    logits = array_ops.ones_like(event) * logits
                    event = array_ops.ones_like(logits) * event
                return -nn.sigmoid_cross_entropy_with_logits(logits, event)
Example #31
  def unregularized_loss(self, examples):
    """Add operations to compute the loss (without the regularization loss).

        Args:
          examples: Examples to compute unregularized loss on.

        Returns:
          An Operation that computes mean (unregularized) loss for given set of
          examples.
        Raises:
          ValueError: if examples are not well defined.
        """
    self._assertSpecified(
        ['example_labels', 'example_weights', 'sparse_features',
         'dense_features'], examples)
    self._assertList(['sparse_features', 'dense_features'], examples)
    with name_scope('sdca/unregularized_loss'):
      logits = self._logits(examples)
      # TODO(rohananil): Change loss when supporting linear regression.
      return math_ops.reduce_sum(math_ops.mul(
          sigmoid_cross_entropy_with_logits(logits, convert_to_tensor(examples[
              'example_labels'])), convert_to_tensor(examples[
                  'example_weights']))) / math_ops.reduce_sum(
                      ops.convert_to_tensor(examples['example_weights']))
Example #32
  def _log_prob(self, event):
    # TODO(jaana): The current sigmoid_cross_entropy_with_logits has
    # inconsistent behavior for logits = inf/-inf.
    event = ops.convert_to_tensor(event, name="event")
    event = math_ops.cast(event, self.logits.dtype)
    logits = self.logits
    # sigmoid_cross_entropy_with_logits doesn't broadcast shape,
    # so we do this here.

    broadcast = lambda logits, event: (
        array_ops.ones_like(event) * logits,
        array_ops.ones_like(logits) * event)

    # First check static shape.
    if (event.get_shape().is_fully_defined() and
        logits.get_shape().is_fully_defined()):
      if event.get_shape() != logits.get_shape():
        logits, event = broadcast(logits, event)
    else:
      logits, event = control_flow_ops.cond(
          distribution_util.same_dynamic_shape(logits, event),
          lambda: (logits, event),
          lambda: broadcast(logits, event))
    return -nn.sigmoid_cross_entropy_with_logits(logits, event)
Example #33
def logistic(logit, target, name=None):
  """Calculates the logistic cross-entropy loss, averaged across batches.

  **WARNING:** `logit` must be unscaled.
  See `tf.nn.sigmoid_cross_entropy_with_logits` for more details.

  Args:
    logit: A `Tensor` of shape `[batch_size, dim_1, ..., dim_n]`
      of predicted logit values.
    target: A `Tensor` of shape `[batch_size, dim_1, ..., dim_n]` of
      target values. The shape of the target tensor should match the
      `logit` tensor.
    name: A name for the operation (optional).

  Returns:
    A scalar `Tensor` of the logistic cross-entropy loss, averaged across
    batches.

  Raises:
    ValueError: If `logit` and `target` shapes do not match.
  """
  with ops.op_scope([logit, target], name, "logistic_loss") as scope:
    return _reduce_to_scalar(
        nn.sigmoid_cross_entropy_with_logits(logit, target), name=scope)
Example #34
 def create_loss(self, logits):
     labels = math_ops.to_float(self.features['read'])
     return LossAndLabels(
         unweighted_loss=nn.sigmoid_cross_entropy_with_logits(
             labels=labels, logits=logits),
         processed_labels=labels)
Example #35
def _sigmoid_cross_entropy_loss(logits, labels):
  # sigmoid_cross_entropy_with_logits requires [batch_size, n_classes] labels.
  return nn.sigmoid_cross_entropy_with_logits(logits, math_ops.to_float(labels))
Example #36
def sigmoid_cross_entropy(multi_class_labels,
                          logits,
                          weights=1.0,
                          label_smoothing=0,
                          scope=None,
                          loss_collection=ops.GraphKeys.LOSSES,
                          reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
    """Creates a cross-entropy loss using tf.nn.sigmoid_cross_entropy_with_logits.

  `weights` acts as a coefficient for the loss. If a scalar is provided,
  then the loss is simply scaled by the given value. If `weights` is a
  tensor of shape `[batch_size]`, then the loss weights apply to each
  corresponding sample.

  If `label_smoothing` is nonzero, smooth the labels towards 1/2:

      new_multiclass_labels = multiclass_labels * (1 - label_smoothing)
                              + 0.5 * label_smoothing

  Args:
    multi_class_labels: `[batch_size, num_classes]` target integer labels in
      `{0, 1}`.
    logits: Float `[batch_size, num_classes]` logits outputs of the network.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `losses` dimension).
    label_smoothing: If greater than `0` then smooth the labels.
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.
    reduction: Type of reduction to apply to loss.

  Returns:
    Weighted loss `Tensor` of the same type as `logits`. If `reduction` is
    `NONE`, this has the same shape as `logits`; otherwise, it is scalar.

  Raises:
    ValueError: If the shape of `logits` doesn't match that of
      `multi_class_labels` or if the shape of `weights` is invalid, or if
      `weights` is None.  Also if `multi_class_labels` or `logits` is None.

  @compatibility(eager)
  The `loss_collection` argument is ignored when executing eagerly. Consider
  holding on to the return value or collecting losses via a `tf.keras.Model`.
  @end_compatibility
  """
    if multi_class_labels is None:
        raise ValueError("multi_class_labels must not be None.")
    if logits is None:
        raise ValueError("logits must not be None.")
    with ops.name_scope(scope, "sigmoid_cross_entropy_loss",
                        (logits, multi_class_labels, weights)) as scope:
        logits = ops.convert_to_tensor(logits)
        multi_class_labels = math_ops.cast(multi_class_labels, logits.dtype)
        logits.get_shape().assert_is_compatible_with(
            multi_class_labels.get_shape())

        if label_smoothing > 0:
            multi_class_labels = (multi_class_labels * (1 - label_smoothing) +
                                  0.5 * label_smoothing)

        losses = nn.sigmoid_cross_entropy_with_logits(
            labels=multi_class_labels, logits=logits, name="xentropy")
        return compute_weighted_loss(losses,
                                     weights,
                                     scope,
                                     loss_collection,
                                     reduction=reduction)
Example #37
def _sigmoid_cross_entropy_loss(logits, target):
  # sigmoid_cross_entropy_with_logits requires [batch_size, n_classes] target.
  return nn.sigmoid_cross_entropy_with_logits(logits, math_ops.to_float(target))
Example #38
    def unregularized_loss(self, examples):
        """Add operations to compute the loss (without the regularization loss).

    Args:
      examples: Examples to compute unregularized loss on.

    Returns:
      An Operation that computes mean (unregularized) loss for given set of
      examples.

    Raises:
      ValueError: if examples are not well defined.
    """
        self._assertSpecified([
            'example_labels', 'example_weights', 'sparse_features',
            'dense_features'
        ], examples)
        self._assertList(['sparse_features', 'dense_features'], examples)
        with name_scope('sdca/unregularized_loss'):
            predictions = math_ops.cast(self._linear_predictions(examples),
                                        dtypes.float64)
            labels = math_ops.cast(
                internal_convert_to_tensor(examples['example_labels']),
                dtypes.float64)
            weights = math_ops.cast(
                internal_convert_to_tensor(examples['example_weights']),
                dtypes.float64)

            if self._options['loss_type'] == 'logistic_loss':
                return math_ops.reduce_sum(
                    math_ops.multiply(
                        sigmoid_cross_entropy_with_logits(labels=labels,
                                                          logits=predictions),
                        weights)) / math_ops.reduce_sum(weights)

            if self._options['loss_type'] == 'poisson_loss':
                return math_ops.reduce_sum(
                    math_ops.multiply(
                        log_poisson_loss(targets=labels,
                                         log_input=predictions),
                        weights)) / math_ops.reduce_sum(weights)

            if self._options['loss_type'] in [
                    'hinge_loss', 'smooth_hinge_loss'
            ]:
                # hinge_loss = max{0, 1 - y_i w*x} where y_i \in {-1, 1}. So, we need to
                # first convert 0/1 labels into -1/1 labels.
                all_ones = array_ops.ones_like(predictions)
                adjusted_labels = math_ops.subtract(2 * labels, all_ones)
                # Tensor that contains (unweighted) error (hinge loss) per
                # example.
                error = nn_ops.relu(
                    math_ops.subtract(
                        all_ones,
                        math_ops.multiply(adjusted_labels, predictions)))
                weighted_error = math_ops.multiply(error, weights)
                return math_ops.reduce_sum(
                    weighted_error) / math_ops.reduce_sum(weights)

            # squared loss
            err = math_ops.subtract(labels, predictions)

            weighted_squared_err = math_ops.multiply(math_ops.square(err),
                                                     weights)
            # SDCA squared loss function is sum(err^2) / (2*sum(weights))
            return (math_ops.reduce_sum(weighted_squared_err) /
                    (2.0 * math_ops.reduce_sum(weights)))
Example #39
 def _logistic_loss(labels, logits):
   labels = head_lib._assert_range(  # pylint:disable=protected-access
       labels, n_classes=2, message='Labels must be in range [0, 1]')
   return nn.sigmoid_cross_entropy_with_logits(
       labels=labels, logits=logits)
Example #40
def _sigmoid_cross_entropy_loss(logits, target):
    # sigmoid_cross_entropy_with_logits requires [batch_size, n_classes] target.
    return nn.sigmoid_cross_entropy_with_logits(logits,
                                                math_ops.to_float(target))
Example #41
def _log_loss_with_two_classes(logits, labels):
    # sigmoid_cross_entropy_with_logits requires [batch_size, 1] labels.
    if len(labels.get_shape()) == 1:
        labels = array_ops.expand_dims(labels, dim=[1])
    return nn.sigmoid_cross_entropy_with_logits(logits,
                                                math_ops.to_float(labels))
Example #42
  def create_estimator_spec(
      self, features, mode, logits, labels=None, train_op_fn=None):
    """See `Head`."""
    with variable_scope.variable_scope(
        None, default_name='binary_logistic_head',
        values=(tuple(six.itervalues(features)) + (labels, logits))):

      # Predict.
      pred_keys = prediction_keys.PredictionKeys
      logits = _check_logits(logits, self.logits_dimension)
      logistic = math_ops.sigmoid(logits, name=pred_keys.LOGISTIC)
      two_class_logits = array_ops.concat(
          (array_ops.zeros_like(logits), logits), 1, name='two_class_logits')
      scores = nn.softmax(two_class_logits, name=pred_keys.PROBABILITIES)
      classes = array_ops.reshape(
          math_ops.argmax(two_class_logits, axis=1), (-1, 1), name='classes')
      predictions = {
          pred_keys.LOGITS: logits,
          pred_keys.LOGISTIC: logistic,
          pred_keys.PROBABILITIES: scores,
          pred_keys.CLASS_IDS: classes
      }
      if mode == model_fn.ModeKeys.PREDICT:
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.PREDICT,
            predictions=predictions,
            export_outputs={'': export_output.ClassificationOutput(
                scores=scores,
                # `ClassificationOutput` requires string classes.
                # TODO(ptucker): Support label_keys.
                classes=string_ops.as_string(classes, name='str_classes'))})

      # Eval.
      labels = _check_labels(_maybe_expand_dim(math_ops.to_float(labels)),
                             self.logits_dimension)
      unweighted_loss = nn.sigmoid_cross_entropy_with_logits(
          labels=labels, logits=logits, name='loss')
      weights = (
          1. if (self._weight_feature_key is None) else
          features[self._weight_feature_key])
      weights = _maybe_expand_dim(math_ops.to_float(weights, name='weights'))
      training_loss = losses.compute_weighted_loss(
          unweighted_loss, weights=weights, reduction=losses.Reduction.SUM)
      if mode == model_fn.ModeKeys.EVAL:
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.EVAL,
            predictions=predictions,
            loss=training_loss,
            eval_metric_ops=self._eval_metric_ops(
                labels=labels,
                logits=logits,
                logistic=logistic,
                scores=scores,
                classes=classes,
                unweighted_loss=unweighted_loss,
                weights=weights))

      # Train.
      if train_op_fn is None:
        raise ValueError('train_op_fn can not be None.')
      logging_ops.scalar_summary(metric_keys.MetricKeys.LOSS, training_loss)
      logging_ops.scalar_summary(
          metric_keys.MetricKeys.LOSS_MEAN,
          losses.compute_weighted_loss(
              unweighted_loss, weights=weights,
              reduction=losses.Reduction.MEAN))
      return model_fn.EstimatorSpec(
          mode=model_fn.ModeKeys.TRAIN,
          predictions=predictions,
          loss=training_loss,
          train_op=train_op_fn(training_loss))
Example #43
 def _logistic_loss(self, labels, logits):
     labels = base_head.check_label_range(
         labels, n_classes=2, message='Labels must be in range [0, 1]')
     return nn.sigmoid_cross_entropy_with_logits(labels=labels,
                                                 logits=logits)
Example #44
  def create_estimator_spec(
      self, features, mode, logits, labels=None, train_op_fn=None):
    """See `Head`."""
    # Predict.
    with ops.name_scope('head'):
      with ops.name_scope(None, 'predictions', (logits,)):
        pred_keys = prediction_keys.PredictionKeys
        logits = _check_logits(logits, self.logits_dimension)
        logistic = math_ops.sigmoid(logits, name=pred_keys.LOGISTIC)
        two_class_logits = array_ops.concat(
            (array_ops.zeros_like(logits), logits), 1, name='two_class_logits')
        scores = nn.softmax(two_class_logits, name=pred_keys.PROBABILITIES)
        class_ids = array_ops.reshape(
            math_ops.argmax(two_class_logits, axis=1), (-1, 1), name='classes')
        if self._label_vocabulary:
          table = lookup_ops.index_to_string_table_from_tensor(
              vocabulary_list=self._label_vocabulary,
              name='class_string_lookup')
          classes = table.lookup(class_ids)
        else:
          classes = string_ops.as_string(class_ids, name='str_classes')
        predictions = {
            pred_keys.LOGITS: logits,
            pred_keys.LOGISTIC: logistic,
            pred_keys.PROBABILITIES: scores,
            pred_keys.CLASS_IDS: class_ids,
            pred_keys.CLASSES: classes,
        }
      if mode == model_fn.ModeKeys.PREDICT:
        batch_size = array_ops.shape(logistic)[0]
        export_class_list = self._label_vocabulary
        if not export_class_list:
          export_class_list = string_ops.as_string([0, 1])
        export_output_classes = array_ops.tile(
            input=array_ops.expand_dims(input=export_class_list, axis=0),
            multiples=[batch_size, 1])
        classifier_output = export_output.ClassificationOutput(
            scores=scores,
            # `ClassificationOutput` requires string classes.
            classes=export_output_classes)
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.PREDICT,
            predictions=predictions,
            export_outputs={
                '': classifier_output,  # to be same as other heads.
                'classification': classifier_output,  # to be called by name.
                _DEFAULT_SERVING_KEY: classifier_output,  # default
                'regression': export_output.RegressionOutput(value=logistic)
            })

      # Eval.
      labels = _check_labels(_maybe_expand_dim(labels), self.logits_dimension)
      if self._label_vocabulary is not None:
        labels = lookup_ops.index_table_from_tensor(
            vocabulary_list=tuple(self._label_vocabulary),
            name='class_id_lookup').lookup(labels)
      labels = math_ops.to_float(labels)
      labels = _assert_range(labels, 2)
      unweighted_loss = nn.sigmoid_cross_entropy_with_logits(
          labels=labels, logits=logits, name='loss')
      weights = _weights(features, self._weight_column)
      training_loss = losses.compute_weighted_loss(
          unweighted_loss, weights=weights, reduction=losses.Reduction.SUM)
      if mode == model_fn.ModeKeys.EVAL:
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.EVAL,
            predictions=predictions,
            loss=training_loss,
            eval_metric_ops=self._eval_metric_ops(
                labels=labels,
                logits=logits,
                logistic=logistic,
                scores=scores,
                class_ids=class_ids,
                unweighted_loss=unweighted_loss,
                weights=weights))

      # Train.
      if train_op_fn is None:
        raise ValueError('train_op_fn can not be None.')
    with ops.name_scope(''):
      summary.scalar(metric_keys.MetricKeys.LOSS, training_loss)
      summary.scalar(metric_keys.MetricKeys.LOSS_MEAN,
                     losses.compute_weighted_loss(
                         unweighted_loss,
                         weights=weights,
                         reduction=losses.Reduction.MEAN))
    return model_fn.EstimatorSpec(
        mode=model_fn.ModeKeys.TRAIN,
        predictions=predictions,
        loss=training_loss,
        train_op=train_op_fn(training_loss))
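The `two_class_logits` construction above works because softmax([0, x]) = [1 - sigmoid(x), sigmoid(x)], so prepending a zero column turns binary logits into two-class probabilities. A quick NumPy check of that identity (illustrative, not from the source):

import numpy as np

x = np.array([[0.7], [-1.2]])  # binary logits, shape (N, 1)
two_class = np.concatenate([np.zeros_like(x), x], axis=1)
probs = np.exp(two_class) / np.exp(two_class).sum(axis=1, keepdims=True)
assert np.allclose(probs[:, 1:], 1 / (1 + np.exp(-x)))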
Example #45
 def testConstructionNamed(self):
   with self.test_session():
     logits, targets, _ = self._Inputs()
     loss = nn.sigmoid_cross_entropy_with_logits(logits, targets,
                                                 name="mylogistic")
   self.assertEqual("mylogistic", loss.op.name)