def weighted(y_true, y_pred, weights, mask=None):
  """Wrapper function.

  Arguments:
      y_true: `y_true` argument of `fn`.
      y_pred: `y_pred` argument of `fn`.
      weights: Weights tensor.
      mask: Mask tensor.

  Returns:
      Scalar tensor.
  """
  # `fn` (the wrapped objective function) and `K` (the Keras backend) are
  # captured from the enclosing scope.
  # score_array has ndim >= 2
  score_array = fn(y_true, y_pred)
  if mask is not None:
    # Cast the mask to floatX to avoid float64 upcasting in theano
    mask = K.cast(mask, K.floatx())
    # mask should have the same shape as score_array
    score_array *= mask
    # the loss per batch should be proportional
    # to the number of unmasked samples.
    score_array /= K.mean(mask)

  # apply sample weighting
  if weights is not None:
    # reduce score_array to same ndim as weight array
    ndim = K.ndim(score_array)
    weight_ndim = K.ndim(weights)
    score_array = K.mean(score_array, axis=list(range(weight_ndim, ndim)))
    score_array *= weights
    score_array /= K.mean(K.cast(K.not_equal(weights, 0), K.floatx()))
  return K.mean(score_array)
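Below is a minimal NumPy sketch, not part of the source above (the name `weighted_numpy` and the toy data are made up for illustration), that mirrors the masking and sample-weighting arithmetic performed by the wrapper.

import numpy as np

def weighted_numpy(score_array, weights=None, mask=None):
  # score_array: per-sample (or per-timestep) loss values, ndim >= 2
  score_array = np.asarray(score_array, dtype=np.float32)
  if mask is not None:
    mask = np.asarray(mask, dtype=np.float32)
    # zero out masked entries, then rescale by the fraction of unmasked entries
    score_array = score_array * mask
    score_array = score_array / np.mean(mask)
  if weights is not None:
    weights = np.asarray(weights, dtype=np.float32)
    # reduce score_array to the same ndim as weights before weighting
    extra_axes = tuple(range(weights.ndim, score_array.ndim))
    if extra_axes:
      score_array = np.mean(score_array, axis=extra_axes)
    score_array = score_array * weights
    score_array = score_array / np.mean((weights != 0).astype(np.float32))
  return np.mean(score_array)

# 2 samples x 3 timesteps of per-timestep loss; one timestep of the second
# sample is masked out, and the first sample is weighted twice as heavily.
loss = [[0.5, 0.2, 0.1], [0.4, 0.9, 0.3]]
print(weighted_numpy(loss, weights=[2.0, 1.0], mask=[[1, 1, 1], [1, 0, 1]]))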
Example 3
def compute_mask(self, inputs, mask=None):
  if not self.mask_zero:
    return None
  else:
    return K.not_equal(inputs, 0)
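A quick standalone sketch (assuming TensorFlow 2 and its Keras backend; the input sequences are made up) of what this mask_zero-style compute_mask returns: True wherever the integer input is non-zero.

import tensorflow as tf
from tensorflow.keras import backend as K

# integer sequences padded with 0
inputs = tf.constant([[3, 1, 0, 0],
                      [7, 0, 0, 0]])
print(K.not_equal(inputs, 0).numpy())
# [[ True  True False False]
#  [ True False False False]]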
Example 4
def compute_mask(self, inputs, mask=None):
  if not self.mask_zero:
    return None
  else:
    return K.not_equal(inputs, 0)

def call(self, inputs):
  boolean_mask = K.any(K.not_equal(inputs, self.mask_value),
                       axis=-1,
                       keepdims=True)
  return inputs * K.cast(boolean_mask, inputs.dtype)

def compute_mask(self, inputs, mask=None):
  return K.any(K.not_equal(inputs, self.mask_value), axis=-1)
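And a small sketch (again assuming TensorFlow 2; the mask_value and toy batch are made up) of how this call/compute_mask pair behaves: timesteps equal to mask_value are zeroed out and reported as masked.

import tensorflow as tf
from tensorflow.keras import backend as K

mask_value = -1.0
# batch of 1 sample, 3 timesteps, 2 features; the middle timestep is padding
inputs = tf.constant([[[1.0, 2.0],
                       [-1.0, -1.0],
                       [3.0, 4.0]]])

boolean_mask = K.any(K.not_equal(inputs, mask_value), axis=-1, keepdims=True)
print((inputs * K.cast(boolean_mask, inputs.dtype)).numpy())
# [[[1. 2.]
#   [0. 0.]    <- padded timestep zeroed out
#   [3. 4.]]]
print(K.any(K.not_equal(inputs, mask_value), axis=-1).numpy())
# [[ True False  True]]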
Example 9
def _model_loss(model, inputs, targets):
    """Calculates the loss for a given model.

  Arguments:
     model: The model on which metrics are being calculated.
     inputs: The inputs of the given model. This is typically the mini batch of
              data that is fed to the model.
     targets: The predictions or targets of the given model.

  Returns:
     Returns the model output, total loss and loss value calculated using the
     specified loss function. The total loss includes regularization losses and
     applies masking and sample weighting to the loss value.
  """
    total_loss = 0
    outs = model(inputs)
    if not isinstance(outs, list):
        outs = [outs]

    if not isinstance(targets, list):
        targets = [targets]

    loss_metrics = []
    with K.name_scope('loss'):
        for i, loss_fn in enumerate(model.loss_functions):
            # compute the loss
            output_loss = _eager_loss_fn(outs[i], targets[i], loss_fn,
                                         model.output_names[i])
            loss_metrics.append(K.mean(output_loss))

            mask = outs[i]._keras_mask
            # adapted from weighted_loss_fn
            if mask is not None:
                # mask should have the same shape as output_loss
                output_loss *= mask
                #  the loss per batch should be proportional
                #  to the number of unmasked samples.
                output_loss /= K.mean(mask)

            # adapted from weighted_loss_fn
            # apply sample weighting
            if model.sample_weights:
                # reduce score_array to same ndim as weight array
                ndim = K.ndim(output_loss)
                weight_ndim = K.ndim(model.sample_weights)
                output_loss = K.mean(output_loss,
                                     axis=list(range(weight_ndim, ndim)))
                output_loss *= model.sample_weights
                output_loss /= K.mean(
                    K.cast(K.not_equal(model.sample_weights, 0), K.floatx()))
                output_loss = K.mean(output_loss)

            loss_weight = model.loss_weights_list[i]
            if total_loss is None:
                total_loss = loss_weight * output_loss
            else:
                total_loss += loss_weight * output_loss

        total_loss = K.mean(total_loss)
        # Add regularization losses
        custom_losses = []
        for layer in model.layers:
            if layer.losses:
                custom_losses += layer.losses

        if custom_losses:
            total_loss += sum(custom_losses)

    return outs, total_loss, loss_metrics
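To make the combination step concrete, here is a tiny arithmetic sketch (the numbers are made up) of how the weighted per-output losses and regularization losses are accumulated into total_loss.

output_losses = [0.30, 0.80]          # one scalar loss per model output
loss_weights = [1.0, 0.5]             # as in model.loss_weights_list above
regularization_losses = [0.01, 0.02]  # e.g. collected from layer.losses

total_loss = sum(w * l for w, l in zip(loss_weights, output_losses))
total_loss += sum(regularization_losses)
print(total_loss)  # 0.30 * 1.0 + 0.80 * 0.5 + 0.03 = 0.73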
Example 10
def _model_loss(model, inputs, targets):
  """Calculates the loss for a given model.

  Arguments:
     model: The model on which metrics are being calculated.
     inputs: The inputs of the given model. This is typically the mini batch of
              data that is fed to the model.
     targets: The predictions or targets of the given model.

  Returns:
     Returns the model output, total loss and loss value calculated using the
     specified loss function. The total loss includes regularization losses and
     applies masking and sample weighting to the loss value.
  """
  total_loss = 0
  if len(inputs) == 1:
    outs = model.call(inputs[0])
  else:
    outs = model.call(inputs)
  if not isinstance(outs, list):
    outs = [outs]

  if not isinstance(targets, list):
    targets = [targets]

  loss_metrics = []
  with K.name_scope('loss'):
    for i, loss_fn in enumerate(model.loss_functions):
      # compute the loss
      output_loss = _eager_loss_fn(outs[i], targets[i], loss_fn,
                                   model.output_names[i])
      loss_metrics.append(K.mean(output_loss))

      mask = outs[i]._keras_mask
      # adapted from weighted_loss_fn
      if mask is not None:
        # mask should have the same shape as output_loss
        output_loss *= mask
        #  the loss per batch should be proportional
        #  to the number of unmasked samples.
        output_loss /= K.mean(mask)

      # adapted from weighted_loss_fn
      # apply sample weighting
      if model.sample_weights:
        # reduce score_array to same ndim as weight array
        ndim = K.ndim(output_loss)
        weight_ndim = K.ndim(model.sample_weights)
        output_loss = K.mean(output_loss, axis=list(range(weight_ndim, ndim)))
        output_loss *= model.sample_weights
        output_loss /= K.mean(K.cast(K.not_equal(model.sample_weights, 0),
                                     K.floatx()))
        output_loss = K.mean(output_loss)

      loss_weight = model.loss_weights_list[i]
      if total_loss is None:
        total_loss = loss_weight * output_loss
      else:
        total_loss += loss_weight * output_loss

    total_loss = K.mean(total_loss)
    # Add regularization losses
    custom_losses = []
    for layer in model.layers:
      if layer.losses:
        custom_losses += layer.losses

    if custom_losses:
      total_loss += sum(custom_losses)

  return outs, total_loss, loss_metrics
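Finally, a small NumPy sketch (toy numbers) of the mask normalization used in both versions above: multiplying the per-timestep loss by the mask and dividing by the mask's mean makes the batch loss an average over the unmasked entries only.

import numpy as np

per_step_loss = np.array([[0.5, 0.2, 0.1],
                          [0.4, 0.9, 0.3]], dtype=np.float32)
mask = np.array([[1, 1, 1],
                 [1, 0, 0]], dtype=np.float32)  # 4 of 6 timesteps are real

masked = per_step_loss * mask
normalized = masked / mask.mean()  # mask.mean() == 4/6
print(normalized.mean())           # masked.sum() / 4, i.e. 0.3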