Example #1
    def call(self, x):
        if len(x) != 2:
            raise Exception('input layers must be a list: mean and stddev')
        if len(x[0].shape) != 2 or len(x[1].shape) != 2:
            raise Exception('input shape is not a vector [batchSize, latentSize]')

        mean = x[0]
        stddev = x[1]

        if self.reg == 'bvae':
            # kl divergence:
            latent_loss = -0.5 * K.mean(1 + stddev
                                - K.square(mean)
                                - K.exp(stddev), axis=-1)
            # use beta to force less usage of vector space:
            # also try to use <capacity> dimensions of the space:
            latent_loss = self.beta * K.abs(latent_loss - self.capacity/self.shape.as_list()[1])
            self.add_loss(latent_loss, x)
        elif self.reg == 'vae':
            # kl divergence:
            latent_loss = -0.5 * K.mean(1 + stddev
                                - K.square(mean)
                                - K.exp(stddev), axis=-1)
            self.add_loss(latent_loss, x)

        epsilon = K.random_normal(shape=self.shape,
                              mean=0., stddev=1.)
        if self.random:
            # 'reparameterization trick':
            return mean + K.exp(stddev) * epsilon
        else: # do not perform random sampling, simply grab the impulse value
            return mean + 0*stddev # Keras needs the *0 so the gradient is not None
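
Note that the snippet's `stddev` variable actually holds a log-variance: the KL term `1 + stddev - mean^2 - exp(stddev)` is the standard closed form for a log-variance, in which case the reparameterization step should scale the noise by `exp(0.5 * log_var)`. A minimal self-contained sketch of that sampling step, assuming TensorFlow 2.x with `tf.keras.backend` as `K`:

import tensorflow as tf
from tensorflow.keras import backend as K

def sample_latent(mean, log_var):
    # epsilon ~ N(0, 1), matching the latent shape dynamically
    epsilon = K.random_normal(shape=tf.shape(mean))
    # z = mu + sigma * eps, with sigma = exp(0.5 * log_var)
    return mean + K.exp(0.5 * log_var) * epsilon

mean = tf.zeros((4, 8))     # [batchSize, latentSize]
log_var = tf.zeros((4, 8))
print(sample_latent(mean, log_var).shape)  # (4, 8)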
Example #2
  def weighted(y_true, y_pred, weights, mask=None):
    """Wrapper function.

    Arguments:
        y_true: `y_true` argument of `fn`.
        y_pred: `y_pred` argument of `fn`.
        weights: Weights tensor.
        mask: Mask tensor.

    Returns:
        Scalar tensor.
    """
    # score_array has ndim >= 2
    score_array = fn(y_true, y_pred)
    if mask is not None:
      # Cast the mask to floatX to avoid float64 upcasting in theano
      mask = math_ops.cast(mask, K.floatx())
      # mask should have the same shape as score_array
      score_array *= mask
      #  the loss per batch should be proportional
      #  to the number of unmasked samples.
      score_array /= K.mean(mask)

    # apply sample weighting
    if weights is not None:
      # reduce score_array to same ndim as weight array
      ndim = K.ndim(score_array)
      weight_ndim = K.ndim(weights)
      score_array = K.mean(score_array, axis=list(range(weight_ndim, ndim)))
      score_array *= weights
      score_array /= K.mean(
          math_ops.cast(math_ops.not_equal(weights, 0), K.floatx()))
    return K.mean(score_array)
Example #3
  def weighted(y_true, y_pred, weights, mask=None):
    """Wrapper function.

    Arguments:
        y_true: `y_true` argument of `fn`.
        y_pred: `y_pred` argument of `fn`.
        weights: Weights tensor.
        mask: Mask tensor.

    Returns:
        Scalar tensor.
    """
    # score_array has ndim >= 2
    score_array = fn(y_true, y_pred)
    if mask is not None:
      mask = math_ops.cast(mask, y_pred.dtype)
      # Update weights with mask.
      if weights is None:
        weights = mask
      else:
        # Update shape of weights if possible before adding mask.
        # Update dimensions of weights to match with mask if possible.
        mask, _, weights = metrics_module.squeeze_or_expand_dimensions(
            mask, None, weights)
        try:
          # Broadcast weights if possible.
          weights = weights_broadcast_ops.broadcast_weights(weights, mask)
          weights *= mask
        except ValueError:
          score_array *= mask
          score_array /= K.mean(mask)
          # TODO(psv): Handle case when mask and weight shapes are not
          # compatible.

    # Apply sample weighting.
    if weights is not None:

      # Update dimensions of weights to match with values if possible.
      score_array, _, weights = metrics_module.squeeze_or_expand_dimensions(
          score_array, None, weights)
      try:
        # Broadcast weights if possible.
        weights = weights_broadcast_ops.broadcast_weights(weights, score_array)
      except ValueError:
        # Reduce values to same ndim as weight array.
        ndim = K.ndim(score_array)
        weight_ndim = K.ndim(weights)
        score_array = K.mean(score_array, axis=list(range(weight_ndim, ndim)))

      score_array = math_ops.multiply(score_array, weights)
      score_array = math_ops.reduce_sum(score_array)
      weights = math_ops.reduce_sum(weights)
      score_array = metrics_module.safe_div(score_array, weights)
    return K.mean(score_array)
Example #4
def correlation_coefficient_loss(y_true, y_pred):
    x = y_true
    y = y_pred
    mx = K.mean(x)
    my = K.mean(y)
    xm, ym = x-mx, y-my
    r_num = K.sum(tf.multiply(xm,ym))
    r_den = K.sqrt(tf.multiply(K.sum(K.square(xm)), K.sum(K.square(ym))))
    r = r_num / r_den

    r = K.maximum(K.minimum(r, 1.0), -1.0)
    return 1 - K.square(r)
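
A quick numerical check of this loss (a sketch, assuming TensorFlow 2.x eager mode, with `tf` and `K` imported as the snippet expects): perfectly correlated or anti-correlated inputs give r = ±1 and a loss near 0, so only weak correlation is penalized.

import tensorflow as tf
from tensorflow.keras import backend as K

y = tf.constant([1., 2., 3., 4.])
print(float(correlation_coefficient_loss(y, y)))    # ~0.0 (r = 1)
print(float(correlation_coefficient_loss(y, -y)))   # ~0.0 (r = -1)
uncorr = tf.constant([1., -1., -1., 1.])
print(float(correlation_coefficient_loss(y, uncorr)))  # ~1.0 (r = 0)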
Example #5
def _eager_metrics_fn(model,
                      outputs,
                      targets,
                      sample_weights=None,
                      masks=None,
                      return_stateful_result=True):
  """Calculates the metrics for each output of the given model.

  Arguments:
      model: The model on which metrics are being calculated.
      outputs: The outputs of the given model.
      targets: The predictions or targets of the given model.
      sample_weights: Optional list of sample weights for each output.
      masks: Optional list of masks for each output.
      return_stateful_result: Boolean, indicates whether the stateful
        (aggregated)/stateless metric result should be returned.

  Returns:
      Returns the metric results for each output of the model.
  """
  outputs = nest.flatten(outputs)
  targets = nest.flatten(targets)
  # TODO(psv): Consider supporting skip target indices in eager mode?
  metric_results = model._handle_metrics(
      outputs,
      targets=targets,
      sample_weights=sample_weights,
      masks=masks,
      return_stateful_result=return_stateful_result)
  return [backend.mean(t) for t in metric_results]
Example #6
def logloss(y_true, y_pred):
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  losses = math_ops.multiply(y_true, math_ops.log(y_pred + K.epsilon()))
  losses += math_ops.multiply((1 - y_true),
                              math_ops.log(1 - y_pred + K.epsilon()))
  return K.mean(-losses, axis=-1)
Example #7
def compute_weighted_loss(losses,
                          sample_weight=None,
                          reduction=ReductionV2.SUM_OVER_BATCH_SIZE,
                          name=None):
  """Computes the weighted loss.

  Args:
    losses: `Tensor` of shape `[batch_size, d1, ... dN]`.
    sample_weight: Optional `Tensor` whose rank is either 0, or the same rank
      as `losses`, or broadcastable to `losses`.
    reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.
      Default value is `SUM_OVER_BATCH_SIZE`.
    name: Optional name for the op.

  Raises:
    ValueError: If the shape of `sample_weight` is not compatible with `losses`.

  Returns:
    Weighted loss `Tensor` of the same type as `losses`. If `reduction` is
    `NONE`, this has the same shape as `losses`; otherwise, it is scalar.
  """
  ReductionV2.validate(reduction)

  # If this function is called directly, then we just default 'AUTO' to
  # 'SUM_OVER_BATCH_SIZE'. Eg. Canned estimator use cases.
  if reduction == ReductionV2.AUTO:
    reduction = ReductionV2.SUM_OVER_BATCH_SIZE
  if sample_weight is None:
    sample_weight = 1.0
  with K.name_scope(name or 'weighted_loss'):
    # Save the `reduction` argument for loss normalization when distributing
    # to multiple replicas. Used only for estimator + v1 optimizer flow.
    ops.get_default_graph()._last_loss_reduction = reduction  # pylint: disable=protected-access

    # Update dimensions of `sample_weight` to match with `losses` if possible.
    losses, _, sample_weight = squeeze_or_expand_dimensions(
        losses, None, sample_weight)
    losses = ops.convert_to_tensor(losses)
    input_dtype = losses.dtype
    losses = math_ops.cast(losses, dtypes.float32)
    sample_weight = math_ops.cast(sample_weight, dtypes.float32)

    try:
      # Broadcast weights if possible.
      sample_weight = weights_broadcast_ops.broadcast_weights(
          sample_weight, losses)
    except ValueError:
      # Reduce values to same ndim as weight array.
      ndim = K.ndim(losses)
      weight_ndim = K.ndim(sample_weight)
      losses = K.mean(losses, axis=list(range(weight_ndim, ndim)))

    sample_weight.shape.assert_is_compatible_with(losses.shape)
    weighted_losses = math_ops.multiply(losses, sample_weight)
    # Apply reduction function to the individual weighted losses.
    loss = reduce_weighted_loss(weighted_losses, reduction)
    # Convert the result back to the input type.
    loss = math_ops.cast(loss, input_dtype)
    return loss
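
The try/except above implements "broadcast the weights if possible, otherwise average away the trailing loss dimensions until the ranks match". A hedged sketch of that fallback path using public TF 2.x ops:

import tensorflow as tf
from tensorflow.keras import backend as K

losses = tf.ones([4, 3, 2])              # [batch, d1, d2]
weights = tf.constant([1., 0., 1., 2.])  # rank 1: not broadcastable to rank 3
ndim, weight_ndim = K.ndim(losses), K.ndim(weights)
losses = K.mean(losses, axis=list(range(weight_ndim, ndim)))  # shape -> [4]
print((losses * weights).numpy())        # [1. 0. 1. 2.]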
Example #8
def binary_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0):

  def _smooth_labels():
    return y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing

  y_true = smart_cond.smart_cond(label_smoothing,
                                 _smooth_labels, lambda: y_true)
  return K.mean(
      K.binary_crossentropy(y_true, y_pred, from_logits=from_logits), axis=-1)
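
The `_smooth_labels` closure is standard label smoothing: hard 0/1 targets are pulled toward 0.5. For example, with `label_smoothing = 0.1`:

label_smoothing = 0.1
for y in (0.0, 1.0):
    print(y, '->', y * (1.0 - label_smoothing) + 0.5 * label_smoothing)
# 0.0 -> 0.05
# 1.0 -> 0.95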
Example #9
  def weighted(y_true, y_pred, weights, mask=None):
    """Wrapper function.

    Arguments:
        y_true: `y_true` argument of `fn`.
        y_pred: `y_pred` argument of `fn`.
        weights: Weights tensor.
        mask: Mask tensor.

    Returns:
        Scalar tensor.
    """
    # score_array has ndim >= 2
    score_array = fn(y_true, y_pred)
    if mask is not None:
      mask = math_ops.cast(mask, y_pred.dtype)
      # Update weights with mask.
      if weights is None:
        weights = mask
      else:
        # Update dimensions of weights to match with mask if possible.
        mask, _, weights = squeeze_or_expand_dimensions(mask, None, weights)
        weights *= mask

    # Apply sample weighting.
    if weights is not None:

      # Update dimensions of weights to match with values if possible.
      score_array, _, weights = squeeze_or_expand_dimensions(
          score_array, None, weights)
      try:
        # Broadcast weights if possible.
        weights = weights_broadcast_ops.broadcast_weights(weights, score_array)
      except ValueError:
        # Reduce values to same ndim as weight array.
        ndim = K.ndim(score_array)
        weight_ndim = K.ndim(weights)
        score_array = K.mean(score_array, axis=list(range(weight_ndim, ndim)))

      score_array = math_ops.multiply(score_array, weights)
      score_array = math_ops.reduce_sum(score_array)
      weights = math_ops.reduce_sum(weights)
      score_array = math_ops.div_no_nan(score_array, weights)
    return K.mean(score_array)
Example #10
def compute_weighted_loss(losses,
                          sample_weight=None,
                          reduction=losses_impl.ReductionV2.SUM_OVER_BATCH_SIZE,
                          name=None):
  """Computes the weighted loss.

  Args:
    losses: `Tensor` of shape `[batch_size, d1, ... dN]`.
    sample_weight: Optional `Tensor` whose rank is either 0, or the same rank
      as `losses`, or broadcastable to `losses`.
    reduction: Type of `tf.losses.Reduction` to apply to loss. Default value is
      `SUM_OVER_BATCH_SIZE`.
    name: Optional name for the op.

  Raises:
    ValueError: If the shape of `sample_weight` is not compatible with `losses`.

  Returns:
    Weighted loss `Tensor` of the same type as `losses`. If `reduction` is
    `NONE`, this has the same shape as `losses`; otherwise, it is scalar.
  """
  losses_impl.ReductionV2.validate(reduction)
  if sample_weight is None:
    sample_weight = 1.0
  with ops.name_scope(name, 'weighted_loss', (losses, sample_weight)):
    # Save the `reduction` argument for loss normalization when distributing
    # to multiple replicas.
    # TODO(josh11b): Associate it with the returned op for more precision.
    ops.get_default_graph()._last_loss_reduction = reduction  # pylint: disable=protected-access

    # Update dimensions of `sample_weight` to match with `losses` if possible.
    losses, _, sample_weight = squeeze_or_expand_dimensions(
        losses, None, sample_weight)
    losses = ops.convert_to_tensor(losses)
    input_dtype = losses.dtype
    losses = math_ops.to_float(losses)
    sample_weight = math_ops.to_float(sample_weight)

    try:
      # Broadcast weights if possible.
      sample_weight = weights_broadcast_ops.broadcast_weights(
          sample_weight, losses)
    except ValueError:
      # Reduce values to same ndim as weight array.
      ndim = K.ndim(losses)
      weight_ndim = K.ndim(sample_weight)
      losses = K.mean(losses, axis=list(range(weight_ndim, ndim)))

    sample_weight.get_shape().assert_is_compatible_with(losses.get_shape())
    weighted_losses = math_ops.multiply(losses, sample_weight)
    # Apply reduction function to the individual weighted losses.
    loss = _reduce_weighted_loss(weighted_losses, reduction)
    # Convert the result back to the input type.
    loss = math_ops.cast(loss, input_dtype)
    return loss
Example #11
 def call(self, inputs, mask=None):
   steps_axis = 1 if self.data_format == 'channels_last' else 2
   if mask is not None:
     mask = math_ops.cast(mask, backend.floatx())
     input_shape = inputs.shape.as_list()
     broadcast_shape = [-1, input_shape[steps_axis], 1]
     mask = array_ops.reshape(mask, broadcast_shape)
     inputs *= mask
     return backend.sum(inputs, axis=steps_axis) / math_ops.reduce_sum(
         mask, axis=steps_axis)
   else:
     return backend.mean(inputs, axis=steps_axis)
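
The masked branch computes a mean over the steps axis that ignores masked timesteps. A minimal sketch with plain TF 2.x ops (channels_last assumed):

import tensorflow as tf

inputs = tf.ones([1, 4, 3])             # (batch, steps, features)
mask = tf.constant([[1., 1., 0., 0.]])  # last two timesteps masked out
mask = tf.reshape(mask, [-1, 4, 1])     # broadcast shape [-1, steps, 1]
print((tf.reduce_sum(inputs * mask, axis=1) /
       tf.reduce_sum(mask, axis=1)).numpy())  # [[1. 1. 1.]]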
Example #12
def binary_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0):  # pylint: disable=missing-docstring
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  label_smoothing = ops.convert_to_tensor(label_smoothing, dtype=K.floatx())

  def _smooth_labels():
    return y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing

  y_true = smart_cond.smart_cond(label_smoothing,
                                 _smooth_labels, lambda: y_true)
  return K.mean(
      K.binary_crossentropy(y_true, y_pred, from_logits=from_logits), axis=-1)
Example #13
def compute_weighted_loss(losses,
                          sample_weight=None,
                          reduction=ReductionV2.SUM_OVER_BATCH_SIZE,
                          name=None):
  """Computes the weighted loss.

  Args:
    losses: `Tensor` of shape `[batch_size, d1, ... dN]`.
    sample_weight: Optional `Tensor` whose rank is either 0, or the same rank
      as `losses`, or broadcastable to `losses`.
    reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.
      Default value is `SUM_OVER_BATCH_SIZE`.
    name: Optional name for the op.

  Raises:
    ValueError: If the shape of `sample_weight` is not compatible with `losses`.

  Returns:
    Weighted loss `Tensor` of the same type as `losses`. If `reduction` is
    `NONE`, this has the same shape as `losses`; otherwise, it is scalar.
  """
  ReductionV2.validate(reduction)
  if sample_weight is None:
    sample_weight = 1.0
  with ops.name_scope(name, 'weighted_loss', (losses, sample_weight)):
    # Update dimensions of `sample_weight` to match with `losses` if possible.
    losses, _, sample_weight = squeeze_or_expand_dimensions(
        losses, None, sample_weight)
    losses = ops.convert_to_tensor(losses)
    input_dtype = losses.dtype
    losses = math_ops.cast(losses, dtypes.float32)
    sample_weight = math_ops.cast(sample_weight, dtypes.float32)

    try:
      # Broadcast weights if possible.
      sample_weight = weights_broadcast_ops.broadcast_weights(
          sample_weight, losses)
    except ValueError:
      # Reduce values to same ndim as weight array.
      ndim = K.ndim(losses)
      weight_ndim = K.ndim(sample_weight)
      losses = K.mean(losses, axis=list(range(weight_ndim, ndim)))

    sample_weight.shape.assert_is_compatible_with(losses.shape)
    weighted_losses = math_ops.multiply(losses, sample_weight)
    # Apply reduction function to the individual weighted losses.
    loss = reduce_weighted_loss(weighted_losses, reduction)
    # Convert the result back to the input type.
    loss = math_ops.cast(loss, input_dtype)
    return loss
Example #14
def hinge(y_true, y_pred):
  """Computes the hinge loss between `y_true` and `y_pred`.

  Args:
    y_true: The ground truth values. `y_true` values are expected to be -1 or 1.
      If binary (0 or 1) labels are provided we will convert them to -1 or 1.
    y_pred: The predicted values.

  Returns:
    Tensor with one scalar loss entry per sample.
  """
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  y_true = _maybe_convert_labels(y_true)
  return K.mean(math_ops.maximum(1. - y_true * y_pred, 0.), axis=-1)
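
The binary-label conversion the docstring mentions maps {0, 1} to {-1, 1} via `2 * y - 1`; a hedged sketch (the real `_maybe_convert_labels` additionally checks whether the labels need converting at all):

import tensorflow as tf

y_true = tf.constant([0., 1., 1., 0.])
print((2. * y_true - 1.).numpy())  # [-1.  1.  1. -1.]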
Example #15
def _eager_metrics_fn(model, outputs, targets, sample_weights=None, masks=None):
  """Calculates the metrics for each output of the given model.

  Arguments:
      model: The model on which metrics are being calculated.
      outputs: The outputs of the given model.
      targets: The predictions or targets of the given model.
      sample_weights: Optional list of sample weights for each output.
      masks: Optional list of masks for each output.

  Returns:
      Returns the metric results for each output of the model.
  """
  outputs = generic_utils.to_list(outputs)
  targets = generic_utils.to_list(targets)
  # TODO(psv): Consider supporting skip target indices in eager mode?
  metric_results = model._handle_metrics(
      outputs, targets=targets, sample_weights=sample_weights, masks=masks)
  return [backend.mean(t) for t in metric_results]
Example #16
def logcosh(y_true, y_pred):
  """Logarithm of the hyperbolic cosine of the prediction error.

  `log(cosh(x))` is approximately equal to `(x ** 2) / 2` for small `x` and
  to `abs(x) - log(2)` for large `x`. This means that 'logcosh' works mostly
  like the mean squared error, but will not be so strongly affected by the
  occasional wildly incorrect prediction.

  Arguments:
      y_true: tensor of true targets.
      y_pred: tensor of predicted targets.

  Returns:
      Tensor with one scalar loss entry per sample.
  """

  def _logcosh(x):
    return x + nn.softplus(-2. * x) - math_ops.log(2.)

  return K.mean(_logcosh(y_pred - y_true), axis=-1)
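
A quick numerical check of the docstring's two approximations (a sketch, assuming NumPy):

import numpy as np

for x in (0.01, 10.0):
    print(x, np.log(np.cosh(x)), x ** 2 / 2, abs(x) - np.log(2))
# x = 0.01: log(cosh(x)) ~= 5.0e-05 ~= x**2 / 2
# x = 10.0: log(cosh(x)) ~= 9.3069  ~= |x| - log(2)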
Example #17
def _eager_metrics_fn(model, outputs, targets):
  """Calculates the metrics for each output of the given model.

  Arguments:
      model: The model on which metrics are being calculated.
      outputs: The outputs of the given model.
      targets: The predictions or targets of the given model.

  Returns:
      Returns the metric names and metric results for each output of the model.
  """
  metric_names = []
  metric_results = []
  if not isinstance(outputs, list):
    outputs = [outputs]

  if not isinstance(targets, list):
    targets = [targets]

  for i in range(len(model.outputs)):
    output_metrics = model.nested_metrics[i]
    for nested_output_metric in output_metrics:
      metric_name, metric_fn = _get_metrics_info(
          nested_output_metric, backend.int_shape(model.outputs[i]),
          model.loss_functions[i])

      if len(model.output_names) > 1:
        metric_name = model.output_names[i] + '_' + metric_name
        if metric_name not in model.metrics_names:
          model.metrics_names.append(metric_name)

      with backend.name_scope(metric_name):
        metric_result = metric_fn(targets[i], outputs[i])
        metric_names.append(metric_name)
        metric_results.append(backend.mean(metric_result))

  return metric_results
Example #18
def _model_loss(model, inputs, targets, sample_weights=None, training=False):
    """Calculates the loss for a given model.

  Arguments:
      model: The model on which metrics are being calculated.
      inputs: List of input arrays.
      targets: List of target arrays.
      sample_weights: Optional list of sample weight arrays.
      training: Whether the model should be run in inference or training mode.

  Returns:
     Returns the model output, total loss and loss value calculated using the
     specified loss function. The total loss includes regularization losses and
     applies masking and sample weighting to the loss value.
  """
    total_loss = 0
    if len(inputs) == 1:
        if model._expects_training_arg:
            outs = model.call(inputs[0], training=training)
        else:
            outs = model.call(inputs[0])
    else:
        if model._expects_training_arg:
            outs = model.call(inputs, training=training)
        else:
            outs = model.call(inputs)
    if not isinstance(outs, list):
        outs = [outs]

    if not isinstance(targets, list):
        targets = [targets]

    loss_metrics = []
    with backend.name_scope('loss'):
        for i, loss_fn in enumerate(model.loss_functions):
            if sample_weights:
                weights = sample_weights[i]
            else:
                weights = None

            # TODO(fchollet): support masking; in practice `_keras_mask` is never
            # set in this context currently.
            mask = outs[i]._keras_mask

            weighted_masked_fn = training_utils.weighted_masked_objective(
                loss_fn)
            with backend.name_scope(model.output_names[i] + '_loss'):
                output_loss = weighted_masked_fn(targets[i],
                                                 outs[i],
                                                 weights,
                                                 mask=mask)
            # If the number of outputs is 1 then we don't append the loss metric
            # associated with each model output. When there are multiple outputs
            # associated with a model, each output's loss is calculated and returned
            # as part of the loss_metrics.
            if len(model.outputs) > 1:
                loss_metrics.append(backend.mean(output_loss))

            loss_weight = model.loss_weights_list[i]
            if total_loss is None:
                total_loss = loss_weight * output_loss
            else:
                total_loss += loss_weight * output_loss

        total_loss = backend.mean(total_loss)
        # Add regularization losses
        custom_losses = []
        for layer in model.layers:
            if layer.losses:
                custom_losses += layer.losses

        if custom_losses:
            total_loss += sum(custom_losses)

    return outs, total_loss, loss_metrics
Example #19
def iterator_fit_loop(model,
                      inputs,
                      class_weight,
                      steps_per_epoch,
                      epoch_logs,
                      val_inputs=None,
                      val_targets=None,
                      val_sample_weights=None,
                      epochs=1,
                      verbose=1,
                      callbacks=None,
                      validation_steps=None,
                      do_validation=False,
                      batch_size=None):
  """Fit function for eager execution when input is given as dataset iterator.

  Updates the given epoch logs.

  Arguments:
      model: Instance of the `Model`.
      inputs: Input dataset iterator.
      class_weight: Optional class-weight array to weight the importance of
          samples in `inputs` based on the class they belong to, as conveyed by
          the targets from the `inputs` iterator.
      steps_per_epoch: Total number of steps (batches of samples)
          before declaring one epoch finished and starting the
          next epoch.
      epoch_logs: Dictionary of logs from every epoch.
      val_inputs: Input data for validation.
      val_targets: Target data for validation.
      val_sample_weights: Sample weight data for validation.
      epochs: Number of times to iterate over the data.
      verbose: Verbosity mode, 0, 1 or 2.
      callbacks: CallbackList instance. Controls callbacks during training.
      validation_steps: Number of steps to run validation for (only if doing
        validation from data tensors). Ignored with default value of `None`.
      do_validation: Boolean value indicating whether we should do validation.
      batch_size: int, val_inputs and val_targets will be evaled batch by
        batch with size batch_size if they are array.

  Raises:
      ValueError: In case of mismatch between given number of inputs and
        expectations of the model.
  """
  assert isinstance(inputs, iterator_ops.EagerIterator)

  # make sure either x,y or x,y,sample_weights is provided
  if (not isinstance(inputs.output_shapes, (list, tuple)) or
      len(inputs.output_shapes) not in (2, 3)):
    raise ValueError('Please provide either inputs and targets '
                     'or inputs, targets, and sample_weights')

  for step_index in range(steps_per_epoch):
    batch_logs = {'batch': step_index, 'size': 1}
    callbacks.on_batch_begin(step_index, batch_logs)

    # Get data from the iterator.
    try:
      next_element = inputs.get_next()
    except errors.OutOfRangeError:
      logging.warning(
          'Your dataset iterator ran out of data; interrupting training. Make '
          'sure that your dataset can generate at least '
          '`steps_per_epoch * epochs` batches (in this case, %d batches). You '
          'may need to use the repeat() function when building your '
          'dataset.' % (steps_per_epoch * epochs))
      break

    if len(inputs.output_shapes) == 2:
      x, y = next_element
      sample_weights = None
    else:
      x, y, sample_weights = next_element

    # Validate and standardize data.
    x, y, sample_weights = model._standardize_user_data(
        x, y, sample_weight=sample_weights, class_weight=class_weight)
    x = training_utils.cast_if_floating_dtype(x)
    y = training_utils.cast_if_floating_dtype(y)
    if sample_weights:
      sample_weights = [
          training_utils.cast_if_floating_dtype(
              ops.convert_to_tensor(val, dtype=backend.floatx()))
          if val is not None else None for val in sample_weights
      ]

    # Set stateful_metrics in callbacks. We do not do this before the
    # `steps_per_epoch` loop because model will be compiled only in the first
    # iteration of this loop in the deferred build scenario.
    if step_index == 0:
      for cbk in callbacks:
        if (isinstance(cbk, cbks.BaseLogger) or
            isinstance(cbk, cbks.ProgbarLogger)):
          cbk.stateful_metrics = model.stateful_metric_names

    if step_index == 0 and not callbacks.params['metrics']:
      callback_metrics = copy.copy(model.metrics_names)
      if do_validation:
        callback_metrics += ['val_' + n for n in model.metrics_names]
      callbacks.set_params({
          'batch_size': batch_size,
          'epochs': epochs,
          'steps': steps_per_epoch,
          'verbose': verbose,
          'do_validation': do_validation,
          'metrics': callback_metrics or [],
          'validation_steps': validation_steps
      })

    # Train model.
    outs, loss, loss_metrics, masks = _process_single_batch(
        model, x, y, sample_weights=sample_weights, training=True)
    outs = generic_utils.to_list(outs)

    # Calculate metrics.
    for l, o in zip(model.metrics_names, outs):
      batch_logs[l] = o
    # Required for eager execution
    metrics_results = _eager_metrics_fn(
        model, outs, y, sample_weights=sample_weights, masks=masks)
    batch_logs['loss'] = tensor_util.constant_value(backend.mean(loss))

    for k, v in zip(model.metrics_names,
                    [backend.mean(loss)] + loss_metrics + metrics_results):
      batch_logs[k] = tensor_util.constant_value(v)
    callbacks.on_batch_end(step_index, batch_logs)
    if callbacks.model.stop_training:
      break

    if step_index == steps_per_epoch - 1:
      if do_validation:
        val_outs = test_loop(
            model,
            val_inputs,
            val_targets,
            sample_weights=val_sample_weights,
            steps=validation_steps,
            verbose=0,
            batch_size=batch_size)
        if not isinstance(val_outs, list):
          val_outs = [val_outs]
        # Same labels assumed.
        for l, o in zip(model.metrics_names, val_outs):
          epoch_logs['val_' + l] = o
Example #20
def batch_fit_loop(model,
                   inputs,
                   targets,
                   epoch_logs,
                   index_array,
                   out_labels,
                   callback_model,
                   batch_size,
                   sample_weights=None,
                   val_inputs=None,
                   val_targets=None,
                   val_sample_weights=None,
                   callbacks=None,
                   shuffle=True,
                   num_train_samples=None,
                   do_validation=False):
    """Fit function for eager execution when input is given as arrays or tensors.

  Updates the given epoch logs.

  Arguments:
      model: Instance of the `Model`.
      inputs: List of input arrays.
      targets: List of target arrays.
      epoch_logs: Dictionary of logs from every epoch.
      index_array: Index array generated from number of training samples.
      out_labels: Output labels generated from model metric names.
      callback_model: Instance of `Model` to callback.
      batch_size: Integer batch size or None if unknown.
      sample_weights: Optional list of sample weight arrays.
      val_inputs: Input data for validation.
      val_targets: Target data for validation.
      val_sample_weights: Sample weight data for validation.
      callbacks: List of callbacks to be called during training.
      shuffle: Whether to shuffle the data at the beginning of each epoch.
      num_train_samples: Integer number of training samples.
      do_validation: Boolean value indicating whether we should do validation.
  """
    # TODO(psv): Create a dataset iterator instead of manually creating batches
    # here and in batch_test_loop, batch_predict_loop.
    if shuffle == 'batch':
        index_array = model._batch_shuffle(index_array, batch_size)
    elif shuffle:
        np.random.shuffle(index_array)

    batches = generic_utils.make_batches(num_train_samples, batch_size)

    for batch_index, (batch_start, batch_end) in enumerate(batches):
        batch_ids = index_array[batch_start:batch_end]
        inputs_batch = slice_arrays(inputs, batch_ids, contiguous=not shuffle)
        targets_batch = slice_arrays(targets,
                                     batch_ids,
                                     contiguous=not shuffle)
        if sample_weights:
            sample_weights_batch = slice_arrays(sample_weights,
                                                batch_ids,
                                                contiguous=not shuffle)
        else:
            sample_weights_batch = None
        batch_logs = {}
        batch_logs['batch'] = batch_index
        batch_logs['size'] = len(batch_ids)

        callbacks.on_batch_begin(batch_index, batch_logs)

        inputs_batch = [
            ops.convert_to_tensor(val, dtype=backend.floatx())
            for val in inputs_batch
        ]
        targets_batch = [
            ops.convert_to_tensor(val, dtype=backend.floatx())
            for val in targets_batch
        ]
        if sample_weights:
            sample_weights_batch = [
                ops.convert_to_tensor(val, dtype=backend.floatx())
                if val is not None else None for val in sample_weights_batch
            ]

        outs, loss, loss_metrics = _process_single_batch(
            model,
            inputs_batch,
            targets_batch,
            sample_weights=sample_weights_batch,
            training=True)

        if not isinstance(outs, list):
            outs = [outs]

        for l, o in zip(out_labels, outs):
            batch_logs[l] = o
        # Required for eager execution
        metrics_results = _eager_metrics_fn(model, outs, targets_batch)
        batch_logs['loss'] = tensor_util.constant_value(backend.mean(loss))

        for k, v in zip(model.metrics_names,
                        [backend.mean(loss)] + loss_metrics + metrics_results):
            batch_logs[k] = tensor_util.constant_value(v)
        callbacks.on_batch_end(batch_index, batch_logs)
        if callback_model.stop_training:
            break

        if batch_index == len(batches) - 1:  # Last batch.
            if do_validation:
                val_outs = test_loop(model,
                                     val_inputs,
                                     val_targets,
                                     sample_weights=val_sample_weights,
                                     batch_size=batch_size,
                                     verbose=0)
                if not isinstance(val_outs, list):
                    val_outs = [val_outs]
                # Same labels assumed.
                for l, o in zip(out_labels, val_outs):
                    epoch_logs['val_' + l] = o
Example #21
def hinge(y_true, y_pred):
  return K.mean(math_ops.maximum(1. - y_true * y_pred, 0.), axis=-1)
Example #22
def top_k_categorical_accuracy(y_true, y_pred, k=5):
    return K.mean(nn.in_top_k(y_pred, math_ops.argmax(y_true, axis=-1), k),
                  axis=-1)
Example #23
def binary_crossentropy(y_true, y_pred):
  return K.mean(K.binary_crossentropy(y_true, y_pred), axis=-1)
Example #24
def mean_absolute_error(y_true, y_pred):
    y_pred = ops.convert_to_tensor(y_pred)
    y_true = math_ops.cast(y_true, y_pred.dtype)
    return K.mean(math_ops.abs(y_pred - y_true), axis=-1)
Example #25
def mean_absolute_percentage_error(y_true, y_pred):  # pylint: disable=missing-docstring
    y_pred = ops.convert_to_tensor(y_pred)
    y_true = math_ops.cast(y_true, y_pred.dtype)
    diff = math_ops.abs(
        (y_true - y_pred) / K.clip(math_ops.abs(y_true), K.epsilon(), None))
    return 100. * K.mean(diff, axis=-1)
Example #26
 def calculate_mse(tensor1, tensor2):
     return k.mean(k.square(tensor1 - tensor2))
Example #27
def mean_squared_error(y_true, y_pred):
    y_pred = ops.convert_to_tensor(y_pred)
    y_true = math_ops.cast(y_true, y_pred.dtype)
    return K.mean(math_ops.squared_difference(y_pred, y_true), axis=-1)
Example #28
 def calculate_kl_loss(mu, sigma):
     """ Function to calculate the KL loss term.
      Considers the tolerance value for which optimization for KL should stop """
     # kullback Leibler loss between normal distributions
     kl_cost = -0.5 * k.mean(1.0 + sigma - k.square(mu) - k.exp(sigma))
     return kl_cost
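
A minimal sketch of wiring this term into a model via `add_loss`, assuming `k` is `tensorflow.keras.backend` (as the snippet implies) and that `mu` and `sigma` are hypothetical Dense heads on an encoder, with `sigma` interpreted as a log-variance:

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend as k

inputs = keras.Input(shape=(32,))
h = keras.layers.Dense(16, activation='relu')(inputs)
mu = keras.layers.Dense(8)(h)     # latent mean
sigma = keras.layers.Dense(8)(h)  # latent log-variance
model = keras.Model(inputs, [mu, sigma])
model.add_loss(calculate_kl_loss(mu, sigma))  # KL term joins the total loss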
Example #29
def batch_test_loop(model,
                    inputs,
                    targets,
                    batch_size,
                    sample_weights=None,
                    verbose=0):
    """Test function for eager execution when input is given as arrays or tensors.

  Arguments:
      model: Model instance that is being evaluated in Eager mode.
      inputs: List of input arrays.
      targets: List of target arrays.
      batch_size: Integer batch size.
      sample_weights: Optional list of sample weight arrays.
      verbose: Verbosity mode.

  Returns:
      Scalar loss (if the model has a single output and no metrics)
      or list of scalars (if the model has multiple outputs
      and/or metrics). The attribute `model.metrics_names` will give you
      the display labels for the scalar outputs.
  """
    outs = []
    feed_data = inputs + targets
    if sample_weights:
        feed_data += sample_weights
    num_samples = training_utils.check_num_samples(feed_data,
                                                   batch_size=batch_size)
    if verbose == 1:
        progbar = generic_utils.Progbar(target=num_samples)
    batches = generic_utils.make_batches(num_samples, batch_size)
    index_array = np.arange(num_samples)
    for batch_index, (batch_start, batch_end) in enumerate(batches):
        batch_ids = index_array[batch_start:batch_end]
        inputs_batch = slice_arrays(inputs, batch_ids)
        targets_batch = slice_arrays(targets, batch_ids)
        if sample_weights:
            sample_weights_batch = slice_arrays(sample_weights, batch_ids)
        else:
            sample_weights_batch = None

        inputs_batch = [
            ops.convert_to_tensor(val, dtype=backend.floatx())
            for val in inputs_batch
        ]
        targets_batch = [
            ops.convert_to_tensor(val, dtype=backend.floatx())
            for val in targets_batch
        ]
        if sample_weights:
            sample_weights_batch = [
                ops.convert_to_tensor(val, dtype=backend.floatx())
                if val is not None else None for val in sample_weights_batch
            ]

        loss_outs, loss, loss_metrics = _model_loss(
            model,
            inputs_batch,
            targets_batch,
            sample_weights=sample_weights_batch,
            training=False)
        metrics_results = _eager_metrics_fn(model, loss_outs, targets_batch)
        batch_outs = []
        for _, v in zip(model.metrics_names,
                        [backend.mean(loss)] + loss_metrics + metrics_results):
            batch_outs.append(tensor_util.constant_value(v))

        if isinstance(batch_outs, list):
            if batch_index == 0:
                for _ in enumerate(batch_outs):
                    outs.append(0.)
            for i, batch_out in enumerate(batch_outs):
                outs[i] += batch_out * len(batch_ids)
        else:
            if batch_index == 0:
                outs.append(0.)
            outs[0] += batch_outs * len(batch_ids)

        if verbose == 1:
            progbar.update(batch_end)

    for i in range(len(outs)):
        outs[i] /= num_samples
    if len(outs) == 1:
        return outs[0]
    return outs
Example #30
def iterator_test_loop(model, inputs, steps, verbose=0):
    """Test function for eager execution when input is given as dataset iterator.

  Arguments:
      model: Model instance that is being evaluated in Eager mode.
      inputs: Input dataset iterator.
      steps: Total number of steps (batches of samples) before declaring
        predictions finished.
      verbose: Verbosity mode.

  Returns:
      Scalar loss (if the model has a single output and no metrics)
      or list of scalars (if the model has multiple outputs
      and/or metrics). The attribute `model.metrics_names` will give you
      the display labels for the scalar outputs.

  Raises:
      ValueError: In case of mismatch between given number of inputs and
        expectations of the model.
  """
    assert isinstance(inputs, iterator_ops.EagerIterator)
    outs = []
    num_samples = 0
    if verbose == 1:
        progbar = generic_utils.Progbar(target=steps)
    for step_index in range(steps):
        # Get data from the iterator.
        try:
            next_element = inputs.get_next()
        except errors.OutOfRangeError:
            logging.warning(
                'Your dataset iterator ran out of data; interrupting testing. '
                'Make sure that your dataset can generate at least `steps` batches '
                '(in this case, %d batches).', steps)
            break

        if not isinstance(next_element,
                          (list, tuple)) or len(next_element) != 2:
            raise ValueError(
                'Please provide data as a list or tuple of 2 elements '
                ' - input and target pair. Received %s' % next_element)
        x, y = next_element

        # Validate and standardize data.
        x, y, sample_weights = model._standardize_user_data(x, y)

        # Calculate model output, loss values.
        loss_outs, loss, loss_metrics = _model_loss(
            model, x, y, sample_weights=sample_weights, training=False)
        metrics_results = _eager_metrics_fn(model, loss_outs, y)
        batch_outs = []
        for _, v in zip(model.metrics_names,
                        [backend.mean(loss)] + loss_metrics + metrics_results):
            batch_outs.append(tensor_util.constant_value(v))

        # Get current step size.
        if isinstance(x, list):
            step_size = x[0].get_shape().as_list()[0]
        else:
            step_size = x.get_shape().as_list()[0]

        # Accumulate results in output array.
        if not isinstance(batch_outs, list):
            batch_outs = [batch_outs]
        if step_index == 0:
            for _ in enumerate(batch_outs):
                outs.append(0.)
        for i, batch_out in enumerate(batch_outs):
            outs[i] += batch_out * step_size

        # Calculate sample size.
        num_samples += step_size
        if verbose == 1:
            progbar.update(step_index + 1)

    # Average the accumulated totals once the step loop has finished.
    for i in range(len(outs)):
        outs[i] /= num_samples
    if len(outs) == 1:
        return outs[0]
    return outs
Example #31
def sparse_top_k_categorical_accuracy(y_true, y_pred, k=5):
  return K.mean(
      nn.in_top_k(y_pred,
                  math_ops.cast(math_ops.reduce_max(y_true, axis=-1), 'int32'),
                  k),
      axis=-1)
Example #32
def mean_squared_logarithmic_error(y_true, y_pred):  # pylint: disable=missing-docstring
    y_pred = ops.convert_to_tensor(y_pred)
    y_true = math_ops.cast(y_true, y_pred.dtype)
    first_log = math_ops.log(K.clip(y_pred, K.epsilon(), None) + 1.)
    second_log = math_ops.log(K.clip(y_true, K.epsilon(), None) + 1.)
    return K.mean(math_ops.squared_difference(first_log, second_log), axis=-1)
Example #33
def hinge(y_true, y_pred):
  return K.mean(math_ops.maximum(1. - y_true * y_pred, 0.), axis=-1)
Example #34
def binary_crossentropy(y_true, y_pred):
  return K.mean(K.binary_crossentropy(y_true, y_pred), axis=-1)
Example #35
def poisson(y_true, y_pred):
  return K.mean(y_pred - y_true * math_ops.log(y_pred + K.epsilon()), axis=-1)
Example #36
def mean_absolute_error(y_true, y_pred):
  return K.mean(math_ops.abs(y_pred - y_true), axis=-1)
Example #37
def mean_absolute_percentage_error(y_true, y_pred):
  diff = math_ops.abs(
      (y_true - y_pred) / K.clip(math_ops.abs(y_true), K.epsilon(), None))
  return 100. * K.mean(diff, axis=-1)
Example #38
def apply_local_cmvn(feats, epsilon=1e-9):
    ''' feats: (NHWC) '''
    mean = tf.expand_dims(keras_backend.mean(feats, axis=1), axis=1)
    var = tf.expand_dims(keras_backend.var(feats, axis=1), axis=1)
    feats = (feats - mean) * tf.rsqrt(var + epsilon)
    return feats
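
Usage sketch for the helper above (hedged: `tf.rsqrt` is TF 1.x API; under TF 2.x use `tf.math.rsqrt`, and `keras_backend` is assumed to be `tensorflow.keras.backend`). Features are NHWC, i.e. (batch, time, freq, channels), and the statistics are taken per utterance over the time axis:

import tensorflow as tf
from tensorflow.keras import backend as keras_backend

feats = tf.random.normal([2, 100, 40, 1])  # (batch, time, freq, channels)
normed = apply_local_cmvn(feats)
print(normed.shape)  # (2, 100, 40, 1); mean ~0, variance ~1 along time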
Example #39
def squared_hinge(y_true, y_pred):
  return K.mean(
      math_ops.square(math_ops.maximum(1. - y_true * y_pred, 0.)), axis=-1)
Example #40
def _model_loss(model,
                inputs,
                targets,
                output_loss_metrics=None,
                sample_weights=None,
                training=False):
  """Calculates the loss for a given model.

  Arguments:
      model: The model on which metrics are being calculated.
      inputs: Either a dictionary of inputs to the model or a list of input
        arrays.
      targets: List of target arrays.
      output_loss_metrics: List of metrics that are used to aggregate output
        loss values.
      sample_weights: Optional list of sample weight arrays.
      training: Whether the model should be run in inference or training mode.

  Returns:
     Returns the model output, total loss, loss value calculated using the
     specified loss function and masks for each output. The total loss includes
     regularization losses and applies masking and sample weighting
     to the loss value.
  """
  total_loss = 0
  kwargs = {}
  if model._expects_training_arg:
    kwargs['training'] = training
  if len(inputs) == 1 and not isinstance(inputs, dict):
    inputs = inputs[0]

  if model._compute_output_and_mask_jointly:
    outs, masks = model._call_and_compute_mask(inputs, **kwargs)
    masks = nest.flatten(masks)
  else:
    outs = model.call(inputs, **kwargs)
    masks = None

  outs = nest.flatten(outs)
  if masks is None:
    masks = [None for _ in outs]
  targets = nest.flatten(targets)

  loss_metrics = []
  aggregated_loss_metrics = []
  with backend.name_scope('loss'):
    for i, loss_fn in enumerate(model.loss_functions):
      if sample_weights:
        weights = sample_weights[i]
      else:
        weights = None
      mask = masks[i]
      with backend.name_scope(model.output_names[i] + '_loss'):
        if isinstance(loss_fn, losses_module.Loss):
          if mask is not None:
            mask = math_ops.cast(mask, outs[i].dtype)
            # Update weights with mask.
            if weights is None:
              weights = mask
            else:
              # Update dimensions of weights to match with mask if possible.
              mask, _, weights = squeeze_or_expand_dimensions(
                  mask, None, weights)
              weights *= mask
          output_loss = loss_fn(targets[i], outs[i], sample_weight=weights)
        else:
          weighted_masked_fn = training_utils.weighted_masked_objective(loss_fn)
          output_loss = weighted_masked_fn(
              targets[i], outs[i], weights, mask=mask)

      # If the number of outputs is 1 then we don't append the loss metric
      # associated with each model output. When there are multiple outputs
      # associated with a model, each output's loss is calculated and returned
      # as part of the loss_metrics.
      if len(model.outputs) > 1:
        loss_metrics.append(backend.mean(output_loss))

        if output_loss_metrics is not None:
          # Keep track of the stateful loss result.
          aggregated_loss_metrics.append(
              training_utils.call_metric_function(
                  output_loss_metrics[i],
                  targets[i],
                  outs[i],
                  weights=weights,
                  mask=mask))

      loss_weight = model.loss_weights_list[i]
      if total_loss is None:
        total_loss = loss_weight * output_loss
      else:
        total_loss += loss_weight * output_loss

    total_loss = backend.mean(total_loss)
    # Add regularization losses
    custom_losses = model.losses
    if custom_losses:
      total_loss += math_ops.add_n(custom_losses)
    model._clear_losses()

  return outs, total_loss, loss_metrics, aggregated_loss_metrics, masks
Example #41
def poisson(y_true, y_pred):
    y_pred = ops.convert_to_tensor(y_pred)
    y_true = math_ops.cast(y_true, y_pred.dtype)
    return K.mean(y_pred - y_true * math_ops.log(y_pred + K.epsilon()),
                  axis=-1)
Example #42
def iterator_fit_loop(model,
                      inputs,
                      class_weight,
                      steps_per_epoch,
                      callback_model,
                      out_labels,
                      epoch_logs,
                      val_inputs=None,
                      val_targets=None,
                      val_sample_weights=None,
                      epochs=1,
                      verbose=1,
                      callbacks=None,
                      callback_metrics=None,
                      validation_steps=None,
                      do_validation=False):
    """Fit function for eager execution when input is given as dataset iterator.

  Updates the given epoch logs.

  Arguments:
      model: Instance of the `Model`.
      inputs: Input dataset iterator.
      class_weight: Optional class-weight array to weight the importance of
          samples in `inputs` based on the class they belong to, as conveyed by
          the targets from the `inputs` iterator.
      steps_per_epoch: Total number of steps (batches of samples)
          before declaring one epoch finished and starting the
          next epoch.
      callback_model: Instance of `Model` to callback.
      out_labels: Output labels generated from model metric names.
      epoch_logs: Dictionary of logs from every epoch.
      val_inputs: Input data for validation.
      val_targets: Target data for validation.
      val_sample_weights: Sample weight data for validation.
      epochs: Number of times to iterate over the data.
      verbose: Verbosity mode, 0, 1 or 2.
      callbacks: List of callbacks to be called during training.
      callback_metrics: List of strings, the display names of the metrics
          passed to the callbacks. They should be the
          concatenation of list the display names of the outputs of
           `f` and the list of display names of the outputs of `f_val`.
      validation_steps: Number of steps to run validation for (only if doing
        validation from data tensors). Ignored with default value of `None`.
      do_validation: Boolean value indicating whether we should do validation.

  Raises:
      ValueError: In case of mismatch between given number of inputs and
        expectations of the model.
  """
    assert isinstance(inputs, iterator_ops.EagerIterator)
    for step_index in range(steps_per_epoch):
        batch_logs = {}
        batch_logs['batch'] = step_index
        batch_logs['size'] = 1
        callbacks.on_batch_begin(step_index, batch_logs)

        # Get data from the iterator.
        try:
            next_element = inputs.get_next()
        except errors.OutOfRangeError:
            logging.warning(
                'Your dataset iterator ran out of data; '
                'interrupting training. Make sure that your dataset'
                ' can generate at least `steps_per_epoch * epochs` '
                'batches (in this case, %d batches).' %
                (steps_per_epoch * epochs))
            break

        if not isinstance(next_element,
                          (list, tuple)) or len(next_element) != 2:
            raise ValueError(
                'Please provide data as a list or tuple of 2 elements '
                ' - input and target pair. Received %s' % next_element)
        x, y = next_element

        # Validate and standardize data.
        x, y, sample_weights = model._standardize_user_data(
            x, y, class_weight=class_weight)
        if sample_weights:
            sample_weights = [
                ops.convert_to_tensor(val, dtype=backend.floatx())
                if val is not None else None for val in sample_weights
            ]

        if step_index == 0 and not callback_metrics:
            out_labels = model.metrics_names
            if do_validation:
                callback_metrics = copy.copy(out_labels) + [
                    'val_' + n for n in out_labels
                ]
            else:
                callback_metrics = copy.copy(out_labels)
            callbacks.set_params({
                'epochs': epochs,
                'steps': steps_per_epoch,
                'verbose': verbose,
                'do_validation': do_validation,
                'metrics': callback_metrics or [],
            })

        # Train model.
        outs, loss, loss_metrics = _process_single_batch(
            model, x, y, sample_weights=sample_weights, training=True)
        if not isinstance(outs, list):
            outs = [outs]

        # Calculate metrics.
        for l, o in zip(out_labels, outs):
            batch_logs[l] = o
        # Required for eager execution
        metrics_results = _eager_metrics_fn(model, outs, y)
        batch_logs['loss'] = tensor_util.constant_value(backend.mean(loss))

        for k, v in zip(model.metrics_names,
                        [backend.mean(loss)] + loss_metrics + metrics_results):
            batch_logs[k] = tensor_util.constant_value(v)
        callbacks.on_batch_end(step_index, batch_logs)
        if callback_model.stop_training:
            break

        if step_index == steps_per_epoch - 1:
            if do_validation:
                val_outs = test_loop(model,
                                     val_inputs,
                                     val_targets,
                                     sample_weights=val_sample_weights,
                                     steps=validation_steps,
                                     verbose=0)
                if not isinstance(val_outs, list):
                    val_outs = [val_outs]
                # Same labels assumed.
                for l, o in zip(out_labels, val_outs):
                    epoch_logs['val_' + l] = o
Example #43
def poisson(y_true, y_pred):
  return K.mean(y_pred - y_true * math_ops.log(y_pred + K.epsilon()), axis=-1)
Example #44
 def mapemae(y_true, y_pred):
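     # NOTE: `scale` and `mae_div` are free variables captured from the
     # enclosing scope; they are not defined in this snippet.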
     mae = math_ops.abs(y_true - y_pred)
     mape = mae / K.clip(math_ops.abs(y_true), K.epsilon(), None)
     return scale * K.mean(mape + mae / mae_div, axis=-1)
Example #45
def mean_squared_error(y_true, y_pred):
  return K.mean(math_ops.square(y_pred - y_true), axis=-1)
Example #46
 def call(self, inputs):
   if self.data_format == 'channels_last':
     return backend.mean(inputs, axis=[1, 2, 3])
   else:
     return backend.mean(inputs, axis=[2, 3, 4])
Example #47
def _model_loss(model, inputs, targets, sample_weights=None, training=False):
    """Calculates the loss for a given model.

  Arguments:
      model: The model on which metrics are being calculated.
      inputs: Either a dictionary of inputs to the model or a list of input
        arrays.
      targets: List of target arrays.
      sample_weights: Optional list of sample weight arrays.
      training: Whether the model should be run in inference or training mode.

  Returns:
     Returns the model output, total loss, loss value calculated using the
     specified loss function and masks for each output. The total loss includes
     regularization losses and applies masking and sample weighting
     to the loss value.
  """
    total_loss = 0
    kwargs = {}
    if model._expects_training_arg:
        kwargs['training'] = training
    if len(inputs) == 1 and not isinstance(inputs, dict):
        inputs = inputs[0]

    if model._compute_output_and_mask_jointly:
        outs, masks = model._call_and_compute_mask(inputs, **kwargs)
        masks = generic_utils.to_list(masks)
    else:
        outs = model.call(inputs, **kwargs)
        masks = None

    outs = generic_utils.to_list(outs)
    if masks is None:
        masks = [None for _ in outs]
    targets = generic_utils.to_list(targets)

    loss_metrics = []
    with backend.name_scope('loss'):
        for i, loss_fn in enumerate(model.loss_functions):
            if sample_weights:
                weights = sample_weights[i]
            else:
                weights = None
            mask = masks[i]

            weighted_masked_fn = training_utils.weighted_masked_objective(
                loss_fn)
            with backend.name_scope(model.output_names[i] + '_loss'):
                output_loss = weighted_masked_fn(targets[i],
                                                 outs[i],
                                                 weights,
                                                 mask=mask)
            # If the number of outputs is 1 then we don't append the loss metric
            # associated with each model output. When there are multiple outputs
            # associated with a model, each output's loss is calculated and returned
            # as part of the loss_metrics.
            if len(model.outputs) > 1:
                loss_metrics.append(backend.mean(output_loss))

            loss_weight = model.loss_weights_list[i]
            if total_loss is None:
                total_loss = loss_weight * output_loss
            else:
                total_loss += loss_weight * output_loss

        total_loss = backend.mean(total_loss)
        # Add regularization losses
        custom_losses = []
        for layer in model.layers:
            if layer.losses:
                custom_losses += layer.losses

        if custom_losses:
            total_loss += sum(custom_losses)

    return outs, total_loss, loss_metrics, masks
Example #48
def top_k_categorical_accuracy(y_true, y_pred, k=5):
  return K.mean(
      nn.in_top_k(y_pred, math_ops.argmax(y_true, axis=-1), k), axis=-1)
Example #49
def mean_squared_logarithmic_error(y_true, y_pred):
  first_log = math_ops.log(K.clip(y_pred, K.epsilon(), None) + 1.)
  second_log = math_ops.log(K.clip(y_true, K.epsilon(), None) + 1.)
  return K.mean(math_ops.square(first_log - second_log), axis=-1)
Example #50
 def my_mae(labels, preds):
   self.assertEqual(labels.dtype, dtypes.float64)
   self.assertEqual(preds.dtype, dtypes.float64)
   return K.mean(math_ops.abs(preds - labels), axis=-1)
Example No. 51
def iterator_test_loop(model, inputs, steps, verbose=0):
  """Test function for eager execution when input is given as dataset iterator.

  Arguments:
      model: Model instance that is being evaluated in Eager mode.
      inputs: Input dataset iterator.
      steps: Total number of steps (batches of samples) before declaring the
        evaluation round finished.
      verbose: Verbosity mode.

  Returns:
      Scalar loss (if the model has a single output and no metrics)
      or list of scalars (if the model has multiple outputs
      and/or metrics). The attribute `model.metrics_names` will give you
      the display labels for the scalar outputs.

  Raises:
      ValueError: In case of mismatch between given number of inputs and
        expectations of the model.
  """
  assert isinstance(inputs, iterator_ops.EagerIterator)
  # make sure either x,y or x,y,sample_weights is provided
  if (not isinstance(inputs.output_shapes, (list, tuple)) or
      len(inputs.output_shapes) < 2 or len(inputs.output_shapes) > 3):
    raise ValueError('Please provide either inputs and targets '
                     'or inputs, targets, and sample_weights')
  outs = []
  num_samples = 0
  if verbose == 1:
    progbar = generic_utils.Progbar(target=steps)
  for step_index in range(steps):
    # Get data from the iterator.
    try:
      next_element = inputs.get_next()
    except errors.OutOfRangeError:
      logging.warning(
          'Your dataset iterator ran out of data; interrupting testing. '
          'Make sure that your dataset can generate at least `steps` batches '
          '(in this case, %d batches). You may need to use the repeat() '
          'function when building your dataset.', steps)
      break

    if len(inputs.output_shapes) == 2:
      x, y = next_element
      sample_weights = None
    else:
      x, y, sample_weights = next_element

    # Validate and standardize data.
    x, y, sample_weights = model._standardize_user_data(
        x, y, sample_weight=sample_weights)
    x = training_utils.cast_if_floating_dtype(x)
    y = training_utils.cast_if_floating_dtype(y)
    if sample_weights:
      sample_weights = [
          training_utils.cast_if_floating_dtype(
              ops.convert_to_tensor(val, dtype=backend.floatx()))
          if val is not None else None for val in sample_weights
      ]

    if step_index == 0:
      # Get stateful metric indices. We do not do this before the `steps` loop
      # because the model is compiled only in the first iteration of this loop
      # in the deferred-build scenario.
      if hasattr(model, 'metrics'):
        for m in model.stateful_metric_functions:
          m.reset_states()
        stateful_metric_indices = [
            i for i, name in enumerate(model.metrics_names)
            if str(name) in model.stateful_metric_names
        ]
      else:
        stateful_metric_indices = []

    # Calculate model output, loss values.
    loss_outs, loss, loss_metrics, masks = _model_loss(
        model, x, y, sample_weights=sample_weights, training=False)
    metrics_results = _eager_metrics_fn(
        model, loss_outs, y, sample_weights=sample_weights, masks=masks)
    batch_outs = []
    for _, v in zip(model.metrics_names,
                    [backend.mean(loss)] + loss_metrics + metrics_results):
      batch_outs.append(tensor_util.constant_value(v))

    # Get current step size.
    if isinstance(x, list):
      step_size = x[0].get_shape().as_list()[0]
    elif isinstance(x, dict):
      step_size = list(x.values())[0].get_shape().as_list()[0]
    else:
      step_size = x.get_shape().as_list()[0]

    # Accumulate results in output array.
    if not isinstance(batch_outs, list):
      batch_outs = [batch_outs]
    if step_index == 0:
      for _ in batch_outs:
        outs.append(0.)
    for i, batch_out in enumerate(batch_outs):
      if i in stateful_metric_indices:
        outs[i] = batch_out
      else:
        outs[i] += batch_out * step_size

    # Calculate sample size.
    num_samples += step_size
    if verbose == 1:
      progbar.update(step_index + 1)

  for i in range(len(outs)):
    if i not in stateful_metric_indices:
      outs[i] /= num_samples
  if len(outs) == 1:
    return outs[0]
  return outs
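The accumulation at the end is a size-weighted average: each non-stateful output is scaled by its batch size before being summed, then divided by the total sample count, so a short final batch does not skew the result. A small sketch of that bookkeeping, with made-up batch losses:

# Hypothetical per-batch mean losses and batch sizes (last batch is short).
batch_losses = [0.50, 0.40, 0.70]
batch_sizes = [32, 32, 8]

total, num_samples = 0.0, 0
for loss, size in zip(batch_losses, batch_sizes):
  total += loss * size   # mirrors outs[i] += batch_out * step_size
  num_samples += size
print(total / num_samples)  # ~0.478, not the naive mean 0.533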
Example No. 52
def sparse_top_k_categorical_accuracy(y_true, y_pred, k=5):
    # reduce_max over the last axis collapses a (batch, 1) column of sparse
    # integer labels into the (batch,) vector of labels that in_top_k expects.
    return K.mean(
        nn.in_top_k(y_pred,
                    math_ops.cast(math_ops.reduce_max(y_true, axis=-1), 'int32'),
                    k),
        axis=-1)
Example No. 53
def _model_loss(model, inputs, targets, sample_weights=None, training=False):
  """Calculates the loss for a given model.

  Arguments:
      model: The model on which metrics are being calculated.
      inputs: Either a dictionary of inputs to the model or a list of input
        arrays.
      targets: List of target arrays.
      sample_weights: Optional list of sample weight arrays.
      training: Whether the model should be run in inference or training mode.

  Returns:
     Returns the model output, total loss, loss value calculated using the
     specified loss function and masks for each output. The total loss includes
     regularization losses and applies masking and sample weighting
     to the loss value.
  """
  total_loss = None
  kwargs = {}
  if model._expects_training_arg:
    kwargs['training'] = training
  if len(inputs) == 1 and not isinstance(inputs, dict):
    inputs = inputs[0]

  if model._compute_output_and_mask_jointly:
    outs, masks = model._call_and_compute_mask(inputs, **kwargs)
    masks = generic_utils.to_list(masks)
  else:
    outs = model.call(inputs, **kwargs)
    masks = None

  outs = generic_utils.to_list(outs)
  if masks is None:
    masks = [None for _ in outs]
  targets = generic_utils.to_list(targets)

  loss_metrics = []
  with backend.name_scope('loss'):
    for i, loss_fn in enumerate(model.loss_functions):
      if sample_weights:
        weights = sample_weights[i]
      else:
        weights = None
      mask = masks[i]

      weighted_masked_fn = training_utils.weighted_masked_objective(loss_fn)
      with backend.name_scope(model.output_names[i] + '_loss'):
        output_loss = weighted_masked_fn(
            targets[i], outs[i], weights, mask=mask)
      # If the number of outputs is 1 then we don't append the loss metric
      # associated with each model output. When there are multiple outputs
      # associated with a model, each output's loss is calculated and returned
      # as part of the loss_metrics.
      if len(model.outputs) > 1:
        loss_metrics.append(backend.mean(output_loss))

      loss_weight = model.loss_weights_list[i]
      if total_loss is None:
        total_loss = loss_weight * output_loss
      else:
        total_loss += loss_weight * output_loss

    total_loss = backend.mean(total_loss)
    # Add regularization losses
    custom_losses = []
    for layer in model.layers:
      if layer.losses:
        custom_losses += layer.losses

    if custom_losses:
      total_loss += sum(custom_losses)

  return outs, total_loss, loss_metrics, masks
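Reduced to a sketch with hypothetical numbers, the loop above scales each output's weighted, masked loss by its entry in loss_weights_list, sums the per-output terms, averages, and only then adds the collected layer regularization losses:

import numpy as np

# Hypothetical two-output model: per-sample losses for each output head.
output_losses = [np.array([0.2, 0.4]), np.array([1.0, 0.6])]
loss_weights = [1.0, 0.5]      # stand-in for model.loss_weights_list
regularization = [0.01, 0.03]  # stand-in for the collected layer.losses

total = sum(w * l for w, l in zip(loss_weights, output_losses))
total = total.mean()           # backend.mean(total_loss)
total += sum(regularization)   # custom_losses are added after the mean
print(total)  # 0.7 + 0.04 = 0.74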
Example No. 54
def binary_accuracy(y_true, y_pred, threshold=0.5):
    threshold = math_ops.cast(threshold, y_pred.dtype)
    y_pred = math_ops.cast(y_pred > threshold, y_pred.dtype)
    return K.mean(math_ops.equal(y_true, y_pred), axis=-1)
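A quick numpy check of the thresholding, on hypothetical probabilities: predictions are binarized with a strict > comparison before being compared with the labels, so a score exactly at the threshold counts as class 0.

import numpy as np

y_true = np.array([1.0, 0.0, 1.0, 0.0])
y_pred = np.array([0.9, 0.4, 0.5, 0.2])  # note: 0.5 is not > 0.5

binarized = (y_pred > 0.5).astype(y_pred.dtype)
print(np.mean(binarized == y_true))  # 0.75: the 0.5 score lands on class 0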
Example No. 55
def mean_squared_percentage_error(y_true, y_pred):
    diff = math_ops.square(
        (y_true - y_pred) / K.clip(y_true, K.epsilon(), None))
    return 100. * K.mean(diff, axis=-1)
Example No. 56
def squared_hinge(y_true, y_pred):
  return K.mean(
      math_ops.square(math_ops.maximum(1. - y_true * y_pred, 0.)), axis=-1)
Example No. 58
 def mspemse(y_true, y_pred):
     # Blend of mean squared percentage error (MSPE) and a rescaled MSE term;
     # `scale` and `mse_div` are closure variables from the enclosing scope.
     mse = math_ops.square(y_true - y_pred)
     mspe = mse / K.clip(math_ops.square(y_true), K.epsilon(), None)
     return scale * K.mean(mspe + mse / mse_div, axis=-1)
Example No. 59
def sparse_top_k_categorical_accuracy(y_true, y_pred, k=5):
  # If the shape of y_true is (num_samples, 1), squeeze to (num_samples,)
  if len(K.int_shape(y_true)) == len(K.int_shape(y_pred)):
    y_true = array_ops.squeeze(y_true, [-1])

  return K.mean(nn.in_top_k(y_pred, math_ops.cast(y_true, 'int32'), k), axis=-1)
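The squeeze matters because in_top_k expects a rank-1 vector of integer labels. A numpy sketch of the shape handling, with toy labels:

import numpy as np

y_pred = np.random.rand(4, 10)           # (num_samples, num_classes)
y_true = np.array([[3], [7], [1], [0]])  # (num_samples, 1): same rank as y_pred

if y_true.ndim == y_pred.ndim:  # mirrors the K.int_shape comparison above
  y_true = np.squeeze(y_true, axis=-1)
print(y_true.shape)  # (4,) -- the rank-1 form in_top_k expects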
Example No. 60
 def my_mae(labels, preds):
   self.assertEqual(labels.dtype, dtypes.int32)
   self.assertEqual(preds.dtype, dtypes.float32)
   labels = math_ops.cast(labels, preds.dtype)
   return K.mean(math_ops.abs(preds - labels), axis=-1)