Example #1
# Context: this helper comes from TensorFlow's Keras eager training path
# (tensorflow/python/keras/engine/training_eager.py). The imports below are
# roughly the ones that TF 2.0-era module uses; exact paths vary by version.
# _model_loss is a private helper defined earlier in the same file.
from tensorflow.python.eager.backprop import GradientTape
from tensorflow.python.keras import backend
from tensorflow.python.keras.mixed_precision.experimental import loss_scale_optimizer
from tensorflow.python.platform import tf_logging as logging


def _process_single_batch(model,
                          inputs,
                          targets,
                          output_loss_metrics=None,
                          sample_weights=None,
                          training=False):
    """Calculate the loss and gradient for one input batch.

     The model weights are updated if training is set to True.

  Arguments:
      model: Model whose loss has to be calculated.
      inputs: List of input arrays.
      targets: List of target arrays.
      output_loss_metrics: List of metrics that are used to aggregated output
        loss values.
      sample_weights: Optional list of sample weight arrays.
      training: The boolean represents if the weights of the model are updated.
              'fit' methods will set this to True while 'evaluate' methods will
              set this to False.

  Returns:
      output of the model, total loss, the loss and the mask
      associated with each output.

  Raises:
      ValueError: If the model has no loss to optimize.
  """
    with backend.eager_learning_phase_scope(1 if training else 0):
        with GradientTape() as tape:
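            # Record the forward pass on the tape so that gradients of the
            # loss can be computed below.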
            outs, total_loss, output_losses, masks = (_model_loss(
                model,
                inputs,
                targets,
                output_loss_metrics=output_loss_metrics,
                sample_weights=sample_weights,
                training=training))
            if total_loss is None:
                raise ValueError('The model cannot be run '
                                 'because it has no loss to optimize.')
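            # Mixed-precision training: scale the loss up before computing
            # gradients so small float16 gradients do not underflow to zero.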
            if isinstance(model.optimizer,
                          loss_scale_optimizer.LossScaleOptimizer):
                # TODO(reedwm): Make loss_scale public instead of accessing private
                # _loss_scale attribute.
                loss_scale = model.optimizer._loss_scale()
                scaled_total_loss = loss_scale_optimizer.scale_loss(
                    total_loss, loss_scale)
            else:
                loss_scale = None
                scaled_total_loss = total_loss
        if training:
            if not model.trainable_weights:
                logging.warning(
                    'The list of trainable weights is empty. Make sure that'
                    ' you are not setting model.trainable to False before '
                    'compiling the model.')
            else:
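                # Differentiate the (scaled) loss with respect to the trainable
                # weights, undo the loss scaling, and apply the update.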
                grads = tape.gradient(scaled_total_loss,
                                      model.trainable_weights)
                if loss_scale is not None:
                    grads = loss_scale_optimizer.unscale_grads(
                        grads, loss_scale)
                model.optimizer.apply_gradients(
                    zip(grads, model.trainable_weights))
        return outs, total_loss, output_losses, masks
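
For context, here is a minimal sketch of how a per-batch helper like this is typically driven. In TensorFlow itself, _process_single_batch is called by the eager train_on_batch/fit paths rather than by user code; the num_epochs and batches names below are hypothetical stand-ins for your own loop state.

# Hypothetical driver loop (a sketch, not TensorFlow's own code).
# `batches` is assumed to yield (inputs, targets) tensor pairs and
# `model` to be a compiled tf.keras model running eagerly.
for epoch in range(num_epochs):
    for inputs, targets in batches:
        outs, total_loss, output_losses, masks = _process_single_batch(
            model,
            [inputs],       # the helper expects lists of input arrays
            [targets],      # ...and lists of target arrays
            training=True)  # True: gradients are applied to the weights
    print('epoch %d, last batch loss: %.4f' % (epoch, float(total_loss)))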