Example #1
def test_loop(model, inputs, targets,
              sample_weights=None,
              batch_size=None,
              verbose=0,
              steps=None):
  """Test function for eager execution.

  Arguments:
      model: Model instance that is being evaluated in Eager mode.
      inputs: List of input arrays.
      targets: List of target arrays.
      sample_weights: Optional list of sample weight arrays.
      batch_size: integer batch size or `None`.
      verbose: verbosity mode.
      steps: Total number of steps (batches of samples)
          before declaring predictions finished.
          Ignored with the default value of `None`.

  Returns:
      Scalar loss (if the model has a single output and no metrics)
      or list of scalars (if the model has multiple outputs
      and/or metrics). The attribute `model.metrics_names` will give you
      the display labels for the scalar outputs.
  """
  inputs, steps = training_utils.convert_to_iterator(
      x=inputs,
      y=targets,
      sample_weights=sample_weights,
      batch_size=batch_size,
      steps_per_epoch=steps,
      is_validation=True)
  with backend.learning_phase_scope(0):
    return iterator_test_loop(model, inputs, steps, verbose=verbose)
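All of these examples revolve around the same primitive: `backend.learning_phase_scope` temporarily pins the Keras learning phase (0 = inference, 1 = training), which layers such as `Dropout` and `BatchNormalization` consult to choose between train-time and test-time behavior. A minimal sketch, assuming the private `tensorflow.python.keras.backend` module from the same TF era as these examples:

from tensorflow.python.keras import backend

# Pin the phase to inference: Dropout becomes a no-op and
# BatchNormalization uses its moving statistics.
with backend.learning_phase_scope(0):
    assert backend.learning_phase() == 0
# On exit, the previous learning phase is restored.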
Example #2
def predict_loop(model, inputs,
                 batch_size=32,
                 verbose=0,
                 steps=None):
  """Predict function for eager execution.

  Arguments:
      model: Instance of `Model`.
      inputs: List of input arrays.
      batch_size: integer batch size.
      verbose: verbosity mode.
      steps: Total number of steps (batches of samples)
          before declaring `_predict_loop` finished.
          Ignored with the default value of `None`.

  Returns:
      Array of predictions (if the model has a single output)
      or list of arrays of predictions
      (if the model has multiple outputs).
  """
  with backend.learning_phase_scope(0):
    if steps is not None:
      return iterator_predict_loop(model, inputs, steps, verbose=verbose)
    else:
      return batch_predict_loop(
          model, inputs, batch_size=batch_size, verbose=verbose)
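The dispatch above is the whole contract: callers that know the number of batches pass `steps`, everyone else passes `batch_size`. A hypothetical call, assuming a compiled `model` and a NumPy array `x_test` (both names are illustrative):

# Runs inference with the learning phase pinned to 0.
preds = predict_loop(model, [x_test], batch_size=64, verbose=1)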
Example #3
def save(model, filepath, overwrite, include_optimizer, signatures=None):
    """Saves a model as a SavedModel to the filepath.

  Args:
    model: Keras model instance to be saved.
    filepath: String path to save the model.
    overwrite: whether to overwrite the existing filepath.
    include_optimizer: If True, save the model's optimizer state.
    signatures: Signatures to save with the SavedModel. Applicable to the 'tf'
      format only. Please see the `signatures` argument in `tf.saved_model.save`
      for details.

  Raises:
    ValueError: if the model's inputs have not been defined.
  """
    # If file exists and should not be overwritten.
    if not overwrite and os.path.exists(filepath):
        proceed = ask_to_proceed_with_overwrite(filepath)
        if not proceed:
            return

    if _should_skip_serialization(model):
        saving_utils.raise_model_input_error(model)

    if not include_optimizer:
        orig_optimizer = model.optimizer
        model.optimizer = None

    # Trace all functions and signatures with `training=0` instead of using the
    # default learning phase placeholder.
    with K.learning_phase_scope(0):
        save_lib.save(model, filepath, signatures)

    if not include_optimizer:
        model.optimizer = orig_optimizer
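Note the detach/restore dance around the optimizer: rather than teaching the serializer to skip it, the function temporarily sets `model.optimizer` to None, saves, then restores it. Since the restore is not in a `try`/`finally`, an exception inside `save_lib.save` would leave the model without its optimizer. A hypothetical invocation (the path is illustrative):

save(model, '/tmp/my_saved_model', overwrite=True,
     include_optimizer=False)  # optimizer state omitted from the SavedModel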
Example #4
def predict_loop(model, inputs, batch_size=32, verbose=0, steps=None):
    """Predict function for eager execution.

  Arguments:
      model: Instance of `Model`.
      inputs: List of input arrays.
      batch_size: integer batch size.
      verbose: verbosity mode.
      steps: Total number of steps (batches of samples)
          before declaring `_predict_loop` finished.
          Ignored with the default value of `None`.

  Returns:
      Array of predictions (if the model has a single output)
      or list of arrays of predictions
      (if the model has multiple outputs).
  """
    with backend.learning_phase_scope(0):
        if steps is not None:
            return iterator_predict_loop(model, inputs, steps, verbose=verbose)
        else:
            return batch_predict_loop(model,
                                      inputs,
                                      batch_size=batch_size,
                                      verbose=verbose)
Example #5
def test_loop(model,
              inputs,
              targets,
              sample_weights=None,
              batch_size=None,
              verbose=0,
              steps=None):
    """Test function for eager execution.

  Arguments:
      model: Model instance that is being evaluated in Eager mode.
      inputs: List of input arrays.
      targets: List of target arrays.
      sample_weights: Optional list of sample weight arrays.
      batch_size: integer batch size or `None`.
      verbose: verbosity mode.
      steps: Total number of steps (batches of samples)
          before declaring predictions finished.
          Ignored with the default value of `None`.

  Returns:
      Scalar loss (if the model has a single output and no metrics)
      or list of scalars (if the model has multiple outputs
      and/or metrics). The attribute `model.metrics_names` will give you
      the display labels for the scalar outputs.
  """
    inputs, steps = training_utils.convert_to_iterator(
        x=inputs,
        y=targets,
        sample_weights=sample_weights,
        batch_size=batch_size,
        steps_per_epoch=steps,
        is_validation=True)
    with backend.learning_phase_scope(0):
        return iterator_test_loop(model, inputs, steps, verbose=verbose)
Example #6
def _process_single_batch(model,
                          inputs,
                          targets,
                          output_loss_metrics=None,
                          sample_weights=None,
                          training=False):
    """Calculate the loss and gradient for one input batch.

     The model weights are updated if training is set to True.

  Arguments:
      model: Model whose loss has to be calculated.
      inputs: List of input arrays.
      targets: List of target arrays.
      output_loss_metrics: List of metrics used to aggregate output
        loss values.
      sample_weights: Optional list of sample weight arrays.
      training: Whether the weights of the model should be updated.
          'fit' methods will set this to True, while 'evaluate' methods
          will set it to False.

  Returns:
      Output of the model, the total loss, and the loss and mask
      associated with each output.

  Raises:
      ValueError: If the model has no loss to optimize.
  """
    with backend.learning_phase_scope(1 if training else 0):
        with GradientTape() as tape:
            outs, loss, loss_metrics, aggregated_loss_metrics, masks = (
                _model_loss(
                    model,
                    inputs,
                    targets,
                    output_loss_metrics=output_loss_metrics,
                    sample_weights=sample_weights,
                    training=training))
            if loss is None:
                raise ValueError('The model cannot be run '
                                 'because it has no loss to optimize.')
        if training:
            if not model._collected_trainable_weights:
                logging.warning(
                    'The list of trainable weights is empty. Make sure that'
                    ' you are not setting model.trainable to False before '
                    'compiling the model.')
            else:
                grads = tape.gradient(loss, model._collected_trainable_weights)
                model.optimizer.apply_gradients(
                    zip(grads, model._collected_trainable_weights))
        return outs, loss, loss_metrics, aggregated_loss_metrics, masks
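Stripped of the Keras bookkeeping, `_process_single_batch` is the canonical eager train step: a forward pass recorded under a `GradientTape`, then gradients applied to the trainable weights. A self-contained sketch of the same pattern using only the public TF 2.x API (all names below are illustrative, not the internals used above):

import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
optimizer = tf.keras.optimizers.SGD(0.01)
loss_fn = tf.keras.losses.MeanSquaredError()

x = tf.random.normal([8, 4])
y = tf.random.normal([8, 1])

with tf.GradientTape() as tape:  # record the forward pass
    loss = loss_fn(y, model(x, training=True))
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))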
Example #7
def _process_single_batch(model,
                          inputs,
                          targets,
                          output_loss_metrics=None,
                          sample_weights=None,
                          training=False):
  """Calculate the loss and gradient for one input batch.

     The model weights are updated if training is set to True.

  Arguments:
      model: Model whose loss has to be calculated.
      inputs: List of input arrays.
      targets: List of target arrays.
      output_loss_metrics: List of metrics used to aggregate output
        loss values.
      sample_weights: Optional list of sample weight arrays.
      training: Whether the weights of the model should be updated.
          'fit' methods will set this to True, while 'evaluate' methods
          will set it to False.

  Returns:
      Output of the model, the total loss, and the loss and mask
      associated with each output.

  Raises:
      ValueError: If the model has no loss to optimize.
  """
  with backend.learning_phase_scope(1 if training else 0):
    with GradientTape() as tape:
      outs, loss, loss_metrics, aggregated_loss_metrics, masks = (
          _model_loss(
              model,
              inputs,
              targets,
              output_loss_metrics=output_loss_metrics,
              sample_weights=sample_weights,
              training=training))
      if loss is None:
        raise ValueError('The model cannot be run '
                         'because it has no loss to optimize.')
    if training:
      if not model._collected_trainable_weights:
        logging.warning('The list of trainable weights is empty. Make sure that'
                        ' you are not setting model.trainable to False before '
                        'compiling the model.')
      else:
        grads = tape.gradient(loss, model._collected_trainable_weights)
        model.optimizer.apply_gradients(zip(grads,
                                            model._collected_trainable_weights))
    return outs, loss, loss_metrics, aggregated_loss_metrics, masks
Example #8
def save(model,
         filepath,
         overwrite,
         include_optimizer,
         signatures=None,
         options=None):
    """Saves a model as a SavedModel to the filepath.

  Args:
    model: Keras model instance to be saved.
    filepath: String path to save the model.
    overwrite: whether to overwrite the existing filepath.
    include_optimizer: If True, save the model's optimizer state.
    signatures: Signatures to save with the SavedModel. Applicable to the 'tf'
      format only. Please see the `signatures` argument in `tf.saved_model.save`
      for details.
    options: Optional `tf.saved_model.SaveOptions` object that specifies
      options for saving to SavedModel.

  Raises:
    ValueError: if the model's inputs have not been defined.
  """
    # If file exists and should not be overwritten.
    if not overwrite and os.path.exists(filepath):
        proceed = ask_to_proceed_with_overwrite(filepath)
        if not proceed:
            return

    if save_impl.should_skip_serialization(model):
        saving_utils.raise_model_input_error(model)

    if not include_optimizer:
        orig_optimizer = model.optimizer
        model.optimizer = None

    # Trace all functions and signatures with `training=0` instead of using the
    # default learning phase placeholder.
    with K.learning_phase_scope(0):
        # When saving a model involving batch norm layer within a strategy scope,
        # the replica context is not available when calling `add_update()`, and thus
        # we use the default replica context here.
        with distribution_strategy_context._get_default_replica_context():  # pylint: disable=protected-access
            save_lib.save(model, filepath, signatures, options)

    if not include_optimizer:
        model.optimizer = orig_optimizer
Example #9
def fit_loop(model,
             inputs,
             targets,
             sample_weights=None,
             class_weight=None,
             val_inputs=None,
             val_targets=None,
             val_sample_weights=None,
             batch_size=None,
             epochs=1,
             verbose=1,
             callbacks=None,
             shuffle=True,
             initial_epoch=0,
             steps_per_epoch=None,
             validation_steps=None):
    """Fit function for eager execution.

  Arguments:
      model: Instance of the model that is being executed in Eager mode.
      inputs: List of input arrays.
      targets: List of target arrays.
      sample_weights: Optional list of sample weight arrays.
      class_weight: Optional class-weight array to weight the importance of
          samples in `inputs` based on the class they belong to, as conveyed by
          `targets`.
      val_inputs: Input data for validation.
      val_targets: Target data for validation.
      val_sample_weights: Sample weight data for validation.
      batch_size: Integer batch size or None if unknown.
      epochs: Number of times to iterate over the data.
      verbose: Verbosity mode, 0, 1, or 2.
      callbacks: List of callbacks to be called during training.
      shuffle: Whether to shuffle the data at the beginning of each epoch.
      initial_epoch: Epoch at which to start training
          (useful for resuming a previous training run).
      steps_per_epoch: Total number of steps (batches of samples)
          before declaring one epoch finished and starting the
          next epoch. Ignored with the default value of `None`.
      validation_steps: Number of steps to run validation for (only if doing
        validation from data tensors). Ignored with default value of `None`.

  Returns:
      `History` object.

  Raises:
    ValueError: In case of invalid argument values.
  """
    # Convert training inputs to an EagerIterator
    inputs, steps_per_epoch = training_utils.convert_to_iterator(
        x=inputs,
        y=targets,
        sample_weights=sample_weights,
        batch_size=batch_size,
        steps_per_epoch=steps_per_epoch,
        epochs=epochs,
        shuffle=shuffle)
    # Required for eager execution
    with backend.learning_phase_scope(1):
        do_validation = val_inputs is not None
        callbacks = cbks.configure_callbacks(
            callbacks,
            model,
            do_validation=do_validation,
            batch_size=batch_size,
            epochs=epochs,
            steps_per_epoch=steps_per_epoch,
            val_inputs=val_inputs,
            val_targets=val_targets,
            val_sample_weights=val_sample_weights,
            validation_steps=validation_steps,
            verbose=verbose)

        # Create metric wrapper for the losses.
        output_loss_metrics = []
        for i in range(len(model.outputs)):
            loss_fn = model.loss_functions[i]
            loss_name = loss_fn.name if isinstance(
                loss_fn, losses_module.Loss) else loss_fn.__name__
            mean_wrapped_loss = metrics_module.MeanMetricWrapper(
                loss_fn, name=loss_name)
            output_loss_metrics.append(mean_wrapped_loss)

        callbacks.on_train_begin()
        for epoch in range(initial_epoch, epochs):
            if model._is_compiled:  # Model may not be compiled the first time.
                # Reset stateful metrics
                for m in model.metrics:
                    m.reset_states()

            for m in output_loss_metrics:
                m.reset_states()

            callbacks.on_epoch_begin(epoch)
            epoch_logs = {}
            iterator_fit_loop(model,
                              inputs,
                              class_weight,
                              steps_per_epoch=steps_per_epoch,
                              epoch_logs=epoch_logs,
                              val_inputs=val_inputs,
                              val_targets=val_targets,
                              val_sample_weights=val_sample_weights,
                              epochs=epochs,
                              verbose=verbose,
                              callbacks=callbacks,
                              validation_steps=validation_steps,
                              do_validation=do_validation,
                              batch_size=batch_size,
                              output_loss_metrics=output_loss_metrics)
            callbacks.on_epoch_end(epoch, epoch_logs)
            if callbacks.model.stop_training:
                break
    callbacks.on_train_end()
    return model.history
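The `MeanMetricWrapper` objects built above exist only to average per-batch loss values over an epoch. The same idea with the public API, as a minimal sketch:

import tensorflow as tf

loss_avg = tf.keras.metrics.Mean(name='loss')
for batch_loss in (0.9, 0.7, 0.5):  # per-batch loss values
    loss_avg.update_state(batch_loss)
print(float(loss_avg.result()))  # 0.7
loss_avg.reset_states()  # mirrors the per-epoch reset in the loop above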
Example #10
@contextlib.contextmanager
def distributed_scope(strategy, learning_phase):
    # Runs the body under both the distribution strategy scope and a fixed
    # Keras learning phase (1 = training, 0 = inference).
    with strategy.scope(), backend.learning_phase_scope(learning_phase):
        yield
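Usage is what you would expect from a context manager; a hypothetical sketch (the strategy construction is illustrative):

strategy = tf.distribute.MirroredStrategy()
with distributed_scope(strategy, learning_phase=1):
    # Model building and training here run under both the strategy scope
    # and a learning phase pinned to training.
    pass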
Example #11
def fit_loop(model,
             inputs,
             targets,
             sample_weights=None,
             class_weight=None,
             val_inputs=None,
             val_targets=None,
             val_sample_weights=None,
             batch_size=None,
             epochs=1,
             verbose=1,
             callbacks=None,
             shuffle=True,
             initial_epoch=0,
             steps_per_epoch=None,
             validation_steps=None):
  """Fit function for eager execution.

  Arguments:
      model: Instance of the model that is being executed in Eager mode.
      inputs: List of input arrays.
      targets: List of target arrays.
      sample_weights: Optional list of sample weight arrays.
      class_weight: Optional class-weight array to weight the importance of
          samples in `inputs` based on the class they belong to, as conveyed by
          `targets`.
      val_inputs: Input data for validation.
      val_targets: Target data for validation.
      val_sample_weights: Sample weight data for validation.
      batch_size: Integer batch size or None if unknown.
      epochs: Number of times to iterate over the data.
      verbose: Verbosity mode, 0, 1, or 2.
      callbacks: List of callbacks to be called during training.
      shuffle: Whether to shuffle the data at the beginning of each epoch.
      initial_epoch: Epoch at which to start training
          (useful for resuming a previous training run).
      steps_per_epoch: Total number of steps (batches of samples)
          before declaring one epoch finished and starting the
          next epoch. Ignored with the default value of `None`.
      validation_steps: Number of steps to run validation for (only if doing
        validation from data tensors). Ignored with default value of `None`.

  Returns:
      `History` object.

  Raises:
    ValueError: In case of invalid argument values.
  """
  # Convert training inputs to an EagerIterator
  inputs, steps_per_epoch = training_utils.convert_to_iterator(
      x=inputs,
      y=targets,
      sample_weights=sample_weights,
      batch_size=batch_size,
      steps_per_epoch=steps_per_epoch,
      epochs=epochs,
      shuffle=shuffle)
  # Required for eager execution
  with backend.learning_phase_scope(1):
    do_validation = val_inputs is not None
    callbacks = cbks.configure_callbacks(
        callbacks,
        model,
        do_validation=do_validation,
        batch_size=batch_size,
        epochs=epochs,
        steps_per_epoch=steps_per_epoch,
        val_inputs=val_inputs,
        val_targets=val_targets,
        val_sample_weights=val_sample_weights,
        validation_steps=validation_steps,
        verbose=verbose)

    callbacks.on_train_begin()
    for epoch in range(initial_epoch, epochs):
      if model._is_compiled:  # Model may not be compiled the first time.
        # Reset stateful metrics
        for m in model.stateful_metric_functions:
          m.reset_states()
      callbacks.on_epoch_begin(epoch)
      epoch_logs = {}
      iterator_fit_loop(
          model,
          inputs,
          class_weight,
          steps_per_epoch=steps_per_epoch,
          epoch_logs=epoch_logs,
          val_inputs=val_inputs,
          val_targets=val_targets,
          val_sample_weights=val_sample_weights,
          epochs=epochs,
          verbose=verbose,
          callbacks=callbacks,
          validation_steps=validation_steps,
          do_validation=do_validation,
          batch_size=batch_size)
      callbacks.on_epoch_end(epoch, epoch_logs)
      if callbacks.model.stop_training:
        break
  callbacks.on_train_end()
  return model.history
Example #12
def _export_mode(
    mode, has_saved_vars, builder, model, custom_objects, checkpoint_path,
    input_signature):
  """Exports a model, and optionally saves new vars from the clone model.

  Args:
    mode: A `tf.estimator.ModeKeys` string.
    has_saved_vars: A `boolean` indicating whether the SavedModel has already
      exported variables.
    builder: A `SavedModelBuilder` object.
    model: A `tf.keras.Model` object.
    custom_objects: A dictionary mapping string names to custom classes
      or functions.
    checkpoint_path: String path to checkpoint.
    input_signature: Nested TensorSpec containing the expected inputs. Can be
      `None`, in which case the signature will be inferred from the model.

  Raises:
    ValueError: If the train/eval mode is being exported, but the model does
      not have an optimizer.
  """
  compile_clone = (mode != mode_keys.ModeKeys.PREDICT)
  if compile_clone and not model.optimizer:
    raise ValueError(
        'Model does not have an optimizer. Cannot export mode %s' % mode)

  model_graph = ops.get_default_graph()
  with ops.Graph().as_default() as g, K.learning_phase_scope(
      mode == mode_keys.ModeKeys.TRAIN):

    if input_signature is None:
      input_tensors = None
    else:
      input_tensors = nest.map_structure(create_placeholder, input_signature)

    # Clone the model into blank graph. This will create placeholders for inputs
    # and targets.
    clone = models_lib.clone_and_build_model(
        model, input_tensors=input_tensors, custom_objects=custom_objects,
        compile_clone=compile_clone)

    # Make sure that iterations variable is added to the global step collection,
    # to ensure that, when the SavedModel graph is loaded, the iterations
    # variable is returned by `tf.compat.v1.train.get_global_step()`. This is
    # required for compatibility with the SavedModelEstimator.
    if compile_clone:
      g.add_to_collection(ops.GraphKeys.GLOBAL_STEP, clone.optimizer.iterations)

    # Extract update and train ops from train/test/predict functions.
    train_op = None
    if mode == mode_keys.ModeKeys.TRAIN:
      clone._make_train_function()  # pylint: disable=protected-access
      train_op = clone.train_function.updates_op
    elif mode == mode_keys.ModeKeys.TEST:
      clone._make_test_function()  # pylint: disable=protected-access
    else:
      clone._make_predict_function()  # pylint: disable=protected-access
    g.get_collection_ref(ops.GraphKeys.UPDATE_OPS).extend(clone.state_updates)

    with session.Session().as_default():
      clone_var_list = _get_var_list(clone)
      if has_saved_vars:
        # Confirm all variables in the clone have an entry in the checkpoint.
        status = clone.load_weights(checkpoint_path)
        status.assert_existing_objects_matched()
      else:
        # Confirm that variables between the clone and model match up exactly,
        # not counting optimizer objects. Optimizer objects are ignored because
        # if the model has not trained, the slot variables will not have been
        # created yet.
        # TODO(b/113179535): Replace with trackable equivalence.
        _assert_same_non_optimizer_objects(model, model_graph, clone, g)

        # TODO(b/113178242): Use value transfer for trackable objects.
        clone.load_weights(checkpoint_path)

        # Add graph and variables to SavedModel.
        # TODO(b/113134168): Switch to add_meta_graph_and_variables.
        clone.save_weights(checkpoint_path, save_format='tf', overwrite=True)
        builder._has_saved_variables = True  # pylint: disable=protected-access

      # Add graph to the SavedModel builder.
      builder.add_meta_graph(
          model_utils.EXPORT_TAG_MAP[mode],
          signature_def_map=_create_signature_def_map(clone, mode),
          saver=saver_lib.Saver(
              clone_var_list,
              # Allow saving Models with no variables. This is somewhat odd, but
              # it's not necessarily a bug.
              allow_empty=True),
          init_op=variables.local_variables_initializer(),
          train_op=train_op)
    return None
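`create_placeholder` is not shown in this example. A plausible sketch, assuming it simply turns each `TensorSpec` leaf into a graph placeholder (hypothetical reconstruction; the real helper may differ):

from tensorflow.python.keras import backend as K

def create_placeholder(spec):
    # One placeholder per TensorSpec leaf, so nest.map_structure(
    # create_placeholder, input_signature) mirrors the input structure.
    return K.placeholder(shape=spec.shape, dtype=spec.dtype, name=spec.name)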
Example #13
def _export_mode(
    mode, has_saved_vars, builder, model, custom_objects, checkpoint_path,
    input_signature):
  """Exports a model, and optionally saves new vars from the clone model.

  Args:
    mode: A `tf.estimator.ModeKeys` string.
    has_saved_vars: A `boolean` indicating whether the SavedModel has already
      exported variables.
    builder: A `SavedModelBuilder` object.
    model: A `tf.keras.Model` object.
    custom_objects: A dictionary mapping string names to custom classes
      or functions.
    checkpoint_path: String path to checkpoint.
    input_signature: Nested TensorSpec containing the expected inputs. Can be
      `None`, in which case the signature will be inferred from the model.

  Raises:
    ValueError: If the train/eval mode is being exported, but the model does
      not have an optimizer.
  """
  from tensorflow.python.keras import models as models_lib  # pylint: disable=g-import-not-at-top
  compile_clone = (mode != mode_keys.ModeKeys.PREDICT)
  if compile_clone and not model.optimizer:
    raise ValueError(
        'Model does not have an optimizer. Cannot export mode %s' % mode)

  model_graph = ops.get_default_graph()
  with ops.Graph().as_default() as g, K.learning_phase_scope(
      mode == mode_keys.ModeKeys.TRAIN):

    if input_signature is None:
      input_tensors = None
    else:
      input_tensors = nest.map_structure(create_placeholder, input_signature)

    # Clone the model into blank graph. This will create placeholders for inputs
    # and targets.
    clone = models_lib.clone_and_build_model(
        model, input_tensors=input_tensors, custom_objects=custom_objects,
        compile_clone=compile_clone)

    # Make sure that iterations variable is added to the global step collection,
    # to ensure that, when the SavedModel graph is loaded, the iterations
    # variable is returned by `tf.train.get_global_step()`. This is required for
    # compatibility with the SavedModelEstimator.
    if compile_clone:
      g.add_to_collection(ops.GraphKeys.GLOBAL_STEP, clone.optimizer.iterations)

    # Extract update and train ops from train/test/predict functions.
    train_op = None
    if mode == mode_keys.ModeKeys.TRAIN:
      clone._make_train_function()
      train_op = clone.train_function.updates_op
    elif mode == mode_keys.ModeKeys.TEST:
      clone._make_test_function()
    else:
      clone._make_predict_function()
    g.get_collection_ref(ops.GraphKeys.UPDATE_OPS).extend(clone.state_updates)

    with session.Session().as_default():
      clone_var_list = _get_var_list(clone)
      if has_saved_vars:
        # Confirm all variables in the clone have an entry in the checkpoint.
        status = clone.load_weights(checkpoint_path)
        status.assert_existing_objects_matched()
      else:
        # Confirm that variables between the clone and model match up exactly,
        # not counting optimizer objects. Optimizer objects are ignored because
        # if the model has not trained, the slot variables will not have been
        # created yet.
        # TODO(b/113179535): Replace with trackable equivalence.
        _assert_same_non_optimizer_objects(model, model_graph, clone, g)

        # TODO(b/113178242): Use value transfer for trackable objects.
        clone.load_weights(checkpoint_path)

        # Add graph and variables to SavedModel.
        # TODO(b/113134168): Switch to add_meta_graph_and_variables.
        clone.save_weights(checkpoint_path, save_format='tf', overwrite=True)
        builder._has_saved_variables = True

      # Add graph to the SavedModel builder.
      builder.add_meta_graph(
          model_utils.EXPORT_TAG_MAP[mode],
          signature_def_map=_create_signature_def_map(clone, mode),
          saver=saver_lib.Saver(clone_var_list),
          init_op=variables.local_variables_initializer(),
          train_op=train_op)
    return None
Example #14
@contextlib.contextmanager
def distributed_scope(strategy, learning_phase):
  # Runs the body under both the distribution strategy scope and a fixed
  # Keras learning phase (1 = training, 0 = inference).
  with strategy.scope(), K.learning_phase_scope(learning_phase):
    yield
Example #15
def fit_loop(model,
             inputs,
             targets,
             sample_weights=None,
             class_weight=None,
             val_inputs=None,
             val_targets=None,
             val_sample_weights=None,
             batch_size=None,
             epochs=1,
             verbose=1,
             callbacks=None,
             shuffle=True,
             callback_metrics=None,
             initial_epoch=0,
             steps_per_epoch=None,
             validation_steps=None):
  """Fit function for eager execution.

  Arguments:
      model: Instance of the model that is being executed in Eager mode.
      inputs: List of input arrays.
      targets: List of target arrays.
      sample_weights: Optional list of sample weight arrays.
      class_weight: Optional class-weight array to weight the importance of
          samples in `inputs` based on the class they belong to, as conveyed by
          `targets`.
      val_inputs: Input data for validation.
      val_targets: Target data for validation.
      val_sample_weights: Sample weight data for validation.
      batch_size: Integer batch size or None if unknown.
      epochs: Number of times to iterate over the data.
      verbose: Verbosity mode, 0, 1, or 2.
      callbacks: List of callbacks to be called during training.
      shuffle: Whether to shuffle the data at the beginning of each epoch.
      callback_metrics: List of strings, the display names of the metrics
          passed to the callbacks. They should be the concatenation of the
          display names of the outputs of `f` and of `f_val`.
      initial_epoch: Epoch at which to start training
          (useful for resuming a previous training run).
      steps_per_epoch: Total number of steps (batches of samples)
          before declaring one epoch finished and starting the
          next epoch. Ignored with the default value of `None`.
      validation_steps: Number of steps to run validation for (only if doing
        validation from data tensors). Ignored with default value of `None`.

  Returns:
      `History` object.

  Raises:
    ValueError: In case of invalid argument values.
  """
  # Required for eager execution
  with backend.learning_phase_scope(1):
    do_validation = False
    if val_inputs:
      do_validation = True
      if (steps_per_epoch is None and verbose and inputs and
          hasattr(inputs[0], 'shape') and hasattr(val_inputs[0], 'shape')):
        print('Train on %d samples, validate on %d samples' %
              (inputs[0].shape[0], val_inputs[0].shape[0]))

    num_train_samples = None
    out_labels = None
    if steps_per_epoch is None or model._is_compiled:
      out_labels = model.metrics_names
      if do_validation:
        callback_metrics = copy.copy(out_labels) + [
            'val_' + n for n in out_labels
        ]
      else:
        callback_metrics = copy.copy(out_labels)

    if steps_per_epoch is None:
      if sample_weights:
        feed_data = inputs + targets + sample_weights
      else:
        feed_data = inputs + targets
      num_train_samples = training_utils.check_num_samples(
          feed_data,
          batch_size=batch_size,
          steps=steps_per_epoch,
          steps_name='steps_per_epoch')

      if num_train_samples is not None:
        index_array = np.arange(num_train_samples)

    model.history = cbks.History()
    callbacks = [cbks.BaseLogger()] + (callbacks or []) + [model.history]
    if verbose:
      if steps_per_epoch is not None:
        count_mode = 'steps'
      else:
        count_mode = 'samples'
      callbacks += [cbks.ProgbarLogger(count_mode)]
    callbacks = cbks.CallbackList(callbacks)

    # it's possible to callback a different model than self
    # (used by Sequential models)
    if hasattr(model, 'callback_model') and model.callback_model:
      callback_model = model.callback_model
    else:
      callback_model = model

    callbacks.set_model(callback_model)

    callbacks.set_params({
        'batch_size': batch_size,
        'epochs': epochs,
        'steps': steps_per_epoch,
        'samples': num_train_samples,
        'verbose': verbose,
        'do_validation': do_validation,
        'metrics': callback_metrics or [],
    })
    callbacks.on_train_begin()
    callback_model.stop_training = False
    for cbk in callbacks:
      if not val_inputs:
        cbk.validation_data = []
      elif isinstance(val_inputs, iterator_ops.EagerIterator):
        cbk.validation_data = val_inputs
      elif val_sample_weights:
        cbk.validation_data = val_inputs + val_targets + val_sample_weights
      else:
        cbk.validation_data = val_inputs + val_targets

    for epoch in range(initial_epoch, epochs):
      callbacks.on_epoch_begin(epoch)
      epoch_logs = {}

      if steps_per_epoch is not None:
        iterator_fit_loop(
            model,
            inputs,
            class_weight,
            steps_per_epoch=steps_per_epoch,
            callback_model=callback_model,
            out_labels=out_labels,
            epoch_logs=epoch_logs,
            val_inputs=val_inputs,
            val_targets=val_targets,
            val_sample_weights=val_sample_weights,
            epochs=epochs,
            verbose=verbose,
            callbacks=callbacks,
            callback_metrics=callback_metrics,
            validation_steps=validation_steps,
            do_validation=do_validation)
      else:
        batch_fit_loop(
            model,
            inputs,
            targets,
            epoch_logs=epoch_logs,
            index_array=index_array,
            out_labels=out_labels,
            callback_model=callback_model,
            batch_size=batch_size,
            sample_weights=sample_weights,
            val_inputs=val_inputs,
            val_targets=val_targets,
            val_sample_weights=val_sample_weights,
            callbacks=callbacks,
            shuffle=shuffle,
            num_train_samples=num_train_samples,
            do_validation=do_validation)
      callbacks.on_epoch_end(epoch, epoch_logs)
      if callback_model.stop_training:
        break
  callbacks.on_train_end()
  return model.history
Example #16
def trace_with_training(value, fn=fn):
    # `args`, `kwargs`, and `self` are captured from the enclosing method:
    # the training flag is written into the call arguments, then the
    # function is traced with the learning phase pinned to the same value.
    utils.set_training_arg(value, self._training_arg_index, args, kwargs)
    with K.learning_phase_scope(value):
        fn.get_concrete_function(*args, **kwargs)
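`utils.set_training_arg` is likewise not shown. A plausible reconstruction of its contract (hypothetical; the real helper may differ): the training flag is either a positional argument at `self._training_arg_index` or the `training` keyword, and since the snippet above ignores the return value, the helper presumably mutates the call arguments in place.

def set_training_arg(training, index, args, kwargs):
    # Hypothetical sketch: write the training flag into the call arguments.
    if index is None:
        kwargs['training'] = training  # keyword-only training argument
    else:
        args[index] = training  # assumes `args` is a mutable list
    return args, kwargs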
Example #17
def fit_loop(model,
             inputs,
             targets,
             sample_weights=None,
             class_weight=None,
             val_inputs=None,
             val_targets=None,
             val_sample_weights=None,
             batch_size=None,
             epochs=1,
             verbose=1,
             callbacks=None,
             shuffle=True,
             callback_metrics=None,
             initial_epoch=0,
             steps_per_epoch=None,
             validation_steps=None):
    """Fit function for eager execution.

  Arguments:
      model: Instance of the model that is being executed in Eager mode.
      inputs: List of input arrays.
      targets: List of target arrays.
      sample_weights: Optional list of sample weight arrays.
      class_weight: Optional class-weight array to weight the importance of
          samples in `inputs` based on the class they belong to, as conveyed by
          `targets`.
      val_inputs: Input data for validation.
      val_targets: Target data for validation.
      val_sample_weights: Sample weight data for validation.
      batch_size: Integer batch size or None if unknown.
      epochs: Number of times to iterate over the data.
      verbose: Verbosity mode, 0, 1, or 2.
      callbacks: List of callbacks to be called during training.
      shuffle: Whether to shuffle the data at the beginning of each epoch.
      callback_metrics: List of strings, the display names of the metrics
          passed to the callbacks. They should be the concatenation of the
          display names of the outputs of `f` and of `f_val`.
      initial_epoch: Epoch at which to start training
          (useful for resuming a previous training run).
      steps_per_epoch: Total number of steps (batches of samples)
          before declaring one epoch finished and starting the
          next epoch. Ignored with the default value of `None`.
      validation_steps: Number of steps to run validation for (only if doing
        validation from data tensors). Ignored with default value of `None`.

  Returns:
      `History` object.

  Raises:
    ValueError: In case of invalid argument values.
  """
    # Convert training inputs to an EagerIterator
    inputs, steps_per_epoch = training_utils.convert_to_iterator(
        x=inputs,
        y=targets,
        sample_weights=sample_weights,
        batch_size=batch_size,
        steps_per_epoch=steps_per_epoch,
        epochs=epochs,
        shuffle=shuffle)
    # Required for eager execution
    with backend.learning_phase_scope(1):
        do_validation = False
        if val_inputs:
            do_validation = True

        num_train_samples = None
        out_labels = None
        if model._is_compiled:
            out_labels = model.metrics_names
            if do_validation:
                callback_metrics = copy.copy(out_labels) + [
                    'val_' + n for n in out_labels
                ]
            else:
                callback_metrics = copy.copy(out_labels)

        model.history = cbks.History()
        callbacks = [cbks.BaseLogger()] + (callbacks or []) + [model.history]
        if verbose:
            callbacks += [cbks.ProgbarLogger('steps')]
        callbacks = cbks.CallbackList(callbacks)

        # it's possible to callback a different model than self
        # (used by Sequential models)
        if hasattr(model, 'callback_model') and model.callback_model:
            callback_model = model.callback_model
        else:
            callback_model = model

        callbacks.set_model(callback_model)

        callback_params = {
            'batch_size': batch_size,
            'epochs': epochs,
            'steps': steps_per_epoch,
            'samples': num_train_samples,
            'verbose': verbose,
            'do_validation': do_validation,
            'metrics': callback_metrics or [],
        }
        if validation_steps:
            callback_params.update({'validation_steps': validation_steps})
        callbacks.set_params(callback_params)

        for cbk in callbacks:
            if not val_inputs:
                cbk.validation_data = []
            elif isinstance(val_inputs, iterator_ops.EagerIterator):
                cbk.validation_data = val_inputs
            elif val_sample_weights:
                cbk.validation_data = val_inputs + val_targets + val_sample_weights
            else:
                cbk.validation_data = val_inputs + val_targets
        # validation_data must be set before on_train_begin() is called
        # so that TensorboardCallback can validate its input
        callbacks.on_train_begin()
        callback_model.stop_training = False

        for epoch in range(initial_epoch, epochs):
            callbacks.on_epoch_begin(epoch)
            epoch_logs = {}
            iterator_fit_loop(model,
                              inputs,
                              class_weight,
                              steps_per_epoch=steps_per_epoch,
                              callback_model=callback_model,
                              out_labels=out_labels,
                              epoch_logs=epoch_logs,
                              val_inputs=val_inputs,
                              val_targets=val_targets,
                              val_sample_weights=val_sample_weights,
                              epochs=epochs,
                              verbose=verbose,
                              callbacks=callbacks,
                              callback_metrics=callback_metrics,
                              validation_steps=validation_steps,
                              do_validation=do_validation,
                              batch_size=batch_size)
            callbacks.on_epoch_end(epoch, epoch_logs)
            if callback_model.stop_training:
                break
    callbacks.on_train_end()
    return model.history
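The `History` object returned here is the same one that `Model.fit` hands back through the public API, e.g. (names illustrative):

history = model.fit(x_train, y_train, epochs=2)
print(history.history['loss'])  # per-epoch averaged loss values
assert history is model.history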