Code example #1: predict
 def predict(self,
             model,
             x,
             batch_size=None,
             verbose=0,
             steps=None,
             callbacks=None,
             **kwargs):
     """Predict loop for Distribution Strategies."""
     dist_utils.validate_inputs(x=x, y=None)
     batch_size, steps = self._process_batch_and_step_size(
         model, x, batch_size, steps, ModeKeys.PREDICT)
     batch_size = model._validate_or_infer_batch_size(batch_size, steps, x)
     dataset = model._distribution_standardize_user_data(
         x, batch_size=batch_size, allow_partial_batch=True)
     if dist_utils.is_tpu_strategy(model._distribution_strategy):
         steps = training_utils.infer_steps_for_dataset(dataset,
                                                        steps,
                                                        steps_name='steps')
         if steps is None:
             raise ValueError(
                  'Number of steps could not be inferred from the data, '
                 'please pass the steps argument.')
         if not context.executing_eagerly():
             return experimental_tpu_predict_loop(model,
                                                  dataset,
                                                  verbose=verbose,
                                                  steps=steps,
                                                  callbacks=callbacks)
     return training_arrays.predict_loop(model,
                                         dataset,
                                         batch_size=batch_size,
                                         verbose=verbose,
                                         steps=steps,
                                         callbacks=callbacks)
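
The loops above are internal Keras helpers and are not called directly. A minimal usage sketch of how the distributed predict path is typically reached (assumed setup; in the TF/Keras versions these snippets come from, compiling a model inside a tf.distribute strategy scope makes model.predict dispatch to the distributed loop):

import numpy as np
import tensorflow as tf

# Build and compile the model inside a strategy scope so that the model is
# associated with a distribution strategy and the distributed paths are used.
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
    model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
    model.compile(optimizer='sgd', loss='mse')

x = np.random.rand(32, 4).astype(np.float32)
# On these code paths, predict() is serviced by the distributed predict loop.
preds = model.predict(x, batch_size=8)
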
Code example #2: predict_distributed
def predict_distributed(model,
                        x=None,
                        batch_size=None,
                        verbose=0,
                        steps=None,
                        callbacks=None):
  """Predict loop for Distribution Strategies."""
  distributed_training_utils.validate_inputs(x, None)
  first_x_value = nest.flatten(x)[0]
  if isinstance(first_x_value, np.ndarray):
    steps, batch_size = distributed_training_utils.get_input_params(
        model._distribution_strategy, first_x_value, steps,
        batch_size, mode=ModeKeys.PREDICT)
  batch_size = model._validate_or_infer_batch_size(batch_size, steps, x)
  dataset = model._distribution_standardize_user_data(
      x,
      batch_size=batch_size,
      allow_partial_batch=True)
  if distributed_training_utils.is_tpu_strategy(model._distribution_strategy):
    return experimental_tpu_predict_loop(
        model, dataset, verbose=verbose, steps=steps, callbacks=callbacks)
  else:
    return training_arrays.predict_loop(
        model,
        dataset,
        batch_size=batch_size,
        verbose=verbose,
        steps=steps,
        callbacks=callbacks)
Code example #3: predict_distributed
def predict_distributed(model,
                        x=None,
                        batch_size=None,
                        verbose=0,
                        steps=None,
                        callbacks=None):
  """Predict loop for Distribution Strategies."""
  distributed_training_utils.validate_inputs(
      x, None, model._distribution_strategy, allow_partial_batch=True)
  first_x_value = nest.flatten(x)[0]
  if isinstance(first_x_value, np.ndarray):
    steps, batch_size = distributed_training_utils.get_input_params(
        model._distribution_strategy, first_x_value, steps,
        batch_size, mode=ModeKeys.PREDICT)
  batch_size = model._validate_or_infer_batch_size(batch_size, steps, x)
  dataset = model._distribution_standardize_user_data(
      x,
      batch_size=batch_size,
      allow_partial_batch=True)
  if distributed_training_utils.is_tpu_strategy(model._distribution_strategy):
    return experimental_tpu_predict_loop(
        model, dataset, verbose=verbose, steps=steps, callbacks=callbacks)
  else:
    return training_arrays.predict_loop(
        model,
        dataset,
        batch_size=batch_size,
        verbose=verbose,
        steps=steps,
        callbacks=callbacks)
Code example #4: evaluate_distributed
def evaluate_distributed(model,
                         x=None,
                         y=None,
                         batch_size=None,
                         verbose=1,
                         sample_weight=None,
                         steps=None,
                         callbacks=None):
    """Evaluate loop for Distribution Strategies."""
    distributed_training_utils.validate_inputs(x, y,
                                               model._distribution_strategy)
    first_x_value = nest.flatten(x)[0]
    if isinstance(first_x_value, np.ndarray):
        steps, batch_size = distributed_training_utils.get_input_params(
            model._distribution_strategy, first_x_value, steps, batch_size)
    batch_size = model._validate_or_infer_batch_size(batch_size, steps, x)
    dataset = model._distribution_standardize_user_data(
        x, y, sample_weight=sample_weight, batch_size=batch_size)

    if distributed_training_utils.is_tpu_strategy(
            model._distribution_strategy):
        return experimental_tpu_test_loop(model,
                                          dataset,
                                          verbose=verbose,
                                          steps=steps,
                                          callbacks=callbacks)
    else:
        return training_arrays.test_loop(model,
                                         inputs=dataset,
                                         batch_size=batch_size,
                                         verbose=verbose,
                                         steps=steps,
                                         callbacks=callbacks)
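
A similar hedged sketch for the evaluate path (assumed setup; with a model compiled under a tf.distribute strategy scope, model.evaluate is routed to the distributed evaluate loop in these versions):

import numpy as np
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
    model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
    model.compile(optimizer='sgd', loss='mse')

x = np.random.rand(32, 4).astype(np.float32)
y = np.random.rand(32, 1).astype(np.float32)
# Serviced by the distributed evaluate loop shown above.
loss = model.evaluate(x, y, batch_size=8, verbose=0)
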
Code example #5: evaluate_distributed
def evaluate_distributed(model,
                         x=None,
                         y=None,
                         batch_size=None,
                         verbose=1,
                         sample_weight=None,
                         steps=None,
                         callbacks=None):
  """Evaluate loop for Distribution Strategies."""
  distributed_training_utils.validate_inputs(x, y, model._distribution_strategy)
  first_x_value = nest.flatten(x)[0]
  if isinstance(first_x_value, np.ndarray):
    steps, batch_size = distributed_training_utils.get_input_params(
        model._distribution_strategy, first_x_value, steps, batch_size)
  batch_size = model._validate_or_infer_batch_size(batch_size, steps, x)
  dataset = model._distribution_standardize_user_data(
      x, y,
      sample_weight=sample_weight,
      batch_size=batch_size)

  if distributed_training_utils.is_tpu_strategy(model._distribution_strategy):
    return experimental_tpu_test_loop(
        model, dataset, verbose=verbose, steps=steps, callbacks=callbacks)
  else:
    return training_arrays.test_loop(
        model,
        inputs=dataset,
        batch_size=batch_size,
        verbose=verbose,
        steps=steps,
        callbacks=callbacks)
Code example #6: predict
 def predict(self,
             model,
             x,
             batch_size=None,
             verbose=0,
             steps=None,
             callbacks=None,
             **kwargs):
     """Predict loop for Distribution Strategies."""
     dist_utils.validate_inputs(x=x, y=None)
     batch_size, steps = self._process_batch_and_step_size(
         model, x, batch_size, steps, ModeKeys.PREDICT)
     batch_size = model._validate_or_infer_batch_size(batch_size, steps, x)
     dataset = model._distribution_standardize_user_data(
         x, batch_size=batch_size, allow_partial_batch=True)
     if dist_utils.is_tpu_strategy(model._distribution_strategy):
         return experimental_tpu_predict_loop(model,
                                              dataset,
                                              verbose=verbose,
                                              steps=steps,
                                              callbacks=callbacks)
     else:
         return training_arrays.predict_loop(model,
                                             dataset,
                                             batch_size=batch_size,
                                             verbose=verbose,
                                             steps=steps,
                                             callbacks=callbacks)
Code example #7: evaluate
    def evaluate(self,
                 model,
                 x=None,
                 y=None,
                 batch_size=None,
                 verbose=1,
                 sample_weight=None,
                 steps=None,
                 callbacks=None,
                 **kwargs):
        """Evaluate loop for Distribution Strategies."""
        dist_utils.validate_inputs(x, y)
        batch_size, steps = self._process_batch_and_step_size(
            model, x, batch_size, steps, ModeKeys.TEST)
        batch_size = model._validate_or_infer_batch_size(batch_size, steps, x)
        dataset = model._distribution_standardize_user_data(
            x,
            y,
            sample_weight=sample_weight,
            batch_size=batch_size,
            allow_partial_batch=True)

        if dist_utils.is_tpu_strategy(model._distribution_strategy):
            return experimental_tpu_test_loop(model,
                                              dataset,
                                              verbose=verbose,
                                              steps=steps,
                                              callbacks=callbacks)
        else:
            return training_arrays.test_loop(model,
                                             inputs=dataset,
                                             batch_size=batch_size,
                                             verbose=verbose,
                                             steps=steps,
                                             callbacks=callbacks)
Code example #8: evaluate
    def evaluate(self,
                 model,
                 x=None,
                 y=None,
                 batch_size=None,
                 verbose=1,
                 sample_weight=None,
                 steps=None,
                 callbacks=None,
                 **kwargs):
        """Evaluate loop for Distribution Strategies."""
        dist_utils.validate_inputs(x, y)
        batch_size, steps = dist_utils.process_batch_and_step_size(
            model._distribution_strategy, x, batch_size, steps, ModeKeys.TEST)
        batch_size = model._validate_or_infer_batch_size(batch_size, steps, x)
        dataset = model._distribution_standardize_user_data(
            x,
            y,
            sample_weight=sample_weight,
            batch_size=batch_size,
            allow_partial_batch=True)

        if dist_utils.is_tpu_strategy(model._distribution_strategy):
            steps = training_utils.infer_steps_for_dataset(model,
                                                           dataset,
                                                           steps,
                                                           steps_name='steps')
            if steps is None:
                raise ValueError(
                    'Number of steps could not be inferred from the data, '
                    'please pass the steps argument.')

            if not context.executing_eagerly():
                # Run TPU evaluation in a custom loop in graph mode.
                return experimental_tpu_test_loop(model,
                                                  dataset,
                                                  verbose=verbose,
                                                  steps=steps,
                                                  callbacks=callbacks)

        return training_arrays_v1.test_loop(model,
                                            inputs=dataset,
                                            batch_size=batch_size,
                                            verbose=verbose,
                                            steps=steps,
                                            callbacks=callbacks)
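
The TPU branches above (experimental_tpu_test_loop / experimental_tpu_predict_loop) are only taken when the model's distribution strategy is a TPU strategy. A hedged setup sketch, assuming an actual TPU worker is reachable (the resolver address below is a placeholder, not from the original code):

import tensorflow as tf

# Hypothetical TPU endpoint; replace with a real TPU worker address.
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='grpc://10.0.0.2:8470')
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.experimental.TPUStrategy(resolver)

with strategy.scope():
    model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
    model.compile(optimizer='sgd', loss='mse')
# With a TPU strategy attached, evaluate()/predict() take the
# experimental_tpu_* branches shown in the snippets above.
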
Code example #9: fit_distributed
def fit_distributed(model,
                    x=None,
                    y=None,
                    batch_size=None,
                    epochs=1,
                    verbose=1,
                    callbacks=None,
                    validation_split=0.,
                    validation_data=None,
                    shuffle=True,
                    class_weight=None,
                    sample_weight=None,
                    initial_epoch=0,
                    steps_per_epoch=None,
                    validation_steps=None,
                    validation_freq=1):
  """Fit loop for Distribution Strategies."""
  distributed_training_utils.validate_callbacks(callbacks, model.optimizer)
  distributed_training_utils.validate_inputs(
      x, y)

  first_x_value = nest.flatten(x)[0]
  if isinstance(first_x_value, np.ndarray):
    # Until support for partial batch is implemented across all
    # functions and distribution strategies, we pass `mode` to selectively
    # relax the constraint to consume all the training samples.
    steps_per_epoch, batch_size = (
        distributed_training_utils.get_input_params(
            model._distribution_strategy, first_x_value, steps_per_epoch,
            batch_size, mode=ModeKeys.TRAIN))
  batch_size = model._validate_or_infer_batch_size(
      batch_size, steps_per_epoch, x)
  dataset = model._distribution_standardize_user_data(
      x, y,
      sample_weight=sample_weight,
      class_weight=class_weight,
      batch_size=batch_size,
      validation_split=validation_split,
      shuffle=shuffle,
      epochs=epochs)
  if not distributed_training_utils.is_distributing_by_cloning(model):
    with model._distribution_strategy.scope():
      (dataset, _, _) = model._standardize_user_data(
          dataset,
          sample_weight=sample_weight,
          class_weight=class_weight,
          batch_size=batch_size,
          validation_split=validation_split,
          shuffle=shuffle)

  val_dataset = None
  if validation_data:
    val_x, val_y, val_sample_weights = model._unpack_validation_data(
        validation_data)
    distributed_training_utils.validate_inputs(val_x, val_y)
    first_valx_value = nest.flatten(val_x)[0]
    if isinstance(first_valx_value, np.ndarray):
      validation_steps, _ = distributed_training_utils.get_input_params(
          model._distribution_strategy, first_valx_value, validation_steps,
          batch_size, mode=ModeKeys.TEST)
    val_dataset = model._distribution_standardize_user_data(
        val_x, val_y,
        sample_weight=val_sample_weights,
        class_weight=None,
        batch_size=batch_size,
        validation_split=validation_split,
        shuffle=shuffle,
        allow_partial_batch=True)
  elif validation_split:
    raise ValueError('validation_split argument is not supported with '
                     'distribution strategies.')

  if distributed_training_utils.is_tpu_strategy(model._distribution_strategy):
    return experimental_tpu_fit_loop(
        model,
        dataset,
        epochs=epochs,
        verbose=verbose,
        callbacks=callbacks,
        val_dataset=val_dataset,
        initial_epoch=initial_epoch,
        steps_per_epoch=steps_per_epoch,
        validation_steps=validation_steps,
        validation_freq=validation_freq)
  else:
    return training_arrays.fit_loop(
        model,
        dataset,
        batch_size=batch_size,
        epochs=epochs,
        verbose=verbose,
        callbacks=callbacks,
        val_inputs=val_dataset,
        shuffle=shuffle,
        initial_epoch=initial_epoch,
        steps_per_epoch=steps_per_epoch,
        validation_steps=validation_steps,
        validation_freq=validation_freq,
        steps_name='steps_per_epoch')
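
A minimal usage sketch for the fit path (assumed setup; note that, as the code above shows, validation_split raises a ValueError under distribution strategies, so validation data must be passed explicitly):

import numpy as np
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
    model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
    model.compile(optimizer='sgd', loss='mse')

x = np.random.rand(64, 4).astype(np.float32)
y = np.random.rand(64, 1).astype(np.float32)
x_val = np.random.rand(16, 4).astype(np.float32)
y_val = np.random.rand(16, 1).astype(np.float32)

# validation_split is not supported with distribution strategies,
# so pass validation_data instead.
history = model.fit(x, y,
                    batch_size=8,
                    epochs=2,
                    validation_data=(x_val, y_val),
                    verbose=0)
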
Code example #10: fit
    def fit(self,
            model,
            x=None,
            y=None,
            batch_size=None,
            epochs=1,
            verbose=1,
            callbacks=None,
            validation_split=0.,
            validation_data=None,
            shuffle=True,
            class_weight=None,
            sample_weight=None,
            initial_epoch=0,
            steps_per_epoch=None,
            validation_steps=None,
            validation_freq=1,
            **kwargs):
        """Fit loop for Distribution Strategies."""
        dist_utils.validate_callbacks(input_callbacks=callbacks,
                                      optimizer=model.optimizer)
        dist_utils.validate_inputs(x, y)

        batch_size, steps_per_epoch = dist_utils.process_batch_and_step_size(
            model._distribution_strategy, x, batch_size, steps_per_epoch,
            ModeKeys.TRAIN)
        batch_size = model._validate_or_infer_batch_size(
            batch_size, steps_per_epoch, x)
        dataset = model._distribution_standardize_user_data(
            x,
            y,
            sample_weight=sample_weight,
            class_weight=class_weight,
            batch_size=batch_size,
            validation_split=validation_split,
            shuffle=shuffle,
            epochs=epochs)
        if not dist_utils.is_distributing_by_cloning(model):
            with model._distribution_strategy.scope():
                (dataset, _, _) = model._standardize_user_data(
                    dataset,
                    sample_weight=sample_weight,
                    class_weight=class_weight,
                    batch_size=batch_size,
                    validation_split=validation_split,
                    shuffle=shuffle)

        val_dataset = None
        if validation_data:
            val_x, val_y, val_sample_weights = training_utils.unpack_validation_data(
                validation_data)
            dist_utils.validate_inputs(val_x, val_y)
            _, validation_steps = dist_utils.process_batch_and_step_size(
                model._distribution_strategy, val_x, batch_size,
                validation_steps, ModeKeys.TEST)

            val_dataset = model._distribution_standardize_user_data(
                val_x,
                val_y,
                sample_weight=val_sample_weights,
                class_weight=None,
                batch_size=batch_size,
                validation_split=validation_split,
                shuffle=shuffle,
                allow_partial_batch=True)
        elif validation_split:
            raise ValueError('validation_split argument is not supported with '
                             'distribution strategies.')

        if dist_utils.is_tpu_strategy(model._distribution_strategy):
            steps_per_epoch = training_utils.infer_steps_for_dataset(
                dataset, steps_per_epoch, epochs, steps_name='steps_per_epoch')
            if steps_per_epoch is None:
                raise ValueError(
                    'Number of steps could not be inferred from the data, '
                    'please pass the steps_per_epoch argument.')

            if not context.executing_eagerly():
                # Run TPU training in a custom loop in graph mode.
                return experimental_tpu_fit_loop(
                    model,
                    dataset,
                    epochs=epochs,
                    verbose=verbose,
                    callbacks=callbacks,
                    val_dataset=val_dataset,
                    initial_epoch=initial_epoch,
                    steps_per_epoch=steps_per_epoch,
                    validation_steps=validation_steps,
                    validation_freq=validation_freq)

        return training_arrays.fit_loop(model,
                                        dataset,
                                        batch_size=batch_size,
                                        epochs=epochs,
                                        verbose=verbose,
                                        callbacks=callbacks,
                                        val_inputs=val_dataset,
                                        shuffle=shuffle,
                                        initial_epoch=initial_epoch,
                                        steps_per_epoch=steps_per_epoch,
                                        validation_steps=validation_steps,
                                        validation_freq=validation_freq,
                                        steps_name='steps_per_epoch')
Code example #11: fit_distributed
def fit_distributed(model,
                    x=None,
                    y=None,
                    batch_size=None,
                    epochs=1,
                    verbose=1,
                    callbacks=None,
                    validation_split=0.,
                    validation_data=None,
                    shuffle=True,
                    class_weight=None,
                    sample_weight=None,
                    initial_epoch=0,
                    steps_per_epoch=None,
                    validation_steps=None,
                    validation_freq=1):
  """Fit loop for Distribution Strategies."""
  distributed_training_utils.validate_callbacks(callbacks, model.optimizer)
  distributed_training_utils.validate_inputs(
      x, y, model._distribution_strategy)

  first_x_value = nest.flatten(x)[0]
  if isinstance(first_x_value, np.ndarray):
    # Until support for partial batch is implemented across all
    # functions and distribution strategies, we pass `mode` to selectively
    # relax the constraint to consume all the training samples.
    steps_per_epoch, batch_size = (
        distributed_training_utils.get_input_params(
            model._distribution_strategy, first_x_value, steps_per_epoch,
            batch_size, mode=ModeKeys.TRAIN))
  batch_size = model._validate_or_infer_batch_size(
      batch_size, steps_per_epoch, x)
  dataset = model._distribution_standardize_user_data(
      x, y,
      sample_weight=sample_weight,
      class_weight=class_weight,
      batch_size=batch_size,
      validation_split=validation_split,
      shuffle=shuffle,
      repeat=True)

  val_dataset = None
  if validation_data:
    val_x, val_y, val_sample_weights = model._unpack_validation_data(
        validation_data)
    distributed_training_utils.validate_inputs(
        val_x, val_y, model._distribution_strategy)
    first_valx_value = nest.flatten(val_x)[0]
    if isinstance(first_valx_value, np.ndarray):
      validation_steps, _ = distributed_training_utils.get_input_params(
          model._distribution_strategy, first_valx_value, validation_steps,
          batch_size)
    val_dataset = model._distribution_standardize_user_data(
        val_x, val_y,
        sample_weight=val_sample_weights,
        class_weight=None,
        batch_size=batch_size,
        validation_split=validation_split,
        shuffle=shuffle)
  elif validation_split:
    raise ValueError('validation_split argument is not supported with '
                     'distribution strategies.')

  if distributed_training_utils.is_tpu_strategy(model._distribution_strategy):
    return experimental_tpu_fit_loop(
        model,
        dataset,
        epochs=epochs,
        verbose=verbose,
        callbacks=callbacks,
        val_dataset=val_dataset,
        initial_epoch=initial_epoch,
        steps_per_epoch=steps_per_epoch,
        validation_steps=validation_steps,
        validation_freq=validation_freq)
  else:
    return training_arrays.fit_loop(
        model,
        dataset,
        batch_size=batch_size,
        epochs=epochs,
        verbose=verbose,
        callbacks=callbacks,
        val_inputs=val_dataset,
        shuffle=shuffle,
        initial_epoch=initial_epoch,
        steps_per_epoch=steps_per_epoch,
        validation_steps=validation_steps,
        validation_freq=validation_freq,
        steps_name='steps_per_epoch')
Code example #12: fit
    def fit(self,
            model,
            x=None,
            y=None,
            batch_size=None,
            epochs=1,
            verbose=1,
            callbacks=None,
            validation_split=0.,
            validation_data=None,
            shuffle=True,
            class_weight=None,
            sample_weight=None,
            initial_epoch=0,
            steps_per_epoch=None,
            validation_steps=None,
            validation_freq=1,
            **kwargs):
        """Fit loop for Distribution Strategies."""
        dist_utils.validate_callbacks(input_callbacks=callbacks,
                                      optimizer=model.optimizer)
        dist_utils.validate_inputs(x, y)

        batch_size, steps_per_epoch = self._process_batch_and_step_size(
            model, x, batch_size, steps_per_epoch, ModeKeys.TRAIN)
        batch_size = model._validate_or_infer_batch_size(
            batch_size, steps_per_epoch, x)
        dataset = model._distribution_standardize_user_data(
            x,
            y,
            sample_weight=sample_weight,
            class_weight=class_weight,
            batch_size=batch_size,
            validation_split=validation_split,
            shuffle=shuffle,
            epochs=epochs)
        if not dist_utils.is_distributing_by_cloning(model):
            with model._distribution_strategy.scope():
                (dataset, _, _) = model._standardize_user_data(
                    dataset,
                    sample_weight=sample_weight,
                    class_weight=class_weight,
                    batch_size=batch_size,
                    validation_split=validation_split,
                    shuffle=shuffle)

        val_dataset = None
        if validation_data:
            val_x, val_y, val_sample_weights = model._unpack_validation_data(
                validation_data)
            dist_utils.validate_inputs(val_x, val_y)
            _, validation_steps = self._process_batch_and_step_size(
                model, val_x, batch_size, validation_steps, ModeKeys.TEST)

            val_dataset = model._distribution_standardize_user_data(
                val_x,
                val_y,
                sample_weight=val_sample_weights,
                class_weight=None,
                batch_size=batch_size,
                validation_split=validation_split,
                shuffle=shuffle,
                allow_partial_batch=True)
        elif validation_split:
            raise ValueError('validation_split argument is not supported with '
                             'distribution strategies.')

        if dist_utils.is_tpu_strategy(model._distribution_strategy):
            return experimental_tpu_fit_loop(model,
                                             dataset,
                                             epochs=epochs,
                                             verbose=verbose,
                                             callbacks=callbacks,
                                             val_dataset=val_dataset,
                                             initial_epoch=initial_epoch,
                                             steps_per_epoch=steps_per_epoch,
                                             validation_steps=validation_steps,
                                             validation_freq=validation_freq)
        else:
            return training_arrays.fit_loop(model,
                                            dataset,
                                            batch_size=batch_size,
                                            epochs=epochs,
                                            verbose=verbose,
                                            callbacks=callbacks,
                                            val_inputs=val_dataset,
                                            shuffle=shuffle,
                                            initial_epoch=initial_epoch,
                                            steps_per_epoch=steps_per_epoch,
                                            validation_steps=validation_steps,
                                            validation_freq=validation_freq,
                                            steps_name='steps_per_epoch')