def test_on_batch(model,
                  inputs,
                  targets,
                  sample_weights=None,
                  reset_metrics=True,
                  output_loss_metrics=None):
    """Calculates the loss for one input batch.

  Arguments:
      model: Model whose loss has to be calculated.
      inputs: Input batch data.
      targets: Target batch data.
      sample_weights: Sample weight batch data.
      reset_metrics: If `True`, the metrics returned will be only for this
        batch. If `False`, the metrics will be statefully accumulated across
        batches.
      output_loss_metrics: List of metrics that are used to aggregate output
        loss values.

  Returns:
      total loss, loss and metrics associated with each output.
  """
    if isinstance(inputs, collections.Sequence):
        if len(inputs) and tensor_util.is_tensor(inputs[0]):
            inputs = training_utils.cast_if_floating_dtype(inputs)
            targets = training_utils.cast_if_floating_dtype(targets)
        else:
            inputs = training_utils.cast_if_floating_dtype(
                [ops.convert_to_tensor(val) for val in inputs])
            targets = training_utils.cast_if_floating_dtype(
                [ops.convert_to_tensor(val) for val in targets])
    if sample_weights:
        sample_weights = [
            training_utils.cast_if_floating_dtype(ops.convert_to_tensor(val))
            if val is not None else None for val in sample_weights
        ]
    outs, total_loss, output_losses, aggregated_output_losses, masks = (
        _model_loss(model,
                    inputs,
                    targets,
                    sample_weights=sample_weights,
                    training=False,
                    output_loss_metrics=output_loss_metrics))
    if not isinstance(outs, list):
        outs = [outs]
    metrics_results = _eager_metrics_fn(
        model,
        outs,
        targets,
        sample_weights=sample_weights,
        masks=masks,
        return_stateful_result=not reset_metrics)
    total_loss = nest.flatten(total_loss)
    if reset_metrics:
        final_output_losses = output_losses
    else:
        final_output_losses = aggregated_output_losses
    results = total_loss + final_output_losses + metrics_results

    return [tensor_util.constant_value(v) for v in results]
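
The helper above is roughly what the public `Model.test_on_batch` dispatches to when executing eagerly. A minimal usage sketch of that public API, assuming TensorFlow 2.x (the `reset_metrics` argument may be absent on older releases):

import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer='sgd', loss='mse', metrics=['mae'])

x = np.random.rand(8, 4).astype('float32')
y = np.random.rand(8, 1).astype('float32')

# Per-batch results only (reset_metrics=True is the default).
results = model.test_on_batch(x, y)
print(dict(zip(model.metrics_names, results)))
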
def test_on_batch(model, inputs, targets, sample_weights=None):
  """Calculates the loss for one input batch.

  Arguments:
      model: Model whose loss has to be calculated.
      inputs: Input batch data.
      targets: Target batch data.
      sample_weights: Sample weight batch data.

  Returns:
      total loss, loss and metrics associated with each output.
  """
  if len(inputs) and tensor_util.is_tensor(inputs[0]):
    inputs = training_utils.cast_if_floating_dtype(inputs)
    targets = training_utils.cast_if_floating_dtype(targets)
  else:
    inputs = [
        ops.convert_to_tensor(val, dtype=backend.floatx()) for val in inputs
    ]
    targets = [
        ops.convert_to_tensor(val, dtype=backend.floatx()) for val in targets
    ]
  if sample_weights:
    sample_weights = [
        ops.convert_to_tensor(val, dtype=backend.floatx())
        if val is not None else None for val in sample_weights
    ]
  outs, loss, loss_metrics = _model_loss(
      model, inputs, targets, sample_weights=sample_weights, training=False)
  if not isinstance(outs, list):
    outs = [outs]
  metrics_results = _eager_metrics_fn(model, outs, targets)
  if not isinstance(loss, list):
    loss = [loss]
  return loss + loss_metrics + metrics_results
Example #4
def train_on_batch(model,
                   inputs,
                   targets,
                   sample_weights=None,
                   output_loss_metrics=None):
    """Calculates the loss and gradient updates for one input batch.

  Arguments:
      model: Model whose loss has to be calculated.
      inputs: Input batch data.
      targets: Target batch data.
      sample_weights: Sample weight batch data.
      output_loss_metrics: List of metrics that are used to aggregate output
        loss values.

  Returns:
      total loss and the loss associated with each output.
  """
    if isinstance(inputs, collections.Sequence):
        if len(inputs) and tensor_util.is_tensor(inputs[0]):
            inputs = training_utils.cast_if_floating_to_model_input_dtypes(
                inputs, model)
            if targets:
                targets = training_utils.cast_if_floating_dtype(targets)
        else:
            inputs = training_utils.cast_if_floating_to_model_input_dtypes(
                [ops.convert_to_tensor(val) for val in inputs], model)
            if targets:
                targets = training_utils.cast_if_floating_dtype(
                    [ops.convert_to_tensor(val) for val in targets])
    if sample_weights:
        sample_weights = [
            training_utils.cast_if_floating_dtype(ops.convert_to_tensor(val))
            if val is not None else None for val in sample_weights
        ]

    outs, total_loss, output_losses, masks = (_process_single_batch(
        model,
        inputs,
        targets,
        sample_weights=sample_weights,
        training=True,
        output_loss_metrics=output_loss_metrics))
    if not isinstance(outs, list):
        outs = [outs]
    metrics_results = _eager_metrics_fn(model,
                                        outs,
                                        targets,
                                        sample_weights=sample_weights,
                                        masks=masks)
    total_loss = nest.flatten(total_loss)
    results = total_loss + output_losses + metrics_results

    return [_non_none_constant_value(v) for v in results]
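
A minimal sketch of the corresponding public API, `Model.train_on_batch`, which performs a single gradient update and returns the same kind of loss/metric list (assumes TensorFlow 2.x; `sample_weight` is optional):

import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer='sgd', loss='mse', metrics=['mae'])

x = np.random.rand(8, 4).astype('float32')
y = np.random.rand(8, 1).astype('float32')
w = np.ones(8, dtype='float32')  # optional per-sample weights

loss_and_mae = model.train_on_batch(x, y, sample_weight=w)
print(loss_and_mae)  # [total loss, mae] after this single update
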
Example #6
def get_rnd_networks_from_variant(variant, env):
    rnd_params = variant['algorithm_params']['rnd_params']
    target_network = None
    predictor_network = None

    observation_keys = variant['policy_params']['kwargs']['observation_keys']
    if not observation_keys:
        observation_keys = env.observation_keys
    observation_shapes = OrderedDict(
        ((key, value) for key, value in env.observation_shape.items()
         if key in observation_keys))

    inputs_flat = create_inputs(observation_shapes)

    target_network, predictor_network = [], []
    for input_tensor in inputs_flat:
        if 'pixels' in input_tensor.name:  # check logic
            from softlearning.preprocessors.utils import get_convnet_preprocessor
            target_network.append(
                get_convnet_preprocessor(
                    'rnd_target_conv',
                    **rnd_params['convnet_params'])(input_tensor))
            predictor_network.append(
                get_convnet_preprocessor(
                    'rnd_predictor_conv',
                    **rnd_params['convnet_params'])(input_tensor))
        else:
            target_network.append(input_tensor)
            predictor_network.append(input_tensor)

    target_network = tf.keras.layers.Lambda(
        lambda inputs: tf.concat(training_utils.cast_if_floating_dtype(inputs),
                                 axis=-1))(target_network)

    predictor_network = tf.keras.layers.Lambda(
        lambda inputs: tf.concat(training_utils.cast_if_floating_dtype(inputs),
                                 axis=-1))(predictor_network)

    target_network = get_feedforward_preprocessor(
        'rnd_target_fc', **rnd_params['fc_params'])(target_network)

    predictor_network = get_feedforward_preprocessor(
        'rnd_predictor_fc', **rnd_params['fc_params'])(predictor_network)

    # Randomly initialize the RND target network weights
    target_network = PicklableModel(inputs_flat, target_network)
    target_network.set_weights([
        np.random.normal(0, 0.1, size=weight.shape)
        for weight in target_network.get_weights()
    ])
    predictor_network = PicklableModel(inputs_flat, predictor_network)
    return target_network, predictor_network
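
The returned pair is typically used for Random Network Distillation: the fixed, randomly initialized target network produces features the predictor is trained to imitate, and the prediction error becomes the exploration bonus. A minimal sketch of that usage; the helper name and the mean-squared reduction are assumptions, not part of the snippet above:

import tensorflow as tf

def rnd_intrinsic_reward(target_network, predictor_network, observations):
    # Per-sample squared prediction error; the target network is never trained.
    target_features = tf.stop_gradient(target_network(observations))
    predicted_features = predictor_network(observations)
    return tf.reduce_mean(
        tf.square(predicted_features - target_features), axis=-1)
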
Example #7
def predict_on_batch(model, x):
    """Returns predictions for a single batch of samples.

  Arguments:
      model: The model to predict with.
      x: Input data. It could be:
        - A Numpy array (or array-like), or a list of arrays
          (in case the model has multiple inputs).
        - A TensorFlow tensor, or a list of tensors
          (in case the model has multiple inputs).
        - A `tf.data` dataset.

  Returns:
      Numpy array(s) of predictions.

  Raises:
      ValueError: In case of mismatch between given number of inputs and
        expectations of the model.
  """
    # TODO(scottzhu): Standardization should happen in the data handlers,
    # not on a per batch basis in the *_on_batch methods
    # Validate and standardize user data.
    inputs, _, _ = model._standardize_user_data(
        x, extract_tensors_from_dataset=True)

    # If `model._distribution_strategy` is True, then we are in a replica context
    # at this point.
    inputs = training_utils.cast_if_floating_dtype(inputs)
    if isinstance(inputs, collections.Sequence):
        # Unwrap lists with only one input, as we do when training on batch
        if len(inputs) == 1:
            inputs = inputs[0]

    with backend.eager_learning_phase_scope(0):
        return model(inputs)  # pylint: disable=not-callable
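
A minimal usage sketch of the public `Model.predict_on_batch`; no `compile` call is needed for a pure forward pass (assumes TensorFlow 2.x):

import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(2, input_shape=(4,))])
x = np.random.rand(8, 4).astype('float32')
preds = model.predict_on_batch(x)  # forward pass in inference mode
print(preds.shape)  # (8, 2)
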
def train_on_batch(model, inputs, targets, sample_weights=None):
    """Calculates the loss and gradient updates for one input batch.

  Arguments:
      model: Model whose loss has to be calculated.
      inputs: Input batch data.
      targets: Target batch data.
      sample_weights: Sample weight batch data.

  Returns:
      total loss and the loss associated with each output.
  """
    if isinstance(inputs, collections.Sequence):
        if len(inputs) and tensor_util.is_tensor(inputs[0]):
            inputs = training_utils.cast_if_floating_dtype(inputs)
            targets = training_utils.cast_if_floating_dtype(targets)
        else:
            inputs = [
                ops.convert_to_tensor(val, dtype=backend.floatx())
                for val in inputs
            ]
            targets = [
                ops.convert_to_tensor(val, dtype=backend.floatx())
                for val in targets
            ]
    if sample_weights:
        sample_weights = [
            ops.convert_to_tensor(val, dtype=backend.floatx())
            if val is not None else None for val in sample_weights
        ]

    outs, loss, loss_metrics, _, masks = _process_single_batch(
        model, inputs, targets, sample_weights=sample_weights, training=True)
    if not isinstance(outs, list):
        outs = [outs]
    metrics_results = _eager_metrics_fn(model,
                                        outs,
                                        targets,
                                        sample_weights=sample_weights,
                                        masks=masks,
                                        return_stateful_result=False)
    loss = generic_utils.to_list(loss)

    return [
        tensor_util.constant_value(v)
        for v in loss + loss_metrics + metrics_results
    ]
Example #9
def test_on_batch(model,
                  inputs,
                  targets,
                  sample_weights=None,
                  output_loss_metrics=None):
    """Calculates the loss for one input batch.

  Arguments:
      model: Model whose loss has to be calculated.
      inputs: Input batch data.
      targets: Target batch data.
      sample_weights: Sample weight batch data.
      output_loss_metrics: List of metrics that are used to aggregate output
        loss values.

  Returns:
      total loss, loss and metrics associated with each output.
  """
    if isinstance(inputs, collections.Sequence):
        inputs = training_utils.cast_to_model_input_dtypes(inputs, model)
        if targets:
            targets = training_utils.cast_if_floating_dtype(targets)
    if sample_weights:
        sample_weights = [
            training_utils.cast_if_floating_dtype(ops.convert_to_tensor(val))
            if val is not None else None for val in sample_weights
        ]
    with backend.eager_learning_phase_scope(0):
        outs, total_loss, output_losses, masks = (_model_loss(
            model,
            inputs,
            targets,
            sample_weights=sample_weights,
            training=False,
            output_loss_metrics=output_loss_metrics))
    if not isinstance(outs, list):
        outs = [outs]
    metrics_results = _eager_metrics_fn(model,
                                        outs,
                                        targets,
                                        sample_weights=sample_weights,
                                        masks=masks)
    total_loss = nest.flatten(total_loss)
    results = total_loss + output_losses + metrics_results

    return results
Example #10
def train_on_batch(model, inputs, targets, sample_weights=None):
  """Calculates the loss and gradient updates for one input batch.

  Arguments:
      model: Model whose loss has to be calculated.
      inputs: Input batch data.
      targets: Target batch data.
      sample_weights: Sample weight batch data.

  Returns:
      total loss and the loss associated with each output.
  """
  if isinstance(inputs, collections.Sequence):
    if len(inputs) and tensor_util.is_tensor(inputs[0]):
      inputs = training_utils.cast_if_floating_dtype(inputs)
      targets = training_utils.cast_if_floating_dtype(targets)
    else:
      inputs = training_utils.cast_if_floating_dtype([
          ops.convert_to_tensor(val) for val in inputs
      ])
      targets = training_utils.cast_if_floating_dtype([
          ops.convert_to_tensor(val) for val in targets
      ])
  if sample_weights:
    sample_weights = [
        training_utils.cast_if_floating_dtype(ops.convert_to_tensor(val))
        if val is not None else None for val in sample_weights
    ]

  outs, loss, loss_metrics, _, masks = _process_single_batch(
      model, inputs, targets, sample_weights=sample_weights, training=True)
  if not isinstance(outs, list):
    outs = [outs]
  metrics_results = _eager_metrics_fn(
      model,
      outs,
      targets,
      sample_weights=sample_weights,
      masks=masks,
      return_stateful_result=True)
  loss = nest.flatten(loss)

  return [
      tensor_util.constant_value(v)
      for v in loss + loss_metrics + metrics_results
  ]
Example #11
 def set_inputs(self, inputs):
     """
     Set the model's inputs and output shapes according to the given inputs.
     :param inputs: input data or a piece of the input data
     :return: None
     """
     if isinstance(inputs, (list, tuple)):
         if tensor_util.is_tensor(inputs[0]):
             dummy_output_values = self.call(
                 training_utils.cast_if_floating_dtype(inputs[:1]))
         else:
             dummy_output_values = self.call([
                 ops.convert_to_tensor(v, dtype=K.floatx())
                 for v in inputs[:1]
             ])
         dummy_input_values = list(inputs[:1])
     else:
         if tensor_util.is_tensor(inputs):
             dummy_output_values = self.call(
                 training_utils.cast_if_floating_dtype(inputs[:1]))
         else:
             dummy_output_values = self.call(
                 ops.convert_to_tensor(inputs[:1], dtype=K.floatx()))
         dummy_input_values = [inputs[:1]]
     if isinstance(dummy_output_values, (list, tuple)):
         dummy_output_values = list(dummy_output_values)
     else:
         dummy_output_values = [dummy_output_values]
     self.outputs = [
         base_layer.DeferredTensor(shape=(None for _ in v.shape),
                                   dtype=v.dtype)
         for v in dummy_output_values
     ]
     self.inputs = [
         base_layer.DeferredTensor(shape=(None for _ in v.shape),
                                   dtype=v.dtype)
         for v in dummy_input_values
     ]
     self.input_names = [
         'input_%d' % (i + 1) for i in range(len(dummy_input_values))
     ]
     self.output_names = [
         'output_%d' % (i + 1) for i in range(len(dummy_output_values))
     ]
     self.built = True
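
`set_inputs` is the deferred-build path for subclassed models, whose input and output shapes are only known once the model first sees data. A rough public-API illustration of that behaviour, assuming TensorFlow 2.x:

import numpy as np
import tensorflow as tf

class TinyModel(tf.keras.Model):
    def __init__(self):
        super().__init__()
        self.dense = tf.keras.layers.Dense(3)

    def call(self, inputs):
        return self.dense(inputs)

model = TinyModel()
model.compile(optimizer='sgd', loss='mse')
x = np.random.rand(4, 5).astype('float32')
y = np.random.rand(4, 3).astype('float32')
model.train_on_batch(x, y)  # shapes are inferred from this first batch
print(model.built)  # True
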
def iterator_predict_loop(model, inputs, steps, verbose=0):
    """Predict function for eager execution when input is dataset iterator.

  Arguments:
      model: Instance of `Model`.
      inputs: Input dataset iterator.
      steps: Total number of steps (batches of samples) before declaring
          `_predict_loop` finished.
      verbose: Verbosity mode.

  Returns:
      Array of predictions (if the model has a single output)
      or list of arrays of predictions (if the model has multiple outputs).

  Raises:
      ValueError: In case of mismatch between given number of inputs and
        expectations of the model.
  """
    assert isinstance(inputs, iterator_ops.EagerIterator)
    if not isinstance(inputs.output_shapes,
                      collections.Sequence) or len(inputs.output_shapes) > 3:
        raise ValueError(
            'Please provide data as a list or tuple of 1, 2, or 3 elements'
            ' - `(input)`, or `(input, target)`, or `(input, target, '
            'sample_weights)`. Received %s. We do not use the `target` or '
            '`sample_weights` value here.' % inputs.output_shapes)
    outs = []
    if verbose == 1:
        progbar = generic_utils.Progbar(target=steps)

    for step_index in range(steps):
        # Get data from the iterator.
        try:
            next_element = inputs.get_next()
        except errors.OutOfRangeError:
            logging.warning(
                'Your dataset iterator ran out of data; interrupting prediction. '
                'Make sure that your dataset can generate at least `steps` batches '
                '(in this case, %d batches). You may need to use the repeat() '
                'function when building your dataset.', steps)
            break

        # expects a tuple, where first element of tuple represents inputs
        x = next_element[0]

        # Validate and standardize data.
        x, _, _ = model._standardize_user_data(x)
        x = training_utils.cast_if_floating_dtype(x)

        if isinstance(x, list) and len(x) == 1:
            x = x[0]

        if model._expects_training_arg:
            batch_outs = model.call(x, training=False)
        else:
            batch_outs = model.call(x)
        if not isinstance(batch_outs, list):
            batch_outs = [batch_outs]

        # We collect the results from every step and then concatenate them once
        # in the end. This is an expensive process. We are doing this because we
        # do not know the number of samples beforehand.
        if step_index == 0:
            for _ in batch_outs:
                outs.append([])
        for i, batch_out in enumerate(batch_outs):
            outs[i].append(backend.get_value(batch_out))

        if verbose == 1:
            progbar.update(step_index + 1)
    for i, out in enumerate(outs):
        outs[i] = np.concatenate(tuple(out), axis=0)
    if len(outs) == 1:
        return outs[0]
    return outs
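
The public-API counterpart of this loop is `Model.predict` on a `tf.data` dataset with an explicit `steps` count; a minimal sketch, assuming TensorFlow 2.x, with `repeat()` so the iterator cannot run out before `steps` batches:

import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
dataset = tf.data.Dataset.from_tensor_slices(
    np.random.rand(32, 4).astype('float32')).batch(8).repeat()

preds = model.predict(dataset, steps=4)  # 4 batches of 8 samples -> (32, 1)
print(preds.shape)
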
def iterator_test_loop(model, inputs, steps, verbose=0):
    """Test function for eager execution when input is given as dataset iterator.

  Arguments:
      model: Model instance that is being evaluated in Eager mode.
      inputs: Input dataset iterator.
      steps: Total number of steps (batches of samples) before declaring
        evaluation finished.
      verbose: Verbosity mode.

  Returns:
      Scalar loss (if the model has a single output and no metrics)
      or list of scalars (if the model has multiple outputs
      and/or metrics). The attribute `model.metrics_names` will give you
      the display labels for the scalar outputs.

  Raises:
      ValueError: In case of mismatch between given number of inputs and
        expectations of the model.
  """
    assert isinstance(inputs, iterator_ops.EagerIterator)
    # make sure either x,y or x,y,sample_weights is provided
    if (not isinstance(inputs.output_shapes, collections.Sequence)
            or len(inputs.output_shapes) < 2 or len(inputs.output_shapes) > 3):
        raise ValueError('Please provide either inputs and targets '
                         'or inputs, targets, and sample_weights')
    outs = []

    # Create metric wrapper for the losses.
    output_loss_metrics = []
    for i in range(len(model.outputs)):
        loss_fn = model.loss_functions[i]
        loss_name = loss_fn.name if isinstance(
            loss_fn, losses_module.Loss) else loss_fn.__name__
        mean_wrapped_loss = metrics_module.MeanMetricWrapper(loss_fn,
                                                             name=loss_name)
        output_loss_metrics.append(mean_wrapped_loss)

    num_samples = 0
    if verbose == 1:
        progbar = generic_utils.Progbar(target=steps)
    for step_index in range(steps):
        # Get data from the iterator.
        try:
            next_element = inputs.get_next()
        except errors.OutOfRangeError:
            logging.warning(
                'Your dataset iterator ran out of data; interrupting testing. '
                'Make sure that your dataset can generate at least `steps` batches '
                '(in this case, %d batches). You may need to use the repeat() '
                'function when building your dataset.', steps)
            break

        if len(inputs.output_shapes) == 2:
            x, y = next_element
            sample_weights = None
        else:
            x, y, sample_weights = next_element

        # Validate and standardize data.
        x, y, sample_weights = model._standardize_user_data(
            x, y, sample_weight=sample_weights)
        x = training_utils.cast_if_floating_dtype(x)
        y = training_utils.cast_if_floating_dtype(y)
        if sample_weights:
            sample_weights = [
                training_utils.cast_if_floating_dtype(
                    ops.convert_to_tensor(val, dtype=backend.floatx()))
                if val is not None else None for val in sample_weights
            ]

        if step_index == 0:
            # Get stateful metrics indices. We do not do this before the `steps` loop
            # because model will be compiled only in the first iteration of this loop
            # in the deferred build scenario.
            if hasattr(model, '_compile_metrics'):
                for m in model.metrics:
                    m.reset_states()
            for m in output_loss_metrics:
                m.reset_states()

        # Calculate model output, loss values.
        loss_outs, loss, _, aggregated_loss_metrics, masks = _model_loss(
            model,
            x,
            y,
            output_loss_metrics=output_loss_metrics,
            sample_weights=sample_weights,
            training=False)
        metrics_results = _eager_metrics_fn(model,
                                            loss_outs,
                                            y,
                                            sample_weights=sample_weights,
                                            masks=masks)
        batch_outs = []
        for _, v in zip(model.metrics_names, [backend.mean(loss)] +
                        aggregated_loss_metrics + metrics_results):
            batch_outs.append(tensor_util.constant_value(v))

        # Get current step size.
        if isinstance(x, list):
            step_size = x[0].get_shape().as_list()[0]
        elif isinstance(x, dict):
            step_size = list(x.values())[0].get_shape().as_list()[0]
        else:
            step_size = x.get_shape().as_list()[0]

        # Accumulate results in output array.
        if not isinstance(batch_outs, list):
            batch_outs = [batch_outs]
        if step_index == 0:
            for _ in enumerate(batch_outs):
                outs.append(0.)
        outs[0] += batch_outs[0] * step_size  # index 0 = 'loss'
        outs[1:] = batch_outs[1:]

        # Calculate sample size.
        num_samples += step_size
        if verbose == 1:
            progbar.update(step_index + 1)

    outs[0] /= num_samples  # index 0 = 'loss'
    if len(outs) == 1:
        return outs[0]
    return outs
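
The public-API counterpart is `Model.evaluate` on a `tf.data` dataset; a minimal sketch, assuming TensorFlow 2.x, again using `repeat()` so the iterator cannot be exhausted before `steps` batches:

import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer='sgd', loss='mse', metrics=['mae'])

dataset = tf.data.Dataset.from_tensor_slices(
    (np.random.rand(32, 4).astype('float32'),
     np.random.rand(32, 1).astype('float32'))).batch(8).repeat()

results = model.evaluate(dataset, steps=4, verbose=0)
print(dict(zip(model.metrics_names, results)))
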
def iterator_fit_loop(model,
                      inputs,
                      class_weight,
                      steps_per_epoch,
                      epoch_logs,
                      val_inputs=None,
                      val_targets=None,
                      val_sample_weights=None,
                      epochs=1,
                      verbose=1,
                      callbacks=None,
                      validation_steps=None,
                      do_validation=False,
                      batch_size=None,
                      output_loss_metrics=None):
    """Fit function for eager execution when input is given as dataset iterator.

  Updates the given epoch logs.

  Arguments:
      model: Instance of the `Model`.
      inputs: Input dataset iterator.
      class_weight: Optional class-weight array to weight the importance of
          samples in `inputs` based on the class they belong to, as conveyed by
          the targets from the `inputs` iterator.
      steps_per_epoch: Total number of steps (batches of samples)
          before declaring one epoch finished and starting the
          next epoch.
      epoch_logs: Dictionary of logs from every epoch.
      val_inputs: Input data for validation.
      val_targets: Target data for validation.
      val_sample_weights: Sample weight data for validation.
      epochs: Number of times to iterate over the data
      verbose: Verbosity mode, 0, 1 or 2
      callbacks: CallbackList instance. Controls callbacks during training.
      validation_steps: Number of steps to run validation for (only if doing
        validation from data tensors). Ignored with default value of `None`.
      do_validation: Boolean value indicating whether we should do validation.
      batch_size: int, val_inputs and val_targets will be evaled batch by
        batch with size batch_size if they are array.
      output_loss_metrics: List of metrics that are used to aggregate output
        loss values.

  Raises:
      ValueError: In case of mismatch between given number of inputs and
        expectations of the model.
  """
    assert isinstance(inputs, iterator_ops.EagerIterator)

    # make sure either x,y or x,y,sample_weights is provided
    if (not isinstance(inputs.output_shapes, collections.Sequence)
            or len(inputs.output_shapes) not in (2, 3)):
        raise ValueError('Please provide either inputs and targets '
                         'or inputs, targets, and sample_weights')

    for step_index in range(steps_per_epoch):
        batch_logs = {'batch': step_index, 'size': 1}
        callbacks.on_batch_begin(step_index, batch_logs)

        # Get data from the iterator.
        try:
            next_element = inputs.get_next()
        except errors.OutOfRangeError:
            logging.warning(
                'Your dataset iterator ran out of data; interrupting training. Make '
                'sure that your dataset can generate at least '
                '`steps_per_epoch * epochs` batches (in this case, %d batches). You '
                'may need to use the repeat() function when building your '
                'dataset.' % (steps_per_epoch * epochs))
            break

        if len(inputs.output_shapes) == 2:
            x, y = next_element
            sample_weights = None
        else:
            x, y, sample_weights = next_element

        # Validate and standardize data.
        x, y, sample_weights = model._standardize_user_data(
            x, y, sample_weight=sample_weights, class_weight=class_weight)
        x = training_utils.cast_if_floating_dtype(x)
        y = training_utils.cast_if_floating_dtype(y)
        if sample_weights:
            sample_weights = [
                training_utils.cast_if_floating_dtype(
                    ops.convert_to_tensor(val, dtype=backend.floatx()))
                if val is not None else None for val in sample_weights
            ]

        # Train model.
        outs, loss, _, aggregated_loss_metrics, masks = _process_single_batch(
            model,
            x,
            y,
            output_loss_metrics=output_loss_metrics,
            sample_weights=sample_weights,
            training=True)
        outs = generic_utils.to_list(outs)

        if step_index == 0:
            # Set stateful_metrics in callbacks. We do not do this before the
            # `steps_per_epoch` loop because model will be compiled only in the first
            # iteration of this loop in the deferred build scenario.
            for cbk in callbacks:
                if (isinstance(cbk, cbks.BaseLogger)
                        or isinstance(cbk, cbks.ProgbarLogger)):
                    cbk.stateful_metrics = model.metrics_names[
                        1:]  # Exclude `loss`

            callback_metrics = copy.copy(model.metrics_names)
            if do_validation:
                callback_metrics += ['val_' + n for n in model.metrics_names]
            callbacks.set_params({
                'batch_size': batch_size,
                'epochs': epochs,
                'steps': steps_per_epoch,
                'verbose': verbose,
                'do_validation': do_validation,
                'metrics': callback_metrics or [],
                'validation_steps': validation_steps
            })

        # Calculate metrics.
        for l, o in zip(model.metrics_names, outs):
            batch_logs[l] = o
        metrics_results = _eager_metrics_fn(model,
                                            outs,
                                            y,
                                            sample_weights=sample_weights,
                                            masks=masks)
        batch_logs['loss'] = tensor_util.constant_value(backend.mean(loss))

        for k, v in zip(model.metrics_names, [backend.mean(loss)] +
                        aggregated_loss_metrics + metrics_results):
            batch_logs[k] = tensor_util.constant_value(v)
        callbacks.on_batch_end(step_index, batch_logs)
        if callbacks.model.stop_training:
            break

        if step_index == steps_per_epoch - 1:
            if do_validation:
                val_outs = test_loop(model,
                                     val_inputs,
                                     val_targets,
                                     sample_weights=val_sample_weights,
                                     steps=validation_steps,
                                     verbose=0,
                                     batch_size=batch_size)
                if not isinstance(val_outs, list):
                    val_outs = [val_outs]
                # Same labels assumed.
                for l, o in zip(model.metrics_names, val_outs):
                    epoch_logs['val_' + l] = o
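
A minimal public-API sketch of the same per-epoch flow: `Model.fit` on repeated `tf.data` datasets with `steps_per_epoch` and `validation_steps` (assumes TensorFlow 2.x):

import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer='sgd', loss='mse', metrics=['mae'])

train_ds = tf.data.Dataset.from_tensor_slices(
    (np.random.rand(64, 4).astype('float32'),
     np.random.rand(64, 1).astype('float32'))).batch(8).repeat()
val_ds = tf.data.Dataset.from_tensor_slices(
    (np.random.rand(16, 4).astype('float32'),
     np.random.rand(16, 1).astype('float32'))).batch(8).repeat()

history = model.fit(train_ds, epochs=2, steps_per_epoch=8,
                    validation_data=val_ds, validation_steps=2, verbose=0)
print(sorted(history.history))  # ['loss', 'mae', 'val_loss', 'val_mae']
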
def iterator_predict_loop(model, inputs, steps, verbose=0):
  """Predict function for eager execution when input is dataset iterator.

  Arguments:
      model: Instance of `Model`.
      inputs: Input dataset iterator.
      steps: Total number of steps (batches of samples) before declaring
          `_predict_loop` finished.
      verbose: Verbosity mode.

  Returns:
      Array of predictions (if the model has a single output)
      or list of arrays of predictions (if the model has multiple outputs).

  Raises:
      ValueError: In case of mismatch between given number of inputs and
        expectations of the model.
  """
  assert isinstance(inputs, iterator_ops.EagerIterator)
  if not isinstance(inputs.output_shapes,
                    (list, tuple)) or len(inputs.output_shapes) > 3:
    raise ValueError(
        'Please provide data as a list or tuple of 1, 2, or 3 elements'
        ' - `(input)`, or `(input, target)`, or `(input, target, '
        'sample_weights)`. Received %s. We do not use the `target` or '
        '`sample_weights` value here.' % inputs.output_shapes)
  outs = []
  if verbose == 1:
    progbar = generic_utils.Progbar(target=steps)
  for step_index in range(steps):
    # Get data from the iterator.
    try:
      next_element = inputs.get_next()
    except errors.OutOfRangeError:
      logging.warning(
          'Your dataset iterator ran out of data; interrupting prediction. '
          'Make sure that your dataset can generate at least `steps` batches '
          '(in this case, %d batches). You may need to use the repeat() '
          'function when building your dataset.', steps)
      break

    # expects a tuple, where first element of tuple represents inputs
    x = next_element[0]

    # Validate and standardize data.
    x, _, _ = model._standardize_user_data(x)
    x = training_utils.cast_if_floating_dtype(x)

    if isinstance(x, list) and len(x) == 1:
      x = x[0]

    if model._expects_training_arg:
      batch_outs = model.call(x, training=False)
    else:
      batch_outs = model.call(x)
    if not isinstance(batch_outs, list):
      batch_outs = [batch_outs]

    # We collect the results from every step and then concatenate them once
    # in the end. This is an expensive process. We are doing this because we
    # do not know the number of samples beforehand.
    if step_index == 0:
      for _ in batch_outs:
        outs.append([])
    for i, batch_out in enumerate(batch_outs):
      outs[i].append(backend.get_value(batch_out))

    if verbose == 1:
      progbar.update(step_index + 1)
  for i, out in enumerate(outs):
    outs[i] = np.concatenate(tuple(out), axis=0)
  if len(outs) == 1:
    return outs[0]
  return outs
def iterator_test_loop(model, inputs, steps, verbose=0):
  """Test function for eager execution when input is given as dataset iterator.

  Arguments:
      model: Model instance that is being evaluated in Eager mode.
      inputs: Input dataset iterator.
      steps: Total number of steps (batches of samples) before declaring
        evaluation finished.
      verbose: Verbosity mode.

  Returns:
      Scalar loss (if the model has a single output and no metrics)
      or list of scalars (if the model has multiple outputs
      and/or metrics). The attribute `model.metrics_names` will give you
      the display labels for the scalar outputs.

  Raises:
      ValueError: In case of mismatch between given number of inputs and
        expectations of the model.
  """
  assert isinstance(inputs, iterator_ops.EagerIterator)
  # make sure either x,y or x,y,sample_weights is provided
  if (not isinstance(inputs.output_shapes, (list, tuple)) or
      len(inputs.output_shapes) < 2 or len(inputs.output_shapes) > 3):
    raise ValueError('Please provide either inputs and targets '
                     'or inputs, targets, and sample_weights')
  outs = []
  num_samples = 0
  if verbose == 1:
    progbar = generic_utils.Progbar(target=steps)
  for step_index in range(steps):
    # Get data from the iterator.
    try:
      next_element = inputs.get_next()
    except errors.OutOfRangeError:
      logging.warning(
          'Your dataset iterator ran out of data; interrupting testing. '
          'Make sure that your dataset can generate at least `steps` batches '
          '(in this case, %d batches). You may need to use the repeat() '
          'function when building your dataset.', steps)
      break

    if len(inputs.output_shapes) == 2:
      x, y = next_element
      sample_weights = None
    else:
      x, y, sample_weights = next_element

    # Validate and standardize data.
    x, y, sample_weights = model._standardize_user_data(
        x, y, sample_weight=sample_weights)
    x = training_utils.cast_if_floating_dtype(x)
    y = training_utils.cast_if_floating_dtype(y)
    if sample_weights:
      sample_weights = [
          training_utils.cast_if_floating_dtype(
              ops.convert_to_tensor(val, dtype=backend.floatx()))
          if val is not None else None for val in sample_weights
      ]

    if step_index == 0:
      # Get stateful metrics indices. We do not do this before the `steps` loop
      # because model will be compiled only in the first iteration of this loop
      # in the deferred build scenario.
      if hasattr(model, 'metrics'):
        for m in model.stateful_metric_functions:
          m.reset_states()
        stateful_metric_indices = [
            i for i, name in enumerate(model.metrics_names)
            if str(name) in model.stateful_metric_names
        ]
      else:
        stateful_metric_indices = []

    # Calculate model output, loss values.
    loss_outs, loss, loss_metrics, masks = _model_loss(
        model, x, y, sample_weights=sample_weights, training=False)
    metrics_results = _eager_metrics_fn(
        model, loss_outs, y, sample_weights=sample_weights, masks=masks)
    batch_outs = []
    for _, v in zip(model.metrics_names,
                    [backend.mean(loss)] + loss_metrics + metrics_results):
      batch_outs.append(tensor_util.constant_value(v))

    # Get current step size.
    if isinstance(x, list):
      step_size = x[0].get_shape().as_list()[0]
    elif isinstance(x, dict):
      step_size = list(x.values())[0].get_shape().as_list()[0]
    else:
      step_size = x.get_shape().as_list()[0]

    # Accumulate results in output array.
    if not isinstance(batch_outs, list):
      batch_outs = [batch_outs]
    if step_index == 0:
      for _ in enumerate(batch_outs):
        outs.append(0.)
    for i, batch_out in enumerate(batch_outs):
      if i in stateful_metric_indices:
        outs[i] = batch_out
      else:
        outs[i] += batch_out * step_size

    # Calculate sample size.
    num_samples += step_size
    if verbose == 1:
      progbar.update(step_index + 1)

  for i in range(len(outs)):
    if i not in stateful_metric_indices:
      outs[i] /= num_samples
  if len(outs) == 1:
    return outs[0]
  return outs
def iterator_fit_loop(model,
                      inputs,
                      class_weight,
                      steps_per_epoch,
                      epoch_logs,
                      val_inputs=None,
                      val_targets=None,
                      val_sample_weights=None,
                      epochs=1,
                      verbose=1,
                      callbacks=None,
                      validation_steps=None,
                      do_validation=False,
                      batch_size=None):
  """Fit function for eager execution when input is given as dataset iterator.

  Updates the given epoch logs.

  Arguments:
      model: Instance of the `Model`.
      inputs: Input dataset iterator.
      class_weight: Optional class-weight array to weight the importance of
          samples in `inputs` based on the class they belong to, as conveyed by
          the targets from the `inputs` iterator.
      steps_per_epoch: Total number of steps (batches of samples)
          before declaring one epoch finished and starting the
          next epoch.
      epoch_logs: Dictionary of logs from every epoch.
      val_inputs: Input data for validation.
      val_targets: Target data for validation.
      val_sample_weights: Sample weight data for validation.
      epochs: Number of times to iterate over the data
      verbose: Verbosity mode, 0, 1 or 2
      callbacks: CallbackList instance. Controls callbacks during training.
      validation_steps: Number of steps to run validation for (only if doing
        validation from data tensors). Ignored with default value of `None`.
      do_validation: Boolean value indicating whether we should do validation.
      batch_size: int, val_inputs and val_targets will be evaled batch by
        batch with size batch_size if they are array.

  Raises:
      ValueError: In case of mismatch between given number of inputs and
        expectations of the model.
  """
  assert isinstance(inputs, iterator_ops.EagerIterator)

  # make sure either x,y or x,y,sample_weights is provided
  if (not isinstance(inputs.output_shapes, (list, tuple)) or
      len(inputs.output_shapes) not in (2, 3)):
    raise ValueError('Please provide either inputs and targets '
                     'or inputs, targets, and sample_weights')

  for step_index in range(steps_per_epoch):
    batch_logs = {'batch': step_index, 'size': 1}
    callbacks.on_batch_begin(step_index, batch_logs)

    # Get data from the iterator.
    try:
      next_element = inputs.get_next()
    except errors.OutOfRangeError:
      logging.warning(
          'Your dataset iterator ran out of data; interrupting training. Make '
          'sure that your dataset can generate at least '
          '`steps_per_epoch * epochs` batches (in this case, %d batches). You '
          'may need to use the repeat() function when building your '
          'dataset.' % (steps_per_epoch * epochs))
      break

    if len(inputs.output_shapes) == 2:
      x, y = next_element
      sample_weights = None
    else:
      x, y, sample_weights = next_element

    # Validate and standardize data.
    x, y, sample_weights = model._standardize_user_data(
        x, y, sample_weight=sample_weights, class_weight=class_weight)
    x = training_utils.cast_if_floating_dtype(x)
    y = training_utils.cast_if_floating_dtype(y)
    if sample_weights:
      sample_weights = [
          training_utils.cast_if_floating_dtype(
              ops.convert_to_tensor(val, dtype=backend.floatx()))
          if val is not None else None for val in sample_weights
      ]

    # Set stateful_metrics in callbacks. We do not do this before the
    # `steps_per_epoch` loop because model will be compiled only in the first
    # iteration of this loop in the deferred build scenario.
    if step_index == 0:
      for cbk in callbacks:
        if (isinstance(cbk, cbks.BaseLogger) or
            isinstance(cbk, cbks.ProgbarLogger)):
          cbk.stateful_metrics = model.stateful_metric_names

    if step_index == 0 and not callbacks.params['metrics']:
      callback_metrics = copy.copy(model.metrics_names)
      if do_validation:
        callback_metrics += ['val_' + n for n in model.metrics_names]
      callbacks.set_params({
          'batch_size': batch_size,
          'epochs': epochs,
          'steps': steps_per_epoch,
          'verbose': verbose,
          'do_validation': do_validation,
          'metrics': callback_metrics or [],
          'validation_steps': validation_steps
      })

    # Train model.
    outs, loss, loss_metrics, masks = _process_single_batch(
        model, x, y, sample_weights=sample_weights, training=True)
    outs = generic_utils.to_list(outs)

    # Calculate metrics.
    for l, o in zip(model.metrics_names, outs):
      batch_logs[l] = o
    # Required for eager execution
    metrics_results = _eager_metrics_fn(
        model, outs, y, sample_weights=sample_weights, masks=masks)
    batch_logs['loss'] = tensor_util.constant_value(backend.mean(loss))

    for k, v in zip(model.metrics_names,
                    [backend.mean(loss)] + loss_metrics + metrics_results):
      batch_logs[k] = tensor_util.constant_value(v)
    callbacks.on_batch_end(step_index, batch_logs)
    if callbacks.model.stop_training:
      break

    if step_index == steps_per_epoch - 1:
      if do_validation:
        val_outs = test_loop(
            model,
            val_inputs,
            val_targets,
            sample_weights=val_sample_weights,
            steps=validation_steps,
            verbose=0,
            batch_size=batch_size)
        if not isinstance(val_outs, list):
          val_outs = [val_outs]
        # Same labels assumed.
        for l, o in zip(model.metrics_names, val_outs):
          epoch_logs['val_' + l] = o
def test_on_batch(model,
                  inputs,
                  targets,
                  sample_weights=None,
                  reset_metrics=True,
                  output_loss_metrics=None):
  """Calculates the loss for one input batch.

  Arguments:
      model: Model whose loss has to be calculated.
      inputs: Input batch data.
      targets: Target batch data.
      sample_weights: Sample weight batch data.
      reset_metrics: If `True`, the metrics returned will be only for this
        batch. If `False`, the metrics will be statefully accumulated across
        batches.
      output_loss_metrics: List of metrics that are used to aggregate output
        loss values.

  Returns:
      total loss, loss and metrics associated with each output.
  """
  if isinstance(inputs, collections.Sequence):
    if len(inputs) and tensor_util.is_tensor(inputs[0]):
      inputs = training_utils.cast_if_floating_dtype(inputs)
      targets = training_utils.cast_if_floating_dtype(targets)
    else:
      inputs = training_utils.cast_if_floating_dtype(
          [ops.convert_to_tensor(val) for val in inputs])
      targets = training_utils.cast_if_floating_dtype(
          [ops.convert_to_tensor(val) for val in targets])
  if sample_weights:
    sample_weights = [
        training_utils.cast_if_floating_dtype(ops.convert_to_tensor(val))
        if val is not None else None for val in sample_weights
    ]
  outs, total_loss, output_losses, aggregated_output_losses, masks = (
      _model_loss(
          model,
          inputs,
          targets,
          sample_weights=sample_weights,
          training=False,
          output_loss_metrics=output_loss_metrics))
  if not isinstance(outs, list):
    outs = [outs]
  metrics_results = _eager_metrics_fn(
      model,
      outs,
      targets,
      sample_weights=sample_weights,
      masks=masks,
      return_stateful_result=not reset_metrics)
  total_loss = nest.flatten(total_loss)
  if reset_metrics:
    final_output_losses = output_losses
  else:
    final_output_losses = aggregated_output_losses
  results = total_loss + final_output_losses + metrics_results

  return [tensor_util.constant_value(v) for v in results]
Example #19
def iterator_test_loop(model, inputs, steps, verbose=0):
    """Test function for eager execution when input is given as dataset iterator.

  Arguments:
      model: Model instance that is being evaluated in Eager mode.
      inputs: Input dataset iterator.
      steps: Total number of steps (batches of samples) before declaring
        evaluation finished.
      verbose: Verbosity mode.

  Returns:
      Scalar loss (if the model has a single output and no metrics)
      or list of scalars (if the model has multiple outputs
      and/or metrics). The attribute `model.metrics_names` will give you
      the display labels for the scalar outputs.

  Raises:
      ValueError: In case of mismatch between given number of inputs and
        expectations of the model.
  """
    assert isinstance(inputs, iterator_ops.EagerIterator)
    # make sure either x,y or x,y,sample_weights is provided
    if (not isinstance(inputs.output_shapes, (list, tuple))
            or len(inputs.output_shapes) < 2 or len(inputs.output_shapes) > 3):
        raise ValueError('Please provide either inputs and targets '
                         'or inputs, targets, and sample_weights')
    outs = []
    num_samples = 0
    if verbose == 1:
        progbar = generic_utils.Progbar(target=steps)
    for step_index in range(steps):
        # Get data from the iterator.
        try:
            next_element = inputs.get_next()
        except errors.OutOfRangeError:
            logging.warning(
                'Your dataset iterator ran out of data; interrupting testing. '
                'Make sure that your dataset can generate at least `steps` batches '
                '(in this case, %d batches).', steps)
            break

        if len(inputs.output_shapes) == 2:
            x, y = next_element
            sample_weights = None
        else:
            x, y, sample_weights = next_element

        # Validate and standardize data.
        x, y, sample_weights = model._standardize_user_data(x, y)
        x = training_utils.cast_if_floating_dtype(x)
        y = training_utils.cast_if_floating_dtype(y)

        # Calculate model output, loss values.
        loss_outs, loss, loss_metrics = _model_loss(
            model, x, y, sample_weights=sample_weights, training=False)
        metrics_results = _eager_metrics_fn(model, loss_outs, y)
        batch_outs = []
        for _, v in zip(model.metrics_names,
                        [backend.mean(loss)] + loss_metrics + metrics_results):
            batch_outs.append(tensor_util.constant_value(v))

        # Get current step size.
        if isinstance(x, list):
            step_size = x[0].get_shape().as_list()[0]
        else:
            step_size = x.get_shape().as_list()[0]

        # Accumulate results in output array.
        if not isinstance(batch_outs, list):
            batch_outs = [batch_outs]
        if step_index == 0:
            for _ in enumerate(batch_outs):
                outs.append(0.)
        for i, batch_out in enumerate(batch_outs):
            outs[i] += batch_out * step_size

        # Calculate sample size.
        num_samples += step_size
        if verbose == 1:
            progbar.update(step_index + 1)

    for i in range(len(outs)):
        outs[i] /= num_samples
    if len(outs) == 1:
        return outs[0]
    return outs
Example #20
def iterator_fit_loop(model,
                      inputs,
                      class_weight,
                      steps_per_epoch,
                      callback_model,
                      out_labels,
                      epoch_logs,
                      val_inputs=None,
                      val_targets=None,
                      val_sample_weights=None,
                      epochs=1,
                      verbose=1,
                      callbacks=None,
                      callback_metrics=None,
                      validation_steps=None,
                      do_validation=False,
                      batch_size=None):
    """Fit function for eager execution when input is given as dataset iterator.

  Updates the given epoch logs.

  Arguments:
      model: Instance of the `Model`.
      inputs: Input dataset iterator.
      class_weight: Optional class-weight array to weight the importance of
          samples in `inputs` based on the class they belong to, as conveyed by
          the targets from the `inputs` iterator.
      steps_per_epoch: Total number of steps (batches of samples)
          before declaring one epoch finished and starting the
          next epoch.
      callback_model: Instance of `Model` to callback.
      out_labels: Output labels generated from model metric names.
      epoch_logs: Dictionary of logs from every epoch.
      val_inputs: Input data for validation.
      val_targets: Target data for validation.
      val_sample_weights: Sample weight data for validation.
      epochs: Number of times to iterate over the data
      verbose: Verbosity mode, 0, 1 or 2
      callbacks: List of callbacks to be called during training
      callback_metrics: List of strings, the display names of the metrics
          passed to the callbacks. They should be the concatenation of the
          list of display names of the outputs of `f` and the list of
          display names of the outputs of `f_val`.
      validation_steps: Number of steps to run validation for (only if doing
        validation from data tensors). Ignored with default value of `None`.
      do_validation: Boolean value indicating whether we should do validation.
      batch_size: int, val_inputs and val_targets will be evaled batch by
        batch with size batch_size if they are array.

  Raises:
      ValueError: In case of mismatch between given number of inputs and
        expectations of the model.
  """
    assert isinstance(inputs, iterator_ops.EagerIterator)

    # make sure either x,y or x,y,sample_weights is provided
    if (not isinstance(inputs.output_shapes, (list, tuple))
            or len(inputs.output_shapes) not in (2, 3)):
        raise ValueError('Please provide either inputs and targets '
                         'or inputs, targets, and sample_weights')

    for step_index in range(steps_per_epoch):
        batch_logs = {'batch': step_index, 'size': 1}
        callbacks.on_batch_begin(step_index, batch_logs)

        # Get data from the iterator.
        try:
            next_element = inputs.get_next()
        except errors.OutOfRangeError:
            logging.warning(
                'Your dataset iterator ran out of data; '
                'interrupting training. Make sure that your dataset'
                ' can generate at least `steps_per_epoch * epochs` '
                'batches (in this case, %d batches).' %
                (steps_per_epoch * epochs))
            break

        if len(inputs.output_shapes) == 2:
            x, y = next_element
            sample_weights = None
        else:
            x, y, sample_weights = next_element

        # Validate and standardize data.
        x, y, sample_weights = model._standardize_user_data(
            x, y, sample_weight=sample_weights, class_weight=class_weight)
        x = training_utils.cast_if_floating_dtype(x)
        y = training_utils.cast_if_floating_dtype(y)
        if sample_weights:
            sample_weights = [
                training_utils.cast_if_floating_dtype(
                    ops.convert_to_tensor(val, dtype=backend.floatx()))
                if val is not None else None for val in sample_weights
            ]

        if step_index == 0 and not callback_metrics:
            out_labels = model.metrics_names
            if do_validation:
                callback_metrics = copy.copy(out_labels) + [
                    'val_' + n for n in out_labels
                ]
            else:
                callback_metrics = copy.copy(out_labels)
            callbacks.set_params({
                'epochs': epochs,
                'steps': steps_per_epoch,
                'verbose': verbose,
                'do_validation': do_validation,
                'metrics': callback_metrics or [],
            })

        # Train model.
        outs, loss, loss_metrics = _process_single_batch(
            model, x, y, sample_weights=sample_weights, training=True)
        if not isinstance(outs, list):
            outs = [outs]

        # Calculate metrics.
        for l, o in zip(out_labels, outs):
            batch_logs[l] = o
        # Required for eager execution
        metrics_results = _eager_metrics_fn(model, outs, y)
        batch_logs['loss'] = tensor_util.constant_value(backend.mean(loss))

        for k, v in zip(model.metrics_names,
                        [backend.mean(loss)] + loss_metrics + metrics_results):
            batch_logs[k] = tensor_util.constant_value(v)
        callbacks.on_batch_end(step_index, batch_logs)
        if callback_model.stop_training:
            break

        if step_index == steps_per_epoch - 1:
            if do_validation:
                val_outs = test_loop(model,
                                     val_inputs,
                                     val_targets,
                                     sample_weights=val_sample_weights,
                                     steps=validation_steps,
                                     verbose=0,
                                     batch_size=batch_size)
                if not isinstance(val_outs, list):
                    val_outs = [val_outs]
                # Same labels assumed.
                for l, o in zip(out_labels, val_outs):
                    epoch_logs['val_' + l] = o
def iterator_fit_loop(model,
                      inputs,
                      class_weight,
                      steps_per_epoch,
                      callback_model,
                      out_labels,
                      epoch_logs,
                      val_inputs=None,
                      val_targets=None,
                      val_sample_weights=None,
                      epochs=1,
                      verbose=1,
                      callbacks=None,
                      callback_metrics=None,
                      validation_steps=None,
                      do_validation=False):
  """Fit function for eager execution when input is given as dataset iterator.

  Updates the given epoch logs.

  Arguments:
      model: Instance of the `Model`.
      inputs: Input dataset iterator.
      class_weight: Optional class-weight array to weight the importance of
          samples in `inputs` based on the class they belong to, as conveyed by
          the targets from the `inputs` iterator.
      steps_per_epoch: Total number of steps (batches of samples)
          before declaring one epoch finished and starting the
          next epoch.
      callback_model: Instance of `Model` to callback.
      out_labels: Output labels generated from model metric names.
      epoch_logs: Dictionary of logs from every epoch.
      val_inputs: Input data for validation.
      val_targets: Target data for validation.
      val_sample_weights: Sample weight data for validation.
      epochs: Number of times to iterate over the data.
      verbose: Verbosity mode, 0, 1, or 2.
      callbacks: List of callbacks to be called during training.
      callback_metrics: List of strings, the display names of the metrics
          passed to the callbacks. They should be the concatenation of the
          display names of the outputs of `f` and the display names of the
          outputs of `f_val`.
      validation_steps: Number of steps to run validation for (only if doing
        validation from data tensors). Ignored with default value of `None`.
      do_validation: Boolean value indicating whether we should do validation.

  Raises:
      ValueError: In case of mismatch between given number of inputs and
        expectations of the model.
  """
  assert isinstance(inputs, iterator_ops.EagerIterator)
  for step_index in range(steps_per_epoch):
    batch_logs = {}
    batch_logs['batch'] = step_index
    batch_logs['size'] = 1
    callbacks.on_batch_begin(step_index, batch_logs)

    # Get data from the iterator.
    try:
      next_element = inputs.get_next()
    except errors.OutOfRangeError:
      logging.warning(
          'Your dataset iterator ran out of data; '
          'interrupting training. Make sure that your dataset '
          'can generate at least `steps_per_epoch * epochs` '
          'batches (in this case, %d batches).' % (steps_per_epoch * epochs))
      break

    if not isinstance(next_element, (list, tuple)) or len(next_element) != 2:
      raise ValueError('Please provide data as a list or tuple of 2 elements '
                       '- an input and target pair. Received %s' % next_element)
    x, y = next_element

    # Validate and standardize data.
    x, y, sample_weights = model._standardize_user_data(
        x, y, class_weight=class_weight)
    x = training_utils.cast_if_floating_dtype(x)
    y = training_utils.cast_if_floating_dtype(y)
    if sample_weights:
      sample_weights = [
          ops.convert_to_tensor(val, dtype=backend.floatx())
          if val is not None else None for val in sample_weights
      ]

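    # On the first step, derive the display labels from the model's metric names,
    # add 'val_'-prefixed counterparts when validating, and push the run
    # parameters (epochs, steps, metrics, ...) to the callbacks.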
    if step_index == 0 and not callback_metrics:
      out_labels = model.metrics_names
      if do_validation:
        callback_metrics = copy.copy(out_labels) + [
            'val_' + n for n in out_labels
        ]
      else:
        callback_metrics = copy.copy(out_labels)
      callbacks.set_params({
          'epochs': epochs,
          'steps': steps_per_epoch,
          'verbose': verbose,
          'do_validation': do_validation,
          'metrics': callback_metrics or [],
      })

    # Train model.
    outs, loss, loss_metrics = _process_single_batch(
        model, x, y, sample_weights=sample_weights, training=True)
    if not isinstance(outs, list):
      outs = [outs]

    # Calculate metrics.
    for l, o in zip(out_labels, outs):
      batch_logs[l] = o
    # Required for eager execution
    metrics_results = _eager_metrics_fn(model, outs, y)
    batch_logs['loss'] = tensor_util.constant_value(backend.mean(loss))

    for k, v in zip(model.metrics_names,
                    [backend.mean(loss)] + loss_metrics + metrics_results):
      batch_logs[k] = tensor_util.constant_value(v)
    callbacks.on_batch_end(step_index, batch_logs)
    if callback_model.stop_training:
      break

    if step_index == steps_per_epoch - 1:
      if do_validation:
        val_outs = test_loop(
            model,
            val_inputs,
            val_targets,
            sample_weights=val_sample_weights,
            steps=validation_steps,
            verbose=0)
        if not isinstance(val_outs, list):
          val_outs = [val_outs]
        # Same labels assumed.
        for l, o in zip(out_labels, val_outs):
          epoch_logs['val_' + l] = o
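
# Illustrative sketch (not part of the original source): a framework-free outline of
# the control flow `iterator_fit_loop` implements above: pull at most
# `steps_per_epoch` batches from an iterator, stop early when the data runs out, and
# fire per-batch callback hooks. All names below are hypothetical stand-ins.
def toy_fit_loop(batch_iterator, train_step, callbacks, steps_per_epoch):
  for step_index in range(steps_per_epoch):
    batch_logs = {'batch': step_index, 'size': 1}
    for callback in callbacks:
      callback.on_batch_begin(step_index, batch_logs)
    try:
      x, y = next(batch_iterator)
    except StopIteration:
      # Mirrors the OutOfRangeError handling above: warn and end the epoch early.
      print('Iterator ran out of data; interrupting training.')
      break
    batch_logs['loss'] = train_step(x, y)
    for callback in callbacks:
      callback.on_batch_end(step_index, batch_logs)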
def iterator_test_loop(model, inputs, steps, verbose=0):
  """Test function for eager execution when input is given as dataset iterator.

  Arguments:
      model: Model instance that is being evaluated in Eager mode.
      inputs: Input dataset iterator.
      steps: Total number of steps (batches of samples) before declaring
          predictions finished.
      verbose: Verbosity mode.

  Returns:
      Scalar loss (if the model has a single output and no metrics)
      or list of scalars (if the model has multiple outputs
      and/or metrics). The attribute `model.metrics_names` will give you
      the display labels for the scalar outputs.

  Raises:
      ValueError: In case of mismatch between given number of inputs and
        expectations of the model.
  """
  assert isinstance(inputs, iterator_ops.EagerIterator)
  outs = []
  num_samples = 0
  if verbose == 1:
    progbar = generic_utils.Progbar(target=steps)
  for step_index in range(steps):
    # Get data from the iterator.
    try:
      next_element = inputs.get_next()
    except errors.OutOfRangeError:
      logging.warning(
          'Your dataset iterator ran out of data interrupting testing. '
          'Make sure that your dataset can generate at least `steps` batches '
          '(in this case, %d batches).', steps)
      break

    if not isinstance(next_element, (list, tuple)) or len(next_element) != 2:
      raise ValueError('Please provide data as a list or tuple of 2 elements '
                       '- an input and target pair. Received %s' % next_element)
    x, y = next_element

    # Validate and standardize data.
    x, y, sample_weights = model._standardize_user_data(x, y)
    x = training_utils.cast_if_floating_dtype(x)
    y = training_utils.cast_if_floating_dtype(y)

    # Calculate model output, loss values.
    loss_outs, loss, loss_metrics = _model_loss(
        model, x, y, sample_weights=sample_weights, training=False)
    metrics_results = _eager_metrics_fn(model, loss_outs, y)
    batch_outs = []
    for _, v in zip(model.metrics_names,
                    [backend.mean(loss)] + loss_metrics + metrics_results):
      batch_outs.append(tensor_util.constant_value(v))

    # Get current step size.
    if isinstance(x, list):
      step_size = x[0].get_shape().as_list()[0]
    else:
      step_size = x.get_shape().as_list()[0]

    # Accumulate results in output array.
    if not isinstance(batch_outs, list):
      batch_outs = [batch_outs]
    if step_index == 0:
      for _ in batch_outs:
        outs.append(0.)
    for i, batch_out in enumerate(batch_outs):
      outs[i] += batch_out * step_size

    # Calculate sample size.
    num_samples += step_size
    if verbose == 1:
      progbar.update(step_index + 1)

  for i in range(len(outs)):
    outs[i] /= num_samples
  if len(outs) == 1:
    return outs[0]
  return outs
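
# Illustrative sketch (not part of the original source): the accumulation scheme used
# in `iterator_test_loop` above. Each per-batch metric is weighted by the number of
# samples in that batch, and the running totals are divided by the overall sample
# count at the end, so the average stays correct even when the last batch is smaller.
# The inputs are hypothetical.
def average_over_batches(batch_metrics, batch_sizes):
  totals = [0.] * len(batch_metrics[0])
  num_samples = 0
  for metrics, size in zip(batch_metrics, batch_sizes):
    for i, value in enumerate(metrics):
      totals[i] += value * size
    num_samples += size
  return [total / num_samples for total in totals]

# For example, average_over_batches([[0.5, 0.8], [0.7, 0.6]], [32, 16])
# returns approximately [0.567, 0.733].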
Example #23
    def __init__(self,
                 input_shapes,
                 output_shape,
                 *args,
                 squash=True,
                 preprocessors=None,
                 name=None,
                 **kwargs):
        self._Serializable__initialize(locals())

        self._input_shapes = input_shapes
        self._output_shape = output_shape
        self._squash = squash
        self._name = name

        super(GaussianPolicy, self).__init__(*args, **kwargs)

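        # Build Keras inputs for each observation, apply optional preprocessors,
        # cast everything to floats, and concatenate into a single conditioning
        # tensor for the shift/log-scale network.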
        inputs_flat = create_inputs(input_shapes)
        preprocessors_flat = (flatten_input_structure(preprocessors)
                              if preprocessors is not None else tuple(
                                  None for _ in inputs_flat))

        assert len(inputs_flat) == len(preprocessors_flat), (
            inputs_flat, preprocessors_flat)

        preprocessed_inputs = [
            preprocessor(input_) if preprocessor is not None else input_
            for preprocessor, input_ in zip(preprocessors_flat, inputs_flat)
        ]

        float_inputs = tf.keras.layers.Lambda(
            lambda inputs: training_utils.cast_if_floating_dtype(inputs))(
                preprocessed_inputs)

        conditions = tf.keras.layers.Lambda(
            lambda inputs: tf.concat(inputs, axis=-1))(float_inputs)

        self.condition_inputs = inputs_flat

        shift_and_log_scale_diag = self._shift_and_log_scale_diag_net(
            output_size=output_shape[0] * 2, )(conditions)

        shift, log_scale_diag = tf.keras.layers.Lambda(
            lambda shift_and_log_scale_diag: tf.split(
                shift_and_log_scale_diag, num_or_size_splits=2, axis=-1))(
                    shift_and_log_scale_diag)

        log_scale_diag = tf.keras.layers.Lambda(
            lambda log_scale_diag: tf.clip_by_value(
                log_scale_diag, *SCALE_DIAG_MIN_MAX))(log_scale_diag)

        batch_size = tf.keras.layers.Lambda(lambda x: tf.shape(input=x)[0])(
            conditions)

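        # Unit-Gaussian base distribution; one latent vector is sampled per batch
        # element and later transformed into an action.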
        base_distribution = tfp.distributions.MultivariateNormalDiag(
            loc=tf.zeros(output_shape), scale_diag=tf.ones(output_shape))

        latents = tf.keras.layers.Lambda(lambda batch_size: base_distribution.
                                         sample(batch_size))(batch_size)

        self.latents_model = tf.keras.Model(self.condition_inputs, latents)
        self.latents_input = tf.keras.layers.Input(shape=output_shape,
                                                   name='latents')

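        # Transform latents into pre-squash ("raw") actions via an affine bijector
        # parameterized by the network's shift and log-scale outputs.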
        def raw_actions_fn(inputs):
            shift, log_scale_diag, latents = inputs
            bijector = tfp.bijectors.Affine(shift=shift,
                                            scale_diag=tf.exp(log_scale_diag))
            actions = bijector.forward(latents)
            return actions

        raw_actions = tf.keras.layers.Lambda(raw_actions_fn)(
            (shift, log_scale_diag, latents))

        raw_actions_for_fixed_latents = tf.keras.layers.Lambda(raw_actions_fn)(
            (shift, log_scale_diag, self.latents_input))

        squash_bijector = (SquashBijector()
                           if self._squash else tfp.bijectors.Identity())

        actions = tf.keras.layers.Lambda(lambda raw_actions: squash_bijector.
                                         forward(raw_actions))(raw_actions)
        self.actions_model = tf.keras.Model(self.condition_inputs, actions)

        actions_for_fixed_latents = tf.keras.layers.Lambda(
            lambda raw_actions: squash_bijector.forward(raw_actions))(
                raw_actions_for_fixed_latents)
        self.actions_model_for_fixed_latents = tf.keras.Model(
            (*self.condition_inputs, self.latents_input),
            actions_for_fixed_latents)

        deterministic_actions = tf.keras.layers.Lambda(
            lambda shift: squash_bijector.forward(shift))(shift)

        self.deterministic_actions_model = tf.keras.Model(
            self.condition_inputs, deterministic_actions)

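        # Log-probability of an action under the squashed Gaussian, computed by
        # chaining the squash and affine bijectors over the unit-Gaussian base
        # distribution (change of variables).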
        def log_pis_fn(inputs):
            shift, log_scale_diag, actions = inputs
            base_distribution = tfp.distributions.MultivariateNormalDiag(
                loc=tf.zeros(output_shape), scale_diag=tf.ones(output_shape))
            bijector = tfp.bijectors.Chain((
                squash_bijector,
                tfp.bijectors.Affine(shift=shift,
                                     scale_diag=tf.exp(log_scale_diag)),
            ))
            distribution = (
                tfp.distributions.ConditionalTransformedDistribution(
                    distribution=base_distribution, bijector=bijector))

            log_pis = distribution.log_prob(actions)[:, None]
            return log_pis

        self.actions_input = tf.keras.layers.Input(shape=output_shape,
                                                   name='actions')

        log_pis = tf.keras.layers.Lambda(log_pis_fn)(
            [shift, log_scale_diag, actions])

        log_pis_for_action_input = tf.keras.layers.Lambda(log_pis_fn)(
            [shift, log_scale_diag, self.actions_input])

        self.log_pis_model = tf.keras.Model(
            (*self.condition_inputs, self.actions_input),
            log_pis_for_action_input)

        self.diagnostics_model = tf.keras.Model(
            self.condition_inputs,
            (shift, log_scale_diag, log_pis, raw_actions, actions))
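
# Illustrative sketch (not part of the original source): conceptually, the policy
# above is a reparameterized, squashed Gaussian. The network outputs a shift (mean)
# and a log scale, a latent is drawn from a unit normal, the latent is transformed by
# an affine bijector, and the result is squashed into the action range (the sketch
# assumes a tanh squash for SquashBijector). A NumPy version of the sampling path,
# with hypothetical inputs:
import numpy as np

def sample_squashed_gaussian_action(shift, log_scale_diag, rng=np.random):
    latent = rng.standard_normal(size=np.shape(shift))    # unit-normal latent
    raw_action = shift + np.exp(log_scale_diag) * latent   # affine bijector forward
    return np.tanh(raw_action)                             # squash into (-1, 1)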
Example #24
def _model_loss(model,
                inputs,
                targets,
                output_loss_metrics=None,
                sample_weights=None,
                training=False):
    """Calculates the loss for a given model.

  Arguments:
      model: The model on which metrics are being calculated.
      inputs: Either a dictionary of inputs to the model or a list of input
        arrays.
      targets: List of target arrays.
      output_loss_metrics: List of metrics that are used to aggregate output
        loss values.
      sample_weights: Optional list of sample weight arrays.
      training: Whether the model should be run in inference or training mode.

  Returns:
     The model output, total loss, per-output loss values calculated using the
     specified loss functions, and masks for each output. The total loss
     includes regularization losses and applies masking and sample weighting
     to the loss value.
  """
    # TODO(psv): Dedup code here with graph mode prepare_total_loss() fn.
    # Used to keep track of the total loss value (stateless).
    # eg., total_loss = loss_weight_1 * output_1_loss_fn(...) +
    #                   loss_weight_2 * output_2_loss_fn(...) +
    #                   layer losses.
    total_loss = 0
    kwargs = {}
    if model._expects_training_arg:
        kwargs['training'] = training
    if len(inputs) == 1 and not isinstance(inputs, dict):
        inputs = inputs[0]

    # Allow mixed `NumPy` and `EagerTensor` input here.
    if any(
            isinstance(input_t, (np.ndarray, float, int))
            for input_t in nest.flatten(inputs)):
        inputs = nest.map_structure(ops.convert_to_tensor, inputs)

    outs = model(inputs, **kwargs)
    outs = nest.flatten(outs)

    if targets:
        targets = training_utils.cast_if_floating_dtype_and_mismatch(
            targets, outs)
    # TODO(sallymatson/psv): check if we should do same mismatch fix for weights
    if sample_weights:
        sample_weights = [
            training_utils.cast_if_floating_dtype(ops.convert_to_tensor(val))
            if val is not None else None for val in sample_weights
        ]

    masks = [getattr(t, '_keras_mask', None) for t in outs]
    targets = nest.flatten(targets)

    # Used to keep track of individual output losses.
    output_losses = []

    with backend.name_scope('loss'):
        loss_fns = [
            loss_fn for loss_fn in model.loss_functions if loss_fn is not None
        ]
        custom_losses = model.losses  # Regularization losses

        if not loss_fns and not custom_losses:
            if training:
                raise ValueError('The model cannot be trained '
                                 'because it has no loss to optimize.')
            else:
                raise ValueError('The model cannot be evaluated '
                                 'because it has no loss to compute.')

        for i, loss_fn in enumerate(loss_fns):
            weights = sample_weights[i] if sample_weights else None
            mask = masks[i]
            with backend.name_scope(model.output_names[i] + '_loss'):
                if mask is not None:
                    mask = math_ops.cast(mask, outs[i].dtype)
                    # Update weights with mask.
                    if weights is None:
                        weights = mask
                    else:
                        # Update dimensions of weights to match with mask if possible.
                        weights = math_ops.cast(weights, outs[i].dtype)
                        mask, _, weights = (
                            tf_losses_utils.squeeze_or_expand_dimensions(
                                mask, sample_weight=weights))
                        weights *= mask

                if hasattr(loss_fn, 'reduction'):
                    per_sample_losses = loss_fn.call(targets[i], outs[i])
                    weighted_losses = losses_utils.compute_weighted_loss(
                        per_sample_losses,
                        sample_weight=weights,
                        reduction=losses_utils.ReductionV2.NONE)
                    loss_reduction = loss_fn.reduction

                    # `AUTO` loss reduction defaults to `SUM_OVER_BATCH_SIZE` for all
                    # compile use cases.
                    if loss_reduction == losses_utils.ReductionV2.AUTO:
                        loss_reduction = losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE

                    # Compute the stateless loss value.
                    output_loss = losses_utils.reduce_weighted_loss(
                        weighted_losses, reduction=loss_reduction)
                else:
                    # Compute the stateless loss value for a custom loss class.
                    # Here we assume that the class takes care of loss reduction
                    # because if this class returns a vector value we cannot
                    # differentiate between use case where a custom optimizer
                    # expects a vector loss value vs unreduced per-sample loss value.
                    output_loss = loss_fn(targets[i],
                                          outs[i],
                                          sample_weight=weights)
                    loss_reduction = losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE

            # If the number of outputs is 1 then we don't append the loss metric
            # associated with each model output. When there are multiple outputs
            # associated with a model, each output's loss is calculated and returned
            # as part of the loss_metrics.
            if len(model.outputs) > 1:
                # Keep track of the stateful output loss result.
                output_losses.append(output_loss_metrics[i](output_loss))

            # Scale output loss for distribution. For custom losses we assume
            # reduction was mean.
            if loss_reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE:
                output_loss = losses_utils.scale_loss_for_distribution(
                    output_loss)
            total_loss += model._loss_weights_list[i] * output_loss

        # Add regularization losses
        if custom_losses:
            total_loss += losses_utils.scale_loss_for_distribution(
                math_ops.add_n(custom_losses))
    return outs, total_loss, output_losses, masks
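
# Illustrative sketch (not part of the original source): the weighting logic of
# `_model_loss` in miniature. An output mask is folded into the sample weights, the
# per-sample losses are weighted and reduced with SUM_OVER_BATCH_SIZE (sum divided by
# the batch size), and the per-output losses are combined with their loss weights
# plus any regularization losses. All values are hypothetical.
import numpy as np

def toy_total_loss(per_sample_losses, sample_weights, mask, loss_weight, reg_losses):
    weights = np.asarray(sample_weights, dtype=float)
    if mask is not None:
        weights = weights * np.asarray(mask, dtype=float)  # masked entries drop out
    weighted = np.asarray(per_sample_losses, dtype=float) * weights
    output_loss = weighted.sum() / len(per_sample_losses)  # SUM_OVER_BATCH_SIZE
    return loss_weight * output_loss + sum(reg_losses)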
Example #25
    def construct(self, inputs):
        """
        Set inputs and output shape according to inputs
        :param inputs: inputs data or data piece
        :return:
        """
        if not self.multiple_inputs:
            if isinstance(inputs, (list, tuple)):
                if tensor_util.is_tensor(inputs[0]):
                    dummy_output_values = self.call(
                        training_utils.cast_if_floating_dtype(inputs[:1]))
                else:
                    dummy_output_values = self.call([
                        ops.convert_to_tensor(v, dtype=K.floatx())
                        for v in inputs[:1]
                    ])
                # set dummy input values for inputs
                dummy_input_values = list(inputs[:1])
            else:
                if tensor_util.is_tensor(inputs):
                    dummy_output_values = self.call(
                        training_utils.cast_if_floating_dtype(inputs[:1]))
                else:
                    dummy_output_values = self.call(
                        ops.convert_to_tensor(inputs[:1], dtype=K.floatx()))
                # set dummy input values for inputs
                dummy_input_values = [inputs[:1]]
            # set output values
            if isinstance(dummy_output_values, (list, tuple)):
                dummy_output_values = list(dummy_output_values)
            else:
                dummy_output_values = [dummy_output_values]
        else:
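            # With multiple inputs, take a one-step slice of the stacked inputs and
            # unstack along the first axis so each entry becomes a separate dummy
            # model input.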
            first_inputs = copy.copy(inputs)[0]
            if tensor_util.is_tensor(inputs):
                inputs = training_utils.cast_if_floating_dtype(inputs)
            else:
                inputs = ops.convert_to_tensor(inputs, dtype=K.floatx())
            inputs = tf.unstack(inputs[:, :1, :], axis=0)
            dummy_output_values = self.call(inputs, training=True)
            dummy_input_values = [first_inputs[:1]]
            # set output values
            if isinstance(dummy_output_values, (list, tuple)):
                dummy_output_values = list(dummy_output_values)
            else:
                dummy_output_values = [dummy_output_values]
        self.outputs = [
            base_layer.DeferredTensor(shape=(None for _ in v.shape),
                                      dtype=v.dtype)
            for v in dummy_output_values
        ]
        self.inputs = [
            base_layer.DeferredTensor(shape=(None for _ in v.shape),
                                      dtype=v.dtype)
            for v in dummy_input_values
        ]
        self.input_names = [
            'input_%d' % (i + 1) for i in range(len(dummy_input_values))
        ]
        self.output_names = [
            'output_%d' % (i + 1) for i in range(len(dummy_output_values))
        ]

        # self.call(tensor, training=True)
        self.built = True
        self.init()