Example #1
def batch_predict_loop(model, inputs, batch_size, verbose=0):
    """Predict function for eager execution when input is arrays or tensors.

  Arguments:
      model: Instance of `Model`.
      inputs: List of input arrays.
      batch_size: Integer batch size.
      verbose: Verbosity mode.

  Returns:
      Array of predictions (if the model has a single output)
      or list of arrays of predictions (if the model has multiple outputs).
  """
    outs = []
    num_samples = training_utils.check_num_samples(inputs, batch_size)
    if verbose == 1:
        progbar = generic_utils.Progbar(target=num_samples)
    batches = generic_utils.make_batches(num_samples, batch_size)
    index_array = np.arange(num_samples)
    for batch_index, (batch_start, batch_end) in enumerate(batches):
        batch_ids = index_array[batch_start:batch_end]
        inputs_batch = slice_arrays(inputs, batch_ids)

        inputs_batch = [
            ops.convert_to_tensor(val, dtype=backend.floatx())
            for val in inputs_batch
        ]

        if len(inputs_batch) == 1:
            if model._expects_training_arg:
                batch_outs = model.call(inputs_batch[0], training=False)
            else:
                batch_outs = model.call(inputs_batch[0])
        else:
            if model._expects_training_arg:
                batch_outs = model.call(inputs_batch, training=False)
            else:
                batch_outs = model.call(inputs_batch)

        if not isinstance(batch_outs, list):
            batch_outs = [batch_outs]
        if batch_index == 0:
            # Pre-allocate the results arrays.
            for batch_out in batch_outs:
                dims = batch_out.shape[1:].dims
                dims_list = [d.value for d in dims]
                shape = (num_samples,) + tuple(dims_list)
                outs.append(
                    np.zeros(shape, dtype=batch_out.dtype.as_numpy_dtype))
        for i, batch_out in enumerate(batch_outs):
            outs[i][batch_start:batch_end] = batch_out
        if verbose == 1:
            progbar.update(batch_end)

    if len(outs) == 1:
        return outs[0]
    return outs
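
A minimal usage sketch for the function above. It assumes TensorFlow 1.x with eager execution enabled and that `batch_predict_loop` is in scope (in that era it lives in `tensorflow.python.keras.engine.training_eager`); the toy model, shapes, and random data are illustrative only and not part of the original example.

# Hypothetical usage sketch (TF 1.x, eager mode).
import numpy as np
import tensorflow as tf

tf.enable_eager_execution()

# Toy single-output model; any built Keras model would work here.
model = tf.keras.Sequential([tf.keras.layers.Dense(3, input_shape=(4,))])
model.compile(optimizer='sgd', loss='mse')

x = np.random.random((10, 4)).astype('float32')
preds = batch_predict_loop(model, [x], batch_size=4, verbose=1)
print(preds.shape)  # (10, 3): single output, so a single array is returned
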
Example #2
def predict_loop(model, inputs, batch_size=32, verbose=0, steps=None):
    """Abstract method to loop over some data in batches.

  Arguments:
      model: Instance of `Model`.
      inputs: List of input arrays.
      batch_size: integer batch size.
      verbose: verbosity mode.
      steps: Total number of steps (batches of samples)
          before declaring `_predict_loop` finished.
          Ignored with the default value of `None`.

  Returns:
      Array of predictions (if the model has a single output)
      or list of arrays of predictions
      (if the model has multiple outputs).
  """
    with backend.learning_phase_scope(0):
        num_samples = training_utils.check_num_samples(inputs, batch_size,
                                                       steps, 'steps')
        if verbose == 1:
            if steps is not None:
                progbar = generic_utils.Progbar(target=steps)
            else:
                progbar = generic_utils.Progbar(target=num_samples)

        outs = []
        batches = generic_utils.make_batches(num_samples, batch_size)
        index_array = np.arange(num_samples)
        for batch_index, (batch_start, batch_end) in enumerate(batches):
            batch_ids = index_array[batch_start:batch_end]
            inputs_batch = slice_arrays(inputs, batch_ids)

            inputs_batch = [
                ops.convert_to_tensor(val, dtype=backend.floatx())
                for val in inputs_batch
            ]

            if len(inputs_batch) == 1:
                if model._expects_training_arg:
                    batch_outs = model.call(inputs_batch[0], training=False)
                else:
                    batch_outs = model.call(inputs_batch[0])
            else:
                if model._expects_training_arg:
                    batch_outs = model.call(inputs_batch, training=False)
                else:
                    batch_outs = model.call(inputs_batch)

            if not isinstance(batch_outs, list):
                batch_outs = [batch_outs]
            if batch_index == 0:
                # Pre-allocate the results arrays.
                for batch_out in batch_outs:
                    dims = batch_out.shape[1:].dims
                    dims_list = [d.value for d in dims]
                    shape = (num_samples,) + tuple(dims_list)
                    outs.append(
                        np.zeros(shape, dtype=batch_out.dtype.as_numpy_dtype))
            for i, batch_out in enumerate(batch_outs):
                outs[i][batch_start:batch_end] = batch_out
            if verbose == 1:
                progbar.update(batch_end)
        if len(outs) == 1:
            return outs[0]
        return outs
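
The sketch from Example #1 applies here unchanged; `predict_loop` only adds an inference-time learning phase and an optional `steps` argument (used to size the progress bar), so a hypothetical call looks the same:

# Hypothetical call, reusing the toy model and data from the Example #1 sketch.
# `steps` can be omitted when the number of samples is known.
preds = predict_loop(model, [x], batch_size=4, verbose=1)
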
Example #3
def test_loop(model,
              inputs,
              targets,
              sample_weights=None,
              batch_size=None,
              verbose=0,
              steps=None):
    """Abstract method to loop over some data in batches.

  Arguments:
      model: Model instance that is being evaluated in Eager mode.
      inputs: List of input arrays.
      targets: List of target arrays.
      sample_weights: Optional list of sample weight arrays.
      batch_size: integer batch size or `None`.
      verbose: verbosity mode.
      steps: Total number of steps (batches of samples)
          before declaring predictions finished.
          Ignored with the default value of `None`.

  Returns:
      Scalar loss (if the model has a single output and no metrics)
      or list of scalars (if the model has multiple outputs
      and/or metrics). The attribute `model.metrics_names` will give you
      the display labels for the scalar outputs.
  """
    with backend.learning_phase_scope(0):
        feed_data = inputs + targets
        if sample_weights:
            feed_data += sample_weights
        num_samples = training_utils.check_num_samples(feed_data,
                                                       batch_size=batch_size,
                                                       steps=steps,
                                                       steps_name='steps')
        outs = []
        if verbose == 1:
            progbar = generic_utils.Progbar(target=num_samples)
        batches = generic_utils.make_batches(num_samples, batch_size)
        index_array = np.arange(num_samples)
        for batch_index, (batch_start, batch_end) in enumerate(batches):
            batch_ids = index_array[batch_start:batch_end]
            inputs_batch = slice_arrays(inputs, batch_ids)
            targets_batch = slice_arrays(targets, batch_ids)
            if sample_weights:
                sample_weights_batch = slice_arrays(sample_weights, batch_ids)
            else:
                sample_weights_batch = None

            inputs_batch = [
                ops.convert_to_tensor(val, dtype=backend.floatx())
                for val in inputs_batch
            ]
            targets_batch = [
                ops.convert_to_tensor(val, dtype=backend.floatx())
                for val in targets_batch
            ]
            if sample_weights:
                sample_weights_batch = [
                    ops.convert_to_tensor(val, dtype=backend.floatx())
                    if val is not None else None
                    for val in sample_weights_batch
                ]

            loss_outs, loss, loss_metrics = _model_loss(
                model,
                inputs_batch,
                targets_batch,
                sample_weights=sample_weights_batch,
                training=False)
            metrics_results = _eager_metrics_fn(model, loss_outs,
                                                targets_batch)
            batch_outs = []
            for _, v in zip(model.metrics_names, [backend.mean(loss)] +
                            loss_metrics + metrics_results):
                batch_outs.append(tensor_util.constant_value(v))

            if isinstance(batch_outs, list):
                if batch_index == 0:
                    for _ in batch_outs:
                        outs.append(0.)
                for i, batch_out in enumerate(batch_outs):
                    outs[i] += batch_out * len(batch_ids)
            else:
                if batch_index == 0:
                    outs.append(0.)
                outs[0] += batch_outs * len(batch_ids)

            if verbose == 1:
                progbar.update(batch_end)
        for i in range(len(outs)):
            outs[i] /= num_samples
        if len(outs) == 1:
            return outs[0]
        return outs
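
A hypothetical evaluation call for `test_loop`, assuming the compiled toy model and inputs from the Example #1 sketch plus float32 targets that match the model's output shape:

# Hypothetical call; the model must be compiled so that _model_loss and
# model.metrics_names are available.
y = np.random.random((10, 3)).astype('float32')
results = test_loop(model, [x], [y], batch_size=4, verbose=1)
# Single output and no extra metrics -> a scalar mean loss; otherwise a list
# aligned with model.metrics_names.
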
Example #4
def iterator_predict_loop(model, inputs, steps, verbose=0):
    """Predict function for eager execution when input is dataset iterator.

  Arguments:
      model: Instance of `Model`.
      inputs: Input dataset iterator.
      steps: Total number of steps (batches of samples) before declaring
          `_predict_loop` finished.
      verbose: Verbosity mode.

  Returns:
      Array of predictions (if the model has a single output)
      or list of arrays of predictions (if the model has multiple outputs).

  Raises:
      ValueError: In case of mismatch between given number of inputs and
        expectations of the model.
  """
    assert isinstance(inputs, iterator_ops.EagerIterator)
    outs = []
    if verbose == 1:
        progbar = generic_utils.Progbar(target=steps)
    for step_index in range(steps):
        # Get data from the iterator.
        try:
            next_element = inputs.get_next()
        except errors.OutOfRangeError:
            logging.warning(
                'Your dataset iterator ran out of data; '
                'interrupting prediction. Make sure that your '
                'dataset can generate at least `steps` '
                'batches (in this case, %d batches).', steps)
            break

        if not isinstance(next_element,
                          (list, tuple)) or len(next_element) != 2:
            raise ValueError(
                'Please provide data as a list or tuple of 2 elements '
                ' - input and target pair. Received %s. We do not use the '
                '`target` value here.' % next_element)
        x, _ = next_element

        # Validate and standardize data.
        x, _, _ = model._standardize_user_data(x)

        if model._expects_training_arg:
            batch_outs = model.call(x[0] if len(x) == 1 else x, training=False)
        else:
            batch_outs = model.call(x[0] if len(x) == 1 else x)
        if not isinstance(batch_outs, list):
            batch_outs = [batch_outs]

        # We collect the results from every step and then concatenate them once
        # in the end. This is an expensive process. We are doing this because we
        # do not know the number of samples beforehand.
        if step_index == 0:
            for _ in batch_outs:
                outs.append([])
        for i, batch_out in enumerate(batch_outs):
            outs[i].append(backend.get_value(batch_out))

        if verbose == 1:
            progbar.update(step_index + 1)
    for i, out in enumerate(outs):
        outs[i] = np.concatenate(tuple(out), axis=0)
    if len(outs) == 1:
        return outs[0]
    return outs
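
A hypothetical call for `iterator_predict_loop`, assuming TF 1.x eager mode in which `make_one_shot_iterator()` on a `tf.data.Dataset` yields the `EagerIterator` this function asserts on; the dataset reuses the toy arrays from the earlier sketches:

# Hypothetical usage sketch; the iterator must yield (input, target) pairs
# even though the targets are ignored during prediction.
dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(4)
iterator = dataset.make_one_shot_iterator()  # EagerIterator under eager execution
preds = iterator_predict_loop(model, iterator, steps=3, verbose=1)  # 10 samples / batch 4 -> 3 steps
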
Example #5
def batch_test_loop(model,
                    inputs,
                    targets,
                    batch_size,
                    sample_weights=None,
                    verbose=0):
    """Test function for eager execution when input is given as arrays or tensors.

  Arguments:
      model: Model instance that is being evaluated in Eager mode.
      inputs: List of input arrays.
      targets: List of target arrays.
      batch_size: Integer batch size.
      sample_weights: Optional list of sample weight arrays.
      verbose: Verbosity mode.

  Returns:
      Scalar loss (if the model has a single output and no metrics)
      or list of scalars (if the model has multiple outputs
      and/or metrics). The attribute `model.metrics_names` will give you
      the display labels for the scalar outputs.
  """
    outs = []
    feed_data = inputs + targets
    if sample_weights:
        feed_data += sample_weights
    num_samples = training_utils.check_num_samples(feed_data,
                                                   batch_size=batch_size)
    if verbose == 1:
        progbar = generic_utils.Progbar(target=num_samples)
    batches = generic_utils.make_batches(num_samples, batch_size)
    index_array = np.arange(num_samples)
    for batch_index, (batch_start, batch_end) in enumerate(batches):
        batch_ids = index_array[batch_start:batch_end]
        inputs_batch = slice_arrays(inputs, batch_ids)
        targets_batch = slice_arrays(targets, batch_ids)
        if sample_weights:
            sample_weights_batch = slice_arrays(sample_weights, batch_ids)
        else:
            sample_weights_batch = None

        inputs_batch = [
            ops.convert_to_tensor(val, dtype=backend.floatx())
            for val in inputs_batch
        ]
        targets_batch = [
            ops.convert_to_tensor(val, dtype=backend.floatx())
            for val in targets_batch
        ]
        if sample_weights:
            sample_weights_batch = [
                ops.convert_to_tensor(val, dtype=backend.floatx())
                if val is not None else None for val in sample_weights_batch
            ]

        loss_outs, loss, loss_metrics = _model_loss(
            model,
            inputs_batch,
            targets_batch,
            sample_weights=sample_weights_batch,
            training=False)
        metrics_results = _eager_metrics_fn(model, loss_outs, targets_batch)
        batch_outs = []
        for _, v in zip(model.metrics_names,
                        [backend.mean(loss)] + loss_metrics + metrics_results):
            batch_outs.append(tensor_util.constant_value(v))

        if isinstance(batch_outs, list):
            if batch_index == 0:
                for _ in batch_outs:
                    outs.append(0.)
            for i, batch_out in enumerate(batch_outs):
                outs[i] += batch_out * len(batch_ids)
        else:
            if batch_index == 0:
                outs.append(0.)
            outs[0] += batch_outs * len(batch_ids)

        if verbose == 1:
            progbar.update(batch_end)

    for i in range(len(outs)):
        outs[i] /= num_samples
    if len(outs) == 1:
        return outs[0]
    return outs
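
`batch_test_loop` mirrors `test_loop` minus the learning-phase scope, so a hypothetical call uses the same toy model, inputs, and targets as the Example #3 sketch:

# Hypothetical call; returns a scalar loss (single output, no extra metrics)
# or a list aligned with model.metrics_names.
results = batch_test_loop(model, [x], [y], batch_size=4, verbose=1)
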
Example #6
def iterator_test_loop(model, inputs, steps, verbose=0):
    """Test function for eager execution when input is given as dataset iterator.

  Arguments:
      model: Model instance that is being evaluated in Eager mode.
      inputs: Input dataset iterator.
      steps: Total number of steps (batches of samples) before declaring
          predictions finished.
      verbose: Verbosity mode.

  Returns:
      Scalar loss (if the model has a single output and no metrics)
      or list of scalars (if the model has multiple outputs
      and/or metrics). The attribute `model.metrics_names` will give you
      the display labels for the scalar outputs.

  Raises:
      ValueError: In case of mismatch between given number of inputs and
        expectations of the model.
  """
    assert isinstance(inputs, iterator_ops.EagerIterator)
    outs = []
    num_samples = 0
    if verbose == 1:
        progbar = generic_utils.Progbar(target=steps)
    for step_index in range(steps):
        # Get data from the iterator.
        try:
            next_element = inputs.get_next()
        except errors.OutOfRangeError:
            logging.warning(
                'Your dataset iterator ran out of data; interrupting testing. '
                'Make sure that your dataset can generate at least `steps` batches '
                '(in this case, %d batches).', steps)
            break

        if not isinstance(next_element,
                          (list, tuple)) or len(next_element) != 2:
            raise ValueError(
                'Please provide data as a list or tuple of 2 elements '
                ' - input and target pair. Received %s' % next_element)
        x, y = next_element

        # Validate and standardize data.
        x, y, sample_weights = model._standardize_user_data(x, y)

        # Calculate model output, loss values.
        loss_outs, loss, loss_metrics = _model_loss(
            model, x, y, sample_weights=sample_weights, training=False)
        metrics_results = _eager_metrics_fn(model, loss_outs, y)
        batch_outs = []
        for _, v in zip(model.metrics_names,
                        [backend.mean(loss)] + loss_metrics + metrics_results):
            batch_outs.append(tensor_util.constant_value(v))

        # Get current step size.
        if isinstance(x, list):
            step_size = x[0].get_shape().as_list()[0]
        else:
            step_size = x.get_shape().as_list()[0]

        # Accumulate results in output array.
        if not isinstance(batch_outs, list):
            batch_outs = [batch_outs]
        if step_index == 0:
            for _ in batch_outs:
                outs.append(0.)
        for i, batch_out in enumerate(batch_outs):
            outs[i] += batch_out * step_size

        # Calculate sample size.
        num_samples += step_size
        if verbose == 1:
            progbar.update(step_index + 1)

    for i in range(len(outs)):
        outs[i] /= num_samples
    if len(outs) == 1:
        return outs[0]
    return outs
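
A hypothetical call for `iterator_test_loop`, reusing the batched (input, target) dataset from the Example #4 sketch; here the targets are actually consumed to compute the loss and metrics:

# Hypothetical call; a fresh iterator is needed because the earlier one
# was exhausted by the prediction sketch.
iterator = dataset.make_one_shot_iterator()
results = iterator_test_loop(model, iterator, steps=3, verbose=1)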