Code Example #1
File: training_utils_test.py Project: MFChunga/poo
 def test_single_thing_eager(self):
     with testing_utils.use_keras_tensors_scope(False):
         with context.eager_mode():
             a = np.ones(10, dtype=np.int32)
             model_inputs = training_utils.ModelInputs(a)
             self.assertEqual(['input_1'], model_inputs.get_input_names())
             val = model_inputs.get_symbolic_inputs()
             self.assertTrue(tf_utils.is_symbolic_tensor(val))
             vals = model_inputs.get_symbolic_inputs(
                 return_single_as_list=True)
             self.assertEqual(1, len(vals))
             self.assertTrue(tf_utils.is_symbolic_tensor(vals[0]))
             self.assertEqual(dtypes.int32, vals[0].dtype)
     with testing_utils.use_keras_tensors_scope(True):
         with context.eager_mode():
             a = np.ones(10, dtype=np.int32)
             model_inputs = training_utils.ModelInputs(a)
             self.assertEqual(['input_1'], model_inputs.get_input_names())
             val = model_inputs.get_symbolic_inputs()
             self.assertIsInstance(val, keras_tensor.KerasTensor)
             vals = model_inputs.get_symbolic_inputs(
                 return_single_as_list=True)
             self.assertEqual(1, len(vals))
             self.assertIsInstance(vals[0], keras_tensor.KerasTensor)
             self.assertEqual(dtypes.int32, vals[0].dtype)
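Example #1 feeds an explicitly `int32` array and asserts the symbolic input keeps that dtype, while the default-float variants further down (e.g. Example #13) assert `backend.floatx()` instead. A numpy-only sketch of where that difference originates:

import numpy as np

# An explicit dtype survives into the symbolic input (Example #1) ...
assert np.ones(10, dtype=np.int32).dtype == np.int32
# ... while numpy's default is float64, which Keras standardizes to
# backend.floatx() (Example #13 asserts exactly that).
assert np.ones(10).dtype == np.float64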
Code Example #2
 def test_list(self):
   a = [np.ones(10), np.ones(20)]
   model_inputs = training_utils.ModelInputs(a)
   self.assertEqual(['input_1', 'input_2'], model_inputs.get_input_names())
   vals = model_inputs.get_symbolic_inputs()
   self.assertTrue(tensor_util.is_tensor(vals[0]))
   self.assertTrue(tensor_util.is_tensor(vals[1]))
Code Example #3
def _prepare_feed_values(model, inputs, mode):
    """Prepare feed values to the model execution function.

  Arguments:
    model: Model to prepare feed values for.
    inputs: An iterator of model inputs, targets, and sample_weights.
      model inputs may be lists, single values, or dicts mapping input feed
      names to values.
    mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT.

  Returns:
    Feed values for the model in the given mode. This is a tuple of
    the structure (inputs, targets, sample_weights), where each of
    (inputs, targets, sample_weights) may be a python list. Single values
    for inputs will always be wrapped in lists.
  """
    inputs, targets, sample_weights = _get_input_from_iterator(inputs)

    # When the inputs are dict, then we want to flatten it in the same order as
    # the input layers, such that the data are fed into the input layers in the
    # correct order.
    if isinstance(inputs, dict):
        inputs = [inputs[key] for key in model._feed_input_names]
    else:
        inputs = training_utils.ModelInputs(inputs).as_list()

    if mode == ModeKeys.PREDICT:
        sample_weights = []
        targets = []

    ins = [inputs, targets, sample_weights]
    return tuple(ins)
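The dict branch above flattens the inputs in the order of the model's input layers rather than in dict order. A minimal standalone sketch of that ordering step, where `feed_input_names` is a hypothetical stand-in for `model._feed_input_names`:

import numpy as np

# Hypothetical stand-in for model._feed_input_names: the order in which
# the model's input layers expect their feeds.
feed_input_names = ['a', 'b']

inputs = {'b': np.ones(10), 'a': np.ones(20)}

# Flatten the dict in input-layer order, as _prepare_feed_values does.
flat_inputs = [inputs[key] for key in feed_input_names]

assert flat_inputs[0].shape == (20,)  # 'a' comes first
assert flat_inputs[1].shape == (10,)  # 'b' comes second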
Code Example #4
 def test_dict(self):
   a = {'b': np.ones(10), 'a': np.ones(20)}
   model_inputs = training_utils.ModelInputs(a)
   self.assertEqual(['a', 'b'], model_inputs.get_input_names())
   vals = model_inputs.get_symbolic_inputs()
   self.assertTrue(tensor_util.is_tensor(vals['a']))
   self.assertTrue(tensor_util.is_tensor(vals['b']))
Code Example #5
File: training_utils_test.py Project: MFChunga/poo
 def test_dict_eager(self):
     with testing_utils.use_keras_tensors_scope(False):
         with context.eager_mode():
             a = {'b': np.ones(10), 'a': np.ones(20)}
             model_inputs = training_utils.ModelInputs(a)
             self.assertEqual(['a', 'b'], model_inputs.get_input_names())
             vals = model_inputs.get_symbolic_inputs()
             self.assertTrue(tf_utils.is_symbolic_tensor(vals['a']))
             self.assertTrue(tf_utils.is_symbolic_tensor(vals['b']))
     with testing_utils.use_keras_tensors_scope(True):
         with context.eager_mode():
             a = {'b': np.ones(10), 'a': np.ones(20)}
             model_inputs = training_utils.ModelInputs(a)
             self.assertEqual(['a', 'b'], model_inputs.get_input_names())
             vals = model_inputs.get_symbolic_inputs()
             self.assertIsInstance(vals['a'], keras_tensor.KerasTensor)
             self.assertIsInstance(vals['b'], keras_tensor.KerasTensor)
Code Example #6
 def test_dict_eager(self):
   with context.eager_mode():
     a = {'b': np.ones(10), 'a': np.ones(20)}
     model_inputs = training_utils.ModelInputs(a)
     self.assertEqual(['a', 'b'], model_inputs.get_input_names())
     vals = model_inputs.get_symbolic_inputs()
     self.assertTrue(tf_utils.is_symbolic_tensor(vals['a']))
     self.assertTrue(tf_utils.is_symbolic_tensor(vals['b']))
Code Example #7
 def test_list_eager(self):
   with context.eager_mode():
     a = [np.ones(10), np.ones(20)]
     model_inputs = training_utils.ModelInputs(a)
     self.assertEqual(['input_1', 'input_2'], model_inputs.get_input_names())
     vals = model_inputs.get_symbolic_inputs()
     self.assertTrue(tf_utils.is_symbolic_tensor(vals[0]))
     self.assertTrue(tf_utils.is_symbolic_tensor(vals[1]))
Code Example #8
 def test_single_thing(self):
   a = np.ones(10)
   model_inputs = training_utils.ModelInputs(a)
   self.assertEqual(['input_1'], model_inputs.get_input_names())
   vals = model_inputs.get_symbolic_inputs()
   self.assertTrue(tensor_util.is_tensor(vals))
   vals = model_inputs.get_symbolic_inputs(return_single_as_list=True)
   self.assertEqual(1, len(vals))
   self.assertTrue(tensor_util.is_tensor(vals[0]))
Code Example #9
 def test_single_thing_eager(self):
   with context.eager_mode():
     a = np.ones(10)
     model_inputs = training_utils.ModelInputs(a)
     self.assertEqual(['input_1'], model_inputs.get_input_names())
     val = model_inputs.get_symbolic_inputs()
     self.assertTrue(tf_utils.is_symbolic_tensor(val))
     vals = model_inputs.get_symbolic_inputs(return_single_as_list=True)
     self.assertEqual(1, len(vals))
     self.assertTrue(tf_utils.is_symbolic_tensor(vals[0]))
Code Example #10
File: training_utils_test.py Project: MFChunga/poo
 def test_list_eager(self):
     with testing_utils.use_keras_tensors_scope(False):
         with context.eager_mode():
             a = [np.ones(10), np.ones(20)]
             model_inputs = training_utils.ModelInputs(a)
             self.assertEqual(['input_1', 'input_2'],
                              model_inputs.get_input_names())
             vals = model_inputs.get_symbolic_inputs()
             self.assertTrue(tf_utils.is_symbolic_tensor(vals[0]))
             self.assertTrue(tf_utils.is_symbolic_tensor(vals[1]))
     with testing_utils.use_keras_tensors_scope(True):
         with context.eager_mode():
             a = [np.ones(10), np.ones(20)]
             model_inputs = training_utils.ModelInputs(a)
             self.assertEqual(['input_1', 'input_2'],
                              model_inputs.get_input_names())
             vals = model_inputs.get_symbolic_inputs()
             self.assertIsInstance(vals[0], keras_tensor.KerasTensor)
             self.assertIsInstance(vals[1], keras_tensor.KerasTensor)
Code Example #11
 def test_dict_eager(self):
   with context.eager_mode():
     a = {'b': np.ones(10), 'a': np.ones(20)}
     model_inputs = training_utils.ModelInputs(a)
      self.assertEqual(['a', 'b'], model_inputs.get_input_names())
     vals = model_inputs.get_input_values()
     self.assertAllEqual(np.ones(20), vals['a'])
     self.assertAllEqual(np.ones(10), vals['b'])
     self.assertTrue(tensor_util.is_tensor(vals['a']))
     self.assertTrue(tensor_util.is_tensor(vals['b']))
     vals = model_inputs.get_symbolic_inputs()
      self.assertIsInstance(vals['a'], base_layer.DeferredTensor)
      self.assertIsInstance(vals['b'], base_layer.DeferredTensor)
Code Example #12
 def test_single_thing_eager(self):
   with context.eager_mode():
     a = np.ones(10)
     model_inputs = training_utils.ModelInputs(a)
      self.assertEqual(['input_1'], model_inputs.get_input_names())
     vals = model_inputs.get_input_values()
     self.assertAllEqual(np.ones(10), vals)
     self.assertTrue(tensor_util.is_tensor(vals))
     vals = model_inputs.get_symbolic_inputs()
      self.assertIsInstance(vals, base_layer.DeferredTensor)
     vals = model_inputs.get_symbolic_inputs(return_single_as_list=True)
      self.assertEqual(1, len(vals))
      self.assertIsInstance(vals[0], base_layer.DeferredTensor)
Code Example #13
 def test_single_thing(self):
     a = np.ones(10)
     model_inputs = training_utils.ModelInputs(a)
      self.assertEqual(['input_1'], model_inputs.get_input_names())
     vals = model_inputs.get_input_values()
     self.assertAllEqual(np.ones(10), vals)
     self.assertFalse(tensor_util.is_tensor(vals))
     vals = model_inputs.get_symbolic_inputs()
     self.assertTrue(tensor_util.is_tensor(vals))
     vals = model_inputs.get_symbolic_inputs(return_single_as_list=True)
      self.assertEqual(1, len(vals))
     self.assertTrue(tensor_util.is_tensor(vals[0]))
     self.assertEqual(backend.floatx(), vals[0].dtype)
Code Example #14
 def test_list_eager(self):
   with context.eager_mode():
     a = [np.ones(10), np.ones(20)]
     model_inputs = training_utils.ModelInputs(a)
      self.assertEqual(['input_1', 'input_2'], model_inputs.get_input_names())
     vals = model_inputs.get_input_values()
     self.assertEqual(2, len(vals))
     self.assertAllEqual(np.ones(10), vals[0])
     self.assertAllEqual(np.ones(20), vals[1])
     self.assertTrue(tensor_util.is_tensor(vals[0]))
     self.assertTrue(tensor_util.is_tensor(vals[1]))
     vals = model_inputs.get_symbolic_inputs()
      self.assertIsInstance(vals[0], base_layer.DeferredTensor)
      self.assertIsInstance(vals[1], base_layer.DeferredTensor)
Code Example #15
def _prepare_feed_values(model, inputs, targets, sample_weights, mode):
    """Prepare feed values to the model execution function.

  Arguments:
    model: Model to prepare feed values for.
    inputs: List or dict of model inputs.
    targets: Optional list of model targets.
    sample_weights: Optional list of sample weight arrays.
    mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT.

  Returns:
    Feed values for the model in the given mode.
  """
    if model._distribution_strategy:
        if isinstance(inputs, (dataset_ops.DatasetV1, dataset_ops.DatasetV2)):
            inputs = distributed_training_utils.get_iterator(
                inputs, model._distribution_strategy)

        def get_distributed_inputs():
            return distributed_training_utils._prepare_feed_values(
                model, inputs, targets, sample_weights, mode)

        # In the eager case, we want to call the input method per step, so return
        # a lambda from here that can be called. Note that this is applicable only
        # in Distribution Strategy case as it follows the same code path for both
        # eager and graph modes.
        # TODO(priyag,omalleyt): Either we should move the training DS with
        # IteratorV2 to use training_generator code path, or figure out how to
        # set a symbolic Iterator out of a Dataset when in eager mode.
        if context.executing_eagerly():
            return get_distributed_inputs
        else:
            return get_distributed_inputs()

    if isinstance(inputs, (dataset_ops.DatasetV1, dataset_ops.DatasetV2,
                           iterator_ops.Iterator)):
        inputs, targets, sample_weights = model._standardize_user_data(
            inputs, extract_tensors_from_dataset=True)

    inputs = training_utils.ModelInputs(inputs).as_list()
    targets = targets or []
    sample_weights = sample_weights or []
    ins = inputs + targets + sample_weights
    if mode == ModeKeys.TRAIN and not isinstance(K.symbolic_learning_phase(),
                                                 int):
        ins += [True]  # Add learning phase value.
    return ins
Code Example #16
def _prepare_feed_values(model, inputs, targets, sample_weights, mode):
    """Prepare feed values to the model execution function.

  Arguments:
    model: Model to prepare feed values for.
    inputs: List or dict of model inputs.
    targets: Optional list of model targets.
    sample_weights: Optional list of sample weight arrays.
    mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT.

  Returns:
    Feed values for the model in the given mode.
  """
    strategy = model._distribution_strategy
    inputs, targets, sample_weights = _get_input_from_iterator(inputs, model)
    if is_tpu_strategy(strategy):
        if sample_weights is not None:
            raise ValueError('TPUStrategy does not support sample weights.')

    # When the inputs are dict, then we want to flatten it in the same order as
    # the input layers, such that the data are fed into the input layers in the
    # correct order.
    if isinstance(inputs, dict):
        inputs = [inputs[key] for key in model._feed_input_names]
    if is_distributing_by_cloning(model):
        inputs = flatten_per_replica_values(strategy, inputs)
        targets = flatten_per_replica_values(strategy, targets)
        # Expand 1-dimensional inputs.
        # TODO(b/124535720): Remove once this data standardization logic is shared with
        # main flow.
        inputs, targets = nest.map_structure(
            training_utils.standardize_single_array, (inputs, targets))
    else:
        inputs = training_utils.ModelInputs(inputs).as_list()

    if mode == ModeKeys.PREDICT:
        sample_weights = []
        targets = []
    elif sample_weights is not None and is_distributing_by_cloning(model):
        if context.executing_eagerly() and not model._compile_distribution:
            raise NotImplementedError(
                '`sample_weight` is not supported when using '
                'tf.distribute.Strategy in eager mode and '
                'cloning=True.')
        sample_weights = flatten_per_replica_values(strategy, sample_weights)

    ins = [inputs, targets, sample_weights]
    return tuple(ins)
Code Example #17
def _prepare_feed_values(model, inputs, mode, strategy):
    """Prepare feed values to the model execution function.

  Arguments:
    model: Model to prepare feed values for.
    inputs: An iterator of model inputs, targets, and sample_weights.
      model inputs may be lists, single values, or dicts mapping input feed
      names to values.
    mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT.
    strategy: The current distribution strategy for the model.

  Returns:
    Feed values for the model in the given mode. This is a tuple of
    the structure (inputs, targets, sample_weights), where each of
    (inputs, targets, sample_weights) may be a python list. Single values
    for inputs will always be wrapped in lists.
  """
    # For predict, we need to extract the manually added batch_index first.
    with_batch_index = _should_add_batch_index_to_element(strategy, mode)

    inputs, targets, sample_weights, batch_index = _get_input_from_iterator(
        inputs, with_batch_index)

    # When the inputs are dict, then we want to flatten it in the same order as
    # the input layers, such that the data are fed into the input layers in the
    # correct order.
    if isinstance(inputs, dict):
        inputs = [inputs[key] for key in model._feed_input_names]
    else:
        inputs = training_utils.ModelInputs(inputs).as_list()

    if mode == ModeKeys.PREDICT:
        sample_weights = []
        targets = []

    ins = [inputs, targets, sample_weights]
    if batch_index is not None:
        ins.append(batch_index)
    return tuple(ins)
Code Example #18
def _prepare_feed_values(model, inputs, targets, sample_weights, mode):
    """Prepare feed values to the model execution function.

  Arguments:
    model: Model to prepare feed values for.
    inputs: List or dict of model inputs.
    targets: Optional list of model targets.
    sample_weights: Optional list of sample weight arrays.
    mode: One of 'train'/'test'/'predict'.

  Returns:
    Feed values for the model in the given mode.
  """
    if model._distribution_strategy:

        def get_distributed_inputs():
            return training_distributed._prepare_feed_values(
                model, inputs, targets, sample_weights, mode)

        # In the eager case, we want to call the input method per step, so return
        # a lambda from here that can be called. Note that this is applicable only
        # in Distribution Strategy case as it follows the same code path for both
        # eager and graph modes.
        # TODO(priyag,omalleyt): Either we should move the training DS with
        # EagerIterator to use training_generator code path, or figure out how to
        # set a symbolic Iterator out of a Dataset when in eager mode.
        if context.executing_eagerly():
            return get_distributed_inputs
        else:
            return get_distributed_inputs()

    inputs = training_utils.ModelInputs(inputs).as_list()
    targets = targets or []
    sample_weights = sample_weights or []
    ins = inputs + targets + sample_weights
    if mode == 'train' and not isinstance(K.symbolic_learning_phase(), int):
        ins += [True]
    return ins
Code Example #19
File: training_arrays.py Project: qwerzou1/shibie
def _prepare_feed_values(model, inputs, targets, sample_weights, mode):
    """Prepare feed values to the model execution function.

  Arguments:
    model: Model to prepare feed values for.
    inputs: List or dict of model inputs.
    targets: Optional list of model targets.
    sample_weights: Optional list of sample weight arrays.
    mode: One of 'train'/'test'/'predict'.

  Returns:
    Feed values for the model in the given mode.
  """
    if model._distribution_strategy:
        return training_distributed._prepare_feed_values(
            model, inputs, targets, sample_weights, mode)
    inputs = training_utils.ModelInputs(inputs).as_list()
    targets = targets or []
    sample_weights = sample_weights or []
    ins = inputs + targets + sample_weights
    if mode == 'train' and not isinstance(K.symbolic_learning_phase(), int):
        ins += [True]
    return ins
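The graph-mode variants above append a learning-phase flag to the flat feed list. A plain-Python sketch of the resulting layout (shapes are illustrative); because `True` is an `int` subclass, the `isinstance(ins[-1], int)` checks in the loops below can detect the flag and avoid slicing it:

import numpy as np

inputs = [np.ones((4, 3))]
targets = [np.zeros((4, 1))]
sample_weights = []

ins = inputs + targets + sample_weights + [True]  # True = 'train' phase

# bool subclasses int, so the trailing flag is recognized and kept whole
# when the batch loops slice ins[:-1].
assert isinstance(ins[-1], int)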
Code Example #20
def fit_loop(model,
             inputs,
             targets,
             sample_weights=None,
             batch_size=None,
             epochs=100,
             verbose=1,
             callbacks=None,
             val_inputs=None,
             val_targets=None,
             val_sample_weights=None,
             shuffle=True,
             initial_epoch=0,
             steps_per_epoch=None,
             validation_steps=None):
    """Abstract fit function for arrays of data.

  Arguments:
      model: Keras Model instance.
      inputs: Either a list of arrays or a dictionary.
      targets: List of target arrays.
      sample_weights: Optional list of sample weight arrays.
      batch_size: Integer batch size or None if unknown.
      epochs: Number of times to iterate over the data
      verbose: Verbosity mode, 0, 1 or 2
      callbacks: List of callbacks to be called during training
      val_inputs: List of input arrays.
      val_targets: List of target arrays.
      val_sample_weights: Optional list of sample weight arrays.
      shuffle: Whether to shuffle the data at the beginning of each epoch.
      initial_epoch: Epoch at which to start training
          (useful for resuming a previous training run)
      steps_per_epoch: Total number of steps (batches of samples)
          before declaring one epoch finished and starting the
          next epoch. Ignored with the default value of `None`.
      validation_steps: Number of steps to run validation for
          (only if doing validation from data tensors).
          Ignored with the default value of `None`.

  Returns:
      `History` object.

  Raises:
      ValueError: in case of invalid arguments.
  """
    model._make_fit_function()
    f = model._fit_function

    sample_weights = sample_weights or []
    val_sample_weights = val_sample_weights or []
    inputs = training_utils.ModelInputs(inputs).as_list()
    if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
        ins = inputs + targets + sample_weights + [1]
    else:
        ins = inputs + targets + sample_weights

    do_validation = False
    if val_inputs:
        do_validation = True
        if (steps_per_epoch is None and verbose and inputs
                and hasattr(inputs[0], 'shape')
                and hasattr(val_inputs[0], 'shape')):
            print('Train on %d samples, validate on %d samples' %
                  (inputs[0].shape[0], val_inputs[0].shape[0]))
    if validation_steps:
        do_validation = True
        if steps_per_epoch is None:
            raise ValueError('Can only use `validation_steps` '
                             'when doing step-wise '
                             'training, i.e. `steps_per_epoch` '
                             'must be set.')

    num_train_samples = training_utils.check_num_samples(
        ins, batch_size, steps_per_epoch, 'steps_per_epoch')
    count_mode = 'steps' if steps_per_epoch else 'samples'
    callbacks = cbks.configure_callbacks(callbacks,
                                         model,
                                         do_validation=do_validation,
                                         val_inputs=val_inputs,
                                         val_targets=val_targets,
                                         val_sample_weights=val_sample_weights,
                                         batch_size=batch_size,
                                         epochs=epochs,
                                         steps_per_epoch=steps_per_epoch,
                                         samples=num_train_samples,
                                         validation_steps=validation_steps,
                                         verbose=verbose,
                                         count_mode=count_mode)

    if num_train_samples is not None:
        index_array = np.arange(num_train_samples)

    # To prevent a slowdown, we find beforehand the arrays that need conversion.
    feed = model._feed_inputs + model._feed_targets + model._feed_sample_weights
    indices_for_conversion_to_dense = []
    for i in range(len(feed)):
        if issparse is not None and issparse(
                ins[i]) and not K.is_sparse(feed[i]):
            indices_for_conversion_to_dense.append(i)

    callbacks.on_train_begin()
    for epoch in range(initial_epoch, epochs):
        # Reset stateful metrics
        for m in model.stateful_metric_functions:
            m.reset_states()
        # Update callbacks
        callbacks.on_epoch_begin(epoch)
        epoch_logs = {}
        if steps_per_epoch is not None:
            # Step-wise fit loop.
            for step_index in range(steps_per_epoch):
                batch_logs = {'batch': step_index, 'size': 1}
                callbacks.on_batch_begin(step_index, batch_logs)
                try:
                    outs = f(ins)
                except errors.OutOfRangeError:
                    logging.warning(
                        'Your dataset iterator ran out of data; '
                        'interrupting training. Make sure that your dataset '
                        'can generate at least `steps_per_epoch * epochs` '
                        'batches (in this case, %d batches). You may need to '
                        'use the repeat() function when building your '
                        'dataset.' % (steps_per_epoch * epochs))
                    break

                if not isinstance(outs, list):
                    outs = [outs]
                for l, o in zip(model.metrics_names, outs):
                    batch_logs[l] = o

                callbacks.on_batch_end(step_index, batch_logs)
                if callbacks.model.stop_training:
                    break

            if do_validation:
                val_outs = test_loop(model,
                                     val_inputs,
                                     val_targets,
                                     sample_weights=val_sample_weights,
                                     steps=validation_steps,
                                     verbose=0)
                if not isinstance(val_outs, list):
                    val_outs = [val_outs]
                # Same labels assumed.
                for l, o in zip(model.metrics_names, val_outs):
                    epoch_logs['val_' + l] = o
        else:
            # Sample-wise fit loop.
            if shuffle == 'batch':
                index_array = training_utils.batch_shuffle(
                    index_array, batch_size)
            elif shuffle:
                np.random.shuffle(index_array)

            batches = make_batches(num_train_samples, batch_size)

            for batch_index, (batch_start, batch_end) in enumerate(batches):
                batch_ids = index_array[batch_start:batch_end]
                try:
                    if isinstance(ins[-1], int):
                        # Do not slice the training phase flag.
                        ins_batch = slice_arrays(ins[:-1],
                                                 batch_ids) + [ins[-1]]
                    else:
                        ins_batch = slice_arrays(ins, batch_ids)
                except TypeError:
                    raise TypeError('TypeError while preparing batch. '
                                    'If using HDF5 input data, '
                                    'pass shuffle="batch".')
                batch_logs = {}
                batch_logs['batch'] = batch_index
                batch_logs['size'] = len(batch_ids)
                callbacks.on_batch_begin(batch_index, batch_logs)
                for i in indices_for_conversion_to_dense:
                    ins_batch[i] = ins_batch[i].toarray()

                outs = f(ins_batch)
                if not isinstance(outs, list):
                    outs = [outs]
                for l, o in zip(model.metrics_names, outs):
                    batch_logs[l] = o

                callbacks.on_batch_end(batch_index, batch_logs)
                if callbacks.model.stop_training:
                    break

                if batch_index == len(batches) - 1:  # Last batch.
                    if do_validation:
                        val_outs = test_loop(model,
                                             val_inputs,
                                             val_targets,
                                             sample_weights=val_sample_weights,
                                             batch_size=batch_size,
                                             verbose=0)
                        if not isinstance(val_outs, list):
                            val_outs = [val_outs]
                        # Same labels assumed.
                        for l, o in zip(model.metrics_names, val_outs):
                            epoch_logs['val_' + l] = o
        callbacks.on_epoch_end(epoch, epoch_logs)
        if callbacks.model.stop_training:
            break
    callbacks.on_train_end()
    return model.history
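The sample-wise path depends on `make_batches` producing `(start, end)` index pairs that cover all samples. A minimal sketch of equivalent logic under that assumption (not the actual Keras implementation):

def make_batches_sketch(size, batch_size):
    """Return (start, end) index tuples covering `size` samples."""
    num_batches = (size + batch_size - 1) // batch_size  # ceiling division
    return [(i * batch_size, min(size, (i + 1) * batch_size))
            for i in range(num_batches)]

assert make_batches_sketch(10, 4) == [(0, 4), (4, 8), (8, 10)]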
Code Example #21
def test_loop(model,
              inputs,
              targets,
              sample_weights=None,
              batch_size=None,
              verbose=0,
              steps=None):
    """Abstract method to loop over some data in batches.

  Arguments:
      model: Keras Model instance.
      inputs: List of input arrays.
      targets: List of target arrays.
      sample_weights: Optional list of sample weight arrays.
      batch_size: integer batch size or `None`.
      verbose: verbosity mode.
      steps: Total number of steps (batches of samples)
          before declaring predictions finished.
          Ignored with the default value of `None`.

  Returns:
      Scalar loss (if the model has a single output and no metrics)
      or list of scalars (if the model has multiple outputs
      and/or metrics). The attribute `model.metrics_names` will give you
      the display labels for the scalar outputs.
  """
    model._make_eval_function()
    f = model._eval_function

    sample_weights = sample_weights or []
    inputs = training_utils.ModelInputs(inputs).as_list()
    if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
        ins = inputs + targets + sample_weights + [0]
    else:
        ins = inputs + targets + sample_weights

    if hasattr(model, 'metrics'):
        for m in model.stateful_metric_functions:
            m.reset_states()

    num_samples = training_utils.check_num_samples(ins, batch_size, steps,
                                                   'steps')
    outs = []
    if verbose == 1:
        if steps is not None:
            progbar = Progbar(target=steps)
        else:
            progbar = Progbar(target=num_samples)

    # To prevent a slowdown, we find beforehand the arrays that need conversion.
    feed = model._feed_inputs + model._feed_targets + model._feed_sample_weights
    indices_for_conversion_to_dense = []
    for i in range(len(feed)):
        if issparse is not None and issparse(
                ins[i]) and not K.is_sparse(feed[i]):
            indices_for_conversion_to_dense.append(i)

    if steps is not None:
        for step in range(steps):
            batch_outs = f(ins)
            if isinstance(batch_outs, list):
                if step == 0:
                    for _ in batch_outs:
                        outs.append(0.)
                outs[0] += batch_outs[0]  # index 0 = 'loss'
                outs[1:] = batch_outs[1:]
            else:
                if step == 0:
                    outs.append(0.)
                outs[0] += batch_outs
            if verbose == 1:
                progbar.update(step + 1)
        outs[0] /= steps
    else:
        batches = make_batches(num_samples, batch_size)
        index_array = np.arange(num_samples)
        for batch_index, (batch_start, batch_end) in enumerate(batches):
            batch_ids = index_array[batch_start:batch_end]
            if isinstance(ins[-1], int):
                # Do not slice the training phase flag.
                ins_batch = slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
            else:
                ins_batch = slice_arrays(ins, batch_ids)
            for i in indices_for_conversion_to_dense:
                ins_batch[i] = ins_batch[i].toarray()

            batch_outs = f(ins_batch)

            if isinstance(batch_outs, list):
                if batch_index == 0:
                    outs.extend([0.] * len(batch_outs))
                outs[0] += batch_outs[0] * len(batch_ids)  # index 0 = 'loss'
                outs[1:] = batch_outs[1:]
            else:
                if batch_index == 0:
                    outs.append(0.)
                outs[0] += batch_outs * len(batch_ids)
            if verbose == 1:
                progbar.update(batch_end)
        outs[0] /= num_samples
    if len(outs) == 1:
        return outs[0]
    return outs
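In the sample-wise branch, the loss (index 0) is accumulated weighted by batch size and divided by the total sample count at the end, which yields a correct mean even when the last batch is smaller. A standalone sketch of that running average with illustrative numbers:

batch_losses = [0.5, 0.2, 0.8]  # per-batch mean losses
batch_sizes = [4, 4, 2]         # len(batch_ids) for each batch

total = 0.0
for loss, n in zip(batch_losses, batch_sizes):
    total += loss * n           # outs[0] += batch_outs[0] * len(batch_ids)
mean_loss = total / sum(batch_sizes)  # outs[0] /= num_samples

assert abs(mean_loss - 0.44) < 1e-9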
Code Example #22
def predict_loop(model, inputs, batch_size=32, verbose=0, steps=None):
    """Abstract method to loop over some data in batches.

  Arguments:
      model: Keras Model instance.
      inputs: list of tensors to be fed to `f`.
      batch_size: integer batch size.
      verbose: verbosity mode.
      steps: Total number of steps (batches of samples)
          before declaring `_predict_loop` finished.
          Ignored with the default value of `None`.

  Returns:
      Array of predictions (if the model has a single output)
      or list of arrays of predictions
      (if the model has multiple outputs).
  """
    model._make_predict_function()
    f = model.predict_function

    inputs = training_utils.ModelInputs(inputs).as_list()
    if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
        ins = inputs + [0]
    else:
        ins = inputs

    num_samples = training_utils.check_num_samples(inputs, batch_size, steps,
                                                   'steps')
    if verbose == 1:
        if steps is not None:
            progbar = Progbar(target=steps)
        else:
            progbar = Progbar(target=num_samples)

    indices_for_conversion_to_dense = []
    for i in range(len(model._feed_inputs)):
        if (issparse is not None and issparse(inputs[i])
                and not K.is_sparse(model._feed_inputs[i])):
            indices_for_conversion_to_dense.append(i)

    if steps is not None:
        # Step-based predictions.
        # Since we do not know how many samples
        # we will see, we cannot pre-allocate
        # the returned Numpy arrays.
        # Instead, we store one array per batch seen
        # and concatenate them upon returning.
        unconcatenated_outs = []
        for step in range(steps):
            batch_outs = f(ins)
            if not isinstance(batch_outs, list):
                batch_outs = [batch_outs]
            if step == 0:
                for batch_out in batch_outs:
                    unconcatenated_outs.append([])
            for i, batch_out in enumerate(batch_outs):
                unconcatenated_outs[i].append(batch_out)
            if verbose == 1:
                progbar.update(step + 1)
        if len(unconcatenated_outs) == 1:
            return np.concatenate(unconcatenated_outs[0], axis=0)
        return [
            np.concatenate(unconcatenated_outs[i], axis=0)
            for i in range(len(unconcatenated_outs))
        ]
    else:
        # Sample-based predictions.
        outs = []
        batches = make_batches(num_samples, batch_size)
        index_array = np.arange(num_samples)
        for batch_index, (batch_start, batch_end) in enumerate(batches):
            batch_ids = index_array[batch_start:batch_end]
            if ins and isinstance(ins[-1], int):
                # Do not slice the training phase flag.
                ins_batch = slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
            else:
                ins_batch = slice_arrays(ins, batch_ids)
            for i in indices_for_conversion_to_dense:
                ins_batch[i] = ins_batch[i].toarray()

            batch_outs = f(ins_batch)
            if not isinstance(batch_outs, list):
                batch_outs = [batch_outs]
            if batch_index == 0:
                # Pre-allocate the results arrays.
                for batch_out in batch_outs:
                    shape = (num_samples, ) + batch_out.shape[1:]
                    outs.append(np.zeros(shape, dtype=batch_out.dtype))
            for i, batch_out in enumerate(batch_outs):
                outs[i][batch_start:batch_end] = batch_out
            if verbose == 1:
                progbar.update(batch_end)
        if len(outs) == 1:
            return outs[0]
        return outs
Code Example #23
def model_iteration(model,
                    inputs,
                    targets=None,
                    sample_weights=None,
                    batch_size=None,
                    epochs=1,
                    verbose=1,
                    callbacks=None,
                    val_inputs=None,
                    val_targets=None,
                    val_sample_weights=None,
                    shuffle=True,
                    initial_epoch=0,
                    steps_per_epoch=None,
                    validation_steps=None,
                    mode='train',
                    **kwargs):
    """Loop function for arrays of data with modes 'train'/'test'/'predict'.

  Arguments:
      model: Keras Model instance.
      inputs: Either a list of arrays or a dictionary.
      targets: List of target arrays.
      sample_weights: Optional list of sample weight arrays.
      batch_size: Integer batch size or None if unknown.
      epochs: Number of times to iterate over the data
      verbose: Verbosity mode, 0, 1 or 2
      callbacks: List of callbacks to be called during training
      val_inputs: List of input arrays.
      val_targets: List of target arrays.
      val_sample_weights: Optional list of sample weight arrays.
      shuffle: Whether to shuffle the data at the beginning of each epoch.
      initial_epoch: Epoch at which to start training (useful for resuming a
        previous training run)
      steps_per_epoch: Total number of steps (batches of samples) before
        declaring one epoch finished and starting the next epoch. Ignored with
        the default value of `None`.
      validation_steps: Number of steps to run validation for (only if doing
        validation from data tensors). Ignored with the default value of `None`.
      mode: One of 'train'/'test'/'predict'.
      **kwargs: Additional arguments for backwards compatibility.

  Returns:
      - In 'train' mode: `History` object.
      - In 'test' mode: Evaluation metrics.
      - In 'predict' mode: Outputs of the Model called on inputs.

  Raises:
      ValueError: in case of invalid arguments.
  """
    # Backwards compatibility.
    if 'steps' in kwargs:
        steps_per_epoch = kwargs['steps']

    _validate_arguments(steps_per_epoch, validation_steps, kwargs)
    if mode == 'train':
        _print_train_info(inputs, val_inputs, steps_per_epoch, verbose)

    # Get step function and loop type.
    f = model._get_execution_function(mode)
    use_steps = steps_per_epoch is not None
    do_validation = val_inputs is not None

    # Prepare input data.
    inputs = training_utils.ModelInputs(inputs).as_list()
    targets = targets or []
    sample_weights = sample_weights or []
    learning_phase_input = []
    if not isinstance(K.symbolic_learning_phase(), int):
        learning_phase_input = [True] if mode == 'train' else [False]
    ins = inputs + targets + sample_weights + learning_phase_input
    num_samples_or_steps = _get_num_samples_or_steps(ins, batch_size,
                                                     steps_per_epoch)

    # Configure callbacks.
    count_mode = 'steps' if use_steps else 'samples'
    callbacks = cbks.configure_callbacks(
        callbacks,
        model,
        do_validation=do_validation,
        val_inputs=val_inputs,
        val_targets=val_targets,
        val_sample_weights=val_sample_weights,
        batch_size=batch_size,
        epochs=epochs,
        steps_per_epoch=steps_per_epoch,
        samples=num_samples_or_steps,
        validation_steps=validation_steps,
        verbose=0,  # Handle ProgBarLogger separately in this loop.
        count_mode=count_mode,
        mode=mode)
    # TODO(omalleyt): Handle ProgBar as part of Callbacks once hooks are ready.
    progbar = _get_progbar(model, count_mode)
    progbar.params = callbacks.params
    progbar.params['verbose'] = verbose

    # Find beforehand arrays that need sparse-to-dense conversion.
    if issparse is not None:
        indices_for_conversion_to_dense = []
        feed = _get_model_feed(model, mode)
        for i, (input_data, feed_tensor) in enumerate(zip(ins, feed)):
            if issparse(input_data) and not K.is_sparse(feed_tensor):
                indices_for_conversion_to_dense.append(i)

    # Select aggregation method.
    if mode == 'predict':
        aggregator = OutputsAggregator(use_steps, num_samples_or_steps)
    else:
        aggregator = MetricsAggregator(use_steps, num_samples_or_steps)

    callbacks.model.stop_training = False
    callbacks._call_begin_hook(mode)
    progbar.on_train_begin()
    for epoch in range(initial_epoch, epochs):
        if callbacks.model.stop_training:
            break

        # Setup work for each epoch
        results = []
        epoch_logs = {}
        if hasattr(model, 'stateful_metric_functions'):
            for m in model.stateful_metric_functions:
                m.reset_states()
        callbacks.on_epoch_begin(epoch, epoch_logs, mode=mode)
        progbar.on_epoch_begin(epoch, epoch_logs)

        if use_steps:
            # Step-wise loop.
            for step in range(steps_per_epoch):
                batch_logs = {'batch': step, 'size': 1}
                callbacks._call_batch_hook(mode, 'begin', step, batch_logs)
                progbar.on_batch_begin(step, batch_logs)

                # Get outputs.
                try:
                    batch_outs = f(ins)
                except errors.OutOfRangeError:
                    logging.warning(
                        'Your dataset iterator ran out of data; '
                        'interrupting training. Make sure that your dataset '
                        'can generate at least `steps_per_epoch * epochs` '
                        'batches (in this case, %d batches). You may need to '
                        'use the repeat() function when building your '
                        'dataset.' % (steps_per_epoch * epochs))
                    break
                if not isinstance(batch_outs, list):
                    batch_outs = [batch_outs]

                # Aggregate results.
                if step == 0:
                    aggregator.create(batch_outs)
                aggregator.aggregate(batch_outs)

                # Callbacks batch end.
                batch_logs.update(_make_logs(model, batch_outs, mode))
                callbacks._call_batch_hook(mode, 'end', step, batch_logs)
                progbar.on_batch_end(step, batch_logs)

                if callbacks.model.stop_training:
                    break
        else:
            # Sample-wise loop.
            index_array = np.arange(num_samples_or_steps)
            if shuffle == 'batch':
                index_array = training_utils.batch_shuffle(
                    index_array, batch_size)
            elif shuffle:
                np.random.shuffle(index_array)
            batches = make_batches(num_samples_or_steps, batch_size)

            for batch_index, (batch_start, batch_end) in enumerate(batches):
                batch_ids = index_array[batch_start:batch_end]

                # Slice into a batch.
                try:
                    if ins and isinstance(ins[-1], int):
                        # Do not slice the training phase flag.
                        ins_batch = slice_arrays(ins[:-1],
                                                 batch_ids) + [ins[-1]]
                    else:
                        ins_batch = slice_arrays(ins, batch_ids)
                except TypeError:
                    raise TypeError('TypeError while preparing batch. '
                                    'If using HDF5 input data, '
                                    'pass shuffle="batch".')

                # Sparse to dense conversion.
                if issparse is not None:
                    for i in indices_for_conversion_to_dense:
                        ins_batch[i] = ins_batch[i].toarray()

                # Callbacks batch_begin.
                batch_logs = {'batch': batch_index, 'size': len(batch_ids)}
                callbacks._call_batch_hook(mode, 'begin', batch_index,
                                           batch_logs)
                progbar.on_batch_begin(batch_index, batch_logs)

                # Get outputs.
                batch_outs = f(ins_batch)
                if not isinstance(batch_outs, list):
                    batch_outs = [batch_outs]

                # Aggregate results.
                if batch_index == 0:
                    aggregator.create(batch_outs)
                aggregator.aggregate(batch_outs, batch_start, batch_end)

                # Callbacks batch end.
                batch_logs.update(_make_logs(model, batch_outs, mode))
                callbacks._call_batch_hook(mode, 'end', batch_index,
                                           batch_logs)
                progbar.on_batch_end(batch_index, batch_logs)

                if callbacks.model.stop_training:
                    break

        aggregator.finalize()
        results = aggregator.results
        epoch_logs.update(_make_logs(model, results, mode))
        if len(results) == 1:
            results = results[0]

        # Run the test loop every epoch during training.
        if do_validation and not callbacks.model.stop_training:
            val_results = model_iteration(model,
                                          val_inputs,
                                          targets=val_targets,
                                          sample_weights=val_sample_weights,
                                          batch_size=batch_size,
                                          steps_per_epoch=validation_steps,
                                          callbacks=callbacks,
                                          verbose=0,
                                          mode='test')
            if not isinstance(val_results, list):
                val_results = [val_results]
            epoch_logs.update(
                _make_logs(model, val_results, mode, prefix='val_'))

        callbacks.on_epoch_end(epoch, epoch_logs, mode=mode)
        progbar.on_epoch_end(epoch, epoch_logs)
    callbacks._call_end_hook(mode)

    if mode == 'train':
        return model.history
    return results
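Both the step-wise and sample-wise branches of `model_iteration` drive the aggregator through the same create/aggregate/finalize protocol. A tiny metrics-style sketch of that interface, assuming sample-weighted averaging of scalar outputs (the class and its averaging are illustrative, not the actual `MetricsAggregator`):

class MeanAggregator(object):
    """Minimal create/aggregate/finalize example for scalar batch outputs."""

    def __init__(self, num_samples):
        self.num_samples = num_samples
        self.results = []

    def create(self, batch_outs):
        self.results = [0.0] * len(batch_outs)

    def aggregate(self, batch_outs, batch_start=0, batch_end=None):
        # Step-wise callers pass no indices; weight each step equally then.
        weight = (batch_end - batch_start) if batch_end is not None else 1
        for i, out in enumerate(batch_outs):
            self.results[i] += out * weight

    def finalize(self):
        self.results = [r / self.num_samples for r in self.results]

agg = MeanAggregator(num_samples=10)
agg.create([0.0])
agg.aggregate([0.5], 0, 4)
agg.aggregate([0.2], 4, 10)
agg.finalize()
assert abs(agg.results[0] - (0.5 * 4 + 0.2 * 6) / 10) < 1e-9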