def test_loop(model, inputs, targets, sample_weights=None, batch_size=None,
              verbose=0, steps=None):
  """Test function for eager execution.

  Arguments:
      model: Model instance that is being evaluated in Eager mode.
      inputs: List of input arrays.
      targets: List of target arrays.
      sample_weights: Optional list of sample weight arrays.
      batch_size: integer batch size or `None`.
      verbose: verbosity mode.
      steps: Total number of steps (batches of samples) before declaring
          predictions finished. Ignored with the default value of `None`.

  Returns:
      Scalar loss (if the model has a single output and no metrics)
      or list of scalars (if the model has multiple outputs
      and/or metrics). The attribute `model.metrics_names` will give you
      the display labels for the scalar outputs.
  """
  iterator, num_steps = training_utils.convert_to_iterator(
      x=inputs,
      y=targets,
      sample_weights=sample_weights,
      batch_size=batch_size,
      steps_per_epoch=steps,
      is_validation=True)
  # Evaluation runs with the learning phase pinned to "test" (0).
  with backend.learning_phase_scope(0):
    return iterator_test_loop(model, iterator, num_steps, verbose=verbose)
def test_convert_to_iterator_single_tensor(self):
  """convert_to_iterator on a single tensor yields batches of that tensor."""
  batch_size = 2
  a = ops.convert_to_tensor(np.ones([10, 10]))
  iterator, steps_per_epoch = training_utils.convert_to_iterator(
      x=a, batch_size=batch_size)
  # 10 samples / batch size 2 -> 5 steps per epoch.
  # Use assertEqual: assertEquals is a deprecated alias in unittest.
  self.assertEqual(steps_per_epoch, 5)
  expected_batch = a[:batch_size, :]
  actual_batch, = iterator.get_next()
  self.assertAllEqual(expected_batch, actual_batch)
def test_convert_to_iterator_epochs(self):
  """An iterator built with epochs=2 keeps yielding past one epoch."""
  batch_size = 2
  a = np.ones([10, 10])
  iterator, steps_per_epoch = training_utils.convert_to_iterator(
      x=a, batch_size=batch_size, epochs=2)
  # 10 samples / batch size 2 -> 5 steps per epoch.
  # Use assertEqual: assertEquals is a deprecated alias in unittest.
  self.assertEqual(steps_per_epoch, 5)
  expected_batch = a[:batch_size, :]
  # Draw 6 batches - one past the 5-step epoch boundary - to verify the
  # iterator continues producing data into the second epoch.
  for _ in range(6):
    actual_batch, = iterator.get_next()
    self.assertAllEqual(expected_batch, actual_batch)
def test_convert_to_iterator_y(self):
  """convert_to_iterator pairs x and y into (x, y) batches."""
  batch_size = 2
  a = np.ones([10, 100])
  b = np.ones([10, 10])
  iterator, steps_per_epoch = training_utils.convert_to_iterator(
      x=a, y=b, batch_size=batch_size)
  # 10 samples / batch size 2 -> 5 steps per epoch.
  # Use assertEqual: assertEquals is a deprecated alias in unittest.
  self.assertEqual(steps_per_epoch, 5)
  expected_x = a[:batch_size, :]
  expected_y = b[:batch_size, :]
  actual_x, actual_y = iterator.get_next()
  self.assertAllEqual(expected_x, actual_x)
  self.assertAllEqual(expected_y, actual_y)
def test_convert_to_iterator_sample_weights(self):
  """convert_to_iterator batches x, y and sample_weights together."""
  batch_size = 2
  a = ops.convert_to_tensor(np.ones([10, 100]))
  b = ops.convert_to_tensor(np.ones([10, 10]))
  sw = ops.convert_to_tensor(np.ones([10]))
  iterator, steps_per_epoch = training_utils.convert_to_iterator(
      x=a, y=b, sample_weights=sw, batch_size=batch_size)
  # 10 samples / batch size 2 -> 5 steps per epoch.
  # Use assertEqual: assertEquals is a deprecated alias in unittest.
  self.assertEqual(steps_per_epoch, 5)
  expected_x = a[:batch_size, :]
  expected_y = b[:batch_size, :]
  expected_sw = sw[:batch_size]
  actual_x, actual_y, actual_sw = iterator.get_next()
  self.assertAllEqual(expected_x, actual_x)
  self.assertAllEqual(expected_y, actual_y)
  self.assertAllEqual(expected_sw, actual_sw)
def test_convert_to_iterator_nested(self):
  """convert_to_iterator preserves nested (dict-of-list) input structure."""
  batch_size = 2
  x = {'1': np.ones([10, 100]), '2': [np.zeros([10, 10]), np.ones([10, 20])]}
  iterator, steps_per_epoch = training_utils.convert_to_iterator(
      x=x, batch_size=batch_size)
  # 10 samples / batch size 2 -> 5 steps per epoch.
  # Use assertEqual: assertEquals is a deprecated alias in unittest.
  self.assertEqual(steps_per_epoch, 5)
  expected_x1 = x['1'][:batch_size, :]
  expected_x2_0 = x['2'][0][:batch_size, :]
  expected_x2_1 = x['2'][1][:batch_size, :]
  actual_x, = iterator.get_next()
  # The batch comes back with the same nesting as the input structure.
  actual_x1 = actual_x['1'][:batch_size, :]
  actual_x2_0 = actual_x['2'][0][:batch_size, :]
  actual_x2_1 = actual_x['2'][1][:batch_size, :]
  self.assertAllEqual(expected_x1, actual_x1)
  self.assertAllEqual(expected_x2_0, actual_x2_0)
  self.assertAllEqual(expected_x2_1, actual_x2_1)
def predict_loop(model, inputs, batch_size=32, verbose=0, steps=None):
  """Predict function for eager execution.

  Arguments:
      model: Instance of `Model`.
      inputs: List of input arrays.
      batch_size: integer batch size.
      verbose: verbosity mode.
      steps: Total number of steps (batches of samples) before declaring
          `_predict_loop` finished. Ignored with the default value of `None`.

  Returns:
      Array of predictions (if the model has a single output)
      or list of arrays of predictions
      (if the model has multiple outputs).
  """
  # Inference runs with the learning phase pinned to "test" (0).
  with backend.learning_phase_scope(0):
    iterator, num_steps = training_utils.convert_to_iterator(
        x=inputs, batch_size=batch_size, steps_per_epoch=steps)
    return iterator_predict_loop(model, iterator, num_steps, verbose=verbose)
def test_convert_to_iterator_insufficient_info(self):
  """convert_to_iterator raises when neither batch_size nor steps is set."""
  # Build the fixture outside the assertRaises context so that only the
  # call under test can produce the expected ValueError.
  a = np.ones([10, 10])
  # with batch_size and steps_per_epoch not set
  with self.assertRaises(ValueError):
    _ = training_utils.convert_to_iterator(x=a)
def fit_loop(model, inputs, targets, sample_weights=None, class_weight=None, val_inputs=None, val_targets=None, val_sample_weights=None, batch_size=None, epochs=1, verbose=1, callbacks=None, shuffle=True, initial_epoch=0, steps_per_epoch=None, validation_steps=None): """Fit function for eager execution. Arguments: model: Instance of the model that is being executed in Eager mode. inputs: List of input arrays. targets: List of target arrays. sample_weights: Optional list of sample weight arrays. class_weight: Optional class-weight array to weight the importance of samples in `inputs` based on the class they belong to, as conveyed by `targets`. val_inputs: Input data for validation. val_targets: Target data for validation. val_sample_weights: Sample weight data for validation. batch_size: Integer batch size or None if unknown. epochs: Number of times to iterate over the data verbose: Verbosity mode, 0, 1 or 2 callbacks: List of callbacks to be called during training shuffle: Whether to shuffle the data at the beginning of each epoch initial_epoch: Epoch at which to start training (useful for resuming a previous training run) steps_per_epoch: Total number of steps (batches of samples) before declaring one epoch finished and starting the next epoch. Ignored with the default value of `None`. validation_steps: Number of steps to run validation for (only if doing validation from data tensors). Ignored with default value of `None`. Returns: `History` object. Raises: ValueError: In case of invalid argument values. 
""" # Convert training inputs to an EagerIterator inputs, steps_per_epoch = training_utils.convert_to_iterator( x=inputs, y=targets, sample_weights=sample_weights, batch_size=batch_size, steps_per_epoch=steps_per_epoch, epochs=epochs, shuffle=shuffle) # Required for eager execution with backend.learning_phase_scope(1): do_validation = val_inputs is not None callbacks = cbks.configure_callbacks( callbacks, model, do_validation=do_validation, batch_size=batch_size, epochs=epochs, steps_per_epoch=steps_per_epoch, val_inputs=val_inputs, val_targets=val_targets, val_sample_weights=val_sample_weights, validation_steps=validation_steps, verbose=verbose) callbacks.on_train_begin() for epoch in range(initial_epoch, epochs): if model._is_compiled: # Model may not be compiled the first time. # Reset stateful metrics for m in model.stateful_metric_functions: m.reset_states() callbacks.on_epoch_begin(epoch) epoch_logs = {} iterator_fit_loop( model, inputs, class_weight, steps_per_epoch=steps_per_epoch, epoch_logs=epoch_logs, val_inputs=val_inputs, val_targets=val_targets, val_sample_weights=val_sample_weights, epochs=epochs, verbose=verbose, callbacks=callbacks, validation_steps=validation_steps, do_validation=do_validation, batch_size=batch_size) callbacks.on_epoch_end(epoch, epoch_logs) if callbacks.model.stop_training: break callbacks.on_train_end() return model.history
def fit_loop(model,
             inputs,
             targets,
             sample_weights=None,
             class_weight=None,
             val_inputs=None,
             val_targets=None,
             val_sample_weights=None,
             batch_size=None,
             epochs=1,
             verbose=1,
             callbacks=None,
             shuffle=True,
             initial_epoch=0,
             steps_per_epoch=None,
             validation_steps=None):
  """Fit function for eager execution.

  Arguments:
      model: Instance of the model that is being executed in Eager mode.
      inputs: List of input arrays.
      targets: List of target arrays.
      sample_weights: Optional list of sample weight arrays.
      class_weight: Optional class-weight array to weight the importance of
          samples in `inputs` based on the class they belong to, as conveyed by
          `targets`.
      val_inputs: Input data for validation.
      val_targets: Target data for validation.
      val_sample_weights: Sample weight data for validation.
      batch_size: Integer batch size or None if unknown.
      epochs: Number of times to iterate over the data
      verbose: Verbosity mode, 0, 1 or 2
      callbacks: List of callbacks to be called during training
      shuffle: Whether to shuffle the data at the beginning of each epoch
      initial_epoch: Epoch at which to start training (useful for resuming a
          previous training run)
      steps_per_epoch: Total number of steps (batches of samples) before
          declaring one epoch finished and starting the next epoch. Ignored
          with the default value of `None`.
      validation_steps: Number of steps to run validation for (only if doing
          validation from data tensors). Ignored with default value of `None`.

  Returns:
      `History` object.

  Raises:
      ValueError: In case of invalid argument values.
  """
  # Convert training inputs to an EagerIterator
  inputs, steps_per_epoch = training_utils.convert_to_iterator(
      x=inputs,
      y=targets,
      sample_weights=sample_weights,
      batch_size=batch_size,
      steps_per_epoch=steps_per_epoch,
      epochs=epochs,
      shuffle=shuffle)
  # Required for eager execution: training runs with learning phase = 1.
  with backend.learning_phase_scope(1):
    do_validation = False
    if val_inputs:
      do_validation = True

    # num_train_samples is unknown when training from an iterator; it is
    # still passed to the callbacks as part of their params.
    num_train_samples = None
    out_labels = None
    callback_metrics = None
    if model._is_compiled:
      out_labels = model.metrics_names
      if do_validation:
        # Callbacks see both train metrics and their 'val_'-prefixed twins.
        callback_metrics = copy.copy(out_labels) + [
            'val_' + n for n in out_labels
        ]
      else:
        callback_metrics = copy.copy(out_labels)

    # Assemble the callback list: BaseLogger first, History last, with an
    # optional step-based progress bar when verbose.
    model.history = cbks.History()
    callbacks = [cbks.BaseLogger()] + (callbacks or []) + [model.history]
    if verbose:
      callbacks += [cbks.ProgbarLogger('steps')]
    callbacks = cbks.CallbackList(callbacks)

    # it's possible to callback a different model than self
    # (used by Sequential models)
    if hasattr(model, 'callback_model') and model.callback_model:
      callback_model = model.callback_model
    else:
      callback_model = model

    callbacks.set_model(callback_model)

    callback_params = {
        'batch_size': batch_size,
        'epochs': epochs,
        'steps': steps_per_epoch,
        'samples': num_train_samples,
        'verbose': verbose,
        'do_validation': do_validation,
        'metrics': callback_metrics or [],
    }
    if validation_steps:
      callback_params.update({'validation_steps': validation_steps})
    callbacks.set_params(callback_params)

    # Expose validation data to each callback in whatever form it came in:
    # an EagerIterator is passed through; array inputs are concatenated
    # with targets (and sample weights, if any).
    for cbk in callbacks:
      if not val_inputs:
        cbk.validation_data = []
      elif isinstance(val_inputs, iterator_ops.EagerIterator):
        cbk.validation_data = val_inputs
      elif val_sample_weights:
        cbk.validation_data = val_inputs + val_targets + val_sample_weights
      else:
        cbk.validation_data = val_inputs + val_targets
    # validation_data must be set before on_train_begin() is called
    # so that TensorboardCallback can validate its input
    callbacks.on_train_begin()
    callback_model.stop_training = False
    for epoch in range(initial_epoch, epochs):
      callbacks.on_epoch_begin(epoch)
      epoch_logs = {}
      # One full pass over the training iterator; results are accumulated
      # into epoch_logs for the on_epoch_end callbacks.
      iterator_fit_loop(
          model,
          inputs,
          class_weight,
          steps_per_epoch=steps_per_epoch,
          callback_model=callback_model,
          out_labels=out_labels,
          epoch_logs=epoch_logs,
          val_inputs=val_inputs,
          val_targets=val_targets,
          val_sample_weights=val_sample_weights,
          epochs=epochs,
          verbose=verbose,
          callbacks=callbacks,
          callback_metrics=callback_metrics,
          validation_steps=validation_steps,
          do_validation=do_validation,
          batch_size=batch_size)
      callbacks.on_epoch_end(epoch, epoch_logs)
      # A callback (e.g. early stopping) may request termination by setting
      # stop_training on the callback model.
      if callback_model.stop_training:
        break
    callbacks.on_train_end()
  return model.history
def fit_loop(model, inputs, targets, sample_weights=None, class_weight=None, val_inputs=None, val_targets=None, val_sample_weights=None, batch_size=None, epochs=1, verbose=1, callbacks=None, shuffle=True, initial_epoch=0, steps_per_epoch=None, validation_steps=None): """Fit function for eager execution. Arguments: model: Instance of the model that is being executed in Eager mode. inputs: List of input arrays. targets: List of target arrays. sample_weights: Optional list of sample weight arrays. class_weight: Optional class-weight array to weight the importance of samples in `inputs` based on the class they belong to, as conveyed by `targets`. val_inputs: Input data for validation. val_targets: Target data for validation. val_sample_weights: Sample weight data for validation. batch_size: Integer batch size or None if unknown. epochs: Number of times to iterate over the data verbose: Verbosity mode, 0, 1 or 2 callbacks: List of callbacks to be called during training shuffle: Whether to shuffle the data at the beginning of each epoch initial_epoch: Epoch at which to start training (useful for resuming a previous training run) steps_per_epoch: Total number of steps (batches of samples) before declaring one epoch finished and starting the next epoch. Ignored with the default value of `None`. validation_steps: Number of steps to run validation for (only if doing validation from data tensors). Ignored with default value of `None`. Returns: `History` object. Raises: ValueError: In case of invalid argument values. 
""" # Convert training inputs to an EagerIterator inputs, steps_per_epoch = training_utils.convert_to_iterator( x=inputs, y=targets, sample_weights=sample_weights, batch_size=batch_size, steps_per_epoch=steps_per_epoch, epochs=epochs, shuffle=shuffle) # Required for eager execution with backend.learning_phase_scope(1): do_validation = val_inputs is not None callbacks = cbks.configure_callbacks( callbacks, model, do_validation=do_validation, batch_size=batch_size, epochs=epochs, steps_per_epoch=steps_per_epoch, val_inputs=val_inputs, val_targets=val_targets, val_sample_weights=val_sample_weights, validation_steps=validation_steps, verbose=verbose) # Create metric wrapper for the losses. output_loss_metrics = [] for i in range(len(model.outputs)): loss_fn = model.loss_functions[i] mean_wrapped_loss = metrics_module.MeanMetricWrapper( loss_fn, name=loss_fn.__name__) output_loss_metrics.append(mean_wrapped_loss) callbacks.on_train_begin() for epoch in range(initial_epoch, epochs): if model._is_compiled: # Model may not be compiled the first time. # Reset stateful metrics for m in model.stateful_metric_functions: m.reset_states() for m in output_loss_metrics: m.reset_states() callbacks.on_epoch_begin(epoch) epoch_logs = {} iterator_fit_loop( model, inputs, class_weight, steps_per_epoch=steps_per_epoch, epoch_logs=epoch_logs, val_inputs=val_inputs, val_targets=val_targets, val_sample_weights=val_sample_weights, epochs=epochs, verbose=verbose, callbacks=callbacks, validation_steps=validation_steps, do_validation=do_validation, batch_size=batch_size, output_loss_metrics=output_loss_metrics) callbacks.on_epoch_end(epoch, epoch_logs) if callbacks.model.stop_training: break callbacks.on_train_end() return model.history
def fit_loop(model,
             inputs,
             targets,
             sample_weights=None,
             class_weight=None,
             val_inputs=None,
             val_targets=None,
             val_sample_weights=None,
             batch_size=None,
             epochs=1,
             verbose=1,
             callbacks=None,
             shuffle=True,
             callback_metrics=None,
             initial_epoch=0,
             steps_per_epoch=None,
             validation_steps=None):
  """Fit function for eager execution.

  Arguments:
      model: Instance of the model that is being executed in Eager mode.
      inputs: List of input arrays.
      targets: List of target arrays.
      sample_weights: Optional list of sample weight arrays.
      class_weight: Optional class-weight array to weight the importance of
          samples in `inputs` based on the class they belong to, as conveyed by
          `targets`.
      val_inputs: Input data for validation.
      val_targets: Target data for validation.
      val_sample_weights: Sample weight data for validation.
      batch_size: Integer batch size or None if unknown.
      epochs: Number of times to iterate over the data
      verbose: Verbosity mode, 0, 1 or 2
      callbacks: List of callbacks to be called during training
      shuffle: Whether to shuffle the data at the beginning of each epoch
      callback_metrics: List of strings, the display names of the metrics
          passed to the callbacks. They should be the concatenation of list
          the display names of the outputs of `f` and the list of display
          names of the outputs of `f_val`.
      initial_epoch: Epoch at which to start training (useful for resuming a
          previous training run)
      steps_per_epoch: Total number of steps (batches of samples) before
          declaring one epoch finished and starting the next epoch. Ignored
          with the default value of `None`.
      validation_steps: Number of steps to run validation for (only if doing
          validation from data tensors). Ignored with default value of `None`.

  Returns:
      `History` object.

  Raises:
      ValueError: In case of invalid argument values.
  """
  # Convert training inputs to an EagerIterator
  inputs, steps_per_epoch = training_utils.convert_to_iterator(
      x=inputs,
      y=targets,
      sample_weights=sample_weights,
      batch_size=batch_size,
      steps_per_epoch=steps_per_epoch,
      epochs=epochs,
      shuffle=shuffle)
  # Required for eager execution: training runs with learning phase = 1.
  with backend.learning_phase_scope(1):
    do_validation = False
    if val_inputs:
      do_validation = True

    # num_train_samples is unknown when training from an iterator; it is
    # still passed to the callbacks as part of their params.
    num_train_samples = None
    out_labels = None
    if model._is_compiled:
      out_labels = model.metrics_names
      if do_validation:
        # NOTE: this overwrites any caller-supplied callback_metrics once
        # the model is compiled; callbacks see train metrics plus their
        # 'val_'-prefixed twins.
        callback_metrics = copy.copy(out_labels) + [
            'val_' + n for n in out_labels
        ]
      else:
        callback_metrics = copy.copy(out_labels)

    # Assemble the callback list: BaseLogger first, History last, with an
    # optional step-based progress bar when verbose.
    model.history = cbks.History()
    callbacks = [cbks.BaseLogger()] + (callbacks or []) + [model.history]
    if verbose:
      callbacks += [cbks.ProgbarLogger('steps')]
    callbacks = cbks.CallbackList(callbacks)

    # it's possible to callback a different model than self
    # (used by Sequential models)
    if hasattr(model, 'callback_model') and model.callback_model:
      callback_model = model.callback_model
    else:
      callback_model = model

    callbacks.set_model(callback_model)

    callback_params = {
        'batch_size': batch_size,
        'epochs': epochs,
        'steps': steps_per_epoch,
        'samples': num_train_samples,
        'verbose': verbose,
        'do_validation': do_validation,
        'metrics': callback_metrics or [],
    }
    if validation_steps:
      callback_params.update({'validation_steps': validation_steps})
    callbacks.set_params(callback_params)

    # Expose validation data to each callback in whatever form it came in:
    # an EagerIterator is passed through; array inputs are concatenated
    # with targets (and sample weights, if any).
    for cbk in callbacks:
      if not val_inputs:
        cbk.validation_data = []
      elif isinstance(val_inputs, iterator_ops.EagerIterator):
        cbk.validation_data = val_inputs
      elif val_sample_weights:
        cbk.validation_data = val_inputs + val_targets + val_sample_weights
      else:
        cbk.validation_data = val_inputs + val_targets
    # validation_data must be set before on_train_begin() is called
    # so that TensorboardCallback can validate its input
    callbacks.on_train_begin()
    callback_model.stop_training = False
    for epoch in range(initial_epoch, epochs):
      callbacks.on_epoch_begin(epoch)
      epoch_logs = {}
      # One full pass over the training iterator; results are accumulated
      # into epoch_logs for the on_epoch_end callbacks.
      iterator_fit_loop(
          model,
          inputs,
          class_weight,
          steps_per_epoch=steps_per_epoch,
          callback_model=callback_model,
          out_labels=out_labels,
          epoch_logs=epoch_logs,
          val_inputs=val_inputs,
          val_targets=val_targets,
          val_sample_weights=val_sample_weights,
          epochs=epochs,
          verbose=verbose,
          callbacks=callbacks,
          callback_metrics=callback_metrics,
          validation_steps=validation_steps,
          do_validation=do_validation,
          batch_size=batch_size)
      callbacks.on_epoch_end(epoch, epoch_logs)
      # A callback (e.g. early stopping) may request termination by setting
      # stop_training on the callback model.
      if callback_model.stop_training:
        break
    callbacks.on_train_end()
  return model.history