def call(self, inputs, training=None):
  if training is None:
    training = K.learning_phase()
  output = super(Dropout, self).call(inputs, training=training)
  if training is K.learning_phase():
    output._uses_learning_phase = True  # pylint: disable=protected-access
  return output
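For reference, a minimal sketch of how the learning-phase flag gets fed in graph-mode tf.keras of this era; the layer, data, and function names below are illustrative, not taken from the snippets above:

import numpy as np
import tensorflow as tf
from tensorflow.python.keras._impl.keras import backend as K

# K.learning_phase() is a scalar placeholder that K.function can feed
# alongside the model inputs: 1 selects training behavior, 0 inference.
inp = tf.keras.layers.Input(shape=(4,))
out = tf.keras.layers.Dropout(0.5)(inp)
f = K.function([inp, K.learning_phase()], [out])

data = np.ones((2, 4), dtype='float32')
train_out = f([data, 1])[0]  # dropout active
test_out = f([data, 0])[0]   # identity pass-through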
def call(self, inputs, training=None):
  if training is None:
    training = K.learning_phase()
  output = super(BatchNormalization, self).call(inputs, training=training)
  if context.in_graph_mode() and training is K.learning_phase():
    output._uses_learning_phase = True  # pylint: disable=protected-access
  return output
def call(self, inputs, training=None):
  if training is None:
    training = K.learning_phase()
  output = super(Dropout, self).call(inputs, training=training)
  # EagerTensor object has no attribute _uses_learning_phase
  if not context.in_eager_mode() and training is K.learning_phase():
    output._uses_learning_phase = True  # pylint: disable=protected-access
  return output
def call(self, inputs, training=None):
  if training is None:
    training = K.learning_phase()

  def dropped_inputs():
    return nn.dropout(
        inputs,
        1 - self.rate,
        noise_shape=self._get_noise_shape(inputs),
        seed=self.seed)

  output = tf_utils.smart_cond(training, dropped_inputs,
                               lambda: array_ops.identity(inputs))
  # EagerTensor object has no attribute _uses_learning_phase
  if not context.executing_eagerly() and training is K.learning_phase():
    output._uses_learning_phase = True  # pylint: disable=protected-access
  return output
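The dispatch above relies on `tf_utils.smart_cond`, which executes a branch directly when `training` is a static Python bool and builds a conditional op when it is a tensor. A rough public-API equivalent of the same pattern, as an illustration under TF 1.x assumptions:

import tensorflow as tf

def dropout_sketch(x, rate, training):
  # Static Python bool: choose the branch at graph-construction time.
  if isinstance(training, bool):
    return tf.nn.dropout(x, keep_prob=1 - rate) if training else tf.identity(x)
  # Tensor predicate: defer the choice to run time with tf.cond.
  return tf.cond(training,
                 lambda: tf.nn.dropout(x, keep_prob=1 - rate),
                 lambda: tf.identity(x))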
def on_epoch_end(self, epoch, logs=None):
  logs = logs or {}

  if not self.validation_data and self.histogram_freq:
    raise ValueError('If printing histograms, validation_data must be '
                     'provided, and cannot be a generator.')
  if self.validation_data and self.histogram_freq:
    if epoch % self.histogram_freq == 0:

      val_data = self.validation_data
      tensors = (
          self.model.inputs + self.model.targets + self.model.sample_weights)

      if self.model.uses_learning_phase:
        tensors += [K.learning_phase()]

      assert len(val_data) == len(tensors)
      val_size = val_data[0].shape[0]
      i = 0
      while i < val_size:
        step = min(self.batch_size, val_size - i)
        if self.model.uses_learning_phase:
          # do not slice the learning phase
          batch_val = [
              x[i:i + step] if x is not None else None for x in val_data[:-1]
          ]
          batch_val.append(val_data[-1])
        else:
          batch_val = [
              x[i:i + step] if x is not None else None for x in val_data
          ]
        feed_dict = {}
        for key, val in zip(tensors, batch_val):
          if val is not None:
            feed_dict[key] = val
        result = self.sess.run([self.merged], feed_dict=feed_dict)
        summary_str = result[0]
        self.writer.add_summary(summary_str, epoch)
        i += self.batch_size

  for name, value in logs.items():
    if name in ['batch', 'size']:
      continue
    summary = tf_summary.Summary()
    summary_value = summary.value.add()
    summary_value.simple_value = value.item()
    summary_value.tag = name
    self.writer.add_summary(summary, epoch)
  self.writer.flush()
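A usage sketch for the callback above (model and array names are hypothetical). Histogram summaries require in-memory validation arrays, which is what the `ValueError` at the top enforces; `histogram_freq` and `batch_size` mirror the attributes the method reads:

tensorboard = tf.keras.callbacks.TensorBoard(
    log_dir='./logs', histogram_freq=1, batch_size=32)
model.fit(x_train, y_train,
          epochs=5,
          validation_data=(x_val, y_val),  # arrays, not a generator
          callbacks=[tensorboard])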
def call(self, inputs, training=None):
  original_training_value = training
  if training is None:
    training = K.learning_phase()

  def dropped_inputs():
    return nn.dropout(
        inputs,
        1 - self.rate,
        noise_shape=self._get_noise_shape(inputs),
        seed=self.seed)

  output = tf_utils.smart_cond(training, dropped_inputs,
                               lambda: array_ops.identity(inputs))
  # EagerTensor object has no attribute _uses_learning_phase
  if not context.executing_eagerly() and original_training_value is None:
    output._uses_learning_phase = True  # pylint: disable=protected-access
  return output
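The switch to `original_training_value is None` records whether the caller actually supplied `training`; the earlier identity check against `K.learning_phase()` ran after `training` had already been reassigned, so explicitly passing the learning-phase tensor was indistinguishable from passing nothing. The three call styles, with `x` hypothetical:

layer = tf.keras.layers.Dropout(0.5)
y_dynamic = layer(x)                 # training=None: follows K.learning_phase()
y_train = layer(x, training=True)    # static: dropout always applied
y_infer = layer(x, training=False)   # static: identity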
def call(self, inputs, training=None):
  original_training_value = training
  if training is None:
    training = K.learning_phase()

  in_eager_mode = context.executing_eagerly()
  if self.virtual_batch_size is not None:
    # Virtual batches (aka ghost batches) can be simulated by reshaping the
    # Tensor and reusing the existing batch norm implementation
    original_shape = [-1] + inputs.shape.as_list()[1:]
    expanded_shape = [self.virtual_batch_size, -1] + original_shape[1:]

    # Will cause errors if virtual_batch_size does not divide the batch size
    inputs = array_ops.reshape(inputs, expanded_shape)

    def undo_virtual_batching(outputs):
      outputs = array_ops.reshape(outputs, original_shape)
      return outputs

  if self.fused:
    outputs = self._fused_batch_norm(inputs, training=training)
    if self.virtual_batch_size is not None:
      # Currently never reaches here since fused_batch_norm does not support
      # virtual batching
      outputs = undo_virtual_batching(outputs)
    if not context.executing_eagerly() and original_training_value is None:
      outputs._uses_learning_phase = True  # pylint: disable=protected-access
    return outputs

  # Compute the axes along which to reduce the mean / variance
  input_shape = inputs.get_shape()
  ndims = len(input_shape)
  reduction_axes = [i for i in range(ndims) if i not in self.axis]
  if self.virtual_batch_size is not None:
    del reduction_axes[1]  # Do not reduce along virtual batch dim

  # Broadcasting only necessary for single-axis batch norm where the axis is
  # not the last dimension
  broadcast_shape = [1] * ndims
  broadcast_shape[self.axis[0]] = input_shape[self.axis[0]].value

  def _broadcast(v):
    if (v is not None and len(v.get_shape()) != ndims and
        reduction_axes != list(range(ndims - 1))):
      return array_ops.reshape(v, broadcast_shape)
    return v

  scale, offset = _broadcast(self.gamma), _broadcast(self.beta)

  def _compose_transforms(scale, offset, then_scale, then_offset):
    if then_scale is not None:
      scale *= then_scale
      offset *= then_scale
    if then_offset is not None:
      offset += then_offset
    return (scale, offset)

  # Determine a boolean value for `training`: could be True, False, or None.
  training_value = tf_utils.constant_value(training)
  if training_value is not False:
    if self.adjustment:
      adj_scale, adj_bias = self.adjustment(array_ops.shape(inputs))
      # Adjust only during training.
      adj_scale = tf_utils.smart_cond(
          training, lambda: adj_scale, lambda: array_ops.ones_like(adj_scale))
      adj_bias = tf_utils.smart_cond(
          training, lambda: adj_bias, lambda: array_ops.zeros_like(adj_bias))
      scale, offset = _compose_transforms(adj_scale, adj_bias, scale, offset)

    # Some of the computations here are not necessary when training==False
    # but not a constant. However, this makes the code simpler.
    keep_dims = self.virtual_batch_size is not None or len(self.axis) > 1
    mean, variance = nn.moments(inputs, reduction_axes, keep_dims=keep_dims)

    moving_mean = self.moving_mean
    moving_variance = self.moving_variance

    mean = tf_utils.smart_cond(training, lambda: mean, lambda: moving_mean)
    variance = tf_utils.smart_cond(training, lambda: variance,
                                   lambda: moving_variance)

    if self.renorm:
      r, d, new_mean, new_variance = self._renorm_correction_and_moments(
          mean, variance, training)
      # When training, the normalized values (say, x) will be transformed as
      # x * gamma + beta without renorm, and (x * r + d) * gamma + beta
      # = x * (r * gamma) + (d * gamma + beta) with renorm.
      r = _broadcast(array_ops.stop_gradient(r, name='renorm_r'))
      d = _broadcast(array_ops.stop_gradient(d, name='renorm_d'))
      scale, offset = _compose_transforms(r, d, scale, offset)
    else:
      new_mean, new_variance = mean, variance

    if self.virtual_batch_size is not None:
      # This isn't strictly correct since in ghost batch norm, you are
      # supposed to sequentially update the moving_mean and moving_variance
      # with each sub-batch. However, since the moving statistics are only
      # used during evaluation, it is more efficient to just update in one
      # step and should not make a significant difference in the result.
      new_mean = math_ops.reduce_mean(new_mean, axis=1, keepdims=True)
      new_variance = math_ops.reduce_mean(new_variance, axis=1, keepdims=True)

    def _do_update(var, value):
      if in_eager_mode and not self.trainable:
        return
      return self._assign_moving_average(var, value, self.momentum)

    mean_update = tf_utils.smart_cond(
        training,
        lambda: _do_update(self.moving_mean, new_mean),
        lambda: self.moving_mean)
    variance_update = tf_utils.smart_cond(
        training,
        lambda: _do_update(self.moving_variance, new_variance),
        lambda: self.moving_variance)
    if not context.executing_eagerly():
      self.add_update(mean_update, inputs=True)
      self.add_update(variance_update, inputs=True)

  else:
    mean, variance = self.moving_mean, self.moving_variance

  outputs = nn.batch_normalization(inputs,
                                   _broadcast(mean),
                                   _broadcast(variance),
                                   offset,
                                   scale,
                                   self.epsilon)
  # If some components of the shape got lost due to adjustments, fix that.
  outputs.set_shape(input_shape)

  if self.virtual_batch_size is not None:
    outputs = undo_virtual_batching(outputs)
  if not context.executing_eagerly() and original_training_value is None:
    outputs._uses_learning_phase = True  # pylint: disable=protected-access
  return outputs
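A construction-time sketch of the virtual batching ("ghost batch") path, assuming the layer exposes `virtual_batch_size` as a constructor argument (as `self.virtual_batch_size` above suggests); shapes are illustrative, and the global batch size must be divisible by `virtual_batch_size`, per the reshape comment:

# Hypothetical shapes: a global batch of 64 split into 4 ghost batches of 16.
bn = tf.keras.layers.BatchNormalization(virtual_batch_size=16)
x = tf.keras.layers.Input(shape=(32,))
y = bn(x)  # at train time, moments are computed per ghost batch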
def test_loop(model,
              inputs,
              targets,
              sample_weights=None,
              batch_size=None,
              verbose=0,
              steps=None):
  """Abstract method to loop over some data in batches.

  Arguments:
      model: Keras Model instance.
      inputs: List of input arrays.
      targets: List of target arrays.
      sample_weights: Optional list of sample weight arrays.
      batch_size: integer batch size or `None`.
      verbose: verbosity mode.
      steps: Total number of steps (batches of samples)
          before declaring predictions finished.
          Ignored with the default value of `None`.

  Returns:
      Scalar loss (if the model has a single output and no metrics)
      or list of scalars (if the model has multiple outputs
      and/or metrics). The attribute `model.metrics_names` will give you
      the display labels for the scalar outputs.
  """
  model._make_test_function()
  f = model.test_function
  sample_weights = sample_weights or []
  if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
    ins = inputs + targets + sample_weights + [0]
  else:
    ins = inputs + targets + sample_weights

  if hasattr(model, 'metrics'):
    for m in model.metrics:
      if isinstance(m, Layer):
        m.reset_states()
    stateful_metric_indices = [
        i for i, name in enumerate(model.metrics_names)
        if str(name) in model.stateful_metric_names
    ]
  else:
    stateful_metric_indices = []

  num_samples = training_utils.check_num_samples(ins, batch_size, steps,
                                                 'steps')
  outs = []
  if verbose == 1:
    if steps is not None:
      progbar = Progbar(target=steps)
    else:
      progbar = Progbar(target=num_samples)

  # To prevent a slowdown, we find beforehand the arrays that need conversion.
  feed = model._feed_inputs + model._feed_targets + model._feed_sample_weights
  indices_for_conversion_to_dense = []
  for i in range(len(feed)):
    if issparse is not None and issparse(ins[i]) and not K.is_sparse(feed[i]):
      indices_for_conversion_to_dense.append(i)

  if steps is not None:
    for step in range(steps):
      batch_outs = f(ins)
      if isinstance(batch_outs, list):
        if step == 0:
          for _ in enumerate(batch_outs):
            outs.append(0.)
        for i, batch_out in enumerate(batch_outs):
          if i in stateful_metric_indices:
            outs[i] = batch_out
          else:
            outs[i] += batch_out
      else:
        if step == 0:
          outs.append(0.)
        outs[0] += batch_outs
      if verbose == 1:
        progbar.update(step + 1)
    for i in range(len(outs)):
      if i not in stateful_metric_indices:
        outs[i] /= steps
  else:
    batches = make_batches(num_samples, batch_size)
    index_array = np.arange(num_samples)
    for batch_index, (batch_start, batch_end) in enumerate(batches):
      batch_ids = index_array[batch_start:batch_end]
      if isinstance(ins[-1], int):
        # Do not slice the training phase flag.
        ins_batch = slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
      else:
        ins_batch = slice_arrays(ins, batch_ids)
      for i in indices_for_conversion_to_dense:
        ins_batch[i] = ins_batch[i].toarray()

      batch_outs = f(ins_batch)
      if isinstance(batch_outs, list):
        if batch_index == 0:
          for _ in enumerate(batch_outs):
            outs.append(0.)
        for i, batch_out in enumerate(batch_outs):
          if i in stateful_metric_indices:
            outs[i] = batch_out
          else:
            outs[i] += batch_out * len(batch_ids)
      else:
        if batch_index == 0:
          outs.append(0.)
        outs[0] += batch_outs * len(batch_ids)
      if verbose == 1:
        progbar.update(batch_end)
    for i in range(len(outs)):
      if i not in stateful_metric_indices:
        outs[i] /= num_samples
  if len(outs) == 1:
    return outs[0]
  return outs
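`test_loop` is the array-based engine behind `Model.evaluate`. Note the stateful-metric handling: those outputs are reported as the last batch's value rather than a size-weighted average. A typical entry point, with hypothetical array names:

results = model.evaluate(x_test, y_test, batch_size=32, verbose=0)
# `results` is ordered like model.metrics_names, e.g. ['loss', 'acc'].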
def fit_loop(model,
             inputs,
             targets,
             sample_weights=None,
             batch_size=None,
             epochs=100,
             verbose=1,
             callbacks=None,
             val_inputs=None,
             val_targets=None,
             val_sample_weights=None,
             shuffle=True,
             callback_metrics=None,
             initial_epoch=0,
             steps_per_epoch=None,
             validation_steps=None):
  """Abstract fit function for arrays of data.

  Arguments:
      model: Keras Model instance.
      inputs: List of input arrays.
      targets: List of target arrays.
      sample_weights: Optional list of sample weight arrays.
      batch_size: Integer batch size or None if unknown.
      epochs: Number of times to iterate over the data.
      verbose: Verbosity mode, 0, 1 or 2.
      callbacks: List of callbacks to be called during training.
      val_inputs: List of input arrays.
      val_targets: List of target arrays.
      val_sample_weights: Optional list of sample weight arrays.
      shuffle: Whether to shuffle the data at the beginning of each epoch.
      callback_metrics: List of strings, the display names of the metrics
          passed to the callbacks. They should be the concatenation of the
          list of display names of the outputs of `f` and the list of
          display names of the outputs of `f_val`.
      initial_epoch: Epoch at which to start training
          (useful for resuming a previous training run).
      steps_per_epoch: Total number of steps (batches of samples)
          before declaring one epoch finished and starting the
          next epoch. Ignored with the default value of `None`.
      validation_steps: Number of steps to run validation for
          (only if doing validation from data tensors).
          Ignored with the default value of `None`.

  Returns:
      `History` object.

  Raises:
      ValueError: in case of invalid arguments.
  """
  model._make_train_function()
  f = model.train_function

  sample_weights = sample_weights or []
  val_sample_weights = val_sample_weights or []
  if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
    ins = inputs + targets + sample_weights + [1]
    if val_inputs:
      val_ins = val_inputs + val_targets + val_sample_weights + [1]
  else:
    ins = inputs + targets + sample_weights
    if val_inputs:
      val_ins = val_inputs + val_targets + val_sample_weights
  if not val_inputs:
    val_ins = []

  do_validation = False
  if val_inputs:
    do_validation = True
    if verbose and inputs and hasattr(inputs[0], 'shape') and hasattr(
        val_inputs[0], 'shape'):
      print('Train on %d samples, validate on %d samples' %
            (inputs[0].shape[0], val_inputs[0].shape[0]))
  if validation_steps:
    do_validation = True
    if steps_per_epoch is None:
      raise ValueError('Can only use `validation_steps` '
                       'when doing step-wise '
                       'training, i.e. `steps_per_epoch` '
                       'must be set.')

  out_labels = model.metrics_names
  if do_validation:
    callback_metrics = copy.copy(out_labels) + [
        'val_' + n for n in out_labels
    ]
  else:
    callback_metrics = copy.copy(out_labels)

  num_train_samples = training_utils.check_num_samples(
      ins, batch_size, steps_per_epoch, 'steps_per_epoch')

  if num_train_samples is not None:
    index_array = np.arange(num_train_samples)

  model.history = cbks.History()
  all_callbacks = [
      cbks.BaseLogger(stateful_metrics=model.stateful_metric_names)
  ]
  if verbose:
    if steps_per_epoch is not None:
      count_mode = 'steps'
    else:
      count_mode = 'samples'
    all_callbacks.append(
        cbks.ProgbarLogger(
            count_mode, stateful_metrics=model.stateful_metric_names))
  all_callbacks += (callbacks or []) + [model.history]
  callbacks = cbks.CallbackList(all_callbacks)
  out_labels = out_labels or []

  # it's possible to callback a different model than self
  # (used by Sequential models)
  if hasattr(model, 'callback_model') and model.callback_model:
    callback_model = model.callback_model
  else:
    callback_model = model

  callbacks.set_model(callback_model)

  callbacks.set_params({
      'batch_size': batch_size,
      'epochs': epochs,
      'steps': steps_per_epoch,
      'samples': num_train_samples,
      'verbose': verbose,
      'do_validation': do_validation,
      'metrics': callback_metrics or [],
  })
  callbacks.on_train_begin()
  callback_model.stop_training = False
  for cbk in callbacks:
    cbk.validation_data = val_ins

  # To prevent a slowdown, we find beforehand the arrays that need conversion.
  feed = model._feed_inputs + model._feed_targets + model._feed_sample_weights
  indices_for_conversion_to_dense = []
  for i in range(len(feed)):
    if issparse is not None and issparse(ins[i]) and not K.is_sparse(feed[i]):
      indices_for_conversion_to_dense.append(i)

  for epoch in range(initial_epoch, epochs):
    # Reset stateful metrics
    for m in model.metrics:
      if isinstance(m, Layer):
        m.reset_states()
    # Update callbacks
    callbacks.on_epoch_begin(epoch)
    epoch_logs = {}
    if steps_per_epoch is not None:
      for step_index in range(steps_per_epoch):
        batch_logs = {}
        batch_logs['batch'] = step_index
        batch_logs['size'] = 1
        callbacks.on_batch_begin(step_index, batch_logs)
        outs = f(ins)

        if not isinstance(outs, list):
          outs = [outs]
        for l, o in zip(out_labels, outs):
          batch_logs[l] = o

        callbacks.on_batch_end(step_index, batch_logs)
        if callback_model.stop_training:
          break

      if do_validation:
        val_outs = test_loop(
            model,
            val_inputs,
            val_targets,
            sample_weights=val_sample_weights,
            batch_size=batch_size,
            steps=validation_steps,
            verbose=0)
        if not isinstance(val_outs, list):
          val_outs = [val_outs]
        # Same labels assumed.
        for l, o in zip(out_labels, val_outs):
          epoch_logs['val_' + l] = o
    else:
      if shuffle == 'batch':
        index_array = training_utils.batch_shuffle(index_array, batch_size)
      elif shuffle:
        np.random.shuffle(index_array)

      batches = make_batches(num_train_samples, batch_size)

      for batch_index, (batch_start, batch_end) in enumerate(batches):
        batch_ids = index_array[batch_start:batch_end]
        try:
          if isinstance(ins[-1], int):
            # Do not slice the training phase flag.
            ins_batch = slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
          else:
            ins_batch = slice_arrays(ins, batch_ids)
        except TypeError:
          raise TypeError('TypeError while preparing batch. '
                          'If using HDF5 input data, '
                          'pass shuffle="batch".')
        batch_logs = {}
        batch_logs['batch'] = batch_index
        batch_logs['size'] = len(batch_ids)
        callbacks.on_batch_begin(batch_index, batch_logs)
        for i in indices_for_conversion_to_dense:
          ins_batch[i] = ins_batch[i].toarray()

        outs = f(ins_batch)
        if not isinstance(outs, list):
          outs = [outs]
        for l, o in zip(out_labels, outs):
          batch_logs[l] = o

        callbacks.on_batch_end(batch_index, batch_logs)
        if callback_model.stop_training:
          break

        if batch_index == len(batches) - 1:  # Last batch.
          if do_validation:
            val_outs = test_loop(
                model,
                val_inputs,
                val_targets,
                sample_weights=val_sample_weights,
                batch_size=batch_size,
                verbose=0)
            if not isinstance(val_outs, list):
              val_outs = [val_outs]
            # Same labels assumed.
            for l, o in zip(out_labels, val_outs):
              epoch_logs['val_' + l] = o
    callbacks.on_epoch_end(epoch, epoch_logs)
    if callback_model.stop_training:
      break
  callbacks.on_train_end()
  return model.history
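`fit_loop` is internal and is reached through `Model.fit`, which standardizes the user data and selects the sample-based or step-based branch above. A minimal sketch with hypothetical names:

model.compile(optimizer='sgd', loss='mse')
history = model.fit(x_train, y_train,
                    batch_size=32,
                    epochs=10,
                    validation_data=(x_val, y_val),
                    shuffle=True)
print(history.history['loss'][-1])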
def predict_loop(model, inputs, batch_size=32, verbose=0, steps=None):
  """Abstract method to loop over some data in batches.

  Arguments:
      model: Keras Model instance.
      inputs: list of tensors to be fed to `f`.
      batch_size: integer batch size.
      verbose: verbosity mode.
      steps: Total number of steps (batches of samples)
          before declaring `_predict_loop` finished.
          Ignored with the default value of `None`.

  Returns:
      Array of predictions (if the model has a single output)
      or list of arrays of predictions
      (if the model has multiple outputs).
  """
  model._make_predict_function()
  f = model.predict_function
  if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
    ins = inputs + [0]
  else:
    ins = inputs

  num_samples = training_utils.check_num_samples(inputs, batch_size, steps,
                                                 'steps')
  if verbose == 1:
    if steps is not None:
      progbar = Progbar(target=steps)
    else:
      progbar = Progbar(target=num_samples)

  indices_for_conversion_to_dense = []
  for i in range(len(model._feed_inputs)):
    if (issparse is not None and issparse(inputs[i]) and
        not K.is_sparse(model._feed_inputs[i])):
      indices_for_conversion_to_dense.append(i)

  if steps is not None:
    # Step-based predictions.
    # Since we do not know how many samples
    # we will see, we cannot pre-allocate
    # the returned Numpy arrays.
    # Instead, we store one array per batch seen
    # and concatenate them upon returning.
    unconcatenated_outs = []
    for step in range(steps):
      batch_outs = f(ins)
      if not isinstance(batch_outs, list):
        batch_outs = [batch_outs]
      if step == 0:
        for batch_out in batch_outs:
          unconcatenated_outs.append([])
      for i, batch_out in enumerate(batch_outs):
        unconcatenated_outs[i].append(batch_out)
      if verbose == 1:
        progbar.update(step + 1)
    if len(unconcatenated_outs) == 1:
      return np.concatenate(unconcatenated_outs[0], axis=0)
    return [
        np.concatenate(unconcatenated_outs[i], axis=0)
        for i in range(len(unconcatenated_outs))
    ]
  else:
    # Sample-based predictions.
    outs = []
    batches = make_batches(num_samples, batch_size)
    index_array = np.arange(num_samples)
    for batch_index, (batch_start, batch_end) in enumerate(batches):
      batch_ids = index_array[batch_start:batch_end]
      if ins and isinstance(ins[-1], int):
        # Do not slice the training phase flag.
        ins_batch = slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
      else:
        ins_batch = slice_arrays(ins, batch_ids)
      for i in indices_for_conversion_to_dense:
        ins_batch[i] = ins_batch[i].toarray()

      batch_outs = f(ins_batch)
      if not isinstance(batch_outs, list):
        batch_outs = [batch_outs]
      if batch_index == 0:
        # Pre-allocate the results arrays.
        for batch_out in batch_outs:
          shape = (num_samples,) + batch_out.shape[1:]
          outs.append(np.zeros(shape, dtype=batch_out.dtype))
      for i, batch_out in enumerate(batch_outs):
        outs[i][batch_start:batch_end] = batch_out
      if verbose == 1:
        progbar.update(batch_end)
    if len(outs) == 1:
      return outs[0]
    return outs
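`predict_loop` sits behind `Model.predict`: numpy inputs take the sample-based branch with pre-allocated output arrays, while symbolic-tensor inputs require `steps` and take the concatenating branch. For example, with `x_test` hypothetical:

preds = model.predict(x_test, batch_size=32)  # sample-based branch
# With symbolic tensor inputs, pass `steps` instead to use the
# step-based branch that concatenates per-batch outputs.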
# load modules
import tensorflow as tf
import numpy as np
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
import urllib, cStringIO
import os

# load the model
model = tf.keras.applications.ResNet50()

# load compute_margin class
from compute_margin import margin
from tensorflow.python.keras._impl.keras import backend as K
m = margin(tf.log(model.output), model.input, K.get_session(),
           [K.learning_phase()])

# open the file
i = 0
#f = open('/mnt/nfs/nfsshare/user_homes/zybill/imagenet_data/fall11_urls.txt')
f = open('./imagenet_data/fall11_urls.txt')

# make the write directory
#write_dir = '/mnt/nfs/nfsshare/user_homes/zybill/results_imagenet/'
write_dir = './results/imagenet_adversarials/'
if not os.path.exists(write_dir):
    os.makedirs(write_dir)

# run compute margin
input_shape = (224, 224)
for i in xrange(100):
def fit_generator(model,
                  generator,
                  steps_per_epoch=None,
                  epochs=1,
                  verbose=1,
                  callbacks=None,
                  validation_data=None,
                  validation_steps=None,
                  class_weight=None,
                  max_queue_size=10,
                  workers=1,
                  use_multiprocessing=False,
                  shuffle=True,
                  initial_epoch=0):
  """See docstring for `Model.fit_generator`."""
  wait_time = 0.01  # in seconds
  epoch = initial_epoch

  do_validation = bool(validation_data)
  model._make_train_function()
  if do_validation:
    model._make_test_function()

  is_sequence = isinstance(generator, Sequence)
  if not is_sequence and use_multiprocessing and workers > 1:
    logging.warning(
        UserWarning('Using a generator with `use_multiprocessing=True`'
                    ' and multiple workers may duplicate your data.'
                    ' Please consider using the `keras.utils.Sequence`'
                    ' class.'))
  if steps_per_epoch is None:
    if is_sequence:
      steps_per_epoch = len(generator)
    else:
      raise ValueError('`steps_per_epoch=None` is only valid for a'
                       ' generator based on the `keras.utils.Sequence`'
                       ' class. Please specify `steps_per_epoch` or use'
                       ' the `keras.utils.Sequence` class.')

  # python 2 has 'next', 3 has '__next__'
  # avoid any explicit version checks
  val_gen = (
      hasattr(validation_data, 'next') or
      hasattr(validation_data, '__next__') or
      isinstance(validation_data, Sequence))
  if (val_gen and not isinstance(validation_data, Sequence) and
      not validation_steps):
    raise ValueError('`validation_steps=None` is only valid for a'
                     ' generator based on the `keras.utils.Sequence`'
                     ' class. Please specify `validation_steps` or use'
                     ' the `keras.utils.Sequence` class.')

  # Prepare display labels.
  out_labels = model.metrics_names
  callback_metrics = out_labels + ['val_%s' % n for n in out_labels]

  # prepare callbacks
  model.history = cbks.History()
  callbacks = [cbks.BaseLogger()] + (callbacks or []) + [model.history]
  if verbose:
    callbacks += [cbks.ProgbarLogger(count_mode='steps')]
  callbacks = cbks.CallbackList(callbacks)

  # it's possible to callback a different model than self:
  if hasattr(model, 'callback_model') and model.callback_model:
    callback_model = model.callback_model
  else:
    callback_model = model
  callbacks.set_model(callback_model)
  callbacks.set_params({
      'epochs': epochs,
      'steps': steps_per_epoch,
      'verbose': verbose,
      'do_validation': do_validation,
      'metrics': callback_metrics,
  })
  callbacks.on_train_begin()

  enqueuer = None
  val_enqueuer = None

  try:
    if do_validation:
      if val_gen:
        if workers > 0:
          if isinstance(validation_data, Sequence):
            val_enqueuer = OrderedEnqueuer(
                validation_data, use_multiprocessing=use_multiprocessing)
            if validation_steps is None:
              validation_steps = len(validation_data)
          else:
            val_enqueuer = GeneratorEnqueuer(
                validation_data,
                use_multiprocessing=use_multiprocessing,
                wait_time=wait_time)
          val_enqueuer.start(workers=workers, max_queue_size=max_queue_size)
          validation_generator = val_enqueuer.get()
        else:
          validation_generator = validation_data
      else:
        if len(validation_data) == 2:
          val_x, val_y = validation_data  # pylint: disable=unpacking-non-sequence
          val_sample_weight = None
        elif len(validation_data) == 3:
          val_x, val_y, val_sample_weight = validation_data  # pylint: disable=unpacking-non-sequence
        else:
          raise ValueError('`validation_data` should be a tuple '
                           '`(val_x, val_y, val_sample_weight)` '
                           'or `(val_x, val_y)`. Found: ' +
                           str(validation_data))
        val_x, val_y, val_sample_weights = model._standardize_user_data(
            val_x, val_y, val_sample_weight)
        val_data = val_x + val_y + val_sample_weights
        if model.uses_learning_phase and not isinstance(
            K.learning_phase(), int):
          val_data += [0]
        for cbk in callbacks:
          cbk.validation_data = val_data

    if workers > 0:
      if is_sequence:
        enqueuer = OrderedEnqueuer(
            generator,
            use_multiprocessing=use_multiprocessing,
            shuffle=shuffle)
      else:
        enqueuer = GeneratorEnqueuer(
            generator,
            use_multiprocessing=use_multiprocessing,
            wait_time=wait_time)
      enqueuer.start(workers=workers, max_queue_size=max_queue_size)
      output_generator = enqueuer.get()
    else:
      output_generator = generator

    callback_model.stop_training = False
    # Construct epoch logs.
    epoch_logs = {}
    while epoch < epochs:
      callbacks.on_epoch_begin(epoch)
      steps_done = 0
      batch_index = 0
      while steps_done < steps_per_epoch:
        generator_output = next(output_generator)

        if not hasattr(generator_output, '__len__'):
          raise ValueError('Output of generator should be '
                           'a tuple `(x, y, sample_weight)` '
                           'or `(x, y)`. Found: ' + str(generator_output))

        if len(generator_output) == 2:
          x, y = generator_output
          sample_weight = None
        elif len(generator_output) == 3:
          x, y, sample_weight = generator_output
        else:
          raise ValueError('Output of generator should be '
                           'a tuple `(x, y, sample_weight)` '
                           'or `(x, y)`. Found: ' + str(generator_output))
        # build batch logs
        batch_logs = {}
        if isinstance(x, list):
          batch_size = x[0].shape[0]
        elif isinstance(x, dict):
          batch_size = list(x.values())[0].shape[0]
        else:
          batch_size = x.shape[0]
        batch_logs['batch'] = batch_index
        batch_logs['size'] = batch_size
        callbacks.on_batch_begin(batch_index, batch_logs)

        outs = model.train_on_batch(
            x, y, sample_weight=sample_weight, class_weight=class_weight)

        if not isinstance(outs, list):
          outs = [outs]
        for l, o in zip(out_labels, outs):
          batch_logs[l] = o

        callbacks.on_batch_end(batch_index, batch_logs)

        batch_index += 1
        steps_done += 1

        # Epoch finished.
        if steps_done >= steps_per_epoch and do_validation:
          if val_gen:
            val_outs = evaluate_generator(
                model, validation_generator, validation_steps, workers=0)
          else:
            # No need for try/except because
            # data has already been validated.
            val_outs = model.evaluate(
                val_x,
                val_y,
                batch_size=batch_size,
                sample_weight=val_sample_weights,
                verbose=0)
          if not isinstance(val_outs, list):
            val_outs = [val_outs]
          # Same labels assumed.
          for l, o in zip(out_labels, val_outs):
            epoch_logs['val_' + l] = o

        if callback_model.stop_training:
          break

      callbacks.on_epoch_end(epoch, epoch_logs)
      epoch += 1
      if callback_model.stop_training:
        break

  finally:
    try:
      if enqueuer is not None:
        enqueuer.stop()
    finally:
      if val_enqueuer is not None:
        val_enqueuer.stop()

  callbacks.on_train_end()
  return model.history
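A usage sketch for `fit_generator` with a `keras.utils.Sequence`, which lets `steps_per_epoch` (and, for validation, `validation_steps`) be inferred via `len(...)`; the class and array names here are illustrative:

import numpy as np
import tensorflow as tf

class ArraySequence(tf.keras.utils.Sequence):
  """Batches a pair of in-memory arrays (illustrative helper)."""

  def __init__(self, x, y, batch_size):
    self.x, self.y, self.batch_size = x, y, batch_size

  def __len__(self):
    return int(np.ceil(len(self.x) / float(self.batch_size)))

  def __getitem__(self, idx):
    sl = slice(idx * self.batch_size, (idx + 1) * self.batch_size)
    return self.x[sl], self.y[sl]

model.fit_generator(ArraySequence(x_train, y_train, 32),
                    epochs=5,
                    validation_data=ArraySequence(x_val, y_val, 32),
                    workers=2)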
def fit_loop(model,
             inputs,
             targets,
             sample_weights=None,
             batch_size=None,
             epochs=100,
             verbose=1,
             callbacks=None,
             val_inputs=None,
             val_targets=None,
             val_sample_weights=None,
             shuffle=True,
             callback_metrics=None,
             initial_epoch=0,
             steps_per_epoch=None,
             validation_steps=None):
  """Abstract fit function for arrays of data.

  Arguments:
      model: Keras Model instance.
      inputs: List of input arrays.
      targets: List of target arrays.
      sample_weights: Optional list of sample weight arrays.
      batch_size: Integer batch size or None if unknown.
      epochs: Number of times to iterate over the data.
      verbose: Verbosity mode, 0, 1 or 2.
      callbacks: List of callbacks to be called during training.
      val_inputs: List of input arrays.
      val_targets: List of target arrays.
      val_sample_weights: Optional list of sample weight arrays.
      shuffle: Whether to shuffle the data at the beginning of each epoch.
      callback_metrics: List of strings, the display names of the metrics
          passed to the callbacks. They should be the concatenation of the
          list of display names of the outputs of `f` and the list of
          display names of the outputs of `f_val`.
      initial_epoch: Epoch at which to start training
          (useful for resuming a previous training run).
      steps_per_epoch: Total number of steps (batches of samples)
          before declaring one epoch finished and starting the
          next epoch. Ignored with the default value of `None`.
      validation_steps: Number of steps to run validation for
          (only if doing validation from data tensors).
          Ignored with the default value of `None`.

  Returns:
      `History` object.

  Raises:
      ValueError: in case of invalid arguments.
  """
  model._make_train_function()
  f = model.train_function

  sample_weights = sample_weights or []
  val_sample_weights = val_sample_weights or []
  if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
    ins = inputs + targets + sample_weights + [1]
    if val_inputs:
      val_ins = val_inputs + val_targets + val_sample_weights + [1]
  else:
    ins = inputs + targets + sample_weights
    if val_inputs:
      val_ins = val_inputs + val_targets + val_sample_weights
  if not val_inputs:
    val_ins = []

  do_validation = False
  if val_inputs:
    do_validation = True
    if verbose and inputs and hasattr(inputs[0], 'shape') and hasattr(
        val_inputs[0], 'shape'):
      print('Train on %d samples, validate on %d samples' %
            (inputs[0].shape[0], val_inputs[0].shape[0]))
  if validation_steps:
    do_validation = True
    if steps_per_epoch is None:
      raise ValueError('Can only use `validation_steps` '
                       'when doing step-wise '
                       'training, i.e. `steps_per_epoch` '
                       'must be set.')

  out_labels = model.metrics_names
  if do_validation:
    callback_metrics = copy.copy(out_labels) + [
        'val_' + n for n in out_labels
    ]
  else:
    callback_metrics = copy.copy(out_labels)

  num_train_samples = training_utils.check_num_samples(
      ins, batch_size, steps_per_epoch, 'steps_per_epoch')

  if num_train_samples is not None:
    index_array = np.arange(num_train_samples)

  model.history = cbks.History()
  all_callbacks = [
      cbks.BaseLogger(stateful_metrics=model.stateful_metric_names)
  ]
  if verbose:
    if steps_per_epoch is not None:
      count_mode = 'steps'
    else:
      count_mode = 'samples'
    all_callbacks.append(
        cbks.ProgbarLogger(
            count_mode, stateful_metrics=model.stateful_metric_names))
  all_callbacks += (callbacks or []) + [model.history]
  callbacks = cbks.CallbackList(all_callbacks)
  out_labels = out_labels or []

  # it's possible to callback a different model than self
  # (used by Sequential models)
  if hasattr(model, 'callback_model') and model.callback_model:
    callback_model = model.callback_model
  else:
    callback_model = model

  callbacks.set_model(callback_model)

  callbacks.set_params({
      'batch_size': batch_size,
      'epochs': epochs,
      'steps': steps_per_epoch,
      'samples': num_train_samples,
      'verbose': verbose,
      'do_validation': do_validation,
      'metrics': callback_metrics or [],
  })
  callbacks.on_train_begin()
  callback_model.stop_training = False
  for cbk in callbacks:
    cbk.validation_data = val_ins

  # To prevent a slowdown, we find beforehand the arrays that need conversion.
  feed = model._feed_inputs + model._feed_targets + model._feed_sample_weights
  indices_for_conversion_to_dense = []
  for i in range(len(feed)):
    if issparse is not None and issparse(ins[i]) and not K.is_sparse(feed[i]):
      indices_for_conversion_to_dense.append(i)

  for epoch in range(initial_epoch, epochs):
    # Reset stateful metrics
    for m in model.metrics:
      if isinstance(m, Layer):
        m.reset_states()
    # Update callbacks
    callbacks.on_epoch_begin(epoch)
    epoch_logs = {}
    if steps_per_epoch is not None:
      for step_index in range(steps_per_epoch):
        batch_logs = {}
        batch_logs['batch'] = step_index
        batch_logs['size'] = 1
        callbacks.on_batch_begin(step_index, batch_logs)
        try:
          outs = f(ins)
        except errors.OutOfRangeError:
          logging.warning(
              'Your dataset iterator ran out of data; '
              'interrupting training. Make sure that your dataset '
              'can generate at least `steps_per_epoch * epochs` '
              'batches (in this case, %d batches).' %
              (steps_per_epoch * epochs))
          break

        if not isinstance(outs, list):
          outs = [outs]
        for l, o in zip(out_labels, outs):
          batch_logs[l] = o

        callbacks.on_batch_end(step_index, batch_logs)
        if callback_model.stop_training:
          break

      if do_validation:
        val_outs = test_loop(
            model,
            val_inputs,
            val_targets,
            sample_weights=val_sample_weights,
            batch_size=batch_size,
            steps=validation_steps,
            verbose=0)
        if not isinstance(val_outs, list):
          val_outs = [val_outs]
        # Same labels assumed.
        for l, o in zip(out_labels, val_outs):
          epoch_logs['val_' + l] = o
    else:
      if shuffle == 'batch':
        index_array = training_utils.batch_shuffle(index_array, batch_size)
      elif shuffle:
        np.random.shuffle(index_array)

      batches = make_batches(num_train_samples, batch_size)

      for batch_index, (batch_start, batch_end) in enumerate(batches):
        batch_ids = index_array[batch_start:batch_end]
        try:
          if isinstance(ins[-1], int):
            # Do not slice the training phase flag.
            ins_batch = slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
          else:
            ins_batch = slice_arrays(ins, batch_ids)
        except TypeError:
          raise TypeError('TypeError while preparing batch. '
                          'If using HDF5 input data, '
                          'pass shuffle="batch".')
        batch_logs = {}
        batch_logs['batch'] = batch_index
        batch_logs['size'] = len(batch_ids)
        callbacks.on_batch_begin(batch_index, batch_logs)
        for i in indices_for_conversion_to_dense:
          ins_batch[i] = ins_batch[i].toarray()

        outs = f(ins_batch)
        if not isinstance(outs, list):
          outs = [outs]
        for l, o in zip(out_labels, outs):
          batch_logs[l] = o

        callbacks.on_batch_end(batch_index, batch_logs)
        if callback_model.stop_training:
          break

        if batch_index == len(batches) - 1:  # Last batch.
          if do_validation:
            val_outs = test_loop(
                model,
                val_inputs,
                val_targets,
                sample_weights=val_sample_weights,
                batch_size=batch_size,
                verbose=0)
            if not isinstance(val_outs, list):
              val_outs = [val_outs]
            # Same labels assumed.
            for l, o in zip(out_labels, val_outs):
              epoch_logs['val_' + l] = o
    callbacks.on_epoch_end(epoch, epoch_logs)
    if callback_model.stop_training:
      break
  callbacks.on_train_end()
  return model.history
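Note the parenthesized product in the `OutOfRangeError` warning above: `%` binds tighter than `*`, so without the parentheses the message would be formatted with `steps_per_epoch` alone and then the whole string repeated `epochs` times. A two-line demonstration:

msg = 'need %d batches. ' % 2 * 3
# -> 'need 2 batches. need 2 batches. need 2 batches. '
msg = 'need %d batches. ' % (2 * 3)
# -> 'need 6 batches. '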
def fit_generator(model,
                  generator,
                  steps_per_epoch=None,
                  epochs=1,
                  verbose=1,
                  callbacks=None,
                  validation_data=None,
                  validation_steps=None,
                  class_weight=None,
                  max_queue_size=10,
                  workers=1,
                  use_multiprocessing=False,
                  shuffle=True,
                  initial_epoch=0):
  """See docstring for `Model.fit_generator`."""
  wait_time = 0.01  # in seconds
  epoch = initial_epoch

  do_validation = bool(validation_data)
  model._make_train_function()
  if do_validation:
    model._make_test_function()

  is_sequence = isinstance(generator, Sequence)
  if not is_sequence and use_multiprocessing and workers > 1:
    logging.warning(
        UserWarning('Using a generator with `use_multiprocessing=True`'
                    ' and multiple workers may duplicate your data.'
                    ' Please consider using the `keras.utils.Sequence`'
                    ' class.'))
  if steps_per_epoch is None:
    if is_sequence:
      steps_per_epoch = len(generator)
    else:
      raise ValueError('`steps_per_epoch=None` is only valid for a'
                       ' generator based on the `keras.utils.Sequence`'
                       ' class. Please specify `steps_per_epoch` or use'
                       ' the `keras.utils.Sequence` class.')

  # python 2 has 'next', 3 has '__next__'
  # avoid any explicit version checks
  val_gen = (
      hasattr(validation_data, 'next') or
      hasattr(validation_data, '__next__') or
      isinstance(validation_data, Sequence))
  if (val_gen and not isinstance(validation_data, Sequence) and
      not validation_steps):
    raise ValueError('`validation_steps=None` is only valid for a'
                     ' generator based on the `keras.utils.Sequence`'
                     ' class. Please specify `validation_steps` or use'
                     ' the `keras.utils.Sequence` class.')

  # Prepare display labels.
  out_labels = model.metrics_names
  callback_metrics = out_labels + ['val_%s' % n for n in out_labels]

  # prepare callbacks
  model.history = cbks.History()
  callbacks = [cbks.BaseLogger()] + (callbacks or []) + [model.history]
  if verbose:
    callbacks += [cbks.ProgbarLogger(count_mode='steps')]
  callbacks = cbks.CallbackList(callbacks)

  # it's possible to callback a different model than self:
  if hasattr(model, 'callback_model') and model.callback_model:
    callback_model = model.callback_model
  else:
    callback_model = model
  callbacks.set_model(callback_model)
  callbacks.set_params({
      'epochs': epochs,
      'steps': steps_per_epoch,
      'verbose': verbose,
      'do_validation': do_validation,
      'metrics': callback_metrics,
  })
  callbacks.on_train_begin()

  enqueuer = None
  val_enqueuer = None

  try:
    if do_validation and not val_gen:
      # Prepare data for validation
      if len(validation_data) == 2:
        val_x, val_y = validation_data  # pylint: disable=unpacking-non-sequence
        val_sample_weight = None
      elif len(validation_data) == 3:
        val_x, val_y, val_sample_weight = validation_data  # pylint: disable=unpacking-non-sequence
      else:
        raise ValueError('`validation_data` should be a tuple '
                         '`(val_x, val_y, val_sample_weight)` '
                         'or `(val_x, val_y)`. Found: ' + str(validation_data))
      val_x, val_y, val_sample_weights = model._standardize_user_data(
          val_x, val_y, val_sample_weight)
      val_data = val_x + val_y + val_sample_weights
      if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
        val_data += [0.]
      for cbk in callbacks:
        cbk.validation_data = val_data

    if workers > 0:
      if is_sequence:
        enqueuer = OrderedEnqueuer(
            generator,
            use_multiprocessing=use_multiprocessing,
            shuffle=shuffle)
      else:
        enqueuer = GeneratorEnqueuer(
            generator,
            use_multiprocessing=use_multiprocessing,
            wait_time=wait_time)
      enqueuer.start(workers=workers, max_queue_size=max_queue_size)
      output_generator = enqueuer.get()
    else:
      if is_sequence:
        output_generator = iter(generator)
      else:
        output_generator = generator

    callback_model.stop_training = False
    # Construct epoch logs.
    epoch_logs = {}
    while epoch < epochs:
      callbacks.on_epoch_begin(epoch)
      steps_done = 0
      batch_index = 0
      while steps_done < steps_per_epoch:
        generator_output = next(output_generator)

        if not hasattr(generator_output, '__len__'):
          raise ValueError('Output of generator should be '
                           'a tuple `(x, y, sample_weight)` '
                           'or `(x, y)`. Found: ' + str(generator_output))

        if len(generator_output) == 2:
          x, y = generator_output
          sample_weight = None
        elif len(generator_output) == 3:
          x, y, sample_weight = generator_output
        else:
          raise ValueError('Output of generator should be '
                           'a tuple `(x, y, sample_weight)` '
                           'or `(x, y)`. Found: ' + str(generator_output))
        # build batch logs
        batch_logs = {}
        if isinstance(x, list):
          batch_size = x[0].shape[0]
        elif isinstance(x, dict):
          batch_size = list(x.values())[0].shape[0]
        else:
          batch_size = x.shape[0]
        batch_logs['batch'] = batch_index
        batch_logs['size'] = batch_size
        callbacks.on_batch_begin(batch_index, batch_logs)

        outs = model.train_on_batch(
            x, y, sample_weight=sample_weight, class_weight=class_weight)

        if not isinstance(outs, list):
          outs = [outs]
        for l, o in zip(out_labels, outs):
          batch_logs[l] = o

        callbacks.on_batch_end(batch_index, batch_logs)

        batch_index += 1
        steps_done += 1

        # Epoch finished.
        if steps_done >= steps_per_epoch and do_validation:
          if val_gen:
            val_outs = evaluate_generator(
                model,
                validation_data,
                validation_steps,
                workers=workers,
                use_multiprocessing=use_multiprocessing,
                max_queue_size=max_queue_size)
          else:
            # No need for try/except because
            # data has already been validated.
            val_outs = model.evaluate(
                val_x,
                val_y,
                batch_size=batch_size,
                sample_weight=val_sample_weights,
                verbose=0)
          if not isinstance(val_outs, list):
            val_outs = [val_outs]
          # Same labels assumed.
          for l, o in zip(out_labels, val_outs):
            epoch_logs['val_' + l] = o

        if callback_model.stop_training:
          break

      callbacks.on_epoch_end(epoch, epoch_logs)
      epoch += 1
      if callback_model.stop_training:
        break

  finally:
    try:
      if enqueuer is not None:
        enqueuer.stop()
    finally:
      if val_enqueuer is not None:
        val_enqueuer.stop()

  callbacks.on_train_end()
  return model.history