def __init__(self, monitor='val_loss', patience=0, verbose=0, mode='auto', decayRatio=0.1):
    """Configure a learning-rate scheduler callback.

    Args:
        monitor: name of the logged quantity to watch (e.g. 'val_loss').
        patience: number of epochs without improvement tolerated before acting.
        verbose: verbosity level; > 0 enables progress messages.
        mode: one of 'auto', 'min', 'max' — direction in which `monitor`
            is considered to improve. Unknown values fall back to 'auto'.
        decayRatio: multiplicative factor applied to the learning rate on decay.
    """
    super(Callback, self).__init__()
    self.monitor = monitor
    self.patience = patience
    self.verbose = verbose
    self.wait = 0
    self.decayRatio = decayRatio
    if mode not in ['auto', 'min', 'max']:
        # Bug fix: the original formatted `self.mode`, which is never
        # assigned, so an unknown mode raised AttributeError instead of
        # warning. Use the local `mode` argument instead.
        warnings.warn('Mode %s is unknown, '
                      'fallback to auto mode.' % (mode),
                      RuntimeWarning)
        mode = 'auto'
    if mode == 'min':
        self.monitor_op = np.less
        self.best = np.Inf
    elif mode == 'max':
        self.monitor_op = np.greater
        self.best = -np.Inf
    else:
        # 'auto': accuracy-like metrics improve upward, everything else downward.
        if 'acc' in self.monitor:
            self.monitor_op = np.greater
            self.best = -np.Inf
        else:
            self.monitor_op = np.less
            self.best = np.Inf
def __init__(self, filepath, base_model, monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=1):
    """Set up a checkpoint callback that saves `base_model`.

    Args:
        filepath: format string for the checkpoint path; may reference
            `epoch` and any logged metric.
        base_model: the model instance that will actually be saved.
        monitor: logged quantity used to decide whether the model improved.
        verbose: verbosity level; > 0 enables progress messages.
        save_best_only: if True, only overwrite when `monitor` improves.
        save_weights_only: if True, save weights instead of the full model.
        mode: 'auto', 'min' or 'max' — improvement direction for `monitor`.
        period: save at most once every `period` epochs.
    """
    super(MultiGPUCheckpointCallback, self).__init__()
    self.base_model = base_model
    self.monitor = monitor
    self.verbose = verbose
    self.filepath = filepath
    self.save_best_only = save_best_only
    self.save_weights_only = save_weights_only
    self.period = period
    self.epochs_since_last_save = 0

    # Resolve an unknown mode to 'auto', then resolve 'auto' to a concrete
    # direction based on the metric name, and finally pick the comparator.
    if mode not in ['auto', 'min', 'max']:
        warnings.warn('ModelCheckpoint mode %s is unknown, '
                      'fallback to auto mode.' % (mode),
                      RuntimeWarning)
        mode = 'auto'
    if mode == 'auto':
        metric_increases = ('acc' in self.monitor
                            or self.monitor.startswith('fmeasure'))
        mode = 'max' if metric_increases else 'min'
    if mode == 'max':
        self.monitor_op = np.greater
        self.best = -np.Inf
    else:
        self.monitor_op = np.less
        self.best = np.Inf
def on_epoch_end(self, epoch, logs=None):
    """Save the model at the end of an epoch, honouring `period` and
    `save_best_only`.

    Args:
        epoch: zero-based epoch index (messages/paths use `epoch + 1`).
        logs: dict of logged metrics for this epoch; keys may be used in
            the `filepath` format string.
    """
    logs = logs or {}
    self.epochs_since_last_save += 1
    if self.epochs_since_last_save < self.period:
        return
    self.epochs_since_last_save = 0
    filepath = self.filepath.format(epoch=epoch + 1, **logs)

    def _write(path):
        # Save either just the weights or the full model, per configuration.
        if self.save_weights_only:
            self.model.save_weights(path, overwrite=True)
        else:
            self.model.save(path, overwrite=True)

    if not self.save_best_only:
        # Unconditional checkpoint every `period` epochs.
        if self.verbose > 0:
            print('Epoch %05d: saving model to %s' % (epoch + 1, filepath))
        _write(filepath)
        return

    current = logs.get(self.monitor)
    if current is None:
        warnings.warn('Can save best model only with %s available, '
                      'skipping.' % (self.monitor), RuntimeWarning)
        return
    if self.monitor_op(current, self.best):
        if self.verbose > 0:
            print('Epoch %05d: %s improved from %0.5f to %0.5f,'
                  ' saving model to %s'
                  % (epoch + 1, self.monitor, self.best,
                     current, filepath))
        self.best = current
        _write(filepath)
    elif self.verbose > 0:
        print('Epoch %05d: %s did not improve' % (epoch + 1, self.monitor))
def on_epoch_end(self, epoch, logs=None):
    """Decay the optimizer learning rate when `monitor` stops improving.

    After `patience` epochs without improvement, multiplies the current
    learning rate by `self.decayRatio`.

    Args:
        epoch: zero-based epoch index.
        logs: dict of logged metrics for this epoch; must contain
            `self.monitor` for the scheduler to act.
    """
    logs = logs or {}  # avoid the mutable-default-argument pitfall
    current = logs.get(self.monitor)
    current_lr = K.get_value(self.model.optimizer.lr)
    print(" \nLearning rate:", current_lr)
    if current is None:
        warnings.warn(
            'AdvancedLearnignRateScheduler'
            ' requires %s available!' % (self.monitor), RuntimeWarning)
        # Bug fix: without this return the comparison below would call
        # monitor_op(None, self.best) and raise a TypeError whenever the
        # monitored quantity is missing from `logs`.
        return
    if self.monitor_op(current, self.best):
        self.best = current
        self.wait = 0
    else:
        if self.wait >= self.patience:
            assert hasattr(self.model.optimizer, 'lr'), \
                'Optimizer must have a "lr" attribute.'
            current_lr = K.get_value(self.model.optimizer.lr)
            new_lr = current_lr * self.decayRatio
            if self.verbose > 0:
                print(' \nEpoch %05d: reducing learning rate' % (epoch))
                sys.stderr.write(' \nnew lr: %.5f\n' % new_lr)
            K.set_value(self.model.optimizer.lr, new_lr)
            self.wait = 0
        self.wait += 1
def on_epoch_end(self, epoch, logs=None):
    """Checkpoint `self.base_model` every `period` epochs.

    In `save_best_only` mode the save is retried until it succeeds, because
    h5py permits only one writer per file: a concurrent holder of the file
    lock makes the save raise (errno 11, 'Resource temporarily unavailable').

    Args:
        epoch: zero-based epoch index (messages/paths use `epoch + 1`).
        logs: dict of logged metrics for this epoch; keys may be used in
            the `filepath` format string.
    """
    logs = logs or {}
    self.epochs_since_last_save += 1
    if self.epochs_since_last_save < self.period:
        return
    self.epochs_since_last_save = 0
    filepath = self.filepath.format(epoch=epoch + 1, **logs)

    if not self.save_best_only:
        # Unconditional checkpoint (no retry loop in this branch, matching
        # the original behaviour).
        if self.verbose > 0:
            print('Epoch %05d: saving model to %s' % (epoch + 1, filepath))
        if self.save_weights_only:
            self.base_model.save_weights(filepath, overwrite=True)
        else:
            self.base_model.save(filepath, overwrite=True)
        return

    current = logs.get(self.monitor)
    if current is None:
        warnings.warn(
            'Can save best model only with %s available, '
            'skipping.' % (self.monitor), RuntimeWarning)
        return

    if not self.monitor_op(current, self.best):
        if self.verbose > 0:
            print('Epoch %05d: %s did not improve' %
                  (epoch + 1, self.monitor))
        return

    if self.verbose > 0:
        print(
            'Epoch %05d: %s improved from %0.5f to %0.5f,'
            ' saving model to %s' % (epoch + 1, self.monitor,
                                     self.best, current, filepath))
    self.best = current
    # Retry until the h5py file lock is released and the save succeeds.
    while True:
        try:
            if self.save_weights_only:
                self.base_model.save_weights(filepath, overwrite=True)
            else:
                self.base_model.save(filepath, overwrite=True)
            print('Model saved successfully.')
            break
        except Exception as error:
            print(
                'Error while trying to save the model: {}.\nTrying again...'
                .format(error))
            sleep(5)
def on_epoch_end(self, epoch, logs=None):
    """Stop training once the monitored quantity reaches `self.value`.

    Sets `self.model.stop_training = True` when `monitor >= value`; if the
    monitored quantity is missing from `logs`, warns and does nothing.

    Args:
        epoch: zero-based epoch index (used only in the verbose message).
        logs: dict of logged metrics for this epoch.
    """
    logs = logs or {}  # avoid the mutable-default-argument pitfall
    current = logs.get(self.monitor)
    if current is None:
        # Original wrote `return warnings.warn(...)`, conflating the warning
        # with the (always-None) return value; keep the behaviour but make
        # the two steps explicit.
        warnings.warn("Early stopping requires %s available!" % self.monitor,
                      RuntimeWarning)
        return
    if current >= self.value:
        if self.verbose > 0:
            print("Epoch %05d: early stopping THR" % epoch)
        self.model.stop_training = True