Example #1
    def callbacks(self, model):
        opts = self.opts
        cbacks = []

        # Log progress and stop early when the monitored loss stops improving.
        cbacks.append(cb.ProgressLogger())
        cbacks.append(cb.EarlyStopping(patience=opts.early_stop, verbose=1))
        if opts.max_time is not None:
            # Stop after 80% of the allowed wall-clock time (hours -> seconds).
            cbacks.append(cb.Timer(opts.max_time * 3600 * 0.8))

        # Checkpoint the weights after every epoch ...
        h = ModelCheckpoint(pt.join(opts.out_dir, 'model_weights_last.h5'),
                            save_best_only=False)
        cbacks.append(h)
        # ... and keep a separate copy of the best weights seen so far.
        h = ModelCheckpoint(pt.join(opts.out_dir, 'model_weights.h5'),
                            save_best_only=True,
                            verbose=1)
        cbacks.append(h)

        def lr_schedule():
            # Multiply the learning rate by the decay factor in place
            # (Theano shared-variable API).
            old_lr = model.optimizer.lr.get_value()
            new_lr = old_lr * opts.lr_decay
            model.optimizer.lr.set_value(new_lr)
            print('Learning rate dropped from %g to %g' % (old_lr, new_lr))

        h = cb.LearningRateScheduler(lr_schedule, patience=opts.lr_schedule)
        cbacks.append(h)

        def save_lc():
            # Write epoch- and batch-level learning curves to CSV files.
            log = {
                'lc.csv': perf_logger.frame(),
                'lc_batch.csv': perf_logger.batch_frame()
            }
            for k, v in log.items():
                with open(pt.join(opts.out_dir, k), 'w') as f:
                    f.write(perf_logs_str(v))

        # Record performance metrics and dump them after each epoch.
        perf_logger = cb.PerformanceLogger(callbacks=[save_lc])
        cbacks.append(perf_logger)

        return cbacks
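
How such a list is consumed: the method returns plain Keras callbacks, so the caller passes them to `fit`. A minimal sketch, assuming a compiled Keras 1.x model (`nb_epoch` became `epochs` in Keras 2) and a hypothetical `trainer` instance owning the method above; none of these names come from the original source:

    model.compile(optimizer='rmsprop', loss='binary_crossentropy')
    model.fit(x_train, y_train,
              batch_size=128,
              nb_epoch=30,                           # `epochs=30` in Keras >= 2
              validation_data=(x_val, y_val),
              callbacks=trainer.callbacks(model))    # Keras invokes each callback per batch/epoch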
Example #2
    def get_callbacks(self):
        opts = self.opts
        callbacks = []

        # Stop early when the monitored loss stops improving.
        if opts.val_files:
            callbacks.append(
                kcbk.EarlyStopping('val_loss' if opts.val_files else 'loss',
                                   patience=opts.early_stopping,
                                   verbose=1))

        # Checkpoint the weights after every epoch, plus a separate copy of
        # the best weights according to the monitored loss.
        callbacks.append(
            kcbk.ModelCheckpoint(os.path.join(opts.out_dir,
                                              'model_weights_train.h5'),
                                 save_best_only=False))
        monitor = 'val_loss' if opts.val_files else 'loss'
        callbacks.append(
            kcbk.ModelCheckpoint(os.path.join(opts.out_dir,
                                              'model_weights_val.h5'),
                                 monitor=monitor,
                                 save_best_only=True,
                                 verbose=1))

        # Stop training after a time budget (hours -> seconds) or when a
        # stop file appears on disk.
        max_time = int(opts.max_time * 3600) if opts.max_time else None
        callbacks.append(
            cbk.TrainingStopper(max_time=max_time,
                                stop_file=opts.stop_file,
                                verbose=1))

        def learning_rate_schedule(epoch):
            # Exponential decay: lr = lr0 * decay**epoch.
            lr = opts.learning_rate * opts.learning_rate_decay**epoch
            print('Learning rate: %.3g' % lr)
            return lr

        callbacks.append(kcbk.LearningRateScheduler(learning_rate_schedule))

        def save_lc(epoch, epoch_logs, val_epoch_logs):
            # Dump training and validation learning curves after each epoch.
            logs = {'lc_train.csv': epoch_logs, 'lc_val.csv': val_epoch_logs}
            for name, logs in six.iteritems(logs):
                if not logs:
                    continue
                logs = pd.DataFrame(logs)
                with open(os.path.join(opts.out_dir, name), 'w') as f:
                    f.write(perf_logs_str(logs))

        # Collect the unique metric names across all outputs, keeping order.
        metrics = OrderedDict()
        for metric_funs in six.itervalues(self.metrics):
            for metric_fun in metric_funs:
                metrics[metric_fun.__name__] = True
        metrics = ['loss'] + list(metrics.keys())

        self.perf_logger = cbk.PerformanceLogger(
            callbacks=[save_lc],
            metrics=metrics,
            precision=LOG_PRECISION,
            verbose=not opts.no_log_outputs)
        callbacks.append(self.perf_logger)

        # Write TensorBoard summaries when running on the TensorFlow backend.
        if K._BACKEND == 'tensorflow':
            callbacks.append(
                kcbk.TensorBoard(log_dir=opts.out_dir,
                                 histogram_freq=1,
                                 write_graph=True,
                                 write_images=True))

        return callbacks
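
The `learning_rate_schedule` closure implements plain exponential decay, `lr = lr0 * decay**epoch`. A quick worked example with hypothetical option values:

    >>> learning_rate, learning_rate_decay = 0.001, 0.95  # hypothetical values
    >>> [round(learning_rate * learning_rate_decay**e, 6) for e in (0, 1, 5, 10)]
    [0.001, 0.00095, 0.000774, 0.000599]

Note that `kcbk.LearningRateScheduler` expects the schedule to return the new rate, whereas Example #1 mutated the optimizer's shared variable directly.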
Example #3
    def get_callbacks(self):
        # Keras callbacks expose the internal state and statistics of the
        # model during training.
        opts = self.opts
        callbacks = []  # list of callbacks passed to `fit`

        if opts.val_files:
            callbacks.append(kcbk.EarlyStopping(  # stop training when the monitored quantity stops improving
                'val_loss' if opts.val_files else 'loss',  # quantity to monitor
                patience=opts.early_stopping,  # number of epochs without improvement to tolerate
                verbose=1  # verbosity mode
            ))

        # keras.callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=0,
        #     save_best_only=False, save_weights_only=False, mode='auto', period=1)

        callbacks.append(kcbk.ModelCheckpoint(  # save the model after every epoch
            os.path.join(opts.out_dir, 'model_weights_train.h5'),  # filepath to save the weights to
            save_best_only=False))  # overwrite the file after every epoch, not only on improvement

        monitor = 'val_loss' if opts.val_files else 'loss'

        callbacks.append(kcbk.ModelCheckpoint(
            os.path.join(opts.out_dir, 'model_weights_val.h5'),  # filepath
            monitor=monitor,  # quantity to monitor
            save_best_only=True, verbose=1  # keep only the best model according to the monitored quantity
        ))

        max_time = int(opts.max_time * 3600) if opts.max_time else None
        callbacks.append(cbk.TrainingStopper(  # stop training after a time limit or when a stop file is detected
            max_time=max_time,  # maximum training time in seconds
            stop_file=opts.stop_file,  # path of a file whose existence triggers the end of training
            verbose=1  # if `True`, log a message when training is stopped
        ))

        def learning_rate_schedule(epoch):  # exponential decay from the initial rate
            lr = opts.learning_rate * opts.learning_rate_decay**epoch
            print('Learning rate: %.3g' % lr)
            return lr

        # LearningRateScheduler calls this function with the epoch index
        # (integer, starting at 0) and applies the returned rate (float).
        callbacks.append(kcbk.LearningRateScheduler(learning_rate_schedule))

        def save_lc(epoch, epoch_logs, val_epoch_logs):
            logs = {'lc_train.tsv': epoch_logs,
                    'lc_val.tsv': val_epoch_logs}
            for name, logs in six.iteritems(logs):  # iterate over the dictionary's items
                if not logs:
                    continue
                logs = pd.DataFrame(logs)
                with open(os.path.join(opts.out_dir, name), 'w') as f:
                    f.write(perf_logs_str(logs))

        metrics = OrderedDict()
        for metric_funs in six.itervalues(self.metrics):  # iterate over the dictionary's values
            for metric_fun in metric_funs:
                metrics[metric_fun.__name__] = True
        metrics = ['loss'] + list(metrics.keys())

        self.perf_logger = cbk.PerformanceLogger(  # logs performance metrics during training
            callbacks=[save_lc],  # functions taking `epoch`, `epoch_logs`, and
            # `val_epoch_logs`, called at the end of each epoch
            metrics=metrics,  # names of the metrics to log
            precision=LOG_PRECISION,  # floating-point precision (defined as 4)
            verbose=not opts.no_log_outputs  # if `True`, log metrics of individual outputs
        )
        callbacks.append(self.perf_logger)

        if K._BACKEND == 'tensorflow' and not opts.no_tensorboard:
            callbacks.append(kcbk.TensorBoard(  # TensorBoard visualizations
                log_dir=opts.out_dir,  # directory for the log files parsed by TensorBoard
                histogram_freq=0,  # frequency (in epochs) for activation and weight histograms; 0 disables them
                write_graph=True,  # whether to visualize the graph in TensorBoard
                write_images=True  # whether to write model weights as images in TensorBoard
            ))

        return callbacks
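
All three examples delegate serialization to a `perf_logs_str` helper that is not shown. A plausible one-line definition, assuming it simply renders the pandas DataFrame as delimited text (an assumption, not the original source):

    def perf_logs_str(logs):
        # Assumed helper: format the learning-curve DataFrame as tab-separated text.
        return logs.to_csv(None, sep='\t', float_format='%.4f', index=False)

When the TensorBoard callback is active, the summaries written to `opts.out_dir` can be browsed with `tensorboard --logdir <out_dir>`.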