def main(self, name, opts):
    logging.basicConfig(filename=opts.log_file,
                        format='%(levelname)s (%(asctime)s): %(message)s')
    log = logging.getLogger(name)
    if opts.verbose:
        log.setLevel(logging.DEBUG)
    else:
        log.setLevel(logging.INFO)

    if opts.seed is not None:
        np.random.seed(opts.seed)
        random.seed(opts.seed)

    self.log = log
    self.opts = opts

    make_dir(opts.out_dir)

    log.info('Building model ...')
    model = self.build_model()
    model.summary()
    self.set_trainability(model)
    if opts.filter_weights:
        conv_layer = mod.get_first_conv_layer(model.layers)
        log.info('Initializing filters of %s ...' % conv_layer.name)
        self.init_filter_weights(opts.filter_weights, conv_layer)
    mod.save_model(model, os.path.join(opts.out_dir, 'model.json'))

    log.info('Computing output statistics ...')
    output_names = model.output_names

    # Compute label statistics and, unless disabled, class weights for each
    # model output.
    output_stats = OrderedDict()
    if opts.no_class_weights:
        class_weights = None
    else:
        class_weights = OrderedDict()
    for name in output_names:
        output = hdf.read(opts.train_files, 'outputs/%s' % name,
                          nb_sample=opts.nb_train_sample)
        output = list(output.values())[0]
        output_stats[name] = get_output_stats(output)
        if class_weights is not None:
            class_weights[name] = get_output_class_weights(name, output)

    self.print_output_stats(output_stats)
    if class_weights:
        self.print_class_weights(class_weights)

    output_weights = None
    if opts.output_weights:
        log.info('Initializing output weights ...')
        output_weights = get_output_weights(output_names,
                                            opts.output_weights)
        print('Output weights:')
        for output_name in output_names:
            if output_name in output_weights:
                print('%s: %.2f' % (output_name,
                                    output_weights[output_name]))
        print()

    self.metrics = dict()
    for output_name in output_names:
        self.metrics[output_name] = get_metrics(output_name)

    optimizer = Adam(lr=opts.learning_rate)
    model.compile(optimizer=optimizer,
                  loss=mod.get_objectives(output_names),
                  loss_weights=output_weights,
                  metrics=self.metrics)

    log.info('Loading data ...')
    replicate_names = dat.get_replicate_names(
        opts.train_files[0],
        regex=opts.replicate_names,
        nb_key=opts.nb_replicate)
    data_reader = mod.data_reader_from_model(
        model, replicate_names=replicate_names)

    nb_train_sample = dat.get_nb_sample(opts.train_files,
                                        opts.nb_train_sample)
    train_data = data_reader(opts.train_files,
                             class_weights=class_weights,
                             batch_size=opts.batch_size,
                             nb_sample=nb_train_sample,
                             shuffle=True,
                             loop=True)

    if opts.val_files:
        nb_val_sample = dat.get_nb_sample(opts.val_files,
                                          opts.nb_val_sample)
        val_data = data_reader(opts.val_files,
                               batch_size=opts.batch_size,
                               nb_sample=nb_val_sample,
                               shuffle=False,
                               loop=True)
    else:
        val_data = None
        nb_val_sample = None

    log.info('Initializing callbacks ...')
    callbacks = self.get_callbacks()

    log.info('Training model ...')
    print()
    print('Training samples: %d' % nb_train_sample)
    if nb_val_sample:
        print('Validation samples: %d' % nb_val_sample)

    # Only derive validation steps if validation data exist; nb_val_sample
    # is None otherwise.
    if val_data is not None:
        validation_steps = nb_val_sample // opts.batch_size
    else:
        validation_steps = None

    model.fit_generator(
        train_data,
        steps_per_epoch=nb_train_sample // opts.batch_size,
        epochs=opts.nb_epoch,
        callbacks=callbacks,
        validation_data=val_data,
        validation_steps=validation_steps,
        max_queue_size=opts.data_q_size,
        workers=opts.data_nb_worker,
        verbose=0)

    print('\nTraining set performance:')
    print(format_table(self.perf_logger.epoch_logs,
                       precision=LOG_PRECISION))

    if self.perf_logger.val_epoch_logs:
        print('\nValidation set performance:')
        print(format_table(self.perf_logger.val_epoch_logs,
                           precision=LOG_PRECISION))

    # Restore model with highest validation performance.
    filename = os.path.join(opts.out_dir, 'model_weights_val.h5')
    if os.path.isfile(filename):
        model.load_weights(filename)

    # Delete metrics since they cause problems when loading the model from
    # HDF5 file. Metrics can be loaded from json + weights file.
    model.metrics = None
    model.metrics_names = None
    model.metrics_tensors = None
    model.save(os.path.join(opts.out_dir, 'model.h5'))

    log.info('Done!')
    return 0
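# ---------------------------------------------------------------------------
# Hedged illustration (not part of the training routine above). The closing
# comment in `main` notes that metrics are stripped before saving and that
# the model "can be loaded from json + weights file". The sketch below shows
# one way that loading path could look, assuming the `model.json` written
# above holds the architecture JSON and that only standard Keras layers are
# used; custom layers, if any, would additionally need to be passed to
# `model_from_json` via `custom_objects`. The helper name
# `load_trained_model` is illustrative only.
# ---------------------------------------------------------------------------
def load_trained_model(out_dir):
    """Rebuild the architecture from JSON and restore the saved weights."""
    from keras.models import model_from_json

    with open(os.path.join(out_dir, 'model.json')) as json_file:
        model = model_from_json(json_file.read())
    model.load_weights(os.path.join(out_dir, 'model_weights_val.h5'))
    return model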