def main(args): """ Evaluate all models in a user-defined directory against the DIV2K validation set. The results are written to a user-defined JSON file. All models in the input directory must have been trained for the same downgrade operator (bicubic or unknown) and the same scale (2, 3 or 4). """ mps = model_paths(args.indir) if mps: generator = fullsize_sequence(args.dataset, scale=args.scale, subset='valid', downgrade=args.downgrade) psnr_dict = {} for mp in mps: reset_session(args.gpu_memory_fraction) psnr = evaluate_model(mp, generator) logger.info('PSNR = %.4f for model %s', psnr, mp) psnr_dict[mp] = psnr logger.info('Write results to %s', args.outfile) with open(args.outfile, 'w') as f: json.dump(psnr_dict, f) best_psnr, best_model = select_best_psnr(psnr_dict) logger.info('Best PSNR = %.4f for model %s', best_psnr, best_model) else: logger.warning('No models found in %s', args.indir)
def main(args):
    train_dir, models_dir = create_train_workspace(args.outdir)
    write_args(train_dir, args)
    logger.info('Training workspace is %s', train_dir)

    # Training data: randomly cropped LR/HR patch pairs from the DIV2K training set.
    training_generator = cropped_sequence(args.dataset, scale=args.scale, subset='train',
                                          downgrade=args.downgrade, image_ids=args.training_images,
                                          batch_size=args.batch_size)

    if args.benchmark:
        # Benchmark mode: validate on full-size DIV2K validation images, one image per step.
        logger.info('Validation with DIV2K benchmark')
        validation_steps = len(args.validation_images)
        validation_generator = fullsize_sequence(args.dataset, scale=args.scale, subset='valid',
                                                 downgrade=args.downgrade, image_ids=args.validation_images)
    else:
        # Faster alternative: validate on randomly cropped patches.
        logger.info('Validation with randomly cropped images from DIV2K validation set')
        validation_steps = args.validation_steps
        validation_generator = cropped_sequence(args.dataset, scale=args.scale, subset='valid',
                                                downgrade=args.downgrade, image_ids=args.validation_images,
                                                batch_size=args.batch_size)

    if args.initial_epoch:
        # Resume training from a previously saved model (already compiled).
        logger.info('Resume training of model %s', args.pretrained_model)
        model = _load_model(args.pretrained_model)
    else:
        if args.model == 'edsr':
            loss = mean_absolute_error
            model = edsr.edsr(scale=args.scale,
                              num_filters=args.num_filters,
                              num_res_blocks=args.num_res_blocks,
                              res_block_scaling=args.res_scaling)
        else:
            # mae is the Keras alias for mean_absolute_error, so both branches
            # train with an L1 pixel loss.
            loss = mae
            model_fn = wdsr.wdsr_b if args.model == 'wdsr-b' else wdsr.wdsr_a
            model = model_fn(scale=args.scale,
                             num_filters=args.num_filters,
                             num_res_blocks=args.num_res_blocks,
                             res_block_expansion=args.res_expansion,
                             res_block_scaling=args.res_scaling)

        if args.weightnorm:
            model.compile(optimizer=wn.AdamWithWeightnorm(lr=args.learning_rate), loss=loss, metrics=[psnr])
            if args.num_init_batches > 0:
                logger.info('Data-based initialization of weights with %d batches', args.num_init_batches)
                model_weightnorm_init(model, training_generator, args.num_init_batches)
        else:
            model.compile(optimizer=Adam(lr=args.learning_rate), loss=loss, metrics=[psnr])

        if args.pretrained_model:
            logger.info('Initialization with weights from pre-trained model %s', args.pretrained_model)
            copy_weights(from_model=_load_model(args.pretrained_model), to_model=model)

    if args.print_model_summary:
        model.summary()

    callbacks = [tensor_board(train_dir),
                 learning_rate(step_size=args.learning_rate_step_size, decay=args.learning_rate_decay),
                 model_checkpoint_after(args.save_models_after_epoch, models_dir, monitor='val_psnr',
                                        save_best_only=args.save_best_models_only or args.benchmark)]

    model.fit_generator(training_generator,
                        epochs=args.epochs,
                        initial_epoch=args.initial_epoch,
                        steps_per_epoch=args.steps_per_epoch,
                        validation_data=validation_generator,
                        validation_steps=validation_steps,
                        use_multiprocessing=args.use_multiprocessing,
                        max_queue_size=args.max_queue_size,
                        workers=args.num_workers,
                        callbacks=callbacks)
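# Illustrative sketches, not the repository's actual definitions: the psnr
# metric and the learning_rate callback are imported from elsewhere. Minimal
# versions consistent with their use above could look like this; the pixel
# range and the step-decay schedule shape are assumptions.
import tensorflow as tf
from keras.callbacks import LearningRateScheduler

def psnr_sketch(y_true, y_pred, max_val=255.0):
    # Peak signal-to-noise ratio, assuming image tensors in [0, max_val].
    return tf.image.psnr(y_true, y_pred, max_val=max_val)

def learning_rate_sketch(step_size, decay):
    # Step decay: multiply the learning rate by `decay` every `step_size`
    # epochs. Assumes a Keras version whose LearningRateScheduler passes the
    # current learning rate as the second argument to the schedule function.
    def schedule(epoch, lr):
        return lr * decay if epoch > 0 and epoch % step_size == 0 else lr
    return LearningRateScheduler(schedule, verbose=1)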