Code example #1
# NOTE(review): fragment — the matching `if opts.trainer == 'MUNIT':` branch
# precedes this excerpt; only the UNIT/fallback branches are visible here.
elif opts.trainer == 'UNIT':
    trainer = UNIT_Trainer(config)
else:
    sys.exit("Only support MUNIT|UNIT")

# Load generator weights for both domains from the checkpoint.  Checkpoints
# written with PyTorch 0.3 use old state-dict key names, so on a failed direct
# load fall back to converting the keys with pytorch03_to_pytorch04.
try:
    state_dict = torch.load(opts.checkpoint)
    trainer.gen_a.load_state_dict(state_dict['a'])
    trainer.gen_b.load_state_dict(state_dict['b'])
except Exception:  # was a bare `except:` — don't swallow KeyboardInterrupt/SystemExit
    state_dict = pytorch03_to_pytorch04(torch.load(opts.checkpoint),
                                        opts.trainer)
    trainer.gen_a.load_state_dict(state_dict['a'])
    trainer.gen_b.load_state_dict(state_dict['b'])

trainer.cuda()
trainer.eval()

# Pick source/target generators according to the translation direction.
if opts.a2b:
    src_gen, tgt_gen = trainer.gen_a, trainer.gen_b
else:
    src_gen, tgt_gen = trainer.gen_b, trainer.gen_a
encode = src_gen.encode        # content encoder of the source domain
style_encode = tgt_gen.encode  # style encoder of the target domain
decode = tgt_gen.decode        # decoder of the target domain

# Resolve the resize target: a shared 'new_size' entry wins; otherwise use
# the per-domain size matching the input side.
if 'new_size' in config:
    new_size = config['new_size']
elif opts.a2b == 1:
    new_size = config['new_size_a']
else:
    new_size = config['new_size_b']

# NOTE(review): excerpt is truncated here — the transforms.Compose([...])
# argument list continues beyond this fragment.
with torch.no_grad():
    transform = transforms.Compose([
Code example #2
File: test_batch.py — Project: phonx/MUNIT
# Build a single-image loader over the input folder (no shuffle, no crop).
data_loader = get_data_loader_folder(opts.input_folder, 1, False, new_size=config['new_size_a'], crop=False)

config['vgg_model_path'] = opts.output_path

# Instantiate the requested trainer variant.
if opts.trainer == 'MUNIT':
    style_dim = config['gen']['style_dim']
    trainer = MUNIT_Trainer(config)
elif opts.trainer == 'UNIT':
    trainer = UNIT_Trainer(config)
else:
    sys.exit("Only support MUNIT|UNIT")


# Restore generator weights for both domains.
state_dict = torch.load(opts.checkpoint)
trainer.gen_a.load_state_dict(state_dict['a'])
trainer.gen_b.load_state_dict(state_dict['b'])

trainer.cuda()
trainer.eval()

# Direction a->b encodes with gen_a and decodes with gen_b (and vice versa).
if opts.a2b:
    encode, decode = trainer.gen_a.encode, trainer.gen_b.decode
else:
    encode, decode = trainer.gen_b.encode, trainer.gen_a.decode

if opts.trainer == 'MUNIT':
    # Start testing
    # NOTE(review): Variable(..., volatile=True) is the pre-0.4 PyTorch idiom
    # for inference mode; on >=0.4 it is deprecated/no-op — confirm the target
    # PyTorch version.  The excerpt appears truncated after the decode call.
    style_fixed = Variable(torch.randn(opts.num_style, style_dim, 1, 1).cuda(), volatile=True)
    for i, (images, names) in enumerate(zip(data_loader,image_names)):
        print(names[1])
        images = Variable(images.cuda(), volatile=True)
        # MUNIT encode returns (content, style); only the content code is kept.
        content, _ = encode(images)
        # Either reuse one fixed style batch for every image (synchronized)
        # or draw a fresh random style batch per input image.
        style = style_fixed if opts.synchronized else Variable(torch.randn(opts.num_style, style_dim, 1, 1).cuda(), volatile=True)
        for j in range(opts.num_style):
            s = style[j].unsqueeze(0)
            outputs = decode(content, s)
Code example #3
def main(argv):
    """Train a MUNIT model.

    Sets up logging and output folders, builds the trainer and data
    loaders, then runs the discriminator/generator update loop until
    ``max_iter`` iterations have been performed.
    """
    (opts, args) = parser.parse_args(argv)
    cudnn.benchmark = True

    # Load experiment setting
    config = get_config(opts.config)
    max_iter = config['max_iter']

    # Prepare output folders plus file and tensorboard loggers.
    output_subfolders = prepare_logging_folders(config['output_root'],
                                                config['experiment_name'])
    logger = create_logger(
        os.path.join(output_subfolders['logs'], 'train_log.log'))
    # Keep a copy of the config next to the logs for reproducibility.
    shutil.copy(opts.config,
                os.path.join(output_subfolders['logs'], 'config.yaml'))

    tb_logger = tensorboard_logger.Logger(output_subfolders['logs'])

    logger.info('============ Initialized logger ============')
    logger.info('Config File: {}'.format(opts.config))

    # Model and data.
    trainer = MUNIT_Trainer(config, opts)
    trainer.cuda()
    loaders = get_all_data_loaders(config)
    # One fixed validation batch, reused for every visualization dump.
    val_display_images = next(iter(loaders['val']))
    logger.info('Test images: {}'.format(val_display_images['A_paths']))

    # Optionally resume from a saved model; otherwise start counting at zero.
    iterations = trainer.resume(opts.model_path,
                                hyperparameters=config) if opts.resume else 0

    while True:
        for batch in loaders['train']:
            trainer.update_learning_rate()
            images_a = Variable(batch['A'].cuda())
            images_b = Variable(batch['B'].cuda())

            # One discriminator step followed by one generator step.
            trainer.dis_update(images_a, images_b, config)
            trainer.gen_update(images_a, images_b, config)

            # Periodically push losses and sampled images to tensorboard.
            if (iterations + 1) % config['log_iter'] == 0:
                for tag, value in trainer.loss.items():
                    tb_logger.scalar_summary(tag, value, iterations)

                val_output_imgs = trainer.sample(
                    Variable(val_display_images['A'].cuda()),
                    Variable(val_display_images['B'].cuda()))

                # Unbind each sample batch and concatenate along dim 2 so each
                # entry becomes a single strip image.
                tb_imgs = [torch.cat(torch.unbind(imgs, 0), dim=2)
                           for imgs in val_output_imgs.values()]
                tb_logger.image_summary(list(val_output_imgs.keys()), tb_imgs,
                                        iterations)

            # Periodic progress line in the log file.
            if (iterations + 1) % config['print_iter'] == 0:
                logger.info(
                    "Iteration: {:08}/{:08} Discriminator Loss: {:.4f} Generator Loss: {:.4f}"
                    .format(iterations + 1, max_iter, trainer.loss['D/total'],
                            trainer.loss['G/total']))

            # Periodic snapshot of the network weights.
            if (iterations + 1) % config['snapshot_save_iter'] == 0:
                trainer.save(output_subfolders['models'], iterations)

            iterations += 1
            if iterations >= max_iter:
                return