Example #1
0
File: test.py Project: zergey/MUNIT
def main(argv):
    """Translate one input image with a MUNIT generator.

    Parses CLI options, loads the generator weights from ``opts.checkpoint``,
    encodes the input image's content, and decodes it with either the style
    extracted from ``opts.style`` (one output) or ``opts.num_style`` random
    style codes. Outputs are written to ``opts.output_folder``.
    """
    (opts, args) = parser.parse_args(argv)
    torch.manual_seed(opts.seed)
    torch.cuda.manual_seed(opts.seed)
    if not os.path.exists(opts.output_folder):
        os.makedirs(opts.output_folder)

    # Load experiment setting
    config = get_config(opts.config)
    style_dim = config['gen']['style_dim']
    # A concrete style image pins the translation to a single style, so only
    # one output makes sense in that case.
    opts.num_style = 1 if opts.style != '' else opts.num_style

    # Setup model and data loader
    trainer = MUNIT_Trainer(config)
    state_dict = torch.load(opts.checkpoint)
    trainer.gen_a.load_state_dict(state_dict['a'])
    trainer.gen_b.load_state_dict(state_dict['b'])
    trainer.cuda()
    trainer.eval()
    # a2b: encode with the source-domain generator, decode with the target's.
    # The style reference is encoded by the *target*-domain encoder.
    encode = trainer.gen_a.encode if opts.a2b else trainer.gen_b.encode  # encode function
    style_encode = trainer.gen_b.encode if opts.a2b else trainer.gen_a.encode  # encode function
    decode = trainer.gen_b.decode if opts.a2b else trainer.gen_a.decode  # decode function

    transform = transforms.Compose([
        transforms.Resize(config['new_size']),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    # Inference only. torch.no_grad() replaces Variable(..., volatile=True),
    # which is a deprecated no-op since PyTorch 0.4 and would otherwise let
    # autograd record the forward pass and waste memory.
    with torch.no_grad():
        image = transform(
            Image.open(opts.input).convert('RGB')).unsqueeze(0).cuda()
        style_image = transform(
            Image.open(opts.style).convert('RGB')).unsqueeze(0).cuda() \
            if opts.style != '' else None

        # Start testing
        style_rand = torch.randn(opts.num_style, style_dim, 1, 1).cuda()
        content, _ = encode(image)
        if opts.style != '':
            _, style = style_encode(style_image)
        else:
            style = style_rand
        for j in range(opts.num_style):
            s = style[j].unsqueeze(0)
            outputs = decode(content, s)
            # Map generator output from [-1, 1] back to [0, 1].
            outputs = (outputs + 1) / 2.
            path = os.path.join(opts.output_folder, 'output{:03d}.jpg'.format(j))
            vutils.save_image(outputs.data, path, padding=0, normalize=True)
        if not opts.output_only:
            # also save input images
            vutils.save_image(image.data,
                              os.path.join(opts.output_folder, 'input.jpg'),
                              padding=0,
                              normalize=True)
Example #2
0
def main(argv):
    """Translate every image in ``opts.input_folder`` with a MUNIT generator.

    For each input image, ``opts.num_style`` stylized outputs are decoded from
    random style codes (one fixed batch of codes when ``opts.synchronized`` is
    set, fresh codes per image otherwise) and saved to ``opts.output_folder``.
    """
    (opts, args) = parser.parse_args(argv)
    torch.manual_seed(opts.seed)
    torch.cuda.manual_seed(opts.seed)
    if not os.path.exists(opts.output_folder):
        os.makedirs(opts.output_folder)

    # Load experiment setting
    config = get_config(opts.config)
    # BUG FIX: the source-domain channel count for a2b is 'input_dim_a', not
    # 'new_size' (an image size such as 256 is never 1, so the grayscale test
    # below could never trigger for a2b).
    input_dim = config['input_dim_a'] if opts.a2b else config['input_dim_b']
    style_dim = config['gen']['style_dim']

    # Setup model and data loader
    data_loader = get_data_loader_folder(opts.input_folder,
                                         1,
                                         False,
                                         input_dim == 1,  # grayscale input
                                         crop=False)
    trainer = MUNIT_Trainer(config)
    state_dict = torch.load(opts.checkpoint)
    trainer.gen_a.load_state_dict(state_dict['a'])
    trainer.gen_b.load_state_dict(state_dict['b'])
    trainer.cuda()
    trainer.eval()
    # a2b: encode with the source-domain generator, decode with the target's.
    encode = trainer.gen_a.encode if opts.a2b else trainer.gen_b.encode  # encode function
    decode = trainer.gen_b.decode if opts.a2b else trainer.gen_a.decode  # decode function

    # Start testing. torch.no_grad() replaces Variable(..., volatile=True),
    # which is a deprecated no-op since PyTorch 0.4 and would otherwise let
    # autograd record the forward pass and waste memory.
    with torch.no_grad():
        style_fixed = torch.randn(opts.num_style, style_dim, 1, 1).cuda()
        for i, images in enumerate(data_loader):
            images = images.cuda()
            content, _ = encode(images)
            style = style_fixed if opts.synchronized else torch.randn(
                opts.num_style, style_dim, 1, 1).cuda()
            for j in range(opts.num_style):
                s = style[j].unsqueeze(0)
                outputs = decode(content, s)
                # Map generator output from [-1, 1] back to [0, 1].
                outputs = (outputs + 1) / 2.
                path = os.path.join(opts.output_folder,
                                    'input{:03d}_output{:03d}.jpg'.format(i, j))
                vutils.save_image(outputs.data, path, padding=0, normalize=True)
            if not opts.output_only:
                # also save input images
                vutils.save_image(images.data,
                                  os.path.join(opts.output_folder,
                                               'input{:03d}.jpg'.format(i)),
                                  padding=0,
                                  normalize=True)
Example #3
0
    style_dim = config['gen']['style_dim']
    trainer = MUNIT_Trainer(config)
else:
    sys.exit("Only support MUNIT")

# Restore both domain generators from the checkpoint; if direct loading fails
# (e.g. the checkpoint predates the current key layout), retry after running
# it through the PyTorch-0.3 -> 0.4 converter.
try:
    state_dict = torch.load(opts.checkpoint)
    trainer.gen_a.load_state_dict(state_dict['a'])
    trainer.gen_b.load_state_dict(state_dict['b'])
except Exception:
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are not
    # swallowed; the best-effort conversion fallback is preserved.
    state_dict = pytorch03_to_pytorch04(torch.load(opts.checkpoint), opts.trainer)
    trainer.gen_a.load_state_dict(state_dict['a'])
    trainer.gen_b.load_state_dict(state_dict['b'])

trainer.cuda()
trainer.eval()
# a2b: encode with the source-domain generator, decode with the target's;
# the style reference is encoded by the *target*-domain encoder.
encode = trainer.gen_a.encode if opts.a2b else trainer.gen_b.encode # encode function
style_encode = trainer.gen_b.encode if opts.a2b else trainer.gen_a.encode # encode function
decode = trainer.gen_b.decode if opts.a2b else trainer.gen_a.decode # decode function

# Configs carry either one shared resize ('new_size') or per-domain sizes
# ('new_size_a'/'new_size_b'); pick the one matching the translation direction.
if 'new_size' in config:
    new_size = config['new_size']
else:
    if opts.a2b == 1:
        new_size = config['new_size_a']
    else:
        new_size = config['new_size_b']

transform = transforms.Compose([transforms.Resize(new_size),
                                transforms.ToTensor(),
                                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
Example #4
0
config['vgg_model_path'] = opts.output_path
# Build the requested trainer; only the MUNIT variant has a style code, so
# style_dim is defined for MUNIT alone.
if opts.trainer == 'UNIT':
    trainer = UNIT_Trainer(config)
elif opts.trainer == 'MUNIT':
    style_dim = config['gen']['style_dim']
    trainer = MUNIT_Trainer(config)
else:
    sys.exit("Only support MUNIT|UNIT")


# Restore the generator weights for both domains and switch to eval mode.
state_dict = torch.load(opts.checkpoint)
trainer.gen_a.load_state_dict(state_dict['a'])
trainer.gen_b.load_state_dict(state_dict['b'])
trainer.cuda()
trainer.eval()
# Translation direction selects which generator encodes and which decodes.
encode = trainer.gen_b.encode if not opts.a2b else trainer.gen_a.encode # encode function
decode = trainer.gen_a.decode if not opts.a2b else trainer.gen_b.decode # decode function

if opts.trainer == 'MUNIT':
    # Start testing
    style_fixed = Variable(torch.randn(opts.num_style, style_dim, 1, 1).cuda(), volatile=True)
    for i, (images, names) in enumerate(zip(data_loader,image_names)):
        print(names[1])
        images = Variable(images.cuda(), volatile=True)
        content, _ = encode(images)
        style = style_fixed if opts.synchronized else Variable(torch.randn(opts.num_style, style_dim, 1, 1).cuda(), volatile=True)
        for j in range(opts.num_style):
            s = style[j].unsqueeze(0)
            outputs = decode(content, s)
            outputs = (outputs + 1) / 2.