opts.num_style = 1 if opts.style != '' else opts.num_style

# Setup model and data loader
config['vgg_model_path'] = opts.output_path
if opts.trainer == 'MUNIT':
    style_dim = config['gen']['style_dim']
    trainer = MUNIT_Trainer(config)
else:
    sys.exit("Only support MUNIT")

try:
    state_dict = torch.load(opts.checkpoint)
    trainer.gen_a.load_state_dict(state_dict['a'])
    trainer.gen_b.load_state_dict(state_dict['b'])
except:
    # Fall back to converting a PyTorch 0.3-style checkpoint to the 0.4 format
    state_dict = pytorch03_to_pytorch04(torch.load(opts.checkpoint), opts.trainer)
    trainer.gen_a.load_state_dict(state_dict['a'])
    trainer.gen_b.load_state_dict(state_dict['b'])

trainer.cuda()
trainer.eval()
encode = trainer.gen_a.encode if opts.a2b else trainer.gen_b.encode        # content encode function
style_encode = trainer.gen_b.encode if opts.a2b else trainer.gen_a.encode  # style encode function
decode = trainer.gen_b.decode if opts.a2b else trainer.gen_a.decode        # decode function

if 'new_size' in config:
    new_size = config['new_size']
else:
    if opts.a2b == 1:
        new_size = config['new_size_a']
    else:
        new_size = config['new_size_b']
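# --- Illustrative usage sketch (not part of the original script) ---
# A minimal example of how the encode/decode callables selected above are
# typically used: encode one source-domain image, then decode its content code
# with several random style codes. Assumes the MUNIT convention that encode(x)
# returns a (content, style) pair and decode(content, style) returns an image
# in [-1, 1]; opts.input and opts.output_folder are assumed option names.
import os
import torch
from PIL import Image
from torchvision import transforms
from torchvision.utils import save_image

transform = transforms.Compose([transforms.Resize(new_size),
                                transforms.ToTensor(),
                                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
with torch.no_grad():
    image = transform(Image.open(opts.input).convert('RGB')).unsqueeze(0).cuda()
    content, _ = encode(image)                                    # keep the content code, drop the source style
    styles = torch.randn(opts.num_style, style_dim, 1, 1).cuda()  # random target-domain style codes
    for j in range(opts.num_style):
        output = decode(content, styles[j].unsqueeze(0))          # translate with the j-th style
        save_image((output + 1) / 2.0,
                   os.path.join(opts.output_folder, 'output{:03d}.jpg'.format(j)),
                   padding=0)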
    trainer = UNIT_Trainer(config)
else:
    sys.exit("Only support MUNIT|UNIT")

############## configure checkpoint from output_folder
checkpoint_path = find_latest_model_file(os.path.join(opts.output_folder, 'checkpoints'),
                                         opts.checkpoint, keyword='gen')
try:
    state_dict = torch.load(checkpoint_path)
    trainer.gen_a.load_state_dict(state_dict['a'])
    trainer.gen_b.load_state_dict(state_dict['b'])
except:
    state_dict = pytorch03_to_pytorch04(torch.load(checkpoint_path))
    trainer.gen_a.load_state_dict(state_dict['a'])
    trainer.gen_b.load_state_dict(state_dict['b'])

trainer.cuda()
trainer.eval()

##### Dataset building
Dataset = choose_dataset(config['dataset_name'])
dataset = Dataset(config['data_root'], config, split='val')
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, drop_last=True, shuffle=False,
sys.exit("Only support AttnMUNIT|MUNIT|UNIT") # if opts.trainer == 'MUNIT': # trainer = MUNIT_Trainer(config) # elif opts.trainer == 'UNIT': # trainer = UNIT_Trainer(config) # elif opts.trainer == 'AttnMUNIT': # trainer = AttnMUNIT_Trainer(config) # else: # sys.exit("Only support AttnMUNIT|MUNIT|UNIT") try: state_dict = torch.load(opts.checkpoints[count]) trainer.gen_a.load_state_dict(state_dict['a']) trainer.gen_b.load_state_dict(state_dict['b']) except: state_dict = pytorch03_to_pytorch04(torch.load(opts.checkpoints[count]), trainer_name) trainer.gen_a.load_state_dict(state_dict['a']) trainer.gen_b.load_state_dict(state_dict['b']) trainer.cuda() trainer.eval() encode = None style_encode = None decode = None style_decode = None if trainer_name in ['AttnMUNIT','MUNIT']: encode = trainer.gen_a.encode if opts.a2b else trainer.gen_b.encode # encode function style_encode = trainer.gen_b.encode if opts.a2b else trainer.gen_a.encode # encode function decode = trainer.gen_b.decode if opts.a2b else trainer.gen_a.decode # decode function style_decode = trainer.gen_a.decode if opts.a2b else trainer.gen_b.decode # decode function
config = get_config(opts.config)
config['vgg_model_path'] = opts.output_path
if not os.path.exists(opts.output_folder):
    os.makedirs(opts.output_folder)

trainer = MUNIT_Trainer(config, device)
try:
    state_dict = torch.load(opts.checkpoint)
    trainer.dis_a.load_state_dict(state_dict['a'])
    trainer.dis_b.load_state_dict(state_dict['b'])
except:
    state_dict = pytorch03_to_pytorch04(torch.load(opts.checkpoint), 'MUNIT')
    trainer.dis_a.load_state_dict(state_dict['a'])
    trainer.dis_b.load_state_dict(state_dict['b'])

dis = trainer.dis_b if opts.a2b else trainer.dis_a  # discriminator for the target domain
trainer.to(device)
trainer.eval()

transform_list = [transforms.Resize(config['crop_image_width'])]
if opts.centercrop:
    transform_list.append(transforms.CenterCrop((config['crop_image_height'], config['crop_image_width'])))
transform_list.extend([transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
transform = transforms.Compose(transform_list)

input_list = [[], [], []]
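# --- Illustrative usage sketch (not part of the original scripts) ---
# One way the selected target-domain discriminator could be applied: score a
# single preprocessed image. Assumes MUNIT's multi-scale discriminator, whose
# forward pass returns a list of patch score maps (one per scale); the input
# path 'example.jpg' is a placeholder.
from PIL import Image

with torch.no_grad():
    img = transform(Image.open('example.jpg').convert('RGB')).unsqueeze(0).to(device)
    score_maps = dis(img)                                       # one patch score map per discriminator scale
    realism = torch.stack([m.mean() for m in score_maps]).mean()
    print('mean discriminator output: {:.4f}'.format(realism.item()))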
config['vgg_model_path'] = opts.output_path
if opts.trainer == 'MUNIT':
    style_dim = config['gen']['style_dim']
    trainer = MUNIT_Trainer(config, device)
elif opts.trainer == 'UNIT':
    trainer = UNIT_Trainer(config, device)
else:
    sys.exit("Only support MUNIT|UNIT")

try:
    state_dict = torch.load(opts.checkpoint)
    trainer.gen_a.load_state_dict(state_dict['a'])
    trainer.gen_b.load_state_dict(state_dict['b'])
except:
    # Fall back for checkpoints saved with older PyTorch versions; load to CPU first
    state_dict = pytorch03_to_pytorch04(
        torch.load(opts.checkpoint, map_location=lambda storage, loc: storage), opts.trainer)
    trainer.gen_a.load_state_dict(state_dict['a'])
    trainer.gen_b.load_state_dict(state_dict['b'])

trainer.to(device=device)
trainer.eval()
encode = trainer.gen_a.encode if opts.a2b else trainer.gen_b.encode        # content encode function
style_encode = trainer.gen_b.encode if opts.a2b else trainer.gen_a.encode  # style encode function
decode = trainer.gen_b.decode if opts.a2b else trainer.gen_a.decode        # decode function

if 'new_size' in config:
    new_size = config['new_size']
else:
    if opts.a2b == 1:
        new_size = config['new_size_a']