def __init__(self, config_text, norm_nc, label_nc, nhidden=128,
             separable_conv_norm='none'):
    super(MobileSPADE, self).__init__()

    assert config_text.startswith('spade')
    parsed = re.search(r'spade(\D+)(\d)x\d', config_text)
    param_free_norm_type = str(parsed.group(1))
    ks = int(parsed.group(2))

    if param_free_norm_type == 'instance':
        self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False)
    elif param_free_norm_type == 'syncbatch':
        self.param_free_norm = SynchronizedBatchNorm2d(norm_nc, affine=False)
    elif param_free_norm_type == 'batch':
        self.param_free_norm = nn.BatchNorm2d(norm_nc, affine=False)
    else:
        raise ValueError(
            '%s is not a recognized param-free norm type in SPADE'
            % param_free_norm_type)

    # nhidden is the dimension of the intermediate embedding space.
    pw = ks // 2
    self.mlp_shared = nn.Sequential(
        nn.Conv2d(label_nc, nhidden, kernel_size=ks, padding=pw),
        nn.ReLU()
    )
    norm_layer = get_norm_layer(separable_conv_norm)
    self.mlp_gamma = SeparableConv2d(nhidden, norm_nc, kernel_size=ks,
                                     padding=pw, norm_layer=norm_layer)
    self.mlp_beta = SeparableConv2d(nhidden, norm_nc, kernel_size=ks,
                                    padding=pw, norm_layer=norm_layer)
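# A hedged sketch, not from this file: the standard SPADE forward pass that a
# module like the one above typically implements (assumes
# `import torch.nn.functional as F`). The input is normalized by the
# parameter-free norm, then modulated by gamma/beta predicted from the
# segmentation map resized to the feature-map resolution.
def forward(self, x, segmap):
    normalized = self.param_free_norm(x)
    segmap = F.interpolate(segmap, size=x.size()[2:], mode='nearest')
    actv = self.mlp_shared(segmap)
    gamma = self.mlp_gamma(actv)
    beta = self.mlp_beta(actv)
    return normalized * (1 + gamma) + beta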
def __init__(self):
    super(UnetNormalized, self).__init__(
        3, 3, 8, 64,
        norm_layer=networks.get_norm_layer('batch'),
        use_dropout=False,
    )
def initialize(cls, opt, files):
    # Build a GlobalGenerator per checkpoint file and cache it on the class.
    for file in files:
        model = GlobalGenerator(opt.input_nc, opt.output_nc, opt.ngf,
                                opt.n_downsample_global,
                                opt.n_blocks_global, get_norm_layer())
        state = torch.load(file)
        model.load_state_dict(state)
        cls.models.append(model)
def main():
    model = networks.NLayerDiscriminator(
        input_nc=3,
        ndf=64,
        norm_layer=networks.get_norm_layer(norm_type='instance'),
        use_sigmoid=False,
        gpu_ids=[0],
    )

    out_file = osp.join(here, 'data/D_random.pth')
    torch.save(model.state_dict(), out_file)
    print('Saved model file: {:s}'.format(out_file))
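# A minimal sketch, not part of the original script: how the randomly
# initialized discriminator saved above could be restored elsewhere. The
# helper name `load_random_D` is hypothetical.
def load_random_D(model_file):
    D = networks.NLayerDiscriminator(
        input_nc=3,
        ndf=64,
        norm_layer=networks.get_norm_layer(norm_type='instance'),
        use_sigmoid=False,
        gpu_ids=[0],
    )
    D.load_state_dict(torch.load(model_file))
    D.eval()
    return D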
def main():
    default_model_file = osp.join(here, 'data/G_horse2zebra.pth')
    default_out_file = osp.join(here, 'logs/create_horse2zebra_pytorch.gif')

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('video_file', help='Video file of horse.')
    parser.add_argument('-g', '--gpu', type=int, default=0, help='GPU id.')
    parser.add_argument('-m', '--model-file', default=default_model_file,
                        help='Model file.')
    parser.add_argument('-o', '--out-file', default=default_out_file,
                        help='Output video file.')
    args = parser.parse_args()

    if args.gpu >= 0:
        os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)

    print('GPU id: {:d}'.format(args.gpu))
    print('Model file: {:s}'.format(args.model_file))
    print('Video file: {:s}'.format(args.video_file))
    print('Output file: {:s}'.format(args.out_file))

    model = networks.ResnetGenerator(
        input_nc=3,
        output_nc=3,
        ngf=64,
        norm_layer=networks.get_norm_layer(norm_type='instance'),
        use_dropout=False,
        n_blocks=9,
        gpu_ids=[args.gpu],
        padding_type='reflect',
    )
    model.load_state_dict(torch.load(args.model_file))
    if torch.cuda.is_available():
        model = model.cuda()
    model = model.eval()

    batch_size = 1
    video = imageio.get_reader(args.video_file)
    writer = imageio.get_writer(args.out_file)
    for img in tqdm.tqdm(video):
        img_org = img.copy()
        img = cv2.resize(img, (256, 256))

        # Scale to [-1, 1] and convert HWC -> NCHW.
        xi = img.astype(np.float32)
        xi = (xi / 255 * 2) - 1
        xi = xi.transpose(2, 0, 1)
        x = np.repeat(xi[None, :, :, :], batch_size, axis=0)
        x = torch.from_numpy(x)
        if torch.cuda.is_available():
            x = x.cuda()
        x = Variable(x, volatile=True)

        y = model(x)

        # Scale back to [0, 255] and convert CHW -> HWC.
        yi = y[0].data
        yi = (yi + 1) / 2 * 255
        yi = yi.cpu().numpy()
        yi = yi.transpose(1, 2, 0)
        out = yi.astype(np.uint8)
        out = cv2.resize(out, (img_org.shape[1], img_org.shape[0]))

        writer.append_data(np.hstack([img_org, out]))
    print('Wrote video: {:s}'.format(args.out_file))
def convert_D(D_model_file, out_file):
    output_nc = 3

    D = D_NLayersMulti(
        input_nc=output_nc,
        ndf=64,
        n_layers=3,
        norm_layer=get_norm_layer('instance'),
        use_sigmoid=False,
        gpu_ids=[],
        num_D=2,
    )
    D.load_state_dict(torch.load(D_model_file))

    D_chainer = chainer_bicyclegan.models.D_NLayersMulti(
        input_nc=output_nc,
        ndf=64,
        n_layers=3,
        norm_layer='instance',
        use_sigmoid=False,
        num_D=2,
    )

    def copyto(l2_list, l1_list):
        # Copy parameters layer by layer from the PyTorch modules (l1)
        # to the corresponding Chainer links (l2), recursing into
        # BasicBlock and Sequential containers.
        assert len(l2_list) == len(l1_list)
        for l1, l2 in zip(l1_list, l2_list):
            if isinstance(l2, (L.Convolution2D, L.Deconvolution2D, L.Linear)):
                np.copyto(l2.W.array, l1.weight.data.numpy())
                np.copyto(l2.b.array, l1.bias.data.numpy())
            elif isinstance(l2, InstanceNormalization):
                np.copyto(l2.avg_mean, l1.running_mean.numpy())
                np.copyto(l2.avg_var, l1.running_var.numpy())
            elif isinstance(l2, chainer_bicyclegan.models.BasicBlock):
                copyto(l2.conv.functions, l1.conv)
                copyto(l2.shortcut.functions, l1.shortcut)
            elif isinstance(l2, chainer_bicyclegan.models.Sequential):
                copyto(l2.functions, l1)
            else:
                print('Skip: {} -> {}'.format(type(l1), type(l2)))
                continue
            print('Copy: {} -> {}'.format(type(l1), type(l2)))

    copyto(D_chainer.model.functions, D.model)

    chainer.serializers.save_npz(out_file, D_chainer)

    # Sanity check: parameter statistics should match between the two models.
    print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    params = []
    for param in D.parameters():
        params.append(param.data.numpy().flatten())
    params = np.hstack(params)
    print(params.min(), params.mean(), params.max())
    print('==========================================================')
    params = []
    for param in D_chainer.params():
        params.append(param.array.flatten())
    params = np.hstack(params)
    print(params.min(), params.mean(), params.max())
    print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
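# A hedged sketch, not part of the original script: restoring the converted
# discriminator later by rebuilding the same Chainer model and loading the
# saved parameters. The helper name `load_converted_D` is hypothetical.
def load_converted_D(npz_file, output_nc=3):
    D_chainer = chainer_bicyclegan.models.D_NLayersMulti(
        input_nc=output_nc,
        ndf=64,
        n_layers=3,
        norm_layer='instance',
        use_sigmoid=False,
        num_D=2,
    )
    chainer.serializers.load_npz(npz_file, D_chainer)
    return D_chainer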
def main():
    default_model_file = osp.join(here, 'data/G_horse2zebra.pth')
    default_img_file = 'https://images2.onionstatic.com/clickhole/3570/2/original/600.jpg'  # NOQA

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-g', '--gpu', type=int, default=0, help='GPU id.')
    parser.add_argument('-m', '--model-file', default=default_model_file,
                        help='Model file.')
    parser.add_argument('-i', '--img-file', default=default_img_file,
                        help='Image file.')
    args = parser.parse_args()

    if args.gpu >= 0:
        os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)

    print('GPU id: {:d}'.format(args.gpu))
    print('Model file: {:s}'.format(args.model_file))
    print('Image file: {:s}'.format(args.img_file))

    model = networks.ResnetGenerator(
        input_nc=3,
        output_nc=3,
        ngf=64,
        norm_layer=networks.get_norm_layer(norm_type='instance'),
        use_dropout=False,
        n_blocks=9,
        gpu_ids=[args.gpu],
        padding_type='reflect',
    )
    model.load_state_dict(torch.load(args.model_file))
    if torch.cuda.is_available():
        model = model.cuda()
    model = model.eval()

    img = skimage.io.imread(args.img_file)

    batch_size = 3
    img_org = img.copy()
    img = cv2.resize(img, (256, 256))

    # Scale to [-1, 1] and convert HWC -> NCHW.
    xi = img.astype(np.float32)
    xi = (xi / 255 * 2) - 1
    xi = xi.transpose(2, 0, 1)
    x = np.repeat(xi[None, :, :, :], batch_size, axis=0)
    x = torch.from_numpy(x)
    if torch.cuda.is_available():
        x = x.cuda()
    x = Variable(x, volatile=True)

    y = model(x)

    # Scale back to [0, 255] and convert CHW -> HWC.
    yi = y[0].data
    yi = (yi + 1) / 2 * 255
    yi = yi.cpu().numpy()
    yi = yi.transpose(1, 2, 0)
    out = yi.astype(np.uint8)
    out = cv2.resize(out, (img_org.shape[1], img_org.shape[0]))

    plt.figure(figsize=(12, 6))
    plt.subplot(121)
    plt.imshow(img_org)
    plt.title('Input (PyTorch)')
    plt.subplot(122)
    plt.imshow(out)
    plt.title('Output (PyTorch)')
    plt.show()
gpu = args.gpu
output_nc = 3
img_file = osp.join(here, 'data/edges2shoes_val_100_AB.jpg')
D_model_file = osp.join(here, 'data/edges2shoes_net_D.pth')

print('GPU id: %d' % gpu)
print('D model: %s' % D_model_file)
print('Input file: %s' % img_file)

os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)

D = D_NLayersMulti(
    input_nc=output_nc,
    ndf=64,
    n_layers=3,
    norm_layer=get_norm_layer('instance'),
    use_sigmoid=False,
    gpu_ids=[],
    num_D=2,
)
D.load_state_dict(torch.load(D_model_file))
D.cuda()

# The input image is an A|B pair: edges on the left, shoes on the right.
img = skimage.io.imread(img_file)
H, W = img.shape[:2]
real_A = img[:, :W // 2, :]
real_A = real_A[:, :, 0:1]  # edges
real_B = img[:, W // 2:, :]  # shoes

# Scale to [-1, 1] and convert HWC -> NCHW.
xi_A = real_A.astype(np.float32) / 255. * 2 - 1
x_A = xi_A.transpose(2, 0, 1)[None]
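# A hedged continuation sketch: the original snippet stops after preparing
# x_A, so the forward pass below (scoring the real B image with the
# multi-scale discriminator) is an assumption, not the original code.
xi_B = real_B.astype(np.float32) / 255. * 2 - 1
x_B = xi_B.transpose(2, 0, 1)[None]
x_B = Variable(torch.from_numpy(x_B).cuda(), volatile=True)
preds = D(x_B)  # D_NLayersMulti returns one prediction per scale (num_D=2)
for i, pred in enumerate(preds):
    print('D scale %d: output size %s' % (i, pred.data.size()))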
def convert_G(nz, output_nc):
    G_model_file = osp.join(here, 'data/edges2shoes_net_G.pth')

    G = G_Unet_add_all(
        input_nc=1,
        output_nc=output_nc,
        nz=nz,
        num_downs=8,
        ngf=64,
        norm_layer=get_norm_layer('instance'),
        nl_layer=get_non_linearity('relu'),
        use_dropout=True,
        gpu_ids=[],
        upsample='basic',
    )
    G.load_state_dict(torch.load(G_model_file))

    G_chainer = chainer_bicyclegan.models.G_Unet_add_all(
        input_nc=1,
        output_nc=output_nc,
        nz=nz,
        num_downs=8,
        ngf=64,
        norm_layer='instance',
        nl_layer='relu',
        use_dropout=True,
        upsample='basic',
    )

    def copyto(l2_list, l1_list):
        # Copy parameters layer by layer from the PyTorch modules (l1)
        # to the corresponding Chainer links (l2).
        assert len(l1_list) == len(l2_list)
        for l1, l2 in zip(l1_list, l2_list):
            if isinstance(l2, (L.Convolution2D, L.Deconvolution2D)):
                np.copyto(l2.W.array, l1.weight.data.numpy())
                np.copyto(l2.b.array, l1.bias.data.numpy())
            elif isinstance(l2, InstanceNormalization):
                np.copyto(l2.avg_mean, l1.running_mean.numpy())
                np.copyto(l2.avg_var, l1.running_var.numpy())
            else:
                print('Skip: {} -> {}'.format(type(l1), type(l2)))
                continue
            print('Copy: {} -> {}'.format(type(l1), type(l2)))

    # Walk the nested U-Net blocks from the outermost to the innermost,
    # copying the down- and up-sampling layers of each level.
    unet = G.model
    unet_chainer = G_chainer.model
    while True:
        print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        print(unet)
        copyto(unet_chainer.down.functions, unet.down)
        copyto(unet_chainer.up.functions, unet.up)
        print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
        if unet.submodule is None:
            assert unet_chainer.submodule is None
            break
        unet = unet.submodule
        unet_chainer = unet_chainer.submodule

    out_file = osp.join(here, 'data/edges2shoes_net_G_from_chainer.npz')
    chainer.serializers.save_npz(out_file, G_chainer)

    # Sanity check: parameter statistics should match between the two models.
    print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    params = []
    for param in G.parameters():
        params.append(param.data.numpy().flatten())
    params = np.hstack(params)
    print(params.min(), params.mean(), params.max())
    print('==========================================================')
    params = []
    for param in G_chainer.params():
        params.append(param.array.flatten())
    params = np.hstack(params)
    print(params.min(), params.mean(), params.max())
    print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
def convert_E(nz, output_nc):
    E_model_file = osp.join(here, 'data/edges2shoes_net_E.pth')

    E = E_ResNet(
        input_nc=output_nc,
        output_nc=nz,
        ndf=64,
        n_blocks=5,
        norm_layer=get_norm_layer('instance'),
        nl_layer=get_non_linearity('lrelu'),
        gpu_ids=[],
        vaeLike=True,
    )
    E.load_state_dict(torch.load(E_model_file))

    E_chainer = chainer_bicyclegan.models.E_ResNet(
        input_nc=output_nc,
        output_nc=nz,
        ndf=64,
        n_blocks=5,
        norm_layer='instance',
        nl_layer='lrelu',
        vaeLike=True,
    )

    def copyto(l2_list, l1_list):
        # Copy parameters layer by layer from the PyTorch modules (l1)
        # to the corresponding Chainer links (l2), recursing into
        # BasicBlock and Sequential containers.
        assert len(l2_list) == len(l1_list)
        for l1, l2 in zip(l1_list, l2_list):
            if isinstance(l2, (L.Convolution2D, L.Deconvolution2D, L.Linear)):
                np.copyto(l2.W.array, l1.weight.data.numpy())
                np.copyto(l2.b.array, l1.bias.data.numpy())
            elif isinstance(l2, InstanceNormalization):
                np.copyto(l2.avg_mean, l1.running_mean.numpy())
                np.copyto(l2.avg_var, l1.running_var.numpy())
            elif isinstance(l2, chainer_bicyclegan.models.BasicBlock):
                copyto(l2.conv.functions, l1.conv)
                copyto(l2.shortcut.functions, l1.shortcut)
            elif isinstance(l2, chainer_bicyclegan.models.Sequential):
                copyto(l2.functions, l1)
            else:
                print('Skip: {} -> {}'.format(type(l1), type(l2)))
                continue
            print('Copy: {} -> {}'.format(type(l1), type(l2)))

    print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    print(E.fc)
    copyto(E_chainer.fc.functions, E.fc)
    print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')

    print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    print(E.fcVar)
    copyto(E_chainer.fcVar.functions, E.fcVar)
    print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')

    print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    print(E.conv)
    copyto(E_chainer.conv.functions, E.conv)
    print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')

    out_file = osp.join(here, 'data/edges2shoes_net_E_from_chainer.npz')
    chainer.serializers.save_npz(out_file, E_chainer)

    # Sanity check: parameter statistics should match between the two models.
    print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    params = []
    for param in E.parameters():
        params.append(param.data.numpy().flatten())
    params = np.hstack(params)
    print(params.min(), params.mean(), params.max())
    print('==========================================================')
    params = []
    for param in E_chainer.params():
        params.append(param.array.flatten())
    params = np.hstack(params)
    print(params.min(), params.mean(), params.max())
    print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')