def __init__(self, downs, n_res, input_dim, dim, norm, activ, pad_type):
    super(ContentEncoder, self).__init__()
    self.model = []
    # Stem: 7x7 stride-1 conv maps input_dim -> dim without changing H x W.
    self.model += [Conv2dBlock(input_dim, dim, 7, 1, 3,
                               norm=norm, activation=activ, pad_type=pad_type)]
    # Each 4x4 stride-2 conv halves the spatial size and doubles the channels.
    for i in range(downs):
        self.model += [Conv2dBlock(dim, 2 * dim, 4, 2, 1,
                                   norm=norm, activation=activ, pad_type=pad_type)]
        dim *= 2
    # Residual trunk at the bottleneck resolution.
    self.model += [ResBlocks(n_res, dim, norm=norm,
                             activation=activ, pad_type=pad_type)]
    self.model = nn.Sequential(*self.model)
    self.output_dim = dim
def __init__(self, ups, n_res, dim, out_dim, res_norm, activ, pad_type):
    super(Decoder, self).__init__()
    self.model = []
    # Residual trunk with a configurable normalization (res_norm).
    self.model += [ResBlocks(n_res, dim, res_norm, activ, pad_type=pad_type)]
    # Each stage doubles H x W with nearest-neighbor upsampling, then a
    # 5x5 conv halves the channel count.
    for i in range(ups):
        self.model += [nn.Upsample(scale_factor=2),
                       Conv2dBlock(dim, dim // 2, 5, 1, 2,
                                   norm='in', activation=activ, pad_type=pad_type)]
        dim //= 2
    # Final 7x7 conv maps to out_dim channels; tanh bounds the output.
    self.model += [Conv2dBlock(dim, out_dim, 7, 1, 3,
                               norm='none', activation='tanh', pad_type=pad_type)]
    self.model = nn.Sequential(*self.model)
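# A minimal usage sketch for the encoder/decoder pair above. All values here
# are illustrative assumptions (downs=2, n_res=4, dim=64, 'in' norms), and
# each class's forward() is assumed to simply apply self.model: with two
# stride-2 stages the encoder maps 256x256 -> 64x64 while growing channels
# 64 -> 256, and the decoder's two upsampling stages invert that exactly.
def _sketch_content_roundtrip():
    enc = ContentEncoder(downs=2, n_res=4, input_dim=3, dim=64,
                         norm='in', activ='relu', pad_type='reflect')
    dec = Decoder(ups=2, n_res=4, dim=enc.output_dim, out_dim=3,
                  res_norm='in', activ='relu', pad_type='reflect')
    x = torch.randn(1, 3, 256, 256)
    content = enc.model(x)      # -> (1, 256, 64, 64)
    recon = dec.model(content)  # -> (1, 3, 256, 256), tanh-bounded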
def __init__(self, hp):
    super(GPPatchMcResDis, self).__init__()
    assert hp['n_res_blks'] % 2 == 0, 'n_res_blks must be a multiple of 2'
    self.n_layers = hp['n_res_blks'] // 2
    nf = hp['nf']
    # Feature trunk: a stem conv, then (n_layers - 1) stages of two
    # activation-first res blocks followed by average-pool downsampling.
    cnn_f = [Conv2dBlock(3, nf, 7, 1, 3,
                         pad_type='reflect', norm='none', activation='none')]
    for i in range(self.n_layers - 1):
        nf_out = np.min([nf * 2, 1024])  # channel count is capped at 1024
        cnn_f += [ActFirstResBlock(nf, nf, None, 'lrelu', 'none')]
        cnn_f += [ActFirstResBlock(nf, nf_out, None, 'lrelu', 'none')]
        cnn_f += [nn.ReflectionPad2d(1)]
        cnn_f += [nn.AvgPool2d(kernel_size=3, stride=2)]
        nf = np.min([nf * 2, 1024])
    nf_out = np.min([nf * 2, 1024])
    cnn_f += [ActFirstResBlock(nf, nf, None, 'lrelu', 'none')]
    cnn_f += [ActFirstResBlock(nf, nf_out, None, 'lrelu', 'none')]
    # Classification head: a 1x1 conv gives one logit per class per patch.
    cnn_c = [Conv2dBlock(nf_out, hp['num_classes'], 1, 1,
                         norm='none', activation='lrelu', activation_first=True)]
    self.cnn_f = nn.Sequential(*cnn_f)
    self.cnn_c = nn.Sequential(*cnn_c)
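# A shape sketch for the patch discriminator above, with illustrative hp
# values (n_res_blks=10, nf=64, num_classes=119 are assumptions, not taken
# from the source). With n_layers = 5, four pooling stages shrink a 128x128
# input by 2^4, so each spatial position of the output scores one patch.
def _sketch_patch_dis():
    hp = {'n_res_blks': 10, 'nf': 64, 'num_classes': 119}
    d = GPPatchMcResDis(hp)
    x = torch.randn(2, 3, 128, 128)
    feat = d.cnn_f(x)    # -> (2, 1024, 8, 8)
    out = d.cnn_c(feat)  # -> (2, 119, 8, 8): per-patch, per-class logits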
def __init__(self):
    super(DisModel, self).__init__()
    self.n_layers = 6
    self.final_size = 1024
    nf = 16
    # Feature trunk over single-channel (grayscale) images.
    cnn_f = [Conv2dBlock(1, nf, 7, 1, 3,
                         pad_type='reflect', norm='none', activation='none')]
    for i in range(self.n_layers - 1):
        nf_out = np.min([nf * 2, 1024])
        cnn_f += [ActFirstResBlock(nf, nf, None, 'lrelu', 'none')]
        cnn_f += [ActFirstResBlock(nf, nf_out, None, 'lrelu', 'none')]
        cnn_f += [nn.ReflectionPad2d(1)]
        cnn_f += [nn.AvgPool2d(kernel_size=3, stride=2)]
        nf = np.min([nf * 2, 1024])
    nf_out = np.min([nf * 2, 1024])
    cnn_f += [ActFirstResBlock(nf, nf, None, 'lrelu', 'none')]
    cnn_f += [ActFirstResBlock(nf, nf_out, None, 'lrelu', 'none')]
    # Head: kernel size and stride are derived from IMG_HEIGHT and IMG_WIDTH
    # so that the head emits a single prediction vector per image.
    cnn_c = [Conv2dBlock(nf_out, self.final_size,
                         IMG_HEIGHT // (2 ** (self.n_layers - 1)),
                         IMG_WIDTH // (2 ** (self.n_layers - 1)) + 1,
                         norm='none', activation='lrelu', activation_first=True)]
    self.cnn_f = nn.Sequential(*cnn_f)
    self.cnn_c = nn.Sequential(*cnn_c)
    self.bce = nn.BCEWithLogitsLoss()
def __init__(self, num_writers):
    super(WriterClaModel, self).__init__()
    self.n_layers = 6
    nf = 16
    # Same feature trunk as DisModel above.
    cnn_f = [Conv2dBlock(1, nf, 7, 1, 3,
                         pad_type='reflect', norm='none', activation='none')]
    for i in range(self.n_layers - 1):
        nf_out = np.min([nf * 2, 1024])
        cnn_f += [ActFirstResBlock(nf, nf, None, 'lrelu', 'none')]
        cnn_f += [ActFirstResBlock(nf, nf_out, None, 'lrelu', 'none')]
        cnn_f += [nn.ReflectionPad2d(1)]
        cnn_f += [nn.AvgPool2d(kernel_size=3, stride=2)]
        nf = np.min([nf * 2, 1024])
    nf_out = np.min([nf * 2, 1024])
    cnn_f += [ActFirstResBlock(nf, nf, None, 'lrelu', 'none')]
    cnn_f += [ActFirstResBlock(nf, nf_out, None, 'lrelu', 'none')]
    # Head: one logit per writer, trained with cross-entropy.
    cnn_c = [Conv2dBlock(nf_out, num_writers,
                         IMG_HEIGHT // (2 ** (self.n_layers - 1)),
                         IMG_WIDTH // (2 ** (self.n_layers - 1)) + 1,
                         norm='none', activation='lrelu', activation_first=True)]
    self.cnn_f = nn.Sequential(*cnn_f)
    self.cnn_c = nn.Sequential(*cnn_c)
    self.cross_entropy = nn.CrossEntropyLoss()
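# A shape sketch for the two heads above, assuming illustrative constants
# IMG_HEIGHT = 64 and IMG_WIDTH = 216 (the real values live elsewhere in the
# repo) and size-preserving ActFirstResBlocks. Five pad + AvgPool2d(3, stride=2)
# stages take 64 x 216 down to 2 x 7, so the head conv gets kernel size
# 64 // 32 = 2 and stride 216 // 32 + 1 = 7, which collapses the map to a
# single 1 x 1 prediction per image.
def _sketch_dis_shapes():
    d = DisModel()
    x = torch.randn(4, 1, 64, 216)  # grayscale handwriting-style input
    feat = d.cnn_f(x)               # -> (4, 1024, 2, 7)
    out = d.cnn_c(feat)             # -> (4, 1024, 1, 1): one vector per image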
def __init__(self, batch_size, downs, ind_im, dim, latent_dim, norm, activ, pad_type):
    super(ClassModelEncoder, self).__init__()
    # Style branch: downs[0] stride-2 convs, doubling channels after the first.
    s_s_layers = []
    dim_size = dim
    for i in range(downs[0]):
        if i == 0:
            s_s_layers.append(Conv2dBlock(ind_im, dim, 4, 2, 1,
                                          norm=norm, activation=activ, pad_type=pad_type))
        else:
            dim = dim * 2
            s_s_layers.append(Conv2dBlock(dim // 2, dim, 4, 2, 1,
                                          norm=norm, activation=activ, pad_type=pad_type))
    self.enc_s_s = nn.Sequential(*s_s_layers)
    # Content branch: same downsampling scheme, then one more channel
    # doubling at constant resolution and a residual trunk.
    dim = dim_size
    s_c_layers = []
    for i in range(downs[1]):
        if i == 0:
            s_c_layers.append(Conv2dBlock(ind_im, dim, 4, 2, 1,
                                          norm=norm, activation=activ, pad_type=pad_type))
        else:
            dim = dim * 2
            s_c_layers.append(Conv2dBlock(dim // 2, dim, 4, 2, 1,
                                          norm=norm, activation=activ, pad_type=pad_type))
    dim = dim * 2
    s_c_layers.append(Conv2dBlock(dim // 2, dim, 3, 1, 1,
                                  norm=norm, activation=activ, pad_type=pad_type))
    s_c_layers.append(ResBlocks(2, dim, norm=norm, activation=activ, pad_type=pad_type))
    self.enc_s_c = nn.Sequential(*s_c_layers)
    # Projections to the latent space; csb is a fixed random code drawn once
    # at construction.
    self.linear_s = nn.Linear(dim * 2, latent_dim)
    self.linear_c = nn.Linear(dim, latent_dim)
    self.csb = torch.randn(batch_size, dim).cuda()
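# A note on csb above: sampling it with .cuda() at construction pins the
# module to a fixed batch size and to the default GPU, and the tensor is not
# saved in state_dict. A sketch of the more portable idiom (same shape and
# initialization) would register it as a buffer inside __init__, so it
# follows .to(device) and is checkpointed with the model:
#
#     self.register_buffer('csb', torch.randn(batch_size, dim))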
def __init__(self, downs, n_res, input_dim, dim, norm, activ, pad_type):
    super(ContentEncoder, self).__init__()
    s_c_layers = []
    # downs stride-2 convs; channels double after the first one.
    for i in range(downs):
        if i == 0:
            s_c_layers.append(Conv2dBlock(input_dim, dim, 4, 2, 1,
                                          norm=norm, activation=activ, pad_type=pad_type))
        else:
            dim = dim * 2
            s_c_layers.append(Conv2dBlock(dim // 2, dim, 4, 2, 1,
                                          norm=norm, activation=activ, pad_type=pad_type))
    # One more channel doubling at constant resolution, then the residual trunk.
    dim = dim * 2
    s_c_layers.append(Conv2dBlock(dim // 2, dim, 3, 1, 1,
                                  norm=norm, activation=activ, pad_type=pad_type))
    s_c_layers.append(ResBlocks(n_res, dim, norm=norm, activation=activ, pad_type=pad_type))
    self.model = nn.Sequential(*s_c_layers)
    self.output_dim = dim
def __init__(self, hp):
    super(Generator, self).__init__()
    self.mlp = nn.Sequential(
        Conv2dBlock(in_dim=hp['in_dim_mlp'], out_dim=1024, ks=1, st=1, padding=0,
                    norm='none', activation='lrelu', dropout=0.1),
        Conv2dBlock(in_dim=1024, out_dim=960, ks=1, st=1, padding=0,
                    norm='none', activation='lrelu', dropout=0.1),
        Conv2dBlock(in_dim=960, out_dim=864, ks=1, st=1, padding=0,
                    norm='none', activation='lrelu', dropout=0.3),
        Conv2dBlock(in_dim=864, out_dim=784, ks=1, st=1, padding=0,
                    norm='none', activation='lrelu', dropout=0.5),
        Conv2dBlock(in_dim=784, out_dim=720, ks=1, st=1, padding=0,
                    norm='none', activation='lrelu', dropout=0.5),
        nn.Conv2d(720, hp['out_dim_mlp'], 1, stride=1, padding=0),
    )
    self.sigmoid = nn.Sigmoid()
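# A usage sketch for the MLP generator above. The hp values are illustrative
# assumptions, and forward() is assumed to apply self.mlp followed by
# self.sigmoid; because every layer is a 1x1 conv, the stack acts as an
# independent MLP at each spatial position of an N x C x H x W input, with
# widths tapering 1024 -> 720 while dropout rises from 0.1 to 0.5.
def _sketch_generator_mlp():
    hp = {'in_dim_mlp': 512, 'out_dim_mlp': 80}
    g = Generator(hp)
    z = torch.randn(2, 512, 1, 1)
    logits = g.mlp(z)          # -> (2, 80, 1, 1)
    probs = g.sigmoid(logits)  # per-dimension probabilities in (0, 1)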
def __init__(self, ups, n_res, dim, out_dim, res_norm, activ, pad_type):
    super(Decoder, self).__init__()
    self.model = []
    self.model += [ResBlocks(n_res, dim, res_norm, activ, pad_type=pad_type)]
    # Each AdaIN upsampling res block halves the channel count, so dim is
    # updated to match after every stage.
    for i in range(ups):
        self.model.append(AdainUpResBlock(dim, activation=activ, pad_type=pad_type))
        dim = dim // 2
    self.model.append(Conv2dBlock(dim, out_dim, 3, 1, 1,
                                  norm='none', activation='tanh', pad_type=pad_type))
    self.model = nn.Sequential(*self.model)
def __init__(self, hp):
    super(GPPatchMcResDis, self).__init__()
    # InceptionBlock = Conv2dBlock
    assert hp['n_res_blks'] % 2 == 0, 'n_res_blks must be a multiple of 2'
    self.n_layers = hp['n_res_blks'] // 2
    nf = hp['nf']
    input_channels = hp['input_nc']
    # Same architecture as GPPatchMcResDis above, but with a configurable
    # number of input channels.
    cnn_f = [Conv2dBlock(input_channels, nf, KERNEL_SIZE_7, 1, 3,
                         pad_type='reflect', norm='none', activation='none')]
    for i in range(self.n_layers - 1):
        nf_out = np.min([nf * 2, 1024])
        cnn_f += [ActFirstResBlock(nf, nf, None, 'lrelu', 'none')]
        cnn_f += [ActFirstResBlock(nf, nf_out, None, 'lrelu', 'none')]
        cnn_f += [nn.ReflectionPad2d(1)]
        cnn_f += [nn.AvgPool2d(kernel_size=3, stride=2)]
        nf = np.min([nf * 2, 1024])
    nf_out = np.min([nf * 2, 1024])
    cnn_f += [ActFirstResBlock(nf, nf, None, 'lrelu', 'none')]
    cnn_f += [ActFirstResBlock(nf, nf_out, None, 'lrelu', 'none')]
    cnn_c = [Conv2dBlock(nf_out, hp['num_classes'], 1, 1,
                         norm='none', activation='lrelu', activation_first=True)]
    self.cnn_f = nn.Sequential(*cnn_f)
    self.cnn_c = nn.Sequential(*cnn_c)
    # self.register_backward_hook(debug.printgradnorm)
    self.debug = Debugger()
def __init__(self, args):
    super(Refiner, self).__init__()
    c_up = args.c_up // 2  # e.g. 32 when args.c_up == 64
    down = args.down       # e.g. 2
    # Stem takes 6 channels: the RGB image concatenated with the target.
    self.model = [Conv2dBlock(6, c_up, 7, 1, 3, norm='in', pad_type='reflect')]
    for i in range(down):
        self.model.append(Conv2dBlock(c_up, 2 * c_up, 4, 2, 1,
                                      norm='in', pad_type='reflect'))
        c_up *= 2
    self.model.append(ResBlocks(5, c_up, norm='in',
                                activation='relu', pad_type='reflect'))
    for i in range(down):
        self.model.append(UpConv2dBlock(c_up, norm='in',
                                        activation='relu', pad_type='reflect'))
        c_up //= 2
    self.model.append(Conv2dBlock(c_up, 3, 7, 1, padding=3, norm='none',
                                  activation='none', pad_type='reflect'))
    self.model = nn.Sequential(*self.model)
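# A usage sketch for the Refiner above. The args values are illustrative, and
# UpConv2dBlock is assumed to double H x W while halving channels (mirroring
# the stride-2 downsampling convs); the 6 input channels come from
# concatenating the 3-channel RGB image with the 3-channel target.
def _sketch_refiner():
    class _Args:
        c_up, down = 64, 2
    r = Refiner(_Args())
    rgb = torch.randn(1, 3, 128, 128)
    target = torch.randn(1, 3, 128, 128)
    out = r.model(torch.cat([rgb, target], dim=1))  # -> (1, 3, 128, 128)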
def __init__(self, downs, n_res, input_dim, dim, norm, activ, pad_type):
    super(ContentEncoder, self).__init__()
    # InceptionBlock = Conv2dBlock
    self.model = []
    # Inception-style stem in place of the plain 7x7 conv.
    self.model += [InceptionBlock(input_dim, dim, KERNEL_SIZE_7, 1, 3,
                                  norm=norm, activation=activ, pad_type=pad_type)]
    # (An earlier, disabled variant downsampled with InceptionBlock followed
    # by MaxPool2d; the stride-2 convs below replaced it.)
    for i in range(downs):
        self.model += [Conv2dBlock(dim, 2 * dim, KERNEL_SIZE_4, 2, 1,
                                   norm=norm, activation=activ, pad_type=pad_type)]
        dim *= 2
    self.model += [ResBlocks(n_res, dim, norm=norm, activation=activ,
                             pad_type=pad_type, inception=True)]
    self.model = nn.Sequential(*self.model)
    self.output_dim = dim
def __init__(self, downs, ind_im, dim, latent_dim, norm, activ, pad_type):
    super(ClassModelEncoder, self).__init__()
    self.model = []
    self.model += [Conv2dBlock(ind_im, dim, 7, 1, 3,
                               norm=norm, activation=activ, pad_type=pad_type)]
    # Two channel-doubling downsamples...
    for i in range(2):
        self.model += [Conv2dBlock(dim, 2 * dim, 4, 2, 1,
                                   norm=norm, activation=activ, pad_type=pad_type)]
        dim *= 2
    # ...then (downs - 2) more at constant channel width.
    for i in range(downs - 2):
        self.model += [Conv2dBlock(dim, dim, 4, 2, 1,
                                   norm=norm, activation=activ, pad_type=pad_type)]
    # Global average pooling plus a 1x1 conv yields the latent class code.
    self.model += [nn.AdaptiveAvgPool2d(1)]
    self.model += [nn.Conv2d(dim, latent_dim, 1, 1, 0)]
    self.model = nn.Sequential(*self.model)
    self.output_dim = dim
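# A usage sketch for the class-model encoder above (hyperparameters are
# illustrative; forward() is assumed to apply self.model). Because the head
# is AdaptiveAvgPool2d(1) plus a 1x1 conv, the latent code has the same shape
# regardless of the input resolution.
def _sketch_class_encoder():
    enc = ClassModelEncoder(downs=4, ind_im=3, dim=64, latent_dim=64,
                            norm='none', activ='relu', pad_type='reflect')
    x = torch.randn(8, 3, 128, 128)
    z = enc.model(x)  # -> (8, 64, 1, 1): one class code per image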
def __init__(self, hp):
    super(GPPatchMcResDis, self).__init__()
    assert hp['n_res_blks'] % 2 == 0, 'n_res_blks must be a multiple of 2'
    self.n_layers = hp['n_res_blks'] // 2  # 5
    nf = hp['nf']  # 64
    # First conv layer extracts features; under the original paper's settings
    # the output has size batch_size x 128 x 128 x 64.
    cnn_f = [Conv2dBlock(3, nf, 7, 1, 3,
                         pad_type='reflect', norm='none', activation='none')]
    # Channels grow 128 -> 256 -> 512 -> 1024; under the original paper's
    # settings this yields a batch_size x 8 x 8 x 1024 feature map.
    for i in range(self.n_layers - 1):
        nf_out = np.min([nf * 2, 1024])
        cnn_f += [ActFirstResBlock(nf, nf, None, 'lrelu', 'none')]
        cnn_f += [ActFirstResBlock(nf, nf_out, None, 'lrelu', 'none')]
        cnn_f += [nn.ReflectionPad2d(1)]
        cnn_f += [nn.AvgPool2d(kernel_size=3, stride=2)]
        nf = np.min([nf * 2, 1024])
    nf_out = np.min([nf * 2, 1024])  # 1024
    # Two more res blocks, reaching a depth of 1024.
    cnn_f += [ActFirstResBlock(nf, nf, None, 'lrelu', 'none')]
    cnn_f += [ActFirstResBlock(nf, nf_out, None, 'lrelu', 'none')]
    # A final conv layer produces per-class scores: a 4-D output of size
    # batch_size x patch x patch x num_classes.
    cnn_c = [Conv2dBlock(nf_out, hp['num_classes'], 1, 1,
                         norm='none', activation='lrelu', activation_first=True)]
    self.cnn_f = nn.Sequential(*cnn_f)
    self.cnn_c = nn.Sequential(*cnn_c)
def __init__(self, hp):
    super(Discriminator, self).__init__()
    # Shared 1x1-conv trunk over the input feature map.
    self.fc = Conv2dBlock(in_dim=hp['in_dim_fc'], out_dim=hp['out_dim_fc'],
                          ks=1, st=1, padding=0, norm=hp['norm_fc'],
                          activation=hp['activ_fc'], dropout=hp['drop_fc'])
    # Two heads, both 1x1 convs: a real/fake logit and per-class logits.
    self.pred = nn.Conv2d(hp['out_dim_fc'], 1, 1, stride=1, padding=0)
    self.sigmoid = nn.Sigmoid()
    self.cls = nn.Conv2d(hp['out_dim_fc'], hp['out_dim_cls'], 1, stride=1, padding=0)
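# A usage sketch for the two-head discriminator above, with illustrative hp
# values (all assumptions); forward() is assumed to apply fc and then each
# head. pred scores real/fake at every spatial position, while cls gives
# out_dim_cls class logits at the same positions.
def _sketch_two_head_dis():
    hp = {'in_dim_fc': 512, 'out_dim_fc': 256, 'norm_fc': 'none',
          'activ_fc': 'lrelu', 'drop_fc': 0.0, 'out_dim_cls': 10}
    d = Discriminator(hp)
    feat = torch.randn(2, 512, 4, 4)
    h = d.fc(feat)
    rf = d.sigmoid(d.pred(h))  # -> (2, 1, 4, 4) real/fake probabilities
    cls_logits = d.cls(h)      # -> (2, 10, 4, 4) per-class logits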