def __init__(self, downs, n_res, input_dim, dim, norm, activ, pad_type):
    super(ContentEncoder, self).__init__()
    self.model = []
    # 7x7 stem convolution keeps the spatial resolution (stride 1, padding 3)
    self.model += [Conv2dBlock(input_dim, dim, 7, 1, 3,
                               norm=norm, activation=activ, pad_type=pad_type)]
    # each stride-2 block halves the spatial size and doubles the channel count
    for i in range(downs):
        self.model += [Conv2dBlock(dim, 2 * dim, 4, 2, 1,
                                   norm=norm, activation=activ, pad_type=pad_type)]
        dim *= 2
    # residual blocks at the bottleneck resolution
    self.model += [ResBlocks(n_res, dim, norm=norm, activation=activ, pad_type=pad_type)]
    self.model = nn.Sequential(*self.model)
    self.output_dim = dim
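# A minimal sketch (not from the original source) of how the encoder's output shape
# follows from `downs`: every stride-2 block halves H and W and doubles the channels,
# so output_dim == dim * 2**downs. The helper name below is illustrative only.
def content_encoder_output_shape(h, w, dim, downs):
    """Return (channels, height, width) after the downsampling stack."""
    return dim * 2 ** downs, h // 2 ** downs, w // 2 ** downs

# e.g. a 128x128 input with dim=64 and downs=2 yields a (256, 32, 32) feature map
assert content_encoder_output_shape(128, 128, 64, 2) == (256, 32, 32)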
def __init__(self, ups, n_res, dim, out_dim, res_norm, activ, pad_type):
    super(Decoder, self).__init__()
    self.model = []
    # residual blocks at the bottleneck resolution, using the decoder's own norm
    self.model += [ResBlocks(n_res, dim, res_norm, activ, pad_type=pad_type)]
    # each upsampling stage doubles the spatial size and halves the channels
    for i in range(ups):
        self.model += [nn.Upsample(scale_factor=2),
                       Conv2dBlock(dim, dim // 2, 5, 1, 2,
                                   norm='in', activation=activ, pad_type=pad_type)]
        dim //= 2
    # final 7x7 conv maps to out_dim channels with tanh, so outputs lie in [-1, 1]
    self.model += [Conv2dBlock(dim, out_dim, 7, 1, 3,
                               norm='none', activation='tanh', pad_type=pad_type)]
    self.model = nn.Sequential(*self.model)
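# A small sketch (names are illustrative, not from the source) of the decoder's
# channel schedule: channels halve at every upsampling stage and the last 7x7 conv
# maps to out_dim, mirroring the encoder when ups equals the encoder's downs.
def decoder_channel_schedule(dim, ups, out_dim):
    """Channel width entering each stage, ending with the output channels."""
    widths = [dim]
    for _ in range(ups):
        dim //= 2
        widths.append(dim)
    return widths + [out_dim]

# e.g. dim=256, ups=2, out_dim=3 -> [256, 128, 64, 3]
assert decoder_channel_schedule(256, 2, 3) == [256, 128, 64, 3]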
def __init__(self, batch_size, downs, ind_im, dim, latent_dim, norm, activ, pad_type):
    super(ClassModelEncoder, self).__init__()
    # first branch (enc_s_s): downs[0] stride-2 convs; channels double after the first block
    s_s_layers = []
    dim_size = dim
    for i in range(downs[0]):
        if i == 0:
            s_s_layers.append(Conv2dBlock(ind_im, dim, 4, 2, 1,
                                          norm=norm, activation=activ, pad_type=pad_type))
        else:
            dim = dim * 2
            s_s_layers.append(Conv2dBlock(dim // 2, dim, 4, 2, 1,
                                          norm=norm, activation=activ, pad_type=pad_type))
    self.enc_s_s = nn.Sequential(*s_s_layers)

    # second branch (enc_s_c): same downsampling pattern, then a 3x3 conv and residual blocks
    dim = dim_size
    s_c_layers = []
    for i in range(downs[1]):
        if i == 0:
            s_c_layers.append(Conv2dBlock(ind_im, dim, 4, 2, 1,
                                          norm=norm, activation=activ, pad_type=pad_type))
        else:
            dim = dim * 2
            s_c_layers.append(Conv2dBlock(dim // 2, dim, 4, 2, 1,
                                          norm=norm, activation=activ, pad_type=pad_type))
    dim = dim * 2
    s_c_layers.append(Conv2dBlock(dim // 2, dim, 3, 1, 1,
                                  norm=norm, activation=activ, pad_type=pad_type))
    s_c_layers.append(ResBlocks(2, dim, norm=norm, activation=activ, pad_type=pad_type))
    self.enc_s_c = nn.Sequential(*s_c_layers)

    # linear heads projecting features from each branch to the latent code
    self.linear_s = nn.Linear(dim * 2, latent_dim)
    self.linear_c = nn.Linear(dim, latent_dim)
    # fixed random tensor of shape (batch_size, dim), allocated directly on the GPU
    self.csb = torch.randn(batch_size, dim).cuda()
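# A hedged sketch (not part of the original code) of the linear heads' expected input
# widths, which follow from the channel arithmetic above: the enc_s_c branch ends at
# dim * 2**downs[1] channels, linear_c expects that width, and linear_s expects twice it.
def class_encoder_head_widths(dim, downs):
    """Return (linear_s in_features, linear_c in_features) for a base width `dim`."""
    c_enc_s_c = dim * 2 ** downs[1]
    return 2 * c_enc_s_c, c_enc_s_c

# e.g. dim=64, downs=(4, 2) -> linear_s expects 512 features, linear_c expects 256
assert class_encoder_head_widths(64, (4, 2)) == (512, 256)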
def __init__(self, ups, n_res, dim, out_dim, res_norm, activ, pad_type):
    super(Decoder, self).__init__()
    self.model = []
    # residual blocks at the bottleneck, using the given (e.g. AdaIN) normalization
    self.model += [ResBlocks(n_res, dim, res_norm, activ, pad_type=pad_type)]
    # AdaIN upsampling residual blocks; the running channel count is halved per stage
    for i in range(ups):
        self.model.append(AdainUpResBlock(dim, activation=activ, pad_type=pad_type))
        dim = dim // 2
    # final 3x3 conv maps to out_dim channels with tanh
    self.model.append(Conv2dBlock(dim, out_dim, 3, 1, 1,
                                  norm='none', activation='tanh', pad_type=pad_type))
    self.model = nn.Sequential(*self.model)
def __init__(self, downs, n_res, input_dim, dim, norm, activ, pad_type):
    super(ContentEncoder, self).__init__()
    # InceptionBlock = Conv2dBlock
    self.model = []
    self.model += [InceptionBlock(input_dim, dim, KERNEL_SIZE_7, 1, 3,
                                  norm=norm, activation=activ, pad_type=pad_type)]
    # Disabled alternative: stride-1 inception blocks followed by max pooling
    # for downsampling, instead of the strided convolutions used below.
    """
    for i in range(downs):
        self.model += [InceptionBlock(dim, 2 * dim, KERNEL_SIZE_4, 1,  # 2,
                                      1, norm=norm, activation=activ, pad_type=pad_type)]
        if i == downs - 1:
            self.model += [nn.MaxPool2d(KERNEL_SIZE_4, 2, padding=1)]
        else:
            self.model += [nn.MaxPool2d(KERNEL_SIZE_4, 2, padding=0)]
        dim *= 2
    """
    # downsampling blocks: halve the spatial size and double the channels
    for i in range(downs):
        self.model += [Conv2dBlock(dim, 2 * dim, KERNEL_SIZE_4, 2, 1,
                                   norm=norm, activation=activ, pad_type=pad_type)]
        dim *= 2
    self.model += [ResBlocks(n_res, dim, norm=norm, activation=activ,
                             pad_type=pad_type, inception=True)]
    self.model = nn.Sequential(*self.model)
    self.output_dim = dim
def __init__(self, downs, n_res, input_dim, dim, norm, activ, pad_type):
    super(ContentEncoder, self).__init__()
    s_c_layers = []
    # downsampling: stride-2 blocks; channels double after the first block
    for i in range(downs):
        if i == 0:
            s_c_layers.append(Conv2dBlock(input_dim, dim, 4, 2, 1,
                                          norm=norm, activation=activ, pad_type=pad_type))
        else:
            dim = dim * 2
            s_c_layers.append(Conv2dBlock(dim // 2, dim, 4, 2, 1,
                                          norm=norm, activation=activ, pad_type=pad_type))
    # one more channel doubling with a 3x3 stride-1 conv, then residual blocks
    dim = dim * 2
    s_c_layers.append(Conv2dBlock(dim // 2, dim, 3, 1, 1,
                                  norm=norm, activation=activ, pad_type=pad_type))
    s_c_layers.append(ResBlocks(n_res, dim, norm=norm, activation=activ, pad_type=pad_type))
    self.model = nn.Sequential(*s_c_layers)
    self.output_dim = dim
def __init__(self, args):
    super(Refiner, self).__init__()
    c_up = args.c_up // 2  # 32
    down = args.down       # 2
    # stem conv takes a 6-channel input: RGB image concatenated with the target
    self.model = [Conv2dBlock(6, c_up, 7, 1, 3, norm='in', pad_type='reflect')]
    # downsampling: halve the spatial size and double the channels `down` times
    for i in range(down):
        self.model.append(Conv2dBlock(c_up, 2 * c_up, 4, 2, 1,
                                      norm='in', pad_type='reflect'))
        c_up *= 2
    # residual bottleneck
    self.model.append(ResBlocks(5, c_up, norm='in', activation='relu', pad_type='reflect'))
    # upsampling back to the input resolution
    for i in range(down):
        self.model.append(UpConv2dBlock(c_up, norm='in', activation='relu', pad_type='reflect'))
        c_up //= 2
    # final 7x7 conv back to RGB, with no normalization or activation
    self.model.append(Conv2dBlock(c_up, 3, 7, 1, padding=3,
                                  norm='none', activation='none', pad_type='reflect'))
    self.model = nn.Sequential(*self.model)
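# A minimal sketch (assumed, not taken from the source) of preparing the refiner's
# 6-channel input: an RGB image is concatenated with a 3-channel target map along
# the channel dimension before the first convolution. Tensor names are illustrative.
import torch

coarse_rgb = torch.randn(1, 3, 128, 128)   # illustrative coarse RGB output
target_rgb = torch.randn(1, 3, 128, 128)   # illustrative conditioning target
refiner_input = torch.cat([coarse_rgb, target_rgb], dim=1)
assert refiner_input.shape == (1, 6, 128, 128)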