import numpy as np

import jittor as jt
from jittor import nn


class UnetBlock(nn.Module):
    def __init__(self, in_size, out_size, inner_nc, dropout=0.0,
                 innermost=False, outermost=False, submodule=None):
        super(UnetBlock, self).__init__()
        self.outermost = outermost

        downconv = nn.Conv(in_size, inner_nc, 4, stride=2, padding=1, bias=False)
        downnorm = nn.BatchNorm2d(inner_nc)
        downrelu = nn.LeakyReLU(0.2)
        upnorm = nn.BatchNorm2d(out_size)
        uprelu = nn.ReLU()

        if outermost:
            upconv = nn.ConvTranspose(2 * inner_nc, out_size, 4, stride=2, padding=1)
            down = [downconv]
            up = [uprelu, upconv, nn.Tanh()]
            model = down + [submodule] + up
        elif innermost:
            upconv = nn.ConvTranspose(inner_nc, out_size, 4, stride=2, padding=1, bias=False)
            down = [downrelu, downconv]
            up = [uprelu, upconv, upnorm]
            model = down + up
        else:
            upconv = nn.ConvTranspose(2 * inner_nc, out_size, 4, stride=2, padding=1, bias=False)
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]
            if dropout:
                model = down + [submodule] + up + [nn.Dropout(dropout)]
            else:
                model = down + [submodule] + up

        self.model = nn.Sequential(*model)
        for m in self.modules():
            weights_init_normal(m)
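# Usage sketch (illustrative, not from the source): UnetBlock is meant to be
# nested recursively, innermost first, as in the standard pix2pix U-Net. The
# execute() method is not shown here; for non-outermost blocks it is assumed
# to concatenate the block's input with its output along the channel axis,
# which is why the up-convolutions take 2 * inner_nc input channels.
# block = UnetBlock(512, 512, 512, innermost=True)
# for _ in range(3):
#     block = UnetBlock(512, 512, 512, submodule=block, dropout=0.5)
# block = UnetBlock(256, 256, 512, submodule=block)
# block = UnetBlock(128, 128, 256, submodule=block)
# block = UnetBlock(64, 64, 128, submodule=block)
# unet = UnetBlock(3, 3, 64, submodule=block, outermost=True)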
class Generator_CNN(nn.Module):
    def __init__(self, latent_dim, n_c, x_shape, verbose=False):
        super(Generator_CNN, self).__init__()
        self.name = 'generator'
        self.latent_dim = latent_dim
        self.n_c = n_c
        self.x_shape = x_shape
        self.ishape = (128, 7, 7)
        self.iels = int(np.prod(self.ishape))
        self.verbose = verbose

        self.model0 = nn.Sequential(
            nn.Linear(self.latent_dim + self.n_c, 1024))
        self.model1 = nn.Sequential(
            nn.BatchNorm1d(1024),
            nn.Leaky_relu(0.2))
        self.model2 = nn.Sequential(
            nn.Linear(1024, self.iels),
            nn.BatchNorm1d(self.iels),
            nn.Leaky_relu(0.2))
        self.model3 = nn.Sequential(
            Reshape(self.ishape),
            nn.ConvTranspose(128, 64, 4, stride=2, padding=1, bias=True),
            nn.BatchNorm(64),
            nn.Leaky_relu(0.2))
        self.model4 = nn.Sequential(
            nn.ConvTranspose(64, 1, 4, stride=2, padding=1, bias=True))
        self.sigmoid = nn.Sigmoid()

        initialize_weights(self)

        if self.verbose:
            print('Setting up {}...\n'.format(self.name))
            # The stages are stored separately; there is no single self.model.
            for stage in (self.model0, self.model1, self.model2,
                          self.model3, self.model4):
                print(stage)
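# Usage sketch (illustrative, not from the source): in a ClusterGAN-style
# setup, the generator consumes a Gaussian latent code concatenated with a
# one-hot cluster code; execute() is not shown, so the call below is assumed.
# batch, latent_dim, n_c = 64, 30, 10
# gen = Generator_CNN(latent_dim, n_c, (1, 28, 28))
# zn = jt.randn(batch, latent_dim)
# zc = jt.array(np.eye(n_c, dtype='float32')[np.random.randint(0, n_c, batch)])
# x_gen = gen(jt.concat([zn, zc], dim=1))   # expected (batch, 1, 28, 28)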
class ConfNet(nn.Module):
    def __init__(self, cin, cout, zdim=128, nf=64):
        super(ConfNet, self).__init__()
        # Encoder: four stride-2 convs, then a 4x4 valid conv down to a
        # 1x1 bottleneck of zdim channels.
        network = [
            nn.Conv(cin, nf, 4, stride=2, padding=1, bias=False),
            nn.GroupNorm(16, nf),
            nn.LeakyReLU(scale=0.2),
            nn.Conv(nf, nf * 2, 4, stride=2, padding=1, bias=False),
            nn.GroupNorm(16 * 2, nf * 2),
            nn.LeakyReLU(scale=0.2),
            nn.Conv(nf * 2, nf * 4, 4, stride=2, padding=1, bias=False),
            nn.GroupNorm(16 * 4, nf * 4),
            nn.LeakyReLU(scale=0.2),
            nn.Conv(nf * 4, nf * 8, 4, stride=2, padding=1, bias=False),
            nn.LeakyReLU(scale=0.2),
            nn.Conv(nf * 8, zdim, 4, stride=1, padding=0, bias=False),
            nn.ReLU()
        ]
        # Decoder trunk: back up to 1/4 of the input resolution.
        network += [
            nn.ConvTranspose(zdim, nf * 8, 4, padding=0, bias=False),
            nn.ReLU(),
            nn.ConvTranspose(nf * 8, nf * 4, 4, stride=2, padding=1, bias=False),
            nn.GroupNorm(16 * 4, nf * 4),
            nn.ReLU(),
            nn.ConvTranspose(nf * 4, nf * 2, 4, stride=2, padding=1, bias=False),
            nn.GroupNorm(16 * 2, nf * 2),
            nn.ReLU()
        ]
        self.network = nn.Sequential(*network)

        # Head 1: upsample back to full resolution, 2-channel Softplus output.
        out_net1 = [
            nn.ConvTranspose(nf * 2, nf, 4, stride=2, padding=1, bias=False),
            nn.GroupNorm(16, nf),
            nn.ReLU(),
            nn.ConvTranspose(nf, nf, 4, stride=2, padding=1, bias=False),
            nn.GroupNorm(16, nf),
            nn.ReLU(),
            nn.Conv(nf, 2, 5, stride=1, padding=2, bias=False),
            nn.Softplus()
        ]
        self.out_net1 = nn.Sequential(*out_net1)

        # Head 2: 2-channel Softplus output at 1/4 resolution.
        out_net2 = [
            nn.Conv(nf * 2, 2, 3, stride=1, padding=1, bias=False),
            nn.Softplus()
        ]
        self.out_net2 = nn.Sequential(*out_net2)
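# Shape check (illustrative; the 64x64 input size is an assumption): the trunk
# ends at 1/4 resolution, and the two heads emit 2-channel Softplus confidence
# maps at full and quarter resolution respectively.
conf = ConfNet(3, 2)
feat = conf.network(jt.randn(1, 3, 64, 64))
print(conf.out_net1(feat).shape)   # -> [1, 2, 64, 64]
print(conf.out_net2(feat).shape)   # -> [1, 2, 16, 16]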
class GlobalGenerator(nn.Module):
    def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9,
                 norm_layer=nn.BatchNorm, padding_type='reflect'):
        assert n_blocks >= 0
        super(GlobalGenerator, self).__init__()
        activation = nn.ReLU()

        model = [nn.ReflectionPad2d(3),
                 nn.Conv(input_nc, ngf, 7, padding=0),
                 norm_layer(ngf),
                 activation]
        ### downsample
        for i in range(n_downsampling):
            mult = 2 ** i
            model += [nn.Conv(ngf * mult, ngf * mult * 2, 3, stride=2, padding=1),
                      norm_layer(ngf * mult * 2),
                      activation]
        ### resnet blocks
        mult = 2 ** n_downsampling
        for i in range(n_blocks):
            model += [ResnetBlock(ngf * mult, padding_type=padding_type,
                                  activation=activation, norm_layer=norm_layer)]
        ### upsample
        for i in range(n_downsampling):
            mult = 2 ** (n_downsampling - i)
            model += [nn.ConvTranspose(ngf * mult, ngf * mult // 2, 3, stride=2,
                                       padding=1, output_padding=1),
                      norm_layer(ngf * mult // 2),
                      activation]
        model += [nn.ReflectionPad2d(3),
                  nn.Conv(ngf, output_nc, 7, padding=0),
                  nn.Tanh()]

        self.model = nn.Sequential(*model)
        for m in self.modules():
            weights_init_normal(m)
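# Usage sketch (illustrative, not from the source): with the defaults the
# resolution shrinks 8x through three stride-2 convs, passes 9 resnet blocks
# at ngf * 8 channels, and is restored by three mirrored transposed convs, so
# H and W should be divisible by 8.
# gen = GlobalGenerator(3, 3)
# out = gen.model(jt.randn(1, 3, 256, 256))   # -> (1, 3, 256, 256)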
# Class name not preserved in the source; "Up" (the standard U-Net up block)
# is assumed here.
class Up(nn.Module):
    def __init__(self, in_channels, out_channels, bilinear=True):
        super().__init__()
        # Either branch leaves the channel count at in_channels after
        # upsampling; the encoder skip is concatenated in the forward pass,
        # which is why DoubleConv takes in_channels * 2.
        if bilinear:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear')
            self.conv = DoubleConv(in_channels * 2, out_channels, out_channels)
        else:
            self.up = nn.ConvTranspose(in_channels, in_channels, 2, stride=2)
            self.conv = DoubleConv(in_channels * 2, out_channels, out_channels)
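# Assumed forward (illustrative, not in the source): the usual U-Net pairing
# of upsample-then-concat that motivates the in_channels * 2 above.
# def execute(self, x1, x2):
#     x1 = self.up(x1)                    # deeper feature map, upsampled
#     x = jt.concat([x2, x1], dim=1)      # concat with the encoder skip
#     return self.conv(x)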
class DecoderBlock(nn.Module):
    def __init__(self, channel_in, channel_out, kernel_size=4, padding=1,
                 stride=2, output_padding=0, norelu=False):
        super(DecoderBlock, self).__init__()
        layers_list = [
            nn.ConvTranspose(channel_in, channel_out, kernel_size,
                             padding=padding, stride=stride,
                             output_padding=output_padding),
            nn.BatchNorm(channel_out, momentum=0.9),
        ]
        if not norelu:
            layers_list.append(nn.LeakyReLU(1))
        self.conv = nn.Sequential(*layers_list)
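# Shape check (illustrative): the default settings double the spatial size.
blk = DecoderBlock(128, 64)
print(blk.conv(jt.randn(1, 128, 16, 16)).shape)   # -> [1, 64, 32, 32]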
class GlobalGenerator(nn.Module):
    def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9,
                 norm_layer=nn.InstanceNorm2d, padding_type='reflect'):
        assert n_blocks >= 0
        super(GlobalGenerator, self).__init__()
        activation = nn.ReLU()

        model = [
            nn.ReflectionPad2d(3),
            nn.Conv(input_nc, ngf, 7, padding=0),
            norm_layer(ngf),
            activation
        ]
        ### downsample
        for i in range(n_downsampling):
            mult = 2 ** i
            model += [
                nn.Conv(ngf * mult, ngf * mult * 2, 3, stride=2, padding=1),
                norm_layer(ngf * mult * 2),
                activation
            ]
        ### resnet blocks
        mult = 2 ** n_downsampling
        for i in range(n_blocks):
            model += [
                ResnetBlock(ngf * mult, norm_type='in', padding_type=padding_type)
            ]
        ### upsample
        for i in range(n_downsampling):
            mult = 2 ** (n_downsampling - i)
            model += [
                nn.ConvTranspose(ngf * mult, ngf * mult // 2, 3, stride=2,
                                 padding=1, output_padding=1),
                norm_layer(ngf * mult // 2),
                activation
            ]
        model += [
            nn.ReflectionPad2d(3),
            nn.Conv(ngf, output_nc, kernel_size=7, padding=0),
            nn.Tanh()
        ]
        self.model = nn.Sequential(*model)
class AutoEncoderWithFC(nn.Module):
    def __init__(self, input_nc, output_nc, h=96, w=96):
        super(AutoEncoderWithFC, self).__init__()
        out_features = 64
        # Encoder: four stride-2 convs, 64 -> 128 -> 256 -> 512 channels.
        model = [nn.Conv(input_nc, 64, kernel_size=4, stride=2, padding=1, bias=False)]
        in_features = out_features
        for _ in range(3):
            out_features *= 2
            model += [nn.LeakyReLU(0.2),
                      nn.Conv(in_features, out_features, 4, stride=2, padding=1, bias=False),
                      nn.BatchNorm2d(out_features)]
            in_features = out_features
        self.encoder = nn.Sequential(*model)

        # Fully connected bottleneck on the 512 x (h/16) x (w/16) feature map.
        self.rh = h // 16
        self.rw = w // 16
        self.feat_dim = 512 * self.rh * self.rw
        self.fc1 = nn.Linear(self.feat_dim, 1024)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(1024, self.feat_dim)

        # Decoder: mirror of the encoder, ending in tanh.
        model2 = []
        for _ in range(3):
            out_features //= 2
            model2 += [nn.ReLU(),
                       nn.ConvTranspose(in_features, out_features, 4, stride=2,
                                        padding=1, bias=False),
                       nn.BatchNorm2d(out_features)]
            in_features = out_features
        model2 += [nn.ReLU(),
                   nn.ConvTranspose(out_features, output_nc, 4, stride=2,
                                    padding=1, bias=False),
                   nn.Tanh()]
        self.decoder = nn.Sequential(*model2)

        for m in self.modules():
            weights_init_normal(m)
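# Assumed forward (illustrative, not in the source): the fc pair suggests the
# usual encode -> flatten -> bottleneck -> reshape -> decode flow.
# def execute(self, x):
#     feat = self.encoder(x).reshape(-1, self.feat_dim)
#     feat = self.fc2(self.relu(self.fc1(feat)))
#     return self.decoder(feat.reshape(-1, 512, self.rh, self.rw))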
class Part_Generator(nn.Module):
    def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9,
                 norm_layer=nn.BatchNorm2d, padding_type='reflect'):
        assert n_blocks >= 0
        super(Part_Generator, self).__init__()
        activation = nn.ReLU()
        model = []
        ### resnet blocks
        mult = 2 ** n_downsampling
        for i in range(n_blocks):
            model += [
                ResnetBlock(ngf * mult, norm_type='adain', padding_type=padding_type)
            ]
        ### upsample, with AdaIN after each transposed conv
        for i in range(n_downsampling):
            mult = 2 ** (n_downsampling - i)
            model += [
                nn.ConvTranspose(ngf * mult, ngf * mult // 2, 3, stride=2,
                                 padding=1, output_padding=1)
            ]
            model += [AdaptiveInstanceNorm2d(ngf * mult // 2)]
            model += [activation]
        model += [
            nn.ReflectionPad2d(3),
            nn.Conv(ngf, output_nc, 7, padding=0),
            nn.Tanh()
        ]
        self.model = nn.Sequential(*model)
        # style encoder; its output size matches the number of AdaIN
        # parameters needed by the decoder above
        self.enc_style = StyleEncoder(5, 3, 16,
                                      self.get_num_adain_params(self.model),
                                      norm='none', activ='relu',
                                      pad_type='reflect')
class UNetUp(nn.Module):
    def __init__(self, in_size, out_size, dropout=0.0):
        super(UNetUp, self).__init__()
        layers = [
            nn.ConvTranspose(in_size, out_size, 4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(out_size),
            nn.ReLU()
        ]
        if dropout:
            layers.append(nn.Dropout(dropout))
        self.model = nn.Sequential(*layers)
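# Shape check (illustrative): the block alone doubles the spatial size; the
# encoder skip is concatenated outside this module, in the network's forward.
up = UNetUp(512, 256, dropout=0.5)
print(up.model(jt.randn(1, 512, 8, 8)).shape)   # -> [1, 256, 16, 16]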
# Nested inside make_net (note the nonlocal below), which folds this helper
# over a list of layer configs.
def make_layer(layer_cfg):
    nonlocal in_channels

    # Possible patterns:
    # ( 256, 3, {}) -> conv
    # ( 256,-2, {}) -> deconv
    # (None,-2, {}) -> bilinear interpolate
    # ('cat',[],{}) -> concat the subnetworks in the list
    #
    # You know it would have probably been simpler just to adopt a 'c' 'd' 'u' naming scheme.
    # Whatever, it's too late now.
    if isinstance(layer_cfg[0], str):
        layer_name = layer_cfg[0]
        if layer_name == 'cat':
            nets = [make_net(in_channels, x) for x in layer_cfg[1]]
            layer = Concat([net[0] for net in nets], layer_cfg[2])
            num_channels = sum([net[1] for net in nets])
    else:
        num_channels = layer_cfg[0]
        kernel_size = layer_cfg[1]

        if kernel_size > 0:
            layer = nn.Conv(in_channels, num_channels, kernel_size, **layer_cfg[2])
        else:
            if num_channels is None:
                layer = InterpolateModule(scale_factor=-kernel_size, mode='bilinear',
                                          align_corners=False, **layer_cfg[2])
            else:
                layer = nn.ConvTranspose(in_channels, num_channels, -kernel_size,
                                         **layer_cfg[2])

    in_channels = num_channels if num_channels is not None else in_channels

    # Don't return a ReLU layer if we're doing an upsample. This probably doesn't affect anything
    # output-wise, but there's no need to go through a ReLU here.
    # Commented out for backwards compatibility with previous models
    # if num_channels is None:
    #     return [layer]
    # else:
    return [layer, nn.ReLU()]
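# Example configs (illustrative, not from the source), in the format the
# comment above describes:
# net_cfg = [
#     (256, 3, {'padding': 1}),   # 3x3 conv to 256 channels
#     (256, -2, {}),              # 2x2 transposed conv to 256 channels
#     (None, -2, {}),             # bilinear 2x upsample, channels unchanged
# ]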
class GeneratorResNet(nn.Module):
    def __init__(self, in_channels=3, out_channels=1, num_res_blocks=9):
        super(GeneratorResNet, self).__init__()
        out_features = 64
        # Initial 7x7 conv
        model = [nn.ReflectionPad2d(3),
                 nn.Conv(in_channels, out_features, 7, bias=False),
                 nn.BatchNorm2d(out_features),
                 nn.ReLU()]
        in_features = out_features
        # Two stride-2 downsampling blocks
        for _ in range(2):
            out_features *= 2
            model += [nn.Conv(in_features, out_features, 3, stride=2, padding=1, bias=False),
                      nn.BatchNorm2d(out_features),
                      nn.ReLU()]
            in_features = out_features
        # Residual blocks at the bottleneck resolution
        for _ in range(num_res_blocks):
            model += [ResidualBlock(out_features)]
        # Two stride-2 upsampling blocks
        for _ in range(2):
            out_features //= 2
            model += [nn.ConvTranspose(in_features, out_features, 3, stride=2,
                                       padding=1, output_padding=1, bias=False),
                      nn.BatchNorm2d(out_features),
                      nn.ReLU()]
            in_features = out_features
        # Output layer
        model += [nn.ReflectionPad2d(3),
                  nn.Conv(out_features, out_channels, 7),
                  nn.Tanh()]
        self.model = nn.Sequential(*model)
        for m in self.modules():
            weights_init_normal(m)
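# Usage sketch (illustrative, not from the source): defaults map a 3-channel
# image to a 1-channel one at the same resolution; the two stride-2
# downsamples are mirrored by two stride-2 transposed convs, so H and W must
# be divisible by 4.
# gen = GeneratorResNet(in_channels=3, out_channels=1)
# fake = gen.model(jt.randn(1, 3, 256, 256))   # -> (1, 1, 256, 256)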
class GeneratorResNet(nn.Module):
    def __init__(self, img_shape=(3, 128, 128), res_blocks=9, c_dim=5):
        super(GeneratorResNet, self).__init__()
        channels, img_size, _ = img_shape

        # Initial conv over the image concatenated with the label map
        model = [
            nn.Conv(channels + c_dim, 64, 7, stride=1, padding=3, bias=False),
            nn.InstanceNorm2d(64, affine=None),
            nn.ReLU()
        ]
        curr_dim = 64
        # Two stride-2 downsampling blocks
        for _ in range(2):
            model += [
                nn.Conv(curr_dim, curr_dim * 2, 4, stride=2, padding=1, bias=False),
                nn.InstanceNorm2d(curr_dim * 2, affine=None),
                nn.ReLU()
            ]
            curr_dim *= 2
        # Residual blocks
        for _ in range(res_blocks):
            model += [ResidualBlock(curr_dim)]
        # Two stride-2 upsampling blocks
        for _ in range(2):
            model += [
                nn.ConvTranspose(curr_dim, curr_dim // 2, 4, stride=2, padding=1, bias=False),
                nn.InstanceNorm2d(curr_dim // 2, affine=None),
                nn.ReLU()
            ]
            curr_dim = curr_dim // 2
        # Output layer
        model += [
            nn.Conv(curr_dim, channels, 7, stride=1, padding=3),
            nn.Tanh()
        ]
        self.model = nn.Sequential(*model)
        for m in self.model:
            weights_init_normal(m)
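# Usage sketch (illustrative, not from the source): as in StarGAN, the target
# attribute vector c is replicated spatially and concatenated with the image,
# matching the (channels + c_dim) input width of the first conv.
# gen = GeneratorResNet((3, 128, 128), c_dim=5)
# img = jt.randn(1, 3, 128, 128)
# c = np.array([[1, 0, 0, 1, 0]], dtype='float32')          # target attributes
# c_map = jt.array(np.tile(c[:, :, None, None], (1, 1, 128, 128)))
# out = gen.model(jt.concat([img, c_map], dim=1))           # -> (1, 3, 128, 128)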
class generator(nn.Module):
    def __init__(self, dim=3):
        super(generator, self).__init__()
        # Project to a 7x7x256 feature map (reshaped in the forward pass)
        self.fc = nn.Linear(1024, 7 * 7 * 256)
        self.fc_bn = nn.BatchNorm(256)
        # Alternating stride-2 (upsampling) and stride-1 (refining) deconvs:
        # 7 -> 14 -> 14 -> 28 -> 28 -> 56 -> 112
        self.deconv1 = nn.ConvTranspose(256, 256, 3, stride=2, padding=1, output_padding=1)
        self.deconv1_bn = nn.BatchNorm(256)
        self.deconv2 = nn.ConvTranspose(256, 256, 3, stride=1, padding=1)
        self.deconv2_bn = nn.BatchNorm(256)
        self.deconv3 = nn.ConvTranspose(256, 256, 3, stride=2, padding=1, output_padding=1)
        self.deconv3_bn = nn.BatchNorm(256)
        self.deconv4 = nn.ConvTranspose(256, 256, 3, stride=1, padding=1)
        self.deconv4_bn = nn.BatchNorm(256)
        self.deconv5 = nn.ConvTranspose(256, 128, 3, stride=2, padding=1, output_padding=1)
        self.deconv5_bn = nn.BatchNorm(128)
        self.deconv6 = nn.ConvTranspose(128, 64, 3, stride=2, padding=1, output_padding=1)
        self.deconv6_bn = nn.BatchNorm(64)
        self.deconv7 = nn.ConvTranspose(64, dim, 3, stride=1, padding=1)
        self.relu = nn.ReLU()
        self.tanh = nn.Tanh()
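# Assumed forward (illustrative, not in the source): reshape the fc output to
# (N, 256, 7, 7), apply relu(bn(deconv(.))) stage by stage, tanh at the end.
# def execute(self, z):
#     x = self.relu(self.fc_bn(self.fc(z).reshape(-1, 256, 7, 7)))
#     for i in range(1, 7):
#         deconv = getattr(self, 'deconv{}'.format(i))
#         bn = getattr(self, 'deconv{}_bn'.format(i))
#         x = self.relu(bn(deconv(x)))
#     return self.tanh(self.deconv7(x))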
def upsample(in_feat, out_feat, normalize=True):
    layers = [nn.ConvTranspose(in_feat, out_feat, 4, stride=2, padding=1)]
    if normalize:
        layers.append(nn.BatchNorm(out_feat, eps=0.8))
    layers.append(nn.ReLU())
    return layers
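# Usage sketch (illustrative, values assumed): stacking the helper to build a
# small decoder head.
decoder = nn.Sequential(*(upsample(128, 64) + upsample(64, 32, normalize=False)))
print(decoder(jt.randn(1, 128, 16, 16)).shape)   # -> [1, 32, 64, 64]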
class EDDeconv(nn.Module):
    def __init__(self, cin, cout, zdim=128, nf=64, activation=nn.Tanh):
        super(EDDeconv, self).__init__()
        # Encoder: four stride-2 convs, then a 4x4 valid conv down to a
        # 1x1 bottleneck of zdim channels.
        network = [
            nn.Conv(cin, nf, 4, stride=2, padding=1, bias=False),
            nn.GroupNorm(16, nf),
            nn.LeakyReLU(scale=0.2),
            nn.Conv(nf, nf * 2, 4, stride=2, padding=1, bias=False),
            nn.GroupNorm(16 * 2, nf * 2),
            nn.LeakyReLU(scale=0.2),
            nn.Conv(nf * 2, nf * 4, 4, stride=2, padding=1, bias=False),
            nn.GroupNorm(16 * 4, nf * 4),
            nn.LeakyReLU(scale=0.2),
            nn.Conv(nf * 4, nf * 8, 4, stride=2, padding=1, bias=False),
            nn.LeakyReLU(scale=0.2),
            nn.Conv(nf * 8, zdim, 4, stride=1, padding=0, bias=False),
            nn.ReLU()
        ]
        # Decoder: transposed convs interleaved with refining 3x3 convs, plus
        # a final nearest-neighbour upsample, back to the input resolution.
        network += [
            nn.ConvTranspose(zdim, nf * 8, 4, stride=1, padding=0, bias=False),
            nn.ReLU(),
            nn.Conv(nf * 8, nf * 8, 3, stride=1, padding=1, bias=False),
            nn.ReLU(),
            nn.ConvTranspose(nf * 8, nf * 4, 4, stride=2, padding=1, bias=False),
            nn.GroupNorm(16 * 4, nf * 4),
            nn.ReLU(),
            nn.Conv(nf * 4, nf * 4, 3, stride=1, padding=1, bias=False),
            nn.GroupNorm(16 * 4, nf * 4),
            nn.ReLU(),
            nn.ConvTranspose(nf * 4, nf * 2, 4, stride=2, padding=1, bias=False),
            nn.GroupNorm(16 * 2, nf * 2),
            nn.ReLU(),
            nn.Conv(nf * 2, nf * 2, 3, stride=1, padding=1, bias=False),
            nn.GroupNorm(16 * 2, nf * 2),
            nn.ReLU(),
            nn.ConvTranspose(nf * 2, nf, 4, stride=2, padding=1, bias=False),
            nn.GroupNorm(16, nf),
            nn.ReLU(),
            nn.Conv(nf, nf, 3, stride=1, padding=1, bias=False),
            nn.GroupNorm(16, nf),
            nn.ReLU(),
            nn.Upsample(scale_factor=2, mode='nearest'),
            nn.Conv(nf, nf, 3, stride=1, padding=1, bias=False),
            nn.GroupNorm(16, nf),
            nn.ReLU(),
            nn.Conv(nf, nf, 5, stride=1, padding=2, bias=False),
            nn.GroupNorm(16, nf),
            nn.ReLU(),
            nn.Conv(nf, cout, 5, stride=1, padding=2, bias=False)
        ]
        if activation is not None:
            network += [activation()]
        self.network = nn.Sequential(*network)
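# Shape check (illustrative; the 64x64 input size is an assumption): the
# encoder squeezes the input to a 1x1 bottleneck of zdim channels and the
# decoder restores the full resolution.
net = EDDeconv(3, 3)
print(net.network(jt.randn(1, 3, 64, 64)).shape)   # -> [1, 3, 64, 64]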