def __init__(self, dim_fx, num_id, num_c_expr):
    super(Encoder, self).__init__()
    self.input_height = 100
    self.input_width = 100
    self.input_dim = 3
    self.dim_fx = dim_fx
    self.num_id = num_id
    self.num_c_expr = num_c_expr

    self.conv = nn.Sequential(
        nn.Conv2d(self.input_dim, 64, 11, 4, 1, bias=True),
        nn.BatchNorm2d(64),
        nn.ReLU(),
        nn.Conv2d(64, 128, 5, 2, 1, bias=True),
        nn.BatchNorm2d(128),
        nn.ReLU(),
        nn.Conv2d(128, 256, 5, 2, 1, bias=True),
        nn.BatchNorm2d(256),
        nn.ReLU(),
        nn.Conv2d(256, 512, 5, 2, 1, bias=True),
        nn.BatchNorm2d(512),
        nn.ReLU(),
        nn.Conv2d(512, dim_fx, 8, 1, 1, bias=True),
        nn.Sigmoid(),
    )
    utils.initialize_weights(self)

def __init__(self, D_dim=200, maxout_pieces=5):
    super(Discriminator, self).__init__()
    self.input_dim = 4
    self.hid_dim = D_dim
    self.maxout_pieces = maxout_pieces
    self.output_dim = 1

    self.fc = nn.Sequential(
        nn.Linear(self.input_dim, self.hid_dim * self.maxout_pieces),
        nn.BatchNorm1d(self.hid_dim * self.maxout_pieces),
    )
    self.fcmax1 = nn.Sequential(
        nn.Linear(self.hid_dim, self.hid_dim * self.maxout_pieces),
        nn.BatchNorm1d(self.hid_dim * self.maxout_pieces),
    )
    self.fcmax2 = nn.Sequential(
        nn.Linear(self.hid_dim, self.hid_dim * self.maxout_pieces),
        nn.BatchNorm1d(self.hid_dim * self.maxout_pieces),
    )
    self.fo = nn.Sequential(
        nn.Linear(self.hid_dim, self.hid_dim),
        nn.BatchNorm1d(self.hid_dim),
        nn.LeakyReLU(0.2),
        nn.Linear(self.hid_dim, self.output_dim),
        nn.Sigmoid(),
    )
    utils.initialize_weights(self)

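# Hedged helper sketch (not from the source): each fc/fcmax block above emits
# hid_dim * maxout_pieces units while the next block consumes hid_dim, so a
# maxout reduction over the pieces is presumably applied between blocks.
# The piece layout (pieces contiguous per hidden unit) is an assumption.
def maxout(x, hid_dim, pieces):
    # (N, hid_dim * pieces) -> (N, hid_dim), max over the pieces axis
    return x.view(-1, hid_dim, pieces).max(dim=2)[0]
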
def __init__(self, E_dim=400):
    super(Encoder, self).__init__()
    self.input_dim = 2
    self.hid_dim = E_dim
    self.output_dim = 2

    self.fc = nn.Sequential(
        nn.Linear(self.input_dim * 2, self.hid_dim, bias=True),
        nn.BatchNorm1d(self.hid_dim),
        nn.LeakyReLU(0.2),
        nn.Linear(self.hid_dim, self.hid_dim, bias=True),
        nn.BatchNorm1d(self.hid_dim),
        nn.LeakyReLU(0.2),
        nn.Linear(self.hid_dim, self.hid_dim, bias=True),
        nn.BatchNorm1d(self.hid_dim),
        nn.LeakyReLU(0.2),
        nn.Linear(self.hid_dim, self.hid_dim, bias=True),
        nn.BatchNorm1d(self.hid_dim),
        nn.LeakyReLU(0.2),
        nn.Linear(self.hid_dim, self.hid_dim, bias=True),
        nn.BatchNorm1d(self.hid_dim),
        nn.LeakyReLU(0.2),
        nn.Linear(self.hid_dim, self.hid_dim, bias=True),
        nn.BatchNorm1d(self.hid_dim),
        nn.LeakyReLU(0.2),
        nn.Linear(self.hid_dim, self.hid_dim, bias=True),
        nn.BatchNorm1d(self.hid_dim),
        nn.LeakyReLU(0.2),
        nn.Linear(self.hid_dim, self.hid_dim, bias=True),
        nn.BatchNorm1d(self.hid_dim),
        nn.LeakyReLU(0.2),
        nn.Linear(self.hid_dim, self.output_dim, bias=True),
    )
    utils.initialize_weights(self)

def __init__(self):
    super(generator, self).__init__()
    self.input_height = 64
    self.input_width = 64
    self.input_dim = 60498  # 62
    self.proj_dim = 62
    self.output_dim = 3

    # self.proj = nn.Sequential(
    #     nn.Linear(self.input_dim, self.proj_dim)
    # )
    self.fc = nn.Sequential(
        nn.Linear(self.input_dim, 1024),
        nn.BatchNorm1d(1024),
        nn.ReLU(),
        nn.Linear(1024, 128 * (self.input_height // 4) * (self.input_width // 4)),
        nn.BatchNorm1d(128 * (self.input_height // 4) * (self.input_width // 4)),
        nn.ReLU(),
    )
    self.deconv = nn.Sequential(
        nn.ConvTranspose2d(128, 64, 4, 2, 1),
        nn.BatchNorm2d(64),
        nn.ReLU(),
        nn.ConvTranspose2d(64, self.output_dim, 4, 2, 1),
        nn.Tanh(),
    )
    utils.initialize_weights(self)

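# Minimal forward sketch for the fc -> reshape -> deconv generators in this
# file (an assumption; the original forward methods are not shown here).
def forward(self, z):
    x = self.fc(z)  # (N, 128 * (H // 4) * (W // 4))
    x = x.view(-1, 128, self.input_height // 4, self.input_width // 4)
    return self.deconv(x)  # (N, output_dim, H, W)
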
def __init__(self, num_classes, pretrained=True, use_aux=True):
    super(PSPNet, self).__init__()
    self.use_aux = use_aux
    resnet = models.resnet101()
    if pretrained:
        resnet.load_state_dict(torch.load(res101_path))
    self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool)
    self.layer1, self.layer2, self.layer3, self.layer4 = resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4

    # Convert layer3/layer4 to dilated convolutions with unit stride so the
    # backbone keeps a 1/8 output stride instead of downsampling further.
    for n, m in self.layer3.named_modules():
        if 'conv2' in n:
            m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1)
        elif 'downsample.0' in n:
            m.stride = (1, 1)
    for n, m in self.layer4.named_modules():
        if 'conv2' in n:
            m.dilation, m.padding, m.stride = (4, 4), (4, 4), (1, 1)
        elif 'downsample.0' in n:
            m.stride = (1, 1)

    self.ppm = _PyramidPoolingModule(2048, 512, (1, 2, 3, 6))
    self.final = nn.Sequential(
        nn.Conv2d(4096, 512, kernel_size=3, padding=1, bias=False),
        nn.BatchNorm2d(512, momentum=.95),
        nn.ReLU(inplace=True),
        nn.Dropout(0.1),
        nn.Conv2d(512, num_classes, kernel_size=1),
    )
    if use_aux:
        self.aux_logits = nn.Conv2d(1024, num_classes, kernel_size=1)
        initialize_weights(self.aux_logits)
    initialize_weights(self.ppm, self.final)

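# Hedged forward sketch for the PSPNet above (an assumption; the original
# forward is not shown): dilated backbone -> pyramid pooling -> classifier,
# upsampled back to the input resolution, with the auxiliary head read off
# layer3 during training. Assumes torch.nn.functional is imported as F.
def forward(self, x):
    size = x.size()[2:]
    x = self.layer0(x)
    x = self.layer1(x)
    x = self.layer2(x)
    x = self.layer3(x)
    if self.training and self.use_aux:
        aux = self.aux_logits(x)
    x = self.layer4(x)
    x = self.final(self.ppm(x))
    if self.training and self.use_aux:
        return (F.interpolate(x, size, mode='bilinear', align_corners=True),
                F.interpolate(aux, size, mode='bilinear', align_corners=True))
    return F.interpolate(x, size, mode='bilinear', align_corners=True)
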
def __init__(self, dataset='mnist'):
    super(discriminator, self).__init__()
    if dataset == 'mnist' or dataset == 'fashion-mnist':
        self.input_height = 28
        self.input_width = 28
        self.input_dim = 1
        self.output_dim = 1
    elif dataset == 'celebA':
        self.input_height = 64
        self.input_width = 64
        self.input_dim = 3
        self.output_dim = 3

    self.conv = nn.Sequential(
        nn.Conv2d(self.input_dim, 64, 4, 2, 1),
        nn.ReLU(),
    )
    self.fc = nn.Sequential(
        nn.Linear(64 * (self.input_height // 2) * (self.input_width // 2), 32),
        nn.BatchNorm1d(32),
        nn.ReLU(),
        nn.Linear(32, 64 * (self.input_height // 2) * (self.input_width // 2)),
        nn.BatchNorm1d(64 * (self.input_height // 2) * (self.input_width // 2)),
        nn.ReLU(),
    )
    self.deconv = nn.Sequential(
        nn.ConvTranspose2d(64, self.output_dim, 4, 2, 1),
        # nn.Sigmoid(),
    )
    utils.initialize_weights(self)

def __init__(self, dataset='mnist'):
    super(generator, self).__init__()
    if dataset == 'mnist' or dataset == 'fashion-mnist':
        self.input_height = 28
        self.input_width = 28
        self.input_dim = 62
        self.output_dim = 1
    elif dataset == 'celebA':
        self.input_height = 64
        self.input_width = 64
        self.input_dim = 62
        self.output_dim = 3

    # Fully connected layers: map the input noise vector to a small feature map
    self.fc = nn.Sequential(
        nn.Linear(self.input_dim, 1024),
        nn.BatchNorm1d(1024),
        nn.ReLU(),
        nn.Linear(1024, 128 * (self.input_height // 4) * (self.input_width // 4)),
        nn.BatchNorm1d(128 * (self.input_height // 4) * (self.input_width // 4)),
        nn.ReLU(),
    )
    # Use deconv layers to upsample the feature map into a realistic image
    self.deconv = nn.Sequential(
        nn.ConvTranspose2d(128, 64, 4, 2, 1),
        nn.BatchNorm2d(64),
        nn.ReLU(),
        nn.ConvTranspose2d(64, self.output_dim, 4, 2, 1),
        nn.Sigmoid(),
    )
    # Initialize the parameters of the generator's layers
    utils.initialize_weights(self)

def __init__(self, dataset='mnist'):
    super(discriminator, self).__init__()
    if dataset == 'mnist' or dataset == 'fashion-mnist':
        self.input_height = 28
        self.input_width = 28
        self.input_dim = 1
        self.output_dim = 1
    elif dataset == 'celebA':
        self.input_height = 64
        self.input_width = 64
        self.input_dim = 3
        self.output_dim = 3

    self.conv = nn.Sequential(
        nn.Conv2d(self.input_dim, 64, 4, 2, 1),
        nn.ReLU(),
    )
    self.code = nn.Sequential(
        nn.Linear(64 * (self.input_height // 2) * (self.input_width // 2), 32),
        # bn and relu are excluded since code is used in pullaway_loss
    )
    self.fc = nn.Sequential(
        nn.Linear(32, 64 * (self.input_height // 2) * (self.input_width // 2)),
        nn.BatchNorm1d(64 * (self.input_height // 2) * (self.input_width // 2)),
        nn.ReLU(),
    )
    self.deconv = nn.Sequential(
        nn.ConvTranspose2d(64, self.output_dim, 4, 2, 1),
        # nn.Sigmoid(),  # EBGAN does not work well when using Sigmoid().
    )
    utils.initialize_weights(self)

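# Hedged forward sketch for the EBGAN-style autoencoder discriminator above
# (an assumption; the original forward is not shown). It returns the
# reconstruction and the latent code consumed by pullaway_loss.
def forward(self, input):
    x = self.conv(input)
    x = x.view(-1, 64 * (self.input_height // 2) * (self.input_width // 2))
    code = self.code(x)
    x = self.fc(code)
    x = x.view(-1, 64, self.input_height // 2, self.input_width // 2)
    return self.deconv(x), code
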
def __init__(self, num_id=105, num_c_expr=48, nInputCh=4, norm=nn.BatchNorm3d):
    super(discriminator3d, self).__init__()
    self.nInputCh = nInputCh

    self.conv = nn.Sequential(
        nn.Conv3d(nInputCh, 32, 4, 2, 1, bias=False),
        norm(32),
        nn.LeakyReLU(0.2),
        nn.Conv3d(32, 64, 4, 2, 1, bias=False),
        norm(64),
        nn.LeakyReLU(0.2),
        nn.Conv3d(64, 128, 4, 2, 1, bias=False),
        norm(128),
        nn.LeakyReLU(0.2),
        nn.Conv3d(128, 256, 4, 2, 1, bias=False),
        norm(256),
        nn.LeakyReLU(0.2),
        nn.Conv3d(256, 512, 4, 2, 1, bias=False),
        norm(512),
        nn.LeakyReLU(0.2),
    )
    self.convGAN = nn.Sequential(
        nn.Conv3d(512, 1, 4, bias=False),
        nn.Sigmoid(),
    )
    self.convID = nn.Sequential(
        nn.Conv3d(512, num_id, 4, bias=False),
    )
    self.convPCode = nn.Sequential(
        nn.Conv3d(512, num_c_expr, 4, bias=False),
    )
    utils.initialize_weights(self)

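# Hedged forward sketch for discriminator3d (an assumption; the original
# forward is not shown): the shared 3D trunk feeds three heads for the
# real/fake score, identity logits, and expression-code logits. The
# flattening assumes the trunk output is 4x4x4 so each head emits 1x1x1.
def forward(self, input):
    feat = self.conv(input)
    d_gan = self.convGAN(feat).view(input.size(0), -1)     # real/fake probability
    d_id = self.convID(feat).view(input.size(0), -1)       # identity logits
    d_code = self.convPCode(feat).view(input.size(0), -1)  # expression-code logits
    return d_gan, d_id, d_code
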
def __init__(self, z_dim=100, pix_level=3):
    super(Encoder, self).__init__()
    self.input_dim = pix_level
    self.output_dim = z_dim
    d = 128

    self.conv = nn.Sequential(
        nn.Conv2d(self.input_dim, d, 4, 2, 1, bias=True),
        nn.LeakyReLU(0.2),
        nn.Conv2d(d, d * 2, 4, 2, 1, bias=True),
        nn.BatchNorm2d(d * 2),
        nn.LeakyReLU(0.2),
        nn.Conv2d(d * 2, d * 4, 4, 2, 1, bias=True),
        nn.BatchNorm2d(d * 4),
        nn.LeakyReLU(0.2),
        nn.Conv2d(d * 4, d * 8, 4, 2, 1, bias=True),
        nn.BatchNorm2d(d * 8),
        nn.LeakyReLU(0.2),
    )
    self.fc_mu = nn.Sequential(
        nn.Conv2d(d * 8, self.output_dim, 4, 1, 0, bias=True),
    )
    self.fc_sigma = nn.Sequential(
        nn.Conv2d(d * 8, self.output_dim, 4, 1, 0, bias=True),
    )
    utils.initialize_weights(self)

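# Hypothetical usage sketch (not from the source): drawing a latent sample
# from the Encoder above with the reparameterization trick, reading fc_sigma
# as a log-variance head. Assumes a 64x64 input so conv yields 4x4 maps.
def sample_latent(encoder, x):
    h = encoder.conv(x)
    mu = encoder.fc_mu(h).view(x.size(0), -1)         # (N, z_dim)
    log_var = encoder.fc_sigma(h).view(x.size(0), -1)
    std = torch.exp(0.5 * log_var)
    return mu + std * torch.randn_like(std)
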
def __init__(self, input_dim=1, output_dim=1, input_size=32):
    super(discriminator, self).__init__()
    self.input_dim = input_dim
    self.output_dim = output_dim
    self.input_size = input_size
    # input_dim: 1, output_dim: 1, input_size: 28

    self.conv = nn.Sequential(
        # (in_channels, out_channels, kernel_size, stride, padding)
        nn.Conv2d(self.input_dim, 64, 4, 2, 1),
        nn.LeakyReLU(0.2),
        nn.Conv2d(64, 128, 4, 2, 1),
        nn.BatchNorm2d(128),
        nn.LeakyReLU(0.2),
    )
    self.fc = nn.Sequential(
        nn.Linear(128 * (self.input_size // 4) * (self.input_size // 4), 1024),
        nn.BatchNorm1d(1024),
        nn.LeakyReLU(0.2),
        nn.Linear(1024, self.output_dim),
        nn.Sigmoid(),
    )
    utils.initialize_weights(self)

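# Minimal forward sketch for the conv -> flatten -> fc discriminators in
# this file (an assumption; the original forward methods are not shown).
def forward(self, input):
    x = self.conv(input)  # (N, 128, input_size // 4, input_size // 4)
    x = x.view(-1, 128 * (self.input_size // 4) * (self.input_size // 4))
    return self.fc(x)  # (N, output_dim), probabilities from the Sigmoid
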
def __init__(self, Nid=105, Ncode=48):
    super(discriminator, self).__init__()
    self.conv = nn.Sequential(
        nn.Conv3d(1, 32, 4, 2, 1, bias=False),
        nn.BatchNorm3d(32),
        nn.LeakyReLU(0.2),
        nn.Conv3d(32, 64, 4, 2, 1, bias=False),
        nn.BatchNorm3d(64),
        nn.LeakyReLU(0.2),
        nn.Conv3d(64, 128, 4, 2, 1, bias=False),
        nn.BatchNorm3d(128),
        nn.LeakyReLU(0.2),
        nn.Conv3d(128, 256, 4, 2, 1, bias=False),
        nn.BatchNorm3d(256),
        nn.LeakyReLU(0.2),
        nn.Conv3d(256, 512, 4, 2, 1, bias=False),
        nn.BatchNorm3d(512),
        nn.LeakyReLU(0.2),
    )
    self.convID = nn.Sequential(
        nn.Conv3d(512, Nid, 4, bias=False),
    )
    self.convPCode = nn.Sequential(
        nn.Conv3d(512, Ncode, 4, bias=False),
    )
    utils.initialize_weights(self)

def __init__(self, params):
    super(GenerativeNetwork, self).__init__()
    self.params = params
    self.fc1 = nn.Linear(in_features=self.params.latent_dim, out_features=self.params.hidden_dim)
    self.fc2 = nn.Linear(in_features=self.params.hidden_dim, out_features=self.params.input_dim)
    self.activation_fn = GELU()
    initialize_weights(self)

def __init__(self, E_dim=200):
    super(Encoder, self).__init__()
    self.input_dim = 2
    self.hid_dim = E_dim
    self.output_dim = 2

    # self.fc = nn.Sequential(
    #     nn.Linear(self.hid_dim, self.hid_dim),
    #     nn.BatchNorm1d(self.hid_dim),
    #     nn.ReLU()
    # )
    self.fc_mu = nn.Sequential(
        torch.nn.utils.spectral_norm(nn.Linear(self.hid_dim, self.hid_dim)),
        # nn.BatchNorm1d(self.hid_dim),
        nn.ReLU(),
        nn.Linear(self.hid_dim, self.output_dim),
    )
    self.fc_sigma = nn.Sequential(
        torch.nn.utils.spectral_norm(nn.Linear(self.hid_dim, self.hid_dim)),
        # nn.BatchNorm1d(self.hid_dim),
        nn.ReLU(),
        nn.Linear(self.hid_dim, self.output_dim),
    )
    utils.initialize_weights(self)

def __init__(self, input_dim=1, output_dim=1, input_size=32, len_discrete_code=10, len_continuous_code=2):
    super(discriminator, self).__init__()
    self.input_dim = input_dim
    self.output_dim = output_dim
    self.input_size = input_size
    self.len_discrete_code = len_discrete_code      # categorical distribution (i.e. label)
    self.len_continuous_code = len_continuous_code  # gaussian distribution (e.g. rotation, thickness)

    self.conv = nn.Sequential(
        nn.Conv2d(self.input_dim, 64, 4, 2, 1),
        nn.LeakyReLU(0.2),
        nn.Conv2d(64, 128, 4, 2, 1),
        nn.BatchNorm2d(128),
        nn.LeakyReLU(0.2),
    )
    self.fc = nn.Sequential(
        nn.Linear(128 * (self.input_size // 4) * (self.input_size // 4), 1024),
        nn.BatchNorm1d(1024),
        nn.LeakyReLU(0.2),
        nn.Linear(1024, self.output_dim + self.len_continuous_code + self.len_discrete_code),
        # nn.Sigmoid(),
    )
    utils.initialize_weights(self)

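# Hedged forward sketch (an assumption; the original forward is not shown):
# the joint head is split into the adversarial score, the continuous codes,
# and the discrete-code logits, in the order declared by the final Linear.
def forward(self, input):
    x = self.conv(input)
    x = x.view(-1, 128 * (self.input_size // 4) * (self.input_size // 4))
    x = self.fc(x)
    a = torch.sigmoid(x[:, :self.output_dim])                              # real/fake score
    b = x[:, self.output_dim:self.output_dim + self.len_continuous_code]  # continuous codes
    c = x[:, self.output_dim + self.len_continuous_code:]                 # discrete-code logits
    return a, b, c
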
def __init__(self, dataset='mnist'):
    super(discriminator, self).__init__()
    # Note: the original condition was `dataset == 'mnist' or 'fashion-mnist'`,
    # which is always true; the intended comparison is spelled out here.
    if dataset == 'mnist' or dataset == 'fashion-mnist':
        self.input_height = 28
        self.input_width = 28
        self.input_dim = 1
        self.output_dim = 1
        self.len_discrete_code = 10   # categorical distribution (i.e. label)
        self.len_continuous_code = 2  # gaussian distribution (e.g. rotation, thickness)

    self.conv = nn.Sequential(
        nn.Conv2d(self.input_dim, 64, 4, 2, 1),
        nn.LeakyReLU(0.2),
        nn.Conv2d(64, 128, 4, 2, 1),
        nn.BatchNorm2d(128),
        nn.LeakyReLU(0.2),
    )
    self.fc = nn.Sequential(
        nn.Linear(128 * (self.input_height // 4) * (self.input_width // 4), 1024),
        nn.BatchNorm1d(1024),
        nn.LeakyReLU(0.2),
        nn.Linear(1024, self.output_dim + self.len_continuous_code + self.len_discrete_code),
        nn.Sigmoid(),
    )
    utils.initialize_weights(self)

def __init__(self, dataset='mnist'):
    super(generator, self).__init__()
    self.input_height_small = 16
    self.input_width_small = 16
    self.input_dim = 62
    self.output_dim = 1

    self.fc = nn.Sequential(
        nn.Linear(self.input_dim, 1024),
        nn.BatchNorm1d(1024),
        nn.LeakyReLU(0.2),
        nn.Linear(1024, 128 * (self.input_height_small // 4) * (self.input_width_small // 4)),
        nn.BatchNorm1d(128 * (self.input_height_small // 4) * (self.input_width_small // 4)),
        nn.LeakyReLU(0.2),
    )
    self.deconv = nn.Sequential(
        nn.ConvTranspose2d(128, 64, 4, 2, 1),
        nn.BatchNorm2d(64),
        nn.LeakyReLU(0.2),
        nn.ConvTranspose2d(64, self.output_dim, 4, 2, 1),
        # nn.BatchNorm2d(self.output_dim),
        nn.Sigmoid(),
    )
    utils.initialize_weights(self)

def __init__(self, input_dim=100, output_dim=1, input_size=32, len_discrete_code=10, len_continuous_code=2):
    super(generator, self).__init__()
    self.input_dim = input_dim
    self.output_dim = output_dim
    self.input_size = input_size
    self.len_discrete_code = len_discrete_code      # categorical distribution (i.e. label)
    self.len_continuous_code = len_continuous_code  # gaussian distribution (e.g. rotation, thickness)

    self.fc = nn.Sequential(
        nn.Linear(self.input_dim + self.len_discrete_code + self.len_continuous_code, 1024),
        nn.BatchNorm1d(1024),
        nn.ReLU(),
        nn.Linear(1024, 128 * (self.input_size // 4) * (self.input_size // 4)),
        nn.BatchNorm1d(128 * (self.input_size // 4) * (self.input_size // 4)),
        nn.ReLU(),
    )
    self.deconv = nn.Sequential(
        nn.ConvTranspose2d(128, 64, 4, 2, 1),
        nn.BatchNorm2d(64),
        nn.ReLU(),
        nn.ConvTranspose2d(64, self.output_dim, 4, 2, 1),
        nn.Tanh(),
    )
    utils.initialize_weights(self)

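# Hedged forward sketch (an assumption; the original forward is not shown):
# the noise vector is concatenated with the continuous and discrete codes
# before the fc stack, matching the input width declared above.
def forward(self, input, cont_code, dist_code):
    x = torch.cat([input, cont_code, dist_code], dim=1)
    x = self.fc(x)
    x = x.view(-1, 128, self.input_size // 4, self.input_size // 4)
    return self.deconv(x)
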
def __init__(self, in_nc, out_nc, nf=32):
    super(discriminator, self).__init__()
    self.input_nc = in_nc
    self.output_nc = out_nc
    self.nf = nf

    self.convs = nn.Sequential(
        nn.Conv2d(in_nc, nf, 3, 1, 1),
        nn.LeakyReLU(0.2, True),
        nn.Conv2d(nf, nf * 2, 3, 2, 1),
        nn.LeakyReLU(0.2, True),
        nn.Conv2d(nf * 2, nf * 4, 3, 1, 1),
        nn.InstanceNorm2d(nf * 4),
        nn.LeakyReLU(0.2, True),
        nn.Conv2d(nf * 4, nf * 4, 3, 2, 1),
        nn.LeakyReLU(0.2, True),
        nn.Conv2d(nf * 4, nf * 8, 3, 1, 1),
        nn.InstanceNorm2d(nf * 8),
        nn.LeakyReLU(0.2, True),
        nn.Conv2d(nf * 8, nf * 8, 3, 1, 1),
        nn.InstanceNorm2d(nf * 8),
        nn.LeakyReLU(0.2, True),
        nn.Conv2d(nf * 8, out_nc, 3, 1, 1),
        nn.Sigmoid(),
    )
    utils.initialize_weights(self)

def __init__(self, class_num, dataset='mnist'):
    super(E_net, self).__init__()
    # Note: the original condition was `dataset == 'mnist' or 'fashion-mnist'`,
    # which is always true; the intended comparison is spelled out here.
    if dataset == 'mnist' or dataset == 'fashion-mnist':
        self.input_height = 28
        self.input_width = 28
        self.input_dim = 1 + 10
        self.output_dim = 1

    self.conv = nn.Sequential(
        nn.Conv2d(1, 64, 4, 2, 1),
        nn.BatchNorm2d(64),
        nn.LeakyReLU(0.2),
        nn.Conv2d(64, 128, 4, 2, 1),
        nn.BatchNorm2d(128),
        nn.LeakyReLU(0.2),
        nn.Conv2d(128, 256, 4, 2, 1),
        nn.BatchNorm2d(256),
        nn.LeakyReLU(0.2),
    )
    self.fc = nn.Sequential(
        nn.Linear(256 * 3 * 3, 1024),
        nn.BatchNorm1d(1024),
        nn.LeakyReLU(0.2),
        nn.Linear(1024, class_num),
    )
    utils.initialize_weights(self)

def __init__(self, dataset='mnist', z_dim=64, height=None, width=None, pix_level=None):
    super(Encoder, self).__init__()
    self.input_height = height
    self.input_width = width
    self.input_dim = pix_level
    self.output_dim = z_dim

    self.conv = nn.Sequential(
        nn.Conv2d(128, 64, 4, 2, 1),
        nn.BatchNorm2d(64),
        nn.LeakyReLU(0.1),
    )
    self.fc_dim = 64 * 3 * 3
    # self.fc_dim = 128 * (self.input_height // 4) * (self.input_width // 4)
    self.fc_mu = nn.Sequential(
        nn.Linear(self.fc_dim, self.output_dim),
        nn.LeakyReLU(0.1),
        nn.BatchNorm1d(self.output_dim),
        nn.Linear(self.output_dim, self.output_dim),
    )
    self.fc_sigma = nn.Sequential(
        nn.Linear(self.fc_dim, self.output_dim),
        nn.LeakyReLU(0.1),
        nn.BatchNorm1d(self.output_dim),
        nn.Linear(self.output_dim, self.output_dim),
    )
    utils.initialize_weights(self)

def __init__(self, num_classes, input_size, pretrained=True):
    super(GCN, self).__init__()
    self.input_size = input_size
    resnet = models.resnet152()
    if pretrained:
        resnet.load_state_dict(torch.load(res152_path))
    self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu)
    self.layer1 = nn.Sequential(resnet.maxpool, resnet.layer1)
    self.layer2 = resnet.layer2
    self.layer3 = resnet.layer3
    self.layer4 = resnet.layer4

    self.gcm1 = _GlobalConvModule(2048, num_classes, (7, 7))
    self.gcm2 = _GlobalConvModule(1024, num_classes, (7, 7))
    self.gcm3 = _GlobalConvModule(512, num_classes, (7, 7))
    self.gcm4 = _GlobalConvModule(256, num_classes, (7, 7))

    self.brm1 = _BoundaryRefineModule(num_classes)
    self.brm2 = _BoundaryRefineModule(num_classes)
    self.brm3 = _BoundaryRefineModule(num_classes)
    self.brm4 = _BoundaryRefineModule(num_classes)
    self.brm5 = _BoundaryRefineModule(num_classes)
    self.brm6 = _BoundaryRefineModule(num_classes)
    self.brm7 = _BoundaryRefineModule(num_classes)
    self.brm8 = _BoundaryRefineModule(num_classes)
    self.brm9 = _BoundaryRefineModule(num_classes)

    initialize_weights(self.gcm1, self.gcm2, self.gcm3, self.gcm4,
                       self.brm1, self.brm2, self.brm3, self.brm4, self.brm5,
                       self.brm6, self.brm7, self.brm8, self.brm9)

def __init__(self, dataset='mnist'):
    super(discriminator, self).__init__()
    if dataset == 'mnist' or dataset == 'fashion-mnist':
        self.input_height = 28
        self.input_width = 28
        self.input_dim = 1
        self.output_dim = 1
    elif dataset == 'celebA':
        self.input_height = 64
        self.input_width = 64
        self.input_dim = 3
        self.output_dim = 3

    self.conv = nn.Sequential(
        nn.Conv2d(self.input_dim, 64, 4, 2, 1),
        nn.ReLU(),
    )
    self.fc = nn.Sequential(
        nn.Linear(64 * (self.input_height // 2) * (self.input_width // 2), 32),
        nn.BatchNorm1d(32),
        nn.ReLU(),
        nn.Linear(32, 64 * (self.input_height // 2) * (self.input_width // 2)),
        nn.BatchNorm1d(64 * (self.input_height // 2) * (self.input_width // 2)),
        nn.ReLU(),
    )
    self.deconv = nn.Sequential(
        nn.ConvTranspose2d(64, self.output_dim, 4, 2, 1),
        # nn.Sigmoid(),
    )
    utils.initialize_weights(self)

def __init__(self, input_dim=100, output_dim=1, input_size=32):
    super(generator, self).__init__()
    self.input_dim = input_dim    # 62
    self.output_dim = output_dim  # decision probability: 1
    self.input_size = input_size  # 28
    # input: 62, output_dim: 1, input_size: 28

    self.fc = nn.Sequential(
        nn.Linear(self.input_dim, 1024),  # 62 -> 1024
        nn.BatchNorm1d(1024),
        nn.ReLU(),
        nn.Linear(1024, 128 * (self.input_size // 4) * (self.input_size // 4)),  # (1024, 128*7*7)
        nn.BatchNorm1d(128 * (self.input_size // 4) * (self.input_size // 4)),
        nn.ReLU(),
    )
    self.deconv = nn.Sequential(
        nn.ConvTranspose2d(128, 64, 4, 2, 1),
        nn.BatchNorm2d(64),  # yields (batch_size, 64, 14, 14)
        nn.ReLU(),
        nn.ConvTranspose2d(64, self.output_dim, 4, 2, 1),  # yields (batch_size, 1, 28, 28)
        nn.Tanh(),
    )
    utils.initialize_weights(self)

def __init__(self, input_dim=100, output_dim=1, input_size=32):
    super(generator, self).__init__()
    self.input_dim = input_dim
    self.output_dim = output_dim
    self.input_size = input_size

    self.fc = nn.Sequential(
        nn.Linear(self.input_dim, 1024),
        # nn.BatchNorm1d(1024),
        nn.ReLU(),
        nn.Linear(1024, 64 * (8 * 4)),
        # nn.BatchNorm1d(64 * (8 * 4)),
        nn.ReLU(),
    )
    self.deconv = nn.Sequential(
        nn.ConvTranspose2d(64, 32, (4, 2), 2, 1),
        # nn.BatchNorm2d(32),
        nn.ReLU(),
        nn.ConvTranspose2d(32, 16, (4, 3), 2, 1),
        # nn.BatchNorm2d(16),
        nn.ReLU(),
        nn.ConvTranspose2d(16, self.output_dim, (4, 3), 2, 1),
        nn.Tanh(),
    )
    utils.initialize_weights(self)

def __init__(self, dataset='mnist'):
    super(generator, self).__init__()
    if dataset == 'mnist' or dataset == 'fashion-mnist':
        self.input_height = 28
        self.input_width = 28
        self.input_dim = 62
        self.output_dim = 1
    elif dataset == 'celebA':
        self.input_height = 64
        self.input_width = 64
        self.input_dim = 62
        self.output_dim = 3

    self.fc = nn.Sequential(
        nn.Linear(self.input_dim, 1024),
        nn.BatchNorm1d(1024),
        nn.ReLU(),
        nn.Linear(1024, 128 * (self.input_height // 4) * (self.input_width // 4)),
        nn.BatchNorm1d(128 * (self.input_height // 4) * (self.input_width // 4)),
        nn.ReLU(),
    )
    self.deconv = nn.Sequential(
        nn.ConvTranspose2d(128, 64, 4, 2, 1),
        nn.BatchNorm2d(64),
        nn.ReLU(),
        nn.ConvTranspose2d(64, self.output_dim, 4, 2, 1),
        nn.Sigmoid(),
    )
    utils.initialize_weights(self)

def __init__(self, dataset='mnist'):
    super(discriminator, self).__init__()
    if dataset == 'mnist' or dataset == 'fashion-mnist':
        self.input_height = 28
        self.input_width = 28
        self.input_dim = 1
        self.output_dim = 1
    elif dataset == 'celebA':
        self.input_height = 64
        self.input_width = 64
        self.input_dim = 3
        self.output_dim = 1

    self.conv = nn.Sequential(
        nn.Conv2d(self.input_dim, 64, 4, 2, 1),
        nn.LeakyReLU(0.2),
        nn.Conv2d(64, 128, 4, 2, 1),
        nn.BatchNorm2d(128),
        nn.LeakyReLU(0.2),
    )
    self.fc = nn.Sequential(
        nn.Linear(128 * (self.input_height // 4) * (self.input_width // 4), 1024),
        nn.BatchNorm1d(1024),
        nn.LeakyReLU(0.2),
        nn.Linear(1024, self.output_dim),
        nn.Sigmoid(),
    )
    utils.initialize_weights(self)

def __init__(self, input_dim=1, output_dim=1, input_size=32, class_num=10):
    super(discriminator, self).__init__()
    self.input_dim = input_dim
    self.output_dim = output_dim
    self.input_size = input_size
    self.class_num = class_num

    self.conv = nn.Sequential(
        nn.Conv2d(self.input_dim, 64, 4, 2, 1),
        nn.LeakyReLU(0.2),
        nn.Conv2d(64, 128, 4, 2, 1),
        nn.BatchNorm2d(128),
        nn.LeakyReLU(0.2),
    )
    self.fc1 = nn.Sequential(
        nn.Linear(128 * (self.input_size // 4) * (self.input_size // 4), 1024),
        nn.BatchNorm1d(1024),
        nn.LeakyReLU(0.2),
    )
    self.dc = nn.Sequential(
        nn.Linear(1024, self.output_dim),
        nn.Sigmoid(),
    )
    self.cl = nn.Sequential(
        nn.Linear(1024, self.class_num),
    )
    utils.initialize_weights(self)

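# Hedged forward sketch for the ACGAN-style discriminator above (an
# assumption; the original forward is not shown): a shared trunk with a
# real/fake head (dc) and an auxiliary classification head (cl).
def forward(self, input):
    x = self.conv(input)
    x = x.view(-1, 128 * (self.input_size // 4) * (self.input_size // 4))
    x = self.fc1(x)
    d = self.dc(x)  # real/fake probability
    c = self.cl(x)  # class logits
    return d, c
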
def __init__(self, dataset='mnist'):
    super(discriminator, self).__init__()
    if dataset == 'mnist' or dataset == 'fashion-mnist':
        self.input_height = 28
        self.input_width = 28
        self.input_dim = 1
        self.output_dim = 1
    elif dataset == 'celebA':
        self.input_height = 64
        self.input_width = 64
        self.input_dim = 3
        self.output_dim = 1

    self.conv = nn.Sequential(
        nn.Conv2d(self.input_dim, 64, 4, 2, 1),
        nn.LeakyReLU(0.2),
        nn.Conv2d(64, 128, 4, 2, 1),
        nn.BatchNorm2d(128),
        nn.LeakyReLU(0.2),
    )
    self.fc = nn.Sequential(
        nn.Linear(128 * (self.input_height // 4) * (self.input_width // 4), 1024),
        nn.BatchNorm1d(1024),
        nn.LeakyReLU(0.2),
        nn.Linear(1024, self.output_dim),
        nn.Sigmoid(),
    )
    utils.initialize_weights(self)

def __init__(self, input_dim=100, output_dim=1, input_size=32, class_num=10):
    super(generator, self).__init__()
    self.input_dim = input_dim
    self.output_dim = output_dim
    self.input_size = input_size
    self.class_num = class_num

    self.fc = nn.Sequential(
        nn.Linear(self.input_dim + self.class_num, 1024),
        nn.BatchNorm1d(1024),
        nn.ReLU(),
        nn.Linear(1024, 128 * (self.input_size // 4) * (self.input_size // 4)),
        nn.BatchNorm1d(128 * (self.input_size // 4) * (self.input_size // 4)),
        nn.ReLU(),
    )
    self.deconv = nn.Sequential(
        nn.ConvTranspose2d(128, 64, 4, 2, 1),
        nn.BatchNorm2d(64),
        nn.ReLU(),
        nn.ConvTranspose2d(64, self.output_dim, 4, 2, 1),
        nn.Tanh(),
    )
    utils.initialize_weights(self)

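# Hedged forward sketch for the conditional generator above (an assumption;
# the original forward is not shown): the class label, presumably one-hot,
# is concatenated with the noise to match input_dim + class_num.
def forward(self, input, label):
    x = torch.cat([input, label], dim=1)
    x = self.fc(x)
    x = x.view(-1, 128, self.input_size // 4, self.input_size // 4)
    return self.deconv(x)
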
def __init__(self, num_cls):
    super(Discriminator, self).__init__()
    self.input_dim = 3
    self.num_cls = num_cls

    self.conv = nn.Sequential(
        # 64 -> 32
        nn.Conv2d(self.input_dim, 32, 4, 2, 1, bias=False),
        nn.BatchNorm2d(32),
        nn.LeakyReLU(),
        # 32 -> 16
        nn.Conv2d(32, 64, 4, 2, 1, bias=False),
        nn.BatchNorm2d(64),
        nn.LeakyReLU(0.2),
        # 16 -> 8
        nn.Conv2d(64, 128, 4, 2, 1, bias=False),
        nn.BatchNorm2d(128),
        nn.LeakyReLU(0.2),
        # 8 -> 4
        nn.Conv2d(128, 256, 4, 2, 1, bias=False),
        nn.BatchNorm2d(256),
        nn.LeakyReLU(0.2),
    )
    self.convCls = nn.Sequential(
        nn.Conv2d(256, self.num_cls, 4, bias=False),
    )
    self.convGAN = nn.Sequential(
        nn.Conv2d(256, 1, 4, bias=False),
        nn.Sigmoid(),
    )
    utils.initialize_weights(self)

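# Hedged forward sketch (an assumption; the original forward is not shown):
# a shared trunk with fully convolutional heads for the GAN score and the
# class logits, both reduced to 1x1 spatial resolution by the 4x4 kernels.
def forward(self, input):
    feat = self.conv(input)                         # (N, 256, 4, 4)
    d = self.convGAN(feat).view(-1, 1)              # real/fake probability
    c = self.convCls(feat).view(-1, self.num_cls)   # class logits
    return d, c
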
def __init__(self):
    super(Encoder, self).__init__()
    self.input_dim = 3
    self.input_height = 64
    self.input_width = 64
    self.output_dim = 50

    self.conv = nn.Sequential(
        nn.Conv2d(self.input_dim, 64, 3, 4, 2, bias=True),
        nn.BatchNorm2d(64),
        # nn.InstanceNorm2d(64, affine=True),
        nn.ReLU(),
        nn.Conv2d(64, 128, 4, 2, 1, bias=True),
        nn.BatchNorm2d(128),
        # nn.InstanceNorm2d(128, affine=True),
        nn.ReLU(),
        nn.Conv2d(128, 256, 4, 2, 1, bias=True),
        nn.BatchNorm2d(256),
        # nn.InstanceNorm2d(256, affine=True),
        nn.ReLU(),
        nn.Conv2d(256, 512, 4, 2, 1, bias=True),
        nn.BatchNorm2d(512),
        # nn.InstanceNorm2d(512, affine=True),
        nn.ReLU(),
        nn.Conv2d(512, self.output_dim, 4, 2, 1, bias=True),
        nn.Sigmoid(),
    )
    utils.initialize_weights(self)

def __init__(self, params):
    super(GenerativeNetwork, self).__init__()
    self.params = params

    generative_layers = [
        nn.Linear(in_features=self.params.latent_dim, out_features=self.params.hidden_dim),
        GELU(),
        UnFlatten(self.params.channels[3], self.params.hidden_height, self.params.hidden_width),
        nn.ConvTranspose2d(in_channels=self.params.channels[3],
                           out_channels=self.params.channels[4],
                           kernel_size=(self.params.kernel_size[6], self.params.kernel_size[7]),
                           stride=self.params.stride[3]),
        nn.BatchNorm2d(self.params.channels[4]),
        GELU(),
        nn.ConvTranspose2d(in_channels=self.params.channels[4],
                           out_channels=self.params.channels[5],
                           kernel_size=(self.params.kernel_size[8], self.params.kernel_size[9]),
                           stride=self.params.stride[4]),
        nn.BatchNorm2d(self.params.channels[5]),
        GELU(),
        nn.ConvTranspose2d(in_channels=self.params.channels[5],
                           out_channels=self.params.channels[6],
                           kernel_size=(self.params.kernel_size[10], self.params.kernel_size[11]),
                           stride=self.params.stride[5]),
        nn.Sigmoid(),
    ]
    self.decoder = nn.Sequential(*generative_layers)
    initialize_weights(self)

def __init__(self, channel, kernel, stride, padding):
    super(resnet_block, self).__init__()
    self.channel = channel
    self.kernel = kernel
    self.stride = stride
    self.padding = padding

    self.conv1 = nn.Conv2d(channel, channel, kernel, stride, padding)
    self.conv1_norm = nn.InstanceNorm2d(channel)
    self.conv2 = nn.Conv2d(channel, channel, kernel, stride, padding)
    self.conv2_norm = nn.InstanceNorm2d(channel)
    utils.initialize_weights(self)

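# Minimal forward sketch for resnet_block, assuming the standard residual
# pattern (conv -> norm -> relu -> conv -> norm, then a skip connection)
# and torch.nn.functional imported as F; the original forward is not shown.
def forward(self, input):
    x = F.relu(self.conv1_norm(self.conv1(input)), True)
    x = self.conv2_norm(self.conv2(x))
    return input + x  # residual connection
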
def __init__(self, in_nc, out_nc, nf=32, nb=6):
    super(generator, self).__init__()
    self.input_nc = in_nc
    self.output_nc = out_nc
    self.nf = nf
    self.nb = nb

    self.down_convs = nn.Sequential(
        nn.Conv2d(in_nc, nf, 7, 1, 3),       # k7n64s1
        nn.InstanceNorm2d(nf),
        nn.ReLU(True),
        nn.Conv2d(nf, nf * 2, 3, 2, 1),      # k3n128s2
        nn.Conv2d(nf * 2, nf * 2, 3, 1, 1),  # k3n128s1
        nn.InstanceNorm2d(nf * 2),
        nn.ReLU(True),
        nn.Conv2d(nf * 2, nf * 4, 3, 2, 1),  # k3n256s2
        nn.Conv2d(nf * 4, nf * 4, 3, 1, 1),  # k3n256s1
        nn.InstanceNorm2d(nf * 4),
        nn.ReLU(True),
    )

    self.resnet_blocks = []
    for i in range(nb):
        self.resnet_blocks.append(resnet_block(nf * 4, 3, 1, 1))
    self.resnet_blocks = nn.Sequential(*self.resnet_blocks)

    self.up_convs = nn.Sequential(
        nn.ConvTranspose2d(nf * 4, nf * 2, 3, 2, 1, 1),  # k3n128s1/2
        nn.Conv2d(nf * 2, nf * 2, 3, 1, 1),              # k3n128s1
        nn.InstanceNorm2d(nf * 2),
        nn.ReLU(True),
        nn.ConvTranspose2d(nf * 2, nf, 3, 2, 1, 1),      # k3n64s1/2
        nn.Conv2d(nf, nf, 3, 1, 1),                      # k3n64s1
        nn.InstanceNorm2d(nf),
        nn.ReLU(True),
        nn.Conv2d(nf, out_nc, 7, 1, 3),                  # k7n3s1
        nn.Tanh(),
    )
    utils.initialize_weights(self)

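# Hedged forward sketch for the generator above (an assumption; the original
# forward is not shown): encoder -> residual blocks -> decoder, the standard
# CartoonGAN-style layout implied by the three module groups.
def forward(self, input):
    x = self.down_convs(input)
    x = self.resnet_blocks(x)
    return self.up_convs(x)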