Code Example #1
	def __init__(self):
		super(discriminator_cls, self).__init__()
		self.image_size = 64
		self.num_channels = 3
		self.embed_dim = 1024
		self.projected_embed_dim = 128
		self.ndf = 64
		self.B_dim = 128
		self.C_dim = 16

		self.netD_1 = nn.Sequential(
			# input is (nc) x 64 x 64
			nn.Conv2d(self.num_channels, self.ndf, 4, 2, 1, bias=False),
			nn.LeakyReLU(0.2, inplace=True),
			# state size. (ndf) x 32 x 32
			nn.Conv2d(self.ndf, self.ndf * 2, 4, 2, 1, bias=False),
			nn.BatchNorm2d(self.ndf * 2),
			nn.LeakyReLU(0.2, inplace=True),
			# state size. (ndf*2) x 16 x 16
			nn.Conv2d(self.ndf * 2, self.ndf * 4, 4, 2, 1, bias=False),
			nn.BatchNorm2d(self.ndf * 4),
			nn.LeakyReLU(0.2, inplace=True),
			# state size. (ndf*4) x 8 x 8
			nn.Conv2d(self.ndf * 4, self.ndf * 8, 4, 2, 1, bias=False),
			nn.BatchNorm2d(self.ndf * 8),
			nn.LeakyReLU(0.2, inplace=True),
		)

		self.projector = Concat_embed(self.embed_dim, self.projected_embed_dim)

		self.netD_2 = nn.Sequential(
			# state size. (ndf*8) x 4 x 4
			nn.Conv2d(self.ndf * 8 + self.projected_embed_dim, 1, 4, 1, 0, bias=False),
			nn.Sigmoid()
			)
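
The snippet above only shows __init__; the forward pass is not included. Below is a minimal sketch of how these modules are usually wired together; this is an assumption, not the project's actual forward(), and it presumes that Concat_embed appends the projected text embedding as extra channels on the 4x4 feature map.

# Hypothetical forward pass for the discriminator above (not taken from the source
# project); `d` is an instance of the class whose __init__ is shown.
def gan_cls_forward(d, images, text_embed):
    feat = d.netD_1(images)                # (N, ndf*8, 4, 4) image features
    fused = d.projector(feat, text_embed)  # assumed: (N, ndf*8 + 128, 4, 4)
    score = d.netD_2(fused)                # (N, 1, 1, 1) after the 4x4 conv + Sigmoid
    return score.view(-1), feat            # per-sample probability and raw features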
Code Example #2
	def __init__(self, dataset='youtubers'):
		super(discriminator, self).__init__()
		self.image_size = 64
		self.num_channels = 3
		self.embed_dim = 62
		self.projected_embed_dim = 128
		self.ndf = 64
		self.B_dim = 128
		self.C_dim = 16
		self.dataset_name = dataset
		self.conv1 = SpectralNorm(nn.Conv2d(self.num_channels, self.ndf, 4, 2, 1, bias=False))
		self.conv2 = SpectralNorm(nn.Conv2d(self.ndf, self.ndf * 2, 4, 2, 1, bias=False))
		self.conv3 = SpectralNorm(nn.Conv2d(self.ndf * 2, self.ndf * 4, 4, 2, 1, bias=False))
		self.conv4 = SpectralNorm(nn.Conv2d(self.ndf * 4, self.ndf * 8, 4, 2, 1, bias=False))
		self.disc_linear = nn.Linear(self.ndf * 1, self.ndf)
		self.disc_linear2 = nn.Linear(31, 31)
		self.aux_linear = nn.Linear(4*4*512, self.embed_dim+1)
		self.softmax = nn.Softmax(dim=1)  # explicit dim avoids the implicit-dimension warning
		self.sigmoid = nn.Sigmoid()
		self.projector = Concat_embed(self.embed_dim, self.projected_embed_dim)

		self.netD_2 = nn.Sequential(
			# state size. (ndf*8) x 4 x 4
			nn.Conv2d(self.ndf * 8, 1, 4, 1, 0, bias=False),
			#nn.Conv2d(self.ndf * 8 + self.projected_embed_dim, 1, 4, 1, 0, bias=False),
			#nn.Sigmoid()
			)
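
This variant keeps four spectral-norm convolutions and an auxiliary classifier (aux_linear maps the flattened 4x4x512 features to embed_dim + 1 classes), which suggests an AC-GAN style head. The sketch below shows one plausible way to use them in a forward pass; it is an assumption, since the real forward() is not shown, and the leaky-ReLU slope is guessed from the other examples.

import torch.nn.functional as F

# Hypothetical forward pass for the spectral-norm variant above (not from the repo).
def sn_acgan_forward(d, images):
    h = F.leaky_relu(d.conv1(images), 0.2)       # (N, 64, 32, 32)
    h = F.leaky_relu(d.conv2(h), 0.2)            # (N, 128, 16, 16)
    h = F.leaky_relu(d.conv3(h), 0.2)            # (N, 256, 8, 8)
    h = F.leaky_relu(d.conv4(h), 0.2)            # (N, 512, 4, 4)
    realness = d.netD_2(h).view(-1)              # raw score; no Sigmoid (hinge/WGAN-style loss)
    class_logits = d.aux_linear(h.view(h.size(0), -1))  # (N, embed_dim + 1)
    return realness, class_logits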
Code Example #3
    def __init__(self, improved=False, dataset='youtubers'):
        super(discriminator, self).__init__()
        self.image_size = 64
        self.num_channels = 3
        self.embed_dim = 1024
        self.projected_embed_dim = 128
        self.ndf = 64
        self.dataset_name = dataset

        if improved:
            self.netD_1 = nn.Sequential(
                # input is (nc) x 64 x 64
                nn.Conv2d(self.num_channels, self.ndf, 4, 2, 1, bias=False),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf) x 32 x 32
                nn.Conv2d(self.ndf, self.ndf * 2, 4, 2, 1, bias=False),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*2) x 16 x 16
                nn.Conv2d(self.ndf * 2, self.ndf * 4, 4, 2, 1, bias=False),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*4) x 8 x 8
                nn.Conv2d(self.ndf * 4, self.ndf * 8, 4, 2, 1, bias=False),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*8) x 4 x 4
            )
        else:
            self.netD_1 = nn.Sequential(
                # input is (nc) x 64 x 64
                nn.Conv2d(self.num_channels, self.ndf, 4, 2, 1, bias=False),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf) x 32 x 32
                SpectralNorm(
                    nn.Conv2d(self.ndf, self.ndf * 2, 4, 2, 1, bias=False)),
                #nn.BatchNorm2d(self.ndf * 2),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*2) x 16 x 16
                SpectralNorm(
                    nn.Conv2d(self.ndf * 2, self.ndf * 4, 4, 2, 1,
                              bias=False)),
                #nn.BatchNorm2d(self.ndf * 4),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*4) x 8 x 8
                SpectralNorm(
                    nn.Conv2d(self.ndf * 4, self.ndf * 8, 4, 2, 1,
                              bias=False)),
                #nn.BatchNorm2d(self.ndf * 8),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*8) x 4 x 4
            )

        self.projector = Concat_embed(self.embed_dim, self.projected_embed_dim)
        # Uncomment the first layer for concatenation and comment out the second; for projection, do the opposite.
        #TODO: Handle this!!!
        self.netD_2 = nn.Sequential(
            nn.Conv2d(self.ndf * 8 + 64, 1, 4, 1, 0, bias=False)
            #nn.Conv2d(self.ndf * 8 + self.projected_embed_dim, 1, 4, 1, 0, bias=False)
        )
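
The TODO above toggles between two ways of conditioning the final score on the text embedding. The self-contained illustration below shows both options as they are commonly implemented; the module names, the 64/128 channel counts, and the exact projection form are assumptions for illustration, not the repository's code.

import torch
import torch.nn as nn

class ConcatCondition(nn.Module):
    """Concatenation conditioning: tile a projected text embedding over the
    4x4 feature map and score the stacked channels with a 4x4 conv."""
    def __init__(self, feat_dim, embed_dim, proj_dim):
        super().__init__()
        self.proj = nn.Linear(embed_dim, proj_dim)
        self.out = nn.Conv2d(feat_dim + proj_dim, 1, 4, 1, 0, bias=False)

    def forward(self, feat, embed):
        p = self.proj(embed)[:, :, None, None].expand(-1, -1, feat.size(2), feat.size(3))
        return self.out(torch.cat([feat, p], dim=1)).view(-1)

class ProjectionCondition(nn.Module):
    """Projection conditioning: an unconditional score plus an inner product
    between the embedded text and globally pooled image features."""
    def __init__(self, feat_dim, embed_dim):
        super().__init__()
        self.out = nn.Conv2d(feat_dim, 1, 4, 1, 0, bias=False)
        self.embed_proj = nn.Linear(embed_dim, feat_dim)

    def forward(self, feat, embed):
        score = self.out(feat).view(-1)
        pooled = feat.sum(dim=(2, 3))  # global sum pooling over the 4x4 grid
        return score + (self.embed_proj(embed) * pooled).sum(dim=1)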
Code Example #4
    def __init__(self, dataset='youtubers'):
        super(discriminator, self).__init__()
        self.image_size = 64
        self.num_channels = 3
        self.embed_dim = 62
        self.projected_embed_dim = 128
        self.ndf = 64
        self.B_dim = 128
        self.C_dim = 16
        self.dataset_name = dataset
        self.conv1 = SpectralNorm(
            nn.Conv2d(self.num_channels, self.ndf, 4, 2, 1, bias=False))
        self.conv2 = SpectralNorm(
            nn.Conv2d(self.ndf, self.ndf * 2, 4, 2, 1, bias=False))
        self.conv3 = SpectralNorm(
            nn.Conv2d(self.ndf * 2, self.ndf * 4, 4, 2, 1, bias=False))
        self.conv4 = SpectralNorm(
            nn.Conv2d(self.ndf * 4, self.ndf * 8, 4, 2, 1, bias=False))
        self.netD_1 = nn.Sequential(
            # input is (nc) x 64 x 64
            nn.Conv2d(self.num_channels, self.ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf) x 32 x 32
            nn.Conv2d(self.ndf, self.ndf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(self.ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*2) x 16 x 16
            nn.Conv2d(self.ndf * 2, self.ndf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(self.ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*4) x 8 x 8
            nn.Conv2d(self.ndf * 4, self.ndf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(self.ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
        )

        self.projector = Concat_embed(self.embed_dim, self.projected_embed_dim)

        self.netD_2 = nn.Sequential(
            # state size. (ndf*8) x 4 x 4
            #nn.Conv2d(self.ndf * 8 , 1, 4, 1, 0, bias=False),
            #nn.Conv2d(self.ndf * 8 + self.projected_embed_dim, 1, 4, 1, 0, bias=False),
            nn.Conv2d(self.ndf * 8 + self.projected_embed_dim,
                      1,
                      4,
                      1,
                      0,
                      bias=False),
            #nn.Sigmoid()
        )
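
All of these examples depend on a Concat_embed module that is defined elsewhere in the project. Based on how it is used (its output feeds a conv expecting ndf*8 + projected_embed_dim input channels), a plausible stand-in looks like the following; treat the interface and layer choices as assumptions rather than the repository's actual implementation.

import torch
import torch.nn as nn

class ConcatEmbed(nn.Module):
    """Hypothetical stand-in for Concat_embed: project the sentence embedding,
    tile it over the spatial grid, and concatenate it on the channel axis."""
    def __init__(self, embed_dim, projected_embed_dim):
        super().__init__()
        self.projection = nn.Sequential(
            nn.Linear(embed_dim, projected_embed_dim),
            nn.BatchNorm1d(projected_embed_dim),
            nn.LeakyReLU(0.2, inplace=True),
        )

    def forward(self, feat, embed):
        # feat: (N, C, H, W) image features; embed: (N, embed_dim) text embedding
        p = self.projection(embed)                # (N, projected_embed_dim)
        p = p[:, :, None, None].expand(-1, -1, feat.size(2), feat.size(3))
        return torch.cat([feat, p], dim=1)        # (N, C + projected_embed_dim, H, W)

With feat of shape (N, ndf*8, 4, 4) this returns (N, ndf*8 + projected_embed_dim, 4, 4), which matches the input channels expected by netD_2's first convolution.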
Code Example #5
File: gan_cls.py  Project: junweima/csc2548
    def __init__(self):
        super(discriminator, self).__init__()
        self.image_size = 64
        self.num_channels = 3
        self.embed_dim = 2400  # sized for skip-thought vectors (the original embedding size was 1024)
        self.projected_embed_dim = 128
        self.ndf = 64
        self.B_dim = 128
        self.C_dim = 16

        self.netD_1 = nn.Sequential(
            # input is (nc) x 64 x 64
            nn.Conv2d(self.num_channels, self.ndf, 4, 2, 1, bias=True),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf) x 32 x 32
            nn.Conv2d(self.ndf, self.ndf * 2, 4, 2, 1, bias=True),
            nn.BatchNorm2d(self.ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*2) x 16 x 16
            nn.Conv2d(self.ndf * 2, self.ndf * 4, 4, 2, 1, bias=True),
            nn.BatchNorm2d(self.ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*4) x 8 x 8
            nn.Conv2d(self.ndf * 4, self.ndf * 8, 4, 2, 1, bias=True),
            nn.BatchNorm2d(self.ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(self.ndf * 8, self.ndf * 2, 1, 1, 0),
            nn.BatchNorm2d(self.ndf * 2),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(self.ndf * 2, self.ndf * 2, 3, 1, 1),
            nn.BatchNorm2d(self.ndf * 2),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(self.ndf * 2, self.ndf * 8, 3, 1, 1),
            nn.BatchNorm2d(self.ndf * 8),
            nn.LeakyReLU(0.2, True)

            # output size. (ndf*8) x 4 x 4
        )

        self.projector = Concat_embed(self.embed_dim, self.projected_embed_dim)

        self.netD_2 = nn.Sequential(
            # state size. (ndf*8) x 4 x 4
            nn.Conv2d(self.ndf * 8 + self.projected_embed_dim, self.ndf * 8,
                      1),
            nn.BatchNorm2d(self.ndf * 8),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(self.ndf * 8, 1, 4, 1, 0, bias=True),
            nn.Sigmoid())
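
As a quick sanity check on the shapes, the conv arithmetic for a 64x64 input is 64 -> 32 -> 16 -> 8 -> 4, and the trailing stride-1 convolutions keep the 4x4 grid. The smoke test below assumes the full discriminator class from gan_cls.py is importable and that Concat_embed appends projected_embed_dim (128) channels; both are assumptions.

import torch

D = discriminator()                        # assumes the class above is importable
images = torch.randn(4, 3, 64, 64)
embeds = torch.randn(4, 2400)              # skip-thought sized text embeddings
feat = D.netD_1(images)
print(feat.shape)                          # expected: torch.Size([4, 512, 4, 4])
fused = D.projector(feat, embeds)          # assumed to add 128 channels
print(D.netD_2(fused).shape)               # expected: torch.Size([4, 1, 1, 1])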
Code Example #6
File: gan_cls.py  Project: junweima/csc2548
    def __init__(self):
        super(discriminator2, self).__init__()
        self.image_size = 64
        self.num_channels = 3
        self.embed_dim = 2400  # sized for skip-thought vectors (the original embedding size was 1024)
        self.projected_embed_dim = 128
        self.ndf = 64
        self.B_dim = 128
        self.C_dim = 16

        self.encode_img = nn.Sequential(
            # state size = 3 x 128 x 128
            nn.Conv2d(3, self.ndf, 4, 2, 1, bias=True),  # 64 * 64 * ndf
            nn.LeakyReLU(0.2, inplace=True),
            # state size = ndf x 64 x 64
            nn.Conv2d(self.ndf, self.ndf * 2, 3, 1, 1, bias=True),
            nn.BatchNorm2d(self.ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(self.ndf * 2, self.ndf * 4, 4, 2, 1, bias=True),
            nn.BatchNorm2d(self.ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),  # 32 * 32 * ndf * 4
            nn.Conv2d(self.ndf * 4, self.ndf * 8, 4, 2, 1, bias=True),
            nn.BatchNorm2d(self.ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),  # 16 * 16 * ndf * 8
            nn.Conv2d(self.ndf * 8, self.ndf * 8, 3, 1, 1),
            nn.BatchNorm2d(self.ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),  # 16 * 16 * ndf * 8
            nn.Conv2d(self.ndf * 8, self.ndf * 2, 4, 2, 1),
            nn.BatchNorm2d(self.ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),  # 8 * 8 * ndf * 2
            nn.Conv2d(self.ndf * 2, self.ndf * 8, 3, 1, 1),
            nn.BatchNorm2d(self.ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(self.ndf * 8, self.ndf * 8, 4, 2, 1),
            nn.BatchNorm2d(self.ndf * 8),
            nn.LeakyReLU(0.2, inplace=True)  # 4 * 4 * ndf * 8
            # nn.Conv2d(self.ndf * 8, self.ndf * 16, 4, 2, 1, bias=True),
            # nn.BatchNorm2d(self.ndf * 16),
            # nn.LeakyReLU(0.2, inplace=True),  # 8 * 8 * ndf * 16
            # nn.Conv2d(self.ndf * 16, self.ndf * 32, 4, 2, 1, bias=True),
            # nn.BatchNorm2d(self.ndf * 32),
            # nn.LeakyReLU(0.2, inplace=True),  # 4 * 4 * ndf * 32
            # conv3x3(self.ndf * 32, self.ndf * 16),
            # nn.BatchNorm2d(self.ndf * 16),
            # nn.LeakyReLU(0.2, inplace=True),   # 4 * 4 * ndf * 16
            # nn.Conv2d(self.ndf * 16, self.ndf * 8, 3, 1, 1, bias=True),
            # nn.BatchNorm2d(self.ndf * 8),
            # nn.LeakyReLU(0.2, inplace=True)   # 4 * 4 * ndf * 8
        )

        self.outlogitscond = nn.Sequential(
            conv3x3(self.ndf * 8 + self.projected_embed_dim, self.ndf * 8),
            nn.BatchNorm2d(self.ndf * 8), nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(self.ndf * 8, 1, kernel_size=4, stride=4), nn.Sigmoid())

        # self.outlogits = nn.Sequential(
        #     nn.Conv2d(self.ndf * 8, 1, kernel_size=4, stride=4),
        #     nn.Sigmoid()
        # )

        self.projector = Concat_embed(self.embed_dim, self.projected_embed_dim)

Code Example #7
    def __init__(self):
        super(discriminator, self).__init__()
        self.image_size = 64
        self.num_channels = 3
        self.embed_dim = 1024
        self.projected_embed_dim = 128
        self.ndf = 64
        self.B_dim = 128
        self.C_dim = 16

        self.netD_1 = nn.Sequential(
            # input is (nc) x 64 x 64
            nn.Conv2d(self.num_channels,
                      self.ndf,
                      3,
                      stride=2,
                      padding=1,
                      bias=False),
            nn.BatchNorm2d(self.ndf),
            nn.ReLU(True),
            # state size. (ndf) x 32 x 32
            BasicBlock(self.ndf,
                       self.ndf * 2,
                       stride=2,
                       downsample=make_downsample(self.ndf, self.ndf * 2)),
            BasicBlock(self.ndf * 2, self.ndf * 2),
            # state size. (ndf*2) x 16 x 16
            BasicBlock(self.ndf * 2,
                       self.ndf * 4,
                       stride=2,
                       downsample=make_downsample(self.ndf * 2, self.ndf * 4)),
            BasicBlock(self.ndf * 4, self.ndf * 4),
            # state size. (ndf*4) x 8 x 8
            BasicBlock(self.ndf * 4,
                       self.ndf * 8,
                       stride=2,
                       downsample=make_downsample(self.ndf * 4, self.ndf * 8)),
            BasicBlock(self.ndf * 8, self.ndf * 8),
            # state size. (ndf*8) x 4 x 4
            nn.BatchNorm2d(self.ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
        )

        self.projector = Concat_embed(self.embed_dim, self.projected_embed_dim)

        self.netD_2 = nn.Sequential(
            # state size. (ndf*8) x 4 x 4
            nn.Conv2d(self.ndf * 8 + self.projected_embed_dim,
                      1,
                      4,
                      1,
                      0,
                      bias=False),
            nn.Sigmoid())

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
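
This last variant builds netD_1 from residual BasicBlock units and a make_downsample helper, neither of which appears in these snippets. Assuming BasicBlock follows the torchvision ResNet interface, a plausible stand-in for the helper, matching its call sites above, is sketched below; it is an assumption, not the project's definition.

import torch.nn as nn

# Hypothetical make_downsample: a strided 1x1 conv plus BatchNorm that maps the
# block input to the residual branch's output shape, as BasicBlock expects.
def make_downsample(in_planes, out_planes, stride=2):
    return nn.Sequential(
        nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False),
        nn.BatchNorm2d(out_planes),
    )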