Example #1
    def __init__(self, nc: int = 3, nc_out: int = 10, ndf: int = 32):
        super(DCEncoder, self).__init__()

        self.main = nn.Sequential(
            spectral_norm_init(nn.Conv2d(nc, ndf, 4, 2, 1, bias=False)),
            nn.BatchNorm2d(ndf),
            nn.LeakyReLU(0.2, inplace=True),
            # nn.InstanceNorm2d(ndf, affine=True),
            # input is (ndf) x 64 x 64
            spectral_norm_init(nn.Conv2d(ndf, ndf, 4, 2, 1, bias=False)),
            nn.BatchNorm2d(ndf),
            nn.LeakyReLU(0.2, inplace=True),
            # nn.InstanceNorm2d(ndf, affine=True),
            # state size. (ndf) x 32 x 32
            SelfAttention2d(ndf),
            spectral_norm_init(nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False)),
            nn.BatchNorm2d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # nn.InstanceNorm2d(ndf * 2, affine=True),
            # state size. (ndf*2) x 16 x 16
            spectral_norm_init(nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1,
                                         bias=False)),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # nn.InstanceNorm2d(ndf * 4, affine=True),
            # state size. (ndf*4) x 8 x 8
            spectral_norm_init(nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1,
                                         bias=False)),
            nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
        )

        self.linear = spectral_norm_init(nn.Linear(ndf * 8 * 4 * 4, nc_out))
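
The helper `spectral_norm_init` used throughout these examples is not defined on this page. A minimal sketch of what it plausibly does, assuming it combines DCGAN-style weight initialization with PyTorch's built-in `nn.utils.spectral_norm` and forwards `n_power_iterations` (both the init scheme and the pass-through are assumptions, not taken from this page):

import torch.nn as nn

def spectral_norm_init(module: nn.Module, n_power_iterations: int = 1) -> nn.Module:
    # Assumed: DCGAN-style init for conv / linear weights, then spectral norm.
    if isinstance(module, (nn.Conv2d, nn.ConvTranspose2d, nn.Linear)):
        nn.init.normal_(module.weight, 0.0, 0.02)
        if module.bias is not None:
            nn.init.zeros_(module.bias)
    return nn.utils.spectral_norm(module, n_power_iterations=n_power_iterations)
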
Example #2

    def __init__(self, noise_size: int, image_size: int, ngf=64):
        super(MeasureToImage, self).__init__()
        n_up = int(math.log2(image_size / 4))
        assert 4 * (2**n_up) == image_size
        nc = 3

        self.preproc = nn.Sequential(
            nn.Linear(noise_size, noise_size, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
        )
        layers = [
            spectral_norm_init(
                nn.ConvTranspose2d(noise_size, ngf * 8, 4, 1, 0, bias=False)),
            nn.InstanceNorm2d(ngf * 8),
            nn.LeakyReLU(0.2, inplace=True),
        ]

        nc_l_next = -1
        for l in range(n_up):

            nc_l = max(ngf, (ngf * 8) // 2**l)
            nc_l_next = max(ngf, nc_l // 2)

            layers += [
                spectral_norm_init(
                    nn.ConvTranspose2d(nc_l,
                                       nc_l_next,
                                       4,
                                       stride=2,
                                       padding=1,
                                       bias=False)),
                nn.InstanceNorm2d(nc_l_next),
                nn.LeakyReLU(0.2, inplace=True),
            ]

            if l == 2:
                layers += [SelfAttention2d(nc_l_next)]

        layers += [nn.Conv2d(nc_l_next, nc, 3, 1, 1, bias=False)]

        self.main = nn.Sequential(*layers)
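
MeasureToImage derives the number of 2x upsampling stages from the target resolution: the initial transposed convolution produces a 4x4 map, and each stride-2 transposed conv doubles the spatial side. A quick standalone check of that arithmetic:

import math

for image_size in (32, 64, 128):
    n_up = int(math.log2(image_size / 4))   # number of 2x upsampling stages
    assert 4 * (2 ** n_up) == image_size    # same assertion as in the constructor
    print(image_size, n_up)                 # 32 -> 3, 64 -> 4, 128 -> 5
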
Example #3
    def __init__(self, noise: Noise, image_size: int, ngf=64):
        super(DCGenerator, self).__init__()
        n_up = int(math.log2(image_size / 4))
        assert 4 * (2**n_up) == image_size
        nc = 3

        layers = [
            nn.utils.spectral_norm(
                nn.ConvTranspose2d(noise.size(), ngf * 8, 4, 1, 0,
                                   bias=False)),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
        ]

        nc_l_next = -1
        for l in range(n_up):

            nc_l = max(ngf, (ngf * 8) // 2**l)
            nc_l_next = max(ngf, nc_l // 2)

            layers += [
                nn.utils.spectral_norm(
                    nn.ConvTranspose2d(nc_l,
                                       nc_l_next,
                                       4,
                                       stride=2,
                                       padding=1,
                                       bias=False)),
                nn.BatchNorm2d(nc_l_next),
                nn.ReLU(True),
            ]

            if l == 2:
                layers += [SelfAttention2d(nc_l_next)]

        layers += [nn.Conv2d(nc_l_next, nc, 3, 1, 1, bias=False)]

        self.main = nn.Sequential(*layers)
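
The loop in DCGenerator halves the channel count at every upsampling stage but never lets it fall below `ngf`, and splices `SelfAttention2d` in after the third stage (`l == 2`). Tracing the schedule for `ngf=64` and a 64x64 output (`n_up=4`):

ngf, n_up = 64, 4
for l in range(n_up):
    nc_l = max(ngf, (ngf * 8) // 2 ** l)   # input channels of stage l
    nc_l_next = max(ngf, nc_l // 2)        # output channels of stage l
    print(l, nc_l, nc_l_next)
# 0 512 256
# 1 256 128
# 2 128  64   <- SelfAttention2d(64) follows this stage
# 3  64  64
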
Example #4
    def __init__(self, nc=3, ndf=32):
        super(ResDCDiscriminator, self).__init__()

        self.main = nn.Sequential(
            # Down2xResidualBlock(nc, ndf, PaddingType.REFLECT, nn.BatchNorm2d,
            #                     nn.LeakyReLU(0.2, inplace=True), use_spectral_norm=True),
            nn.utils.spectral_norm(nn.Conv2d(nc, ndf, 4, 2, 1, bias=False)),
            nn.BatchNorm2d(ndf),
            nn.LeakyReLU(0.2, inplace=True),
            # Down2xResidualBlock(ndf, ndf, PaddingType.REFLECT, nn.BatchNorm2d,
            #                     nn.LeakyReLU(0.2, inplace=True), use_spectral_norm=True),
            nn.utils.spectral_norm(nn.Conv2d(ndf, ndf, 4, 2, 1, bias=False)),
            nn.BatchNorm2d(ndf),
            nn.LeakyReLU(0.2, inplace=True),
            SelfAttention2d(ndf),
            Down2xResidualBlock(ndf,
                                ndf * 2,
                                PaddingType.REFLECT,
                                nn.BatchNorm2d,
                                nn.LeakyReLU(0.2, inplace=True),
                                use_spectral_norm=True),
            Down2xResidualBlock(ndf * 2,
                                ndf * 4,
                                PaddingType.REFLECT,
                                nn.BatchNorm2d,
                                nn.LeakyReLU(0.2, inplace=True),
                                use_spectral_norm=True),
            Down2xResidualBlock(ndf * 4,
                                ndf * 8,
                                PaddingType.REFLECT,
                                nn.BatchNorm2d,
                                nn.LeakyReLU(0.2, inplace=True),
                                use_spectral_norm=True),
        )

        self.linear = nn.utils.spectral_norm(
            nn.Linear(ndf * 8 * 4 * 4, 10, bias=False))
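
`SelfAttention2d`, used by both the discriminators and the generators on this page, is also not defined here. Assuming it is the usual SAGAN-style attention over spatial positions with a learned residual gate, a minimal sketch could look like the following (the project's real implementation may differ):

import torch
import torch.nn as nn
import torch.nn.functional as F

class SelfAttention2d(nn.Module):
    def __init__(self, channels: int):
        super().__init__()
        self.query = nn.Conv2d(channels, channels // 8, 1)
        self.key = nn.Conv2d(channels, channels // 8, 1)
        self.value = nn.Conv2d(channels, channels, 1)
        self.gamma = nn.Parameter(torch.zeros(1))   # residual gate, starts at 0

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        n, c, h, w = x.shape
        q = self.query(x).flatten(2).transpose(1, 2)   # (N, HW, C//8)
        k = self.key(x).flatten(2)                     # (N, C//8, HW)
        v = self.value(x).flatten(2)                   # (N, C, HW)
        attn = F.softmax(q @ k, dim=-1)                # (N, HW, HW)
        out = (v @ attn.transpose(1, 2)).view(n, c, h, w)
        return self.gamma * out + x
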
Example #5
    def __init__(self, noise: Noise, image_size: int, ngf=32):
        super(ResDCGenerator, self).__init__()
        n_up = int(math.log2(image_size / 4))
        assert 4 * (2**n_up) == image_size
        nc = 3

        layers = [
            nn.utils.spectral_norm(
                nn.Linear(noise.size(), noise.size(), bias=False)),
            View(-1, noise.size(), 1, 1),
            nn.utils.spectral_norm(
                nn.ConvTranspose2d(noise.size(), ngf * 8, 4, 1, 0,
                                   bias=False)),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
        ]

        nc_l_next = -1
        for l in range(n_up):

            nc_l = max(ngf, (ngf * 8) // 2**l)
            nc_l_next = max(ngf, nc_l // 2)

            layers += [
                Up2xResidualBlock(nc_l,
                                  nc_l_next,
                                  PaddingType.REFLECT,
                                  nn.BatchNorm2d,
                                  use_spectral_norm=True)
            ]

            if l == 2:
                layers += [SelfAttention2d(nc_l_next)]

        layers += [nn.Conv2d(nc_l_next, nc, 3, 1, 1, bias=False)]

        self.main = nn.Sequential(*layers)
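
`View(-1, noise.size(), 1, 1)` suggests a small reshape module that turns the spectrally normalized linear output into a 1x1 feature map for the transposed-conv stack. A plausible sketch of such a helper (assumed, not shown on this page):

import torch
import torch.nn as nn

class View(nn.Module):
    def __init__(self, *shape: int):
        super().__init__()
        self.shape = shape   # e.g. (-1, noise_size, 1, 1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x.view(*self.shape)
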
Example #6

    def __init__(self, noise_size: int, image_size: int, ngf=32):
        super(ResMeasureToImage, self).__init__()
        n_up = int(math.log2(image_size / 4))
        assert 4 * (2**n_up) == image_size
        nc = 3

        layers = [
            spectral_norm_init(nn.Linear(noise_size,
                                         noise_size // 2,
                                         bias=False),
                               n_power_iterations=10),
            nn.LeakyReLU(0.2, inplace=True),  #nn.ReLU(inplace=True),
            spectral_norm_init(nn.Linear(noise_size // 2,
                                         noise_size // 2,
                                         bias=False),
                               n_power_iterations=10),
            nn.LeakyReLU(0.2, inplace=True),
            View(-1, noise_size // 2, 1, 1),
            spectral_norm_init(nn.ConvTranspose2d(noise_size // 2,
                                                  ngf * 4,
                                                  4,
                                                  1,
                                                  0,
                                                  bias=False),
                               n_power_iterations=10),
            nn.InstanceNorm2d(ngf * 4),  # nn.InstanceNorm2d(ngf * 8),
            nn.LeakyReLU(0.2, inplace=True),
        ]

        nc_l_next = -1
        for l in range(n_up):

            nc_l = max(ngf // 2, (ngf * 4) // 2**l)
            nc_l_next = max(ngf // 2, nc_l // 2)

            layers += [
                Up2xResidualBlock(nc_l,
                                  nc_l_next,
                                  PaddingType.REFLECT,
                                  nn.InstanceNorm2d,
                                  use_spectral_norm=True,
                                  activation=nn.LeakyReLU(
                                      0.2, inplace=True)),  #nn.InstanceNorm2d
                PooledResidualBlock(nc_l_next,
                                    nc_l_next,
                                    nc_l_next,
                                    nn.Identity(),
                                    PaddingType.REFLECT,
                                    nn.InstanceNorm2d,
                                    use_spectral_norm=True,
                                    activation=nn.LeakyReLU(0.2, inplace=True))
            ]

            if l == 2:
                layers += [
                    # nn.Dropout2d(p=0.5),
                    SelfAttention2d(nc_l_next)
                ]

        layers += [nn.Conv2d(nc_l_next, nc, 3, 1, 1, bias=False)]

        self.main = nn.Sequential(*layers)
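
Unlike the other generators here, ResMeasureToImage starts its stack at `ngf * 4` channels and floors the schedule at `ngf // 2` rather than `ngf`. Tracing it for the default `ngf=32` and a 64x64 output (`n_up=4`):

ngf, n_up = 32, 4
for l in range(n_up):
    nc_l = max(ngf // 2, (ngf * 4) // 2 ** l)   # input channels of stage l
    nc_l_next = max(ngf // 2, nc_l // 2)        # output channels of stage l
    print(l, nc_l, nc_l_next)
# 0 128 64
# 1  64 32
# 2  32 16   <- SelfAttention2d(16) follows this pair of residual blocks
# 3  16 16
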