Example #1
    def __init__(self, dim, use_bias):
        super(ResnetBlock, self).__init__()
        conv_block = []
        conv_block += [
            nn.ReflectionPad2d(1),
            nn.Conv2d(dim,
                      dim,
                      kernel_size=3,
                      stride=1,
                      padding=0,
                      bias=use_bias),
            nn.InstanceNorm2d(dim, affine=True),
            nn.ReLU(True)
        ]

        conv_block += [
            nn.ReflectionPad2d(1),
            nn.Conv2d(dim,
                      dim,
                      kernel_size=3,
                      stride=1,
                      padding=0,
                      bias=use_bias),
            nn.InstanceNorm2d(dim, affine=True)
        ]

        self.conv_block = nn.Sequential(*conv_block)
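
The snippet only shows the constructor; in this style of block the matching forward pass is a plain residual sum. A minimal sketch (the method itself is not part of the example above):

    def forward(self, x):
        # residual connection: the two reflection-padded 3x3 conv stages
        # preserve H x W, so the block output is added back onto the input
        return x + self.conv_block(x)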
Example #2
    def _build_weights(self, dim_in, dim_out):
        self.conv1 = nn.Conv2d(dim_in, dim_in, 3, 1, 1)
        self.conv2 = nn.Conv2d(dim_in, dim_out, 3, 1, 1)
        if self.normalize:
            self.norm1 = nn.InstanceNorm2d(dim_in, affine=True)
            self.norm2 = nn.InstanceNorm2d(dim_in, affine=True)
        if self.learned_sc:
            self.conv1x1 = nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=False)
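
For reference, these weights are typically wired as a pre-activation residual branch plus an (optionally learned) shortcut, as in the StarGAN v2 ResBlk. A sketch under that assumption; `self.actv`, `self.downsample` and the final 1/sqrt(2) scaling are not shown in the snippet and are assumed here (requires `import math` and `import torch.nn.functional as F`):

    def _shortcut(self, x):
        if self.learned_sc:
            x = self.conv1x1(x)          # 1x1 conv matches dim_in to dim_out
        if self.downsample:
            x = F.avg_pool2d(x, 2)
        return x

    def _residual(self, x):
        if self.normalize:
            x = self.norm1(x)
        x = self.actv(x)
        x = self.conv1(x)
        if self.downsample:
            x = F.avg_pool2d(x, 2)
        if self.normalize:
            x = self.norm2(x)
        x = self.actv(x)
        x = self.conv2(x)                # dim_in -> dim_out
        return x

    def forward(self, x):
        # divide by sqrt(2) to keep the variance of the sum roughly unit
        return (self._shortcut(x) + self._residual(x)) / math.sqrt(2)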
Example #3
    def __init__(self, dim):
        super(ResNetBlock, self).__init__()
        conv_block = []
        conv_block += [nn.ReflectionPad2d(1),
                       nn.Conv2d(dim, dim, 3, 1, 0, bias=False),
                       nn.InstanceNorm2d(dim, affine=True),
                       nn.ReLU(True)]

        conv_block += [nn.ReflectionPad2d(1),
                       nn.Conv2d(dim, dim, 3, 1, 0, bias=False),
                       nn.InstanceNorm2d(dim, affine=True)]

        self.conv_block = nn.Sequential(*conv_block)
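
A quick, hypothetical shape check of the stored conv_block (reflection padding plus 3x3 stride-1 convolutions leaves the spatial size unchanged), assuming `import torch`:

block = ResNetBlock(256)
x = torch.randn(1, 256, 64, 64)
print(block.conv_block(x).shape)  # torch.Size([1, 256, 64, 64])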
Example #4
    def __init__(self, img_size=256, style_dim=64, max_conv_dim=512, w_hpf=1):
        super().__init__()
        dim_in = 2**14 // img_size
        self.img_size = img_size
        self.from_rgb = nn.Conv2d(3, dim_in, 3, 1, 1)
        self.encode = nn.ModuleList()
        self.decode = nn.ModuleList()
        self.to_rgb = nn.Sequential(nn.InstanceNorm2d(dim_in, affine=True),
                                    nn.LeakyReLU(0.2),
                                    nn.Conv2d(dim_in, 3, 1, 1, 0))

        # down/up-sampling blocks
        repeat_num = int(np.log2(img_size)) - 4
        if w_hpf > 0:
            repeat_num += 1
        for _ in range(repeat_num):
            dim_out = min(dim_in * 2, max_conv_dim)
            self.encode.append(
                ResBlk(dim_in, dim_out, normalize=True, downsample=True))
            self.decode.insert(0,
                               AdainResBlk(dim_out,
                                           dim_in,
                                           style_dim,
                                           w_hpf=w_hpf,
                                           upsample=True))  # stack-like
            dim_in = dim_out

        # bottleneck blocks
        for _ in range(2):
            self.encode.append(ResBlk(dim_out, dim_out, normalize=True))
            self.decode.insert(
                0, AdainResBlk(dim_out, dim_out, style_dim, w_hpf=w_hpf))

        if w_hpf > 0:
            device = porch.device(
                'cuda' if porch.cuda.is_available() else 'cpu')
            self.hpf = HighPass(w_hpf, device)
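
A simplified sketch of how such an encoder/decoder is usually driven; the style code `s` passed to the decoder blocks is an assumption about the surrounding class, and the mask / HighPass skip path used when w_hpf > 0 is omitted:

    def forward(self, x, s):
        x = self.from_rgb(x)
        for block in self.encode:      # downsampling + bottleneck ResBlks
            x = block(x)
        for block in self.decode:      # AdaIN blocks conditioned on the style code s
            x = block(x, s)
        return self.to_rgb(x)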
Example #5
    def __init__(self, input_nc, output_nc, ngf=64, n_blocks=6, img_size=256, light=False):
        super(ResnetGenerator, self).__init__()
        self.n_res = n_blocks
        self.light = light
        down_layer = [
            nn.ReflectionPad2d(3),
            nn.Conv2d(input_nc, ngf, 7, 1, 0, bias=False),
            nn.InstanceNorm2d(ngf, affine=True),
            nn.ReLU(inplace=True),

            # Down-Sampling
            nn.ReflectionPad2d(1),
            nn.Conv2d(ngf, ngf*2, 3, 2, 0, bias=False),
            nn.InstanceNorm2d(ngf*2, affine=True),
            nn.ReLU(inplace=True),
            nn.ReflectionPad2d(1),
            nn.Conv2d(ngf*2, ngf*4, 3, 2, 0, bias=False),
            nn.InstanceNorm2d(ngf*4, affine=True),
            nn.ReLU(inplace=True),

            # Down-Sampling Bottleneck
            ResNetBlock(ngf*4),
            ResNetBlock(ngf*4),
            ResNetBlock(ngf*4),
            ResNetBlock(ngf*4),
        ]

        # Class Activation Map
        self.gap_fc = nn.Linear(ngf*4, 1, bias=False)
        self.gmp_fc = nn.Linear(ngf*4, 1, bias=False)
        self.conv1x1 = nn.Conv2d(ngf*8, ngf*4, 1, 1, bias=True)
        self.relu = nn.ReLU(inplace=True)

        # Gamma, Beta block
        if self.light:
            fc = [nn.Linear(ngf*4, ngf*4, bias=False),
                  nn.ReLU(True),
                  nn.Linear(ngf*4, ngf*4, bias=False),
                  nn.ReLU(True)]
        else:
            fc = [nn.Linear(img_size * img_size * ngf//4, ngf*4, bias=False),
                  nn.ReLU(True),
                  nn.Linear(ngf*4, ngf*4, bias=False),
                  nn.ReLU(True)]

        self.gamma = nn.Linear(ngf*4, ngf*4, bias=False)
        self.beta = nn.Linear(ngf*4, ngf*4, bias=False)

        # Up-Sampling Bottleneck
        for i in range(self.n_res):
            setattr(self, "ResNetAdaILNBlock_" + str(i + 1), ResNetAdaILNBlock(ngf*4))

        up_layer = [
            nn.Upsample(scale_factor=2, mode="nearest"),
            nn.ReflectionPad2d(1),
            nn.Conv2d(ngf*4, ngf*2, 3, 1, 0, bias=False),
            ILN(ngf*2),
            nn.ReLU(inplace=True),

            nn.Upsample(scale_factor=2, mode="nearest"),
            nn.ReflectionPad2d(1),
            nn.Conv2d(ngf*2, ngf, 3, 1, 0, bias=False),
            ILN(ngf),
            nn.ReLU(inplace=True),

            nn.ReflectionPad2d(3),
            nn.Conv2d(ngf, output_nc, 7, 1, 0, bias=False),
            nn.Tanh()
        ]

        self.down_layer = nn.Sequential(*down_layer)
        self.fc = nn.Sequential(*fc)
        self.up_layer = nn.Sequential(*up_layer)
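
A condensed sketch of a UGATIT-style forward pass wired to the attribute names defined above; the CAM pooling/weighting details and the returned values are assumptions based on the published UGATIT architecture (requires `import torch` and `import torch.nn.functional as F`):

    def forward(self, x):
        x = self.down_layer(x)
        # Class Activation Map: weight features by the gap/gmp classifier weights
        gap = F.adaptive_avg_pool2d(x, 1)
        gap_logit = self.gap_fc(gap.view(x.shape[0], -1))
        gap = x * list(self.gap_fc.parameters())[0].unsqueeze(2).unsqueeze(3)
        gmp = F.adaptive_max_pool2d(x, 1)
        gmp_logit = self.gmp_fc(gmp.view(x.shape[0], -1))
        gmp = x * list(self.gmp_fc.parameters())[0].unsqueeze(2).unsqueeze(3)
        cam_logit = torch.cat([gap_logit, gmp_logit], 1)
        x = self.relu(self.conv1x1(torch.cat([gap, gmp], 1)))   # ngf*8 -> ngf*4
        # Gamma, Beta for the AdaILN up-sampling bottleneck
        if self.light:
            x_ = self.fc(F.adaptive_avg_pool2d(x, 1).view(x.shape[0], -1))
        else:
            x_ = self.fc(x.view(x.shape[0], -1))
        gamma, beta = self.gamma(x_), self.beta(x_)
        for i in range(self.n_res):
            x = getattr(self, "ResNetAdaILNBlock_" + str(i + 1))(x, gamma, beta)
        return self.up_layer(x), cam_logit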
Example #6
    def __init__(self,
                 input_nc,
                 output_nc,
                 ngf=64,
                 n_blocks=6,
                 img_size=256,
                 light=False):
        assert (n_blocks >= 0)
        super(ResnetGenerator, self).__init__()
        self.input_nc = input_nc
        self.output_nc = output_nc
        self.ngf = ngf
        self.n_blocks = n_blocks
        self.img_size = img_size
        self.light = light

        DownBlock = []
        DownBlock += [
            nn.ReflectionPad2d(3),
            nn.Conv2d(input_nc,
                      ngf,
                      kernel_size=7,
                      stride=1,
                      padding=0,
                      bias=False),
            nn.InstanceNorm2d(ngf, affine=True),
            nn.ReLU(True)
        ]

        # Down-Sampling
        n_downsampling = 2
        for i in range(n_downsampling):
            mult = 2**i
            DownBlock += [
                nn.ReflectionPad2d(1),
                nn.Conv2d(ngf * mult,
                          ngf * mult * 2,
                          kernel_size=3,
                          stride=2,
                          padding=0,
                          bias=False),
                nn.InstanceNorm2d(ngf * mult * 2, affine=True),
                nn.ReLU(True)
            ]

        # Down-Sampling Bottleneck
        mult = 2**n_downsampling
        for i in range(n_blocks):
            DownBlock += [ResnetBlock(ngf * mult, use_bias=False)]

        # Class Activation Map
        self.gap_fc = nn.Linear(ngf * mult, 1, bias=False)
        self.gmp_fc = nn.Linear(ngf * mult, 1, bias=False)
        self.conv1x1 = nn.Conv2d(ngf * mult * 2,
                                 ngf * mult,
                                 kernel_size=1,
                                 stride=1,
                                 bias=True)
        self.relu = nn.ReLU(True)

        # Gamma, Beta block
        if self.light:
            FC = [
                nn.Linear(ngf * mult, ngf * mult, bias=False),
                nn.ReLU(True),
                nn.Linear(ngf * mult, ngf * mult, bias=False),
                nn.ReLU(True)
            ]
        else:
            FC = [
                nn.Linear(img_size // mult * img_size // mult * ngf * mult,
                          ngf * mult,
                          bias=False),
                nn.ReLU(True),
                nn.Linear(ngf * mult, ngf * mult, bias=False),
                nn.ReLU(True)
            ]
        self.gamma = nn.Linear(ngf * mult, ngf * mult, bias=False)
        self.beta = nn.Linear(ngf * mult, ngf * mult, bias=False)

        # Up-Sampling Bottleneck
        for i in range(n_blocks):
            setattr(self, 'UpBlock1_' + str(i + 1),
                    ResnetAdaILNBlock(ngf * mult, use_bias=False))

        # Up-Sampling
        UpBlock2 = []
        for i in range(n_downsampling):
            mult = 2**(n_downsampling - i)
            UpBlock2 += [
                nn.Upsample(scale_factor=2, mode='nearest'),
                nn.ReflectionPad2d(1),
                nn.Conv2d(ngf * mult,
                          int(ngf * mult / 2),
                          kernel_size=3,
                          stride=1,
                          padding=0,
                          bias=False),
                ILN(int(ngf * mult / 2)),
                nn.ReLU(True)
            ]

        UpBlock2 += [
            nn.ReflectionPad2d(3),
            nn.Conv2d(ngf,
                      output_nc,
                      kernel_size=7,
                      stride=1,
                      padding=0,
                      bias=False),
            nn.Tanh()
        ]

        self.DownBlock = nn.Sequential(*DownBlock)
        self.FC = nn.Sequential(*FC)
        self.UpBlock2 = nn.Sequential(*UpBlock2)
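
A hypothetical instantiation and shape check for the encoder path built above (assumes `import torch` and that the ResnetBlock, ResnetAdaILNBlock and ILN classes referenced here are in scope):

netG = ResnetGenerator(input_nc=3, output_nc=3, ngf=64, n_blocks=6, img_size=256)
x = torch.randn(1, 3, 256, 256)
feat = netG.DownBlock(x)   # 7x7 stem + two stride-2 convs: 3x256x256 -> 256x64x64
print(feat.shape)          # torch.Size([1, 256, 64, 64])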
Example #7
    def __init__(self, style_dim, num_features):
        super().__init__()
        self.norm = nn.InstanceNorm2d(num_features, affine=False)
        self.fc = nn.Linear(style_dim, num_features * 2)
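
This constructor is the usual StarGAN v2 style AdaIN; a sketch of the forward pass that typically accompanies it, where the style vector s is mapped to a per-channel scale and shift (assumes `import torch`):

    def forward(self, x, s):
        h = self.fc(s)                                 # (N, num_features * 2)
        h = h.view(h.size(0), h.size(1), 1, 1)
        gamma, beta = torch.chunk(h, chunks=2, dim=1)  # split into scale / shift
        return (1 + gamma) * self.norm(x) + beta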