예제 #1
0
    def __init__(self, dim, use_bias):
        """Residual block body: two reflection-padded 3x3 conv + InstanceNorm
        stages; only the first stage is followed by a ReLU.

        Args:
            dim: channel count (input == output).
            use_bias: whether the conv layers carry a bias term.
        """
        super(ResnetBlock, self).__init__()

        def stage():
            # pad/conv/norm trio shared by both halves of the block
            return [
                ReflectionPad2d(1),
                Conv2D(dim,
                       dim,
                       filter_size=3,
                       stride=1,
                       padding=0,
                       bias_attr=use_bias),
                InstanceNorm(dim),
            ]

        self.conv_block = Sequential(*(stage() + [ReLU(False)] + stage()))
예제 #2
0
    def __init__(self, dim, use_bias=True):
        """Residual block body: two padded 3x3 conv + InstanceNorm stages
        with custom weight/bias initializers; ReLU after the first stage only.

        Args:
            dim: channel count (input == output).
            use_bias: forwarded to init_bias() to configure the conv bias.
        """
        super(ResnetBlock, self).__init__()

        def stage():
            # pad/conv/norm trio shared by both halves of the block
            return [
                ReflectionPad2d(1),
                Conv2D(dim,
                       dim,
                       filter_size=3,
                       stride=1,
                       padding=0,
                       param_attr=init_w(),
                       bias_attr=init_bias(use_bias)),
                InstanceNorm(dim),
            ]

        self.conv_block = Sequential(*(stage() + [ReLU()] + stage()))
예제 #3
0
    def __init__(self, dim, use_bias):
        """Residual block body: two reflection-padded 3x3 convs with
        InstanceNorm; a PReLU follows the first conv only.

        Args:
            dim: channel count (input == output).
            use_bias: whether the conv layers carry a bias term.
        """
        super(ResnetBlock, self).__init__()
        layers = [
            # first stage: pad + conv + norm + activation
            ReflectionPad2d(1),
            Conv2D(dim, dim, filter_size=3, stride=1, padding=0,
                   bias_attr=use_bias),
            InstanceNorm(dim),
            PRelu(mode="all"),
            # second stage: identical but without the activation
            ReflectionPad2d(1),
            Conv2D(dim, dim, filter_size=3, stride=1, padding=0,
                   bias_attr=use_bias),
            InstanceNorm(dim),
        ]
        self.conv_block = Sequential(*layers)
    def __init__(self, dim, use_bias):
        """Residual block body: two [1,1,1,1]-reflection-padded 3x3 convs
        with InstanceNorm; an in-place ReLU follows the first conv only.

        Args:
            dim: channel count (input == output).
            use_bias: whether the conv layers carry a bias term.
        """
        super(ResnetBlock, self).__init__()
        layers = []
        layers.append(ReflectionPad2d([1, 1, 1, 1]))
        layers.append(Conv2D(dim, dim, filter_size=3, stride=1, padding=0,
                             bias_attr=use_bias))
        layers.append(InstanceNorm(dim))
        layers.append(ReLU(True))
        # TODO: clarify why the second stage has no activation — presumably
        # standard ResNet practice where the residual sum is activated later.
        layers.append(ReflectionPad2d([1, 1, 1, 1]))
        layers.append(Conv2D(dim, dim, filter_size=3, stride=1, padding=0,
                             bias_attr=use_bias))
        layers.append(InstanceNorm(dim))
        self.conv_block = Sequential(*layers)
예제 #5
0
    def __init__(self, dim, use_bias):
        """Residual block body built from two identical pad/conv/norm stages;
        the first stage ends with an in-place ReLU.

        Args:
            dim: channel count (input == output).
            use_bias: whether the conv layers carry a bias term.
        """
        super(ResnetBlock, self).__init__()

        first = [
            ReflectionPad2d(pad=1),
            Conv2D(num_channels=dim, num_filters=dim, filter_size=3,
                   stride=1, padding=0, bias_attr=use_bias),
            InstanceNorm(dim),
            ReLU(inplace=True),
        ]
        second = [
            ReflectionPad2d(pad=1),
            Conv2D(num_channels=dim, num_filters=dim, filter_size=3,
                   stride=1, padding=0, bias_attr=use_bias),
            InstanceNorm(dim),
        ]
        self.conv_block = Sequential(*(first + second))
예제 #6
0
    def __init__(self, dim, use_bias):
        """Residual block body: two reflection-padded 3x3 conv + InstanceNorm
        stages; an in-place ReLU follows the first stage only.

        Args:
            dim: channel count (input == output).
            use_bias: whether the conv layers carry a bias term.
        """
        super(ResnetBlock, self).__init__()
        conv_block = []
        conv_block += [
            ReflectionPad2D(1),
            Conv2D(dim,
                   dim,
                   filter_size=3,
                   stride=1,
                   padding=0,
                   # fix: was hard-coded True, silently ignoring use_bias
                   bias_attr=use_bias),
            InstanceNorm(dim),
            ReLU(inplace=True)
        ]

        conv_block += [
            ReflectionPad2D(1),
            Conv2D(dim,
                   dim,
                   filter_size=3,
                   stride=1,
                   padding=0,
                   # fix: was hard-coded True, silently ignoring use_bias
                   bias_attr=use_bias),
            InstanceNorm(dim)
        ]

        self.conv_block = fluid.dygraph.Sequential(*conv_block)
예제 #7
0
    def __init__(self, channels, dlatent_size, use_wscale, use_noise,
                 use_pixel_norm, use_instance_norm, use_styles):
        """Per-layer epilogue: optional noise injection, Leaky ReLU
        activation, optional pixel/instance normalization and optional
        style modulation (AdaIN).

        Disabled components are stored as None; note that `self.noise` is
        only created when use_noise is True (attribute absent otherwise,
        matching the original behavior).
        """
        super(LayerEpilogue, self).__init__()
        if use_noise:
            self.noise = ApplyNoise(channels)
        self.act = ops.Leaky_ReLU()
        self.pixel_norm = PixelNorm() if use_pixel_norm else None
        self.instance_norm = (InstanceNorm(channels)
                              if use_instance_norm else None)
        self.style_mod = (ApplyStyle(dlatent_size,
                                     channels,
                                     use_wscale=use_wscale)
                          if use_styles else None)
예제 #8
0
    def _build_weights(self, dim_in, dim_out):
        """Create the conv (and optional norm / shortcut) layers of this block.

        Args:
            dim_in: input channel count.
            dim_out: output channel count (the change happens in conv2).
        """
        # Two 3x3 same-padding convs; only conv2 changes the channel count.
        self.conv1 = Conv2D(num_channels=dim_in,
                            num_filters=dim_in,
                            filter_size=3,
                            stride=1,
                            padding=1)
        self.conv2 = Conv2D(num_channels=dim_in,
                            num_filters=dim_out,
                            filter_size=3,
                            stride=1,
                            padding=1)
        if self.normalize:
            # Both norms act on dim_in features (pre-activation style).
            self.norm1 = InstanceNorm(
                dim_in)  # InstanceNorm has no `momentum` argument; issue filed upstream on GitHub
            self.norm2 = InstanceNorm(dim_in)
        if self.learned_sc:
            # 1x1 conv for the learned shortcut when dim_in != dim_out.
            self.conv1x1 = Conv2D(dim_in, dim_out, 1, 1, 0, bias_attr=False)
예제 #9
0
    def __init__(self, img_size=256, style_dim=64, max_conv_dim=512, w_hpf=1):
        """Style-conditioned encoder/decoder generator.

        Decoder blocks are inserted at position 0 so that encode and decode
        mirror each other (stack-like construction).
        """
        super(Generator, self).__init__(self.__class__.__name__)
        dim_in = 2**14 // img_size
        self.img_size = img_size
        self.from_rgb = Conv2D(3, dim_in, 3, 1, 1)

        self.encode = fluid.dygraph.LayerList()
        self.decode = fluid.dygraph.LayerList()
        self.to_rgb = fluid.dygraph.Sequential(
            InstanceNorm(dim_in),
            LeakyRelu(0.2),
            Conv2D(dim_in, 3, 1, 1, 0),
        )

        # down/up-sampling blocks
        num_stages = int(np.log2(img_size)) - 4
        if w_hpf > 0:
            num_stages += 1
        for _ in range(num_stages):
            dim_out = min(dim_in * 2, max_conv_dim)
            self.encode.append(
                ResBlk(dim_in, dim_out, normalize=True, downsample=True))
            # prepend so the decoder runs in reverse order of the encoder
            self.decode.insert(
                0,
                AdainResBlk(dim_out, dim_in, style_dim,
                            w_hpf=w_hpf, upsample=True))
            dim_in = dim_out

        # bottleneck blocks (no resampling)
        for _ in range(2):
            self.encode.append(ResBlk(dim_out, dim_out, normalize=True))
            self.decode.insert(
                0, AdainResBlk(dim_out, dim_out, style_dim, w_hpf=w_hpf))

        if w_hpf > 0:
            self.hpf = HighPass(w_hpf)
예제 #10
0
    def __init__(self, input_nc, output_nc, ngf=64, n_blocks=6, img_size=256, light=False):
        '''
        UGATIT-style ResNet generator.

        Args:
            input_nc: number of input channels
            output_nc: number of output channels (both are 3 here)
            ngf: base channel number per layer
            n_blocks: the number of residual blocks
            img_size: input image size (assumed square)
            light: if True, use the lightweight gamma/beta FC head
        '''
        assert(n_blocks >= 0)
        # fix: super().__init__() was called twice in the original.
        super(ResnetGenerator, self).__init__()
        self.input_nc = input_nc
        self.output_nc = output_nc
        self.ngf = ngf
        self.n_blocks = n_blocks
        self.img_size = img_size
        self.light = light

        DownBlock = []
        # Reflection pad of 3 cancels the spatial shrink of the 7x7 conv.
        # fix: pad was [1,1,1,1], which shrinks the feature map by 4 per stage
        # and breaks the FC input size (img_size // mult) computed below;
        # the original comment itself said pad 3 is needed for the 7x7 conv.
        # TODO: Paddle's pad2d lives in fluid.layers and cannot be used as a
        # layer object, so a plain padding layer is used for now.
        DownBlock += [ReflectionPad2d([3,3,3,3]),
                      Conv2D(input_nc, ngf, filter_size=7, stride=1, padding=0, bias_attr=False),
                      InstanceNorm(ngf),
                      # TODO: Paddle has no standalone ReLU object; PReLU was
                      # suggested as a stand-in (set const to 0 later).
                      ReLU(True)]

        # Down-Sampling: each stage doubles channels and halves spatial size.
        n_downsampling = 2
        for i in range(n_downsampling):
            mult = 2**i
            DownBlock += [ReflectionPad2d([1,1,1,1]),
                          Conv2D(ngf * mult, ngf * mult * 2, filter_size=3, stride=2, padding=0, bias_attr=False),
                          InstanceNorm(ngf * mult * 2),
                          ReLU(True)]

        # Down-Sampling Bottleneck: residual blocks, shape unchanged.
        mult = 2**n_downsampling
        for i in range(n_blocks):
            DownBlock += [ResnetBlock(ngf * mult, use_bias=False)]

        # Class Activation Map: FC heads after global average/max pooling.
        self.gap_fc = Linear(ngf * mult, 1, bias_attr=False)
        self.gmp_fc = Linear(ngf * mult, 1, bias_attr=False)
        # 1x1 conv merges the concatenated GAP/GMP feature maps.
        self.conv1x1 = Conv2D(ngf * mult * 2, ngf * mult, filter_size=1, stride=1, padding=0, bias_attr=True)
        self.relu = ReLU(True)

        # Gamma, Beta block: produces AdaILN parameters for the up blocks.
        if self.light:
            FC = [Linear(ngf * mult, ngf * mult, bias_attr=False),
                  ReLU(True),
                  Linear(ngf * mult, ngf * mult, bias_attr=False),
                  ReLU(True)]
        else:
            # full variant: first FC takes the flattened (img_size // mult)^2
            # feature map, hence the large input dimension
            FC = [Linear(img_size // mult * img_size // mult * ngf * mult, ngf * mult, bias_attr=False),
                  ReLU(True),
                  Linear(ngf * mult, ngf * mult, bias_attr=False),
                  ReLU(True)]
        self.gamma = Linear(ngf * mult, ngf * mult, bias_attr=False)
        self.beta = Linear(ngf * mult, ngf * mult, bias_attr=False)

        # Up-Sampling Bottleneck: adaptive ILN residual blocks.
        for i in range(n_blocks):
            setattr(self, 'UpBlock1_' + str(i+1), ResnetAdaILNBlock(ngf * mult, use_bias=False))

        # Up-Sampling: mirrors the downsampling stages.
        UpBlock2 = []
        for i in range(n_downsampling):
            mult = 2**(n_downsampling - i)
            UpBlock2 += [Upsample(scale_factor=2),
                         ReflectionPad2d([1,1,1,1]),
                         Conv2D(ngf * mult, int(ngf * mult / 2), filter_size=3, stride=1, padding=0, bias_attr=False),
                         ILN(int(ngf * mult / 2)),
                         ReLU(True)]

        # Output head: 7x7 conv back to output_nc channels, tanh range.
        UpBlock2 += [ReflectionPad2d([3,3,3,3]),
                     Conv2D(ngf, output_nc, filter_size=7, stride=1, padding=0, bias_attr=False),
                     Tanh(False)]

        self.DownBlock = Sequential(*DownBlock)
        self.FC = Sequential(*FC)
        self.UpBlock2 = Sequential(*UpBlock2)
예제 #11
0
    def __init__(self,
                 input_nc,
                 output_nc,
                 ngf=64,
                 n_blocks=6,
                 img_size=256,
                 light=False):
        """UGATIT-style ResNet generator.

        Args:
            input_nc: number of input channels.
            output_nc: number of output channels.
            ngf: base channel count per layer.
            n_blocks: number of residual blocks in the bottleneck.
            img_size: input image size (assumed square).
            light: if True, the gamma/beta FC head takes (ngf*mult) pooled
                features; otherwise the full flattened feature map.
        """
        assert (n_blocks >= 0)
        super(ResnetGenerator, self).__init__()
        self.input_nc = input_nc
        self.output_nc = output_nc
        self.ngf = ngf
        self.n_blocks = n_blocks
        self.img_size = img_size
        self.light = light

        DownBlock = []
        # Stem: reflection pad 3 + 7x7 conv keeps spatial size, channels -> ngf.
        DownBlock += [
            #fluid.layers.pad2d(3),
            ReflectionPad2d(3),
            Conv2D(num_channels=input_nc,
                   num_filters=ngf,
                   filter_size=7,
                   stride=1,
                   padding=0,
                   bias_attr=False),
            InstanceNorm(ngf),
            ReLU(False)
            #BatchNorm(ngf,act='relu')
            #fluid.layers.instance_norm(ngf)
        ]
        # self.conv1=Conv2D(input_nc, ngf, 7)
        # self.instance_norm=InstanceNorm(ngf)
        #self.n_downsampling=n_downsampling
        # Down-Sampling: each stage halves spatial size and doubles channels.
        n_downsampling = 2
        for i in range(n_downsampling):
            mult = 2**i
            DownBlock += [
                #fluid.layers.pad2d(1),
                ReflectionPad2d(1),
                Conv2D(ngf * mult,
                       ngf * mult * 2,
                       filter_size=3,
                       stride=2,
                       padding=0,
                       bias_attr=False),
                InstanceNorm(ngf * mult * 2),
                ReLU(False)
                #BatchNorm(ngf * mult * 2,act='relu')
                #fluid.layers.instance_norm(ngf * mult * 2)
            ]

        # Down-Sampling Bottleneck: residual blocks, shape unchanged.
        mult = 2**n_downsampling
        for i in range(n_blocks):
            DownBlock += [ResnetBlock(ngf * mult, use_bias=False)]

        #self.renetblock=ResnetBlock(ngf * mult, use_bias=False)
        # Class Activation Map: FC heads after global average/max pooling.
        self.gap_fc = Linear(ngf * mult, 1, bias_attr=False)
        self.gmp_fc = Linear(ngf * mult, 1, bias_attr=False)
        # 1x1 conv merges the concatenated GAP/GMP feature maps back to ngf*mult.
        # NOTE(review): no padding kwarg here (presumably defaults to 0, which
        # is harmless for a 1x1 conv) — confirm against the Conv2D signature.
        self.conv1x1 = Conv2D(ngf * mult * 2,
                              ngf * mult,
                              filter_size=1,
                              stride=1,
                              bias_attr=True)
        self.relu = ReLU(False)

        # Gamma, Beta block: produces AdaILN parameters for the up blocks.
        if self.light:
            FC = [
                Linear(ngf * mult, ngf * mult, bias_attr=False, act='relu'),
                Linear(ngf * mult, ngf * mult, bias_attr=False, act='relu')
            ]
        else:
            # full variant: first FC takes the flattened (img_size // mult)^2
            # feature map, hence the large input dimension
            FC = [
                Linear(img_size // mult * img_size // mult * ngf * mult,
                       ngf * mult,
                       bias_attr=False,
                       act='relu'),
                Linear(ngf * mult, ngf * mult, bias_attr=False, act='relu')
            ]
        self.gamma = Linear(ngf * mult, ngf * mult, bias_attr=False)
        self.beta = Linear(ngf * mult, ngf * mult, bias_attr=False)

        # Up-Sampling Bottleneck: adaptive ILN residual blocks.
        for i in range(n_blocks):
            setattr(self, 'UpBlock1_' + str(i + 1),
                    ResnetAdaILNBlock(ngf * mult, use_bias=False))

        # Up-Sampling: mirrors the downsampling stages.
        UpBlock2 = []
        for i in range(n_downsampling):
            mult = 2**(n_downsampling - i)
            UpBlock2 += [  #nn.Upsample(scale_factor=2, mode='nearest'),
                #fluid.layers.pad2d(1),
                Upsample(),
                ReflectionPad2d(1),
                Conv2D(ngf * mult,
                       int(ngf * mult / 2),
                       filter_size=3,
                       stride=1,
                       padding=0,
                       bias_attr=False),
                ILN(int(ngf * mult / 2)),
                ReLU(False)
            ]

        # Output head: 7x7 conv back to output_nc channels, tanh range.
        UpBlock2 += [
            #fluid.layers.pad2d(3),
            ReflectionPad2d(3),
            Conv2D(ngf,
                   output_nc,
                   filter_size=7,
                   stride=1,
                   padding=0,
                   bias_attr=False),
            Tanh()
        ]

        self.DownBlock = Sequential(*DownBlock)
        self.FC = Sequential(*FC)
        self.UpBlock2 = Sequential(*UpBlock2)
예제 #12
0
    def __init__(self, input_nc, output_nc, ngf=64, n_blocks=6, img_size=256, light=False):
        """UGATIT-style ResNet generator.

        Args:
            input_nc: number of input channels.
            output_nc: number of output channels.
            ngf: base channel count per layer.
            n_blocks: number of residual blocks in the bottleneck.
            img_size: input image size (assumed square).
            light: if True, the gamma/beta FC head works on pooled features.
        """
        assert(n_blocks >= 0)
        super(ResnetGenerator, self).__init__()

        self.input_nc = input_nc
        self.output_nc = output_nc
        self.ngf = ngf
        self.n_blocks = n_blocks
        self.img_size = img_size
        self.light = light

        DownBlock = []
        # Stem: reflection pad 3 + 7x7 conv keeps spatial size, channels -> ngf.
        DownBlock += [
                    ReflectionPad2D(3),
                    Conv2D(num_channels=input_nc, num_filters=ngf, filter_size=7, stride=1, padding=0, bias_attr=False),
                    InstanceNorm(self.ngf),
                    Relu(),
            ]

        # Down-Sampling: each stage halves spatial size, doubles channels.
        n_downsampling = 2
        for i in range(n_downsampling):
            mult = 2**i
            DownBlock += [
                ReflectionPad2D(1),
                Conv2D(num_channels=ngf * mult, num_filters=ngf * mult * 2, filter_size=3, stride=2, padding=0, bias_attr=False),
                InstanceNorm(ngf * mult * 2),
                Relu(),
            ]

        # Down-Sampling Bottleneck: residual blocks, shape unchanged.
        mult = 2**n_downsampling
        for i in range(n_blocks):
            DownBlock += [ResnetBlock(ngf * mult, use_bias=False)]

        # Class Activation Map: FC heads after global average/max pooling.
        # NOTE(review): act='sigmoid' here differs from the reference UGATIT,
        # which keeps these heads as raw logits — confirm this is intended.
        self.gap_fc = Linear(ngf * mult, 1, bias_attr=False, act='sigmoid')
        self.gmp_fc = Linear(ngf * mult, 1, bias_attr=False, act='sigmoid')

        # 1x1 conv merges the concatenated GAP/GMP feature maps.
        self.conv1x1 = Conv2D(ngf * mult * 2, ngf * mult, filter_size=1, stride=1, bias_attr=True)
        self.relu = Relu()

        # Gamma, Beta block: produces AdaILN parameters for the up blocks.
        if self.light:
            FC = [
                Linear(ngf * mult, ngf * mult, bias_attr=False),
                Relu(),
                Linear(ngf * mult, ngf * mult, bias_attr=False),
                Relu(),
            ]
        else:
            # full variant: first FC takes the flattened (img_size // mult)^2
            # feature map, hence the large input dimension
            FC = [
                Linear(img_size // mult * img_size // mult * ngf * mult, ngf * mult, bias_attr=False),
                Relu(),
                Linear(ngf * mult, ngf * mult, bias_attr=False),
                Relu(),
            ]
        self.gamma = Linear(ngf * mult, ngf * mult, bias_attr=False)
        self.beta = Linear(ngf * mult, ngf * mult, bias_attr=False)

        # Up-Sampling Bottleneck: adaptive ILN residual blocks.
        for i in range(n_blocks):
            setattr(self, 'UpBlock1_' + str(i+1), ResnetAdaILNBlock(ngf * mult, use_bias=False))

        # Up-Sampling: mirrors the downsampling stages.
        UpBlock2 = []
        for i in range(n_downsampling):
            mult = 2**(n_downsampling - i)
            UpBlock2 += [
                    UpSample(2),
                    ReflectionPad2D(1),
                    Conv2D(num_channels=ngf * mult, num_filters=int(ngf * mult / 2),
                        filter_size=3, stride=1, padding=0, bias_attr=False),
                    ILN(int(ngf * mult / 2)),
                    Relu(),
            ]

        # Output head: 7x7 conv to output_nc channels, tanh output range.
        UpBlock2 += [
                    ReflectionPad2D(3),
                    Conv2D(num_channels=ngf, num_filters=output_nc,
                        filter_size=7, stride=1, padding=0, bias_attr=False),
                    Tanh(),
            ]

        self.DownBlock = Sequential(*DownBlock)
        # Raw layer list kept alongside the Sequential — presumably for
        # per-layer access elsewhere; verify against callers.
        self.DownBlock_list = DownBlock
        self.FC = Sequential(*FC)
        self.UpBlock2 = Sequential(*UpBlock2)
예제 #13
0
 def __init__(self, style_dim, num_features):
     """Adaptive instance normalization: an InstanceNorm layer plus a
     style-conditioned fc producing 2*num_features affine parameters
     (scale and shift)."""
     super(AdaIN, self).__init__(self.__class__.__name__)
     self.norm = InstanceNorm(num_features)
     self.fc = Linear(style_dim, num_features * 2)
예제 #14
0
    def __init__(self,
                 input_nc,
                 output_nc,
                 ngf=64,
                 n_blocks=6,
                 img_size=256,
                 light=False):
        """UGATIT-style ResNet generator.

        Args:
            input_nc: number of input channels.
            output_nc: number of output channels.
            ngf: base channel count per layer.
            n_blocks: number of residual blocks in the bottleneck.
            img_size: input image size (assumed square).
            light: if True, build the lightweight gamma/beta FC head.
        """
        assert (n_blocks >= 0)
        super(ResnetGenerator, self).__init__()
        self.input_nc = input_nc
        self.output_nc = output_nc
        self.ngf = ngf
        self.n_blocks = n_blocks
        self.img_size = img_size
        self.light = light

        DownBlock = []
        # First a 7x7 conv layer: image size unchanged, channels become ngf (64).
        DownBlock += [
            ReflectionPad2d(3),
            Conv2D(input_nc,
                   ngf,
                   filter_size=7,
                   stride=1,
                   padding=0,
                   bias_attr=False),
            InstanceNorm(ngf),
            PRelu(mode="all")
        ]

        # Down-Sampling stages
        n_downsampling = 2
        # Two downsampling stages: img_size shrinks 4x (to 64), channels grow 4x (to 256).
        for i in range(n_downsampling):
            mult = 2**i
            DownBlock += [
                ReflectionPad2d(1),
                Conv2D(ngf * mult,
                       ngf * mult * 2,
                       filter_size=3,
                       stride=2,
                       padding=0,
                       bias_attr=False),
                InstanceNorm(ngf * mult * 2),
                PRelu(mode="all")
            ]

        # Down-Sampling Bottleneck: the encoder's residual blocks
        mult = 2**n_downsampling
        # n_blocks residual blocks; spatial size and channel count unchanged.
        for i in range(n_blocks):
            DownBlock += [ResnetBlock(ngf * mult, use_bias=False)]

        # Class Activation Map: produces the class activation heads.
        # FC layer following global average pooling.
        self.gap_fc = Linear(ngf * mult, 1, bias_attr=False)
        # FC layer following global max pooling.
        self.gmp_fc = Linear(ngf * mult, 1, bias_attr=False)
        # The 1x1 conv + activation below merge the two pooled feature maps.
        self.conv1x1 = Conv2D(ngf * mult * 2,
                              ngf * mult,
                              filter_size=1,
                              stride=1,
                              bias_attr=True,
                              act='relu')
        # self.relu = nn.ReLU(True)

        # Gamma, Beta block: produces the gamma/beta of adaptive L-I
        # normalization (AdaILN).
        # Light variant: FC is two (ngf*mult) -> (ngf*mult) fully-connected layers.
        if self.light:
            FC = [
                Linear(ngf * mult, ngf * mult, bias_attr=False, act='relu'),
                #   nn.ReLU(True),
                Linear(ngf * mult, ngf * mult, bias_attr=False, act='relu'),
                #   nn.ReLU(True)
            ]
        else:
            # Full variant: one flattened-feature-map -> (ngf*mult) FC layer
            # followed by one (ngf*mult) -> (ngf*mult) FC layer.
            FC = [
                Linear(img_size // mult * img_size // mult * ngf * mult,
                       ngf * mult,
                       bias_attr=False,
                       act='relu'),
                #   nn.ReLU(True),
                Linear(ngf * mult, ngf * mult, bias_attr=False, act='relu'),
                #   nn.ReLU(True)
            ]
        # Gamma and Beta of AdaILN.
        self.gamma = Linear(ngf * mult, ngf * mult, bias_attr=False)
        self.beta = Linear(ngf * mult, ngf * mult, bias_attr=False)

        # Up-Sampling Bottleneck: the decoder's adaptive residual blocks.
        for i in range(n_blocks):
            setattr(self, 'UpBlock1_' + str(i + 1),
                    ResnetAdaILNBlock(ngf * mult, use_bias=False))

        # Up-Sampling: the decoder's upsampling stages.
        UpBlock2 = []
        # Upsampling mirrors the encoder's downsampling.
        for i in range(n_downsampling):
            mult = 2**(n_downsampling - i)
            UpBlock2 += [
                Upsample(),
                ReflectionPad2d(1),
                Conv2D(ngf * mult,
                       int(ngf * mult / 2),
                       filter_size=3,
                       stride=1,
                       padding=0,
                       bias_attr=False,
                       act='relu'),
                ILN(int(ngf * mult / 2)),  # note: only the adaptive residual blocks use AdaILN
                #  nn.ReLU(True)
            ]
        # Final conv layer mirrors the very first one.
        UpBlock2 += [
            ReflectionPad2d(3),
            Conv2D(ngf,
                   output_nc,
                   filter_size=7,
                   stride=1,
                   padding=0,
                   bias_attr=False,
                   act='tanh'),
            #  nn.Tanh()
        ]

        self.DownBlock = Sequential(*DownBlock)  # the whole encoder module
        self.FC = Sequential(*FC)  # FC module producing gamma/beta
        self.UpBlock2 = Sequential(*UpBlock2)  # post-upsampling module (no residual blocks)
예제 #15
0
    def __init__(self,
                 input_nc,
                 output_nc,
                 ngf=64,
                 n_blocks=6,
                 img_size=256,
                 light=False):
        '''
        UGATIT-style ResNet generator.

        Args:
            input_nc: number of input channels
            output_nc: number of output channels (both are 3 here)
            ngf: base channel number per layer
            n_blocks: the number of residual blocks
            img_size: input image size (assumed square)
            light: if True, use the lightweight gamma/beta FC head
        '''
        assert (n_blocks >= 0)
        # fix: super().__init__() was called twice in the original, with the
        # docstring stranded as a dead expression between the two calls.
        super(ResnetGenerator, self).__init__()
        self.input_nc = input_nc
        self.output_nc = output_nc
        self.ngf = ngf
        self.n_blocks = n_blocks
        self.img_size = img_size
        self.light = light

        DownBlock = []

        # Stem: reflection pad 3 + 7x7 conv keeps spatial size, channels -> ngf.
        DownBlock += [
            ReflectionPad2d([3, 3, 3, 3]),
            Conv2D(input_nc,
                   ngf,
                   filter_size=7,
                   stride=1,
                   padding=0,
                   bias_attr=False),
            InstanceNorm(ngf),
            ReLU(True)
        ]

        # Down-Sampling: each stage halves spatial size and doubles channels.
        n_downsampling = 2
        for i in range(n_downsampling):
            mult = 2**i
            DownBlock += [
                ReflectionPad2d([1, 1, 1, 1]),
                Conv2D(ngf * mult,
                       ngf * mult * 2,
                       filter_size=3,
                       stride=2,
                       padding=0,
                       bias_attr=False),
                InstanceNorm(ngf * mult * 2),
                ReLU(True)
            ]

        # Down-Sampling Bottleneck: residual blocks, shape unchanged.
        mult = 2**n_downsampling
        for i in range(n_blocks):
            DownBlock += [ResnetBlock(ngf * mult, use_bias=False)]

        # Class Activation Map: FC heads after global average/max pooling.
        self.gap_fc = Linear(ngf * mult, 1, bias_attr=False)
        self.gmp_fc = Linear(ngf * mult, 1, bias_attr=False)
        # 1x1 conv merges the concatenated GAP/GMP feature maps.
        self.conv1x1 = Conv2D(ngf * mult * 2,
                              ngf * mult,
                              filter_size=1,
                              stride=1,
                              padding=0,
                              bias_attr=True)
        self.relu = ReLU(True)

        # Gamma, Beta block: produces AdaILN parameters for the up blocks.
        if self.light:
            FC = [
                Linear(ngf * mult, ngf * mult, bias_attr=False),
                ReLU(True),
                Linear(ngf * mult, ngf * mult, bias_attr=False),
                ReLU(True)
            ]
        else:
            # full variant: first FC takes the flattened (img_size // mult)^2
            # feature map, hence the large input dimension
            FC = [
                Linear(img_size // mult * img_size // mult * ngf * mult,
                       ngf * mult,
                       bias_attr=False),
                ReLU(True),
                Linear(ngf * mult, ngf * mult, bias_attr=False),
                ReLU(True)
            ]
        self.gamma = Linear(ngf * mult, ngf * mult, bias_attr=False)
        self.beta = Linear(ngf * mult, ngf * mult, bias_attr=False)

        # Up-Sampling Bottleneck: adaptive ILN residual blocks.
        for i in range(n_blocks):
            setattr(self, 'UpBlock1_' + str(i + 1),
                    ResnetAdaILNBlock(ngf * mult, use_bias=False))

        # Up-Sampling: mirrors the downsampling stages. The Debug layers
        # trace each sub-layer during the forward pass.
        UpBlock2 = []
        for i in range(n_downsampling):
            mult = 2**(n_downsampling - i)
            UpBlock2 += [
                Upsample(scale_factor=2),
                Debug('Upsample Pass'),
                ReflectionPad2d([1, 1, 1, 1]),
                Debug('ReflectionPad2d Pass'),
                Conv2D(ngf * mult,
                       int(ngf * mult / 2),
                       filter_size=3,
                       stride=1,
                       padding=0,
                       bias_attr=False),
                Debug('Conv2D Pass'),
                ILN(int(ngf * mult / 2)),
                Debug('ILN Pass'),
                ReLU(True)
            ]

        # Output head: 7x7 conv to output_nc channels, tanh output range.
        UpBlock2 += [
            ReflectionPad2d([3, 3, 3, 3]),
            Conv2D(ngf,
                   output_nc,
                   filter_size=7,
                   stride=1,
                   padding=0,
                   bias_attr=False),
            Tanh()
        ]

        self.DownBlock = Sequential(*DownBlock)
        self.FC = Sequential(*FC)
        self.UpBlock2 = Sequential(*UpBlock2)