Example #1
0
    def __init__(self, opt, in_ch, out_ch, out_conv_initW=None):
        """Residual block: (norm -> conv) twice, with a projecting shortcut.

        Args:
            opt: option object forwarded to ``define_conv`` to pick the
                convolution type.
            in_ch (int): number of input channels.
            out_ch (int): number of output channels.
            out_conv_initW: initializer for the output-side convolutions.
                Defaults to a fresh ``HeNormal()`` per instance; a
                ``HeNormal()`` default in the signature would be evaluated
                only once and shared by every instance of this class.
        """
        super().__init__()

        # Create the initializer per instance instead of sharing a single
        # def-time default object across all instances.
        if out_conv_initW is None:
            out_conv_initW = HeNormal()

        he_w = HeNormal()
        with self.init_scope():
            self.norm1 = L.BatchNormalization(size=in_ch)
            self.conv1 = define_conv(opt)(in_ch,
                                          out_ch,
                                          ksize=3,
                                          pad=1,
                                          initialW=he_w)

            self.norm2 = L.BatchNormalization(size=out_ch)
            self.conv2 = define_conv(opt)(out_ch,
                                          out_ch,
                                          ksize=3,
                                          pad=1,
                                          initialW=out_conv_initW)

            self.activation = F.leaky_relu

            # If the input channel count is not equal to the output channel
            # count, the shortcut path converts the input to the output shape.
            if in_ch != out_ch:
                self.reshape_norm = L.BatchNormalization(size=in_ch)
                self.reshape_act = self.activation
                self.reshape_conv = define_conv(opt)(in_ch,
                                                     out_ch,
                                                     ksize=3,
                                                     pad=1,
                                                     initialW=out_conv_initW)
            else:
                # Identity shortcut: no-op stand-ins keep the forward pass
                # uniform regardless of the channel configuration.
                self.reshape_norm = lambda x: x
                self.reshape_act = lambda x: x
                self.reshape_conv = lambda x: x
Example #2
0
    def __init__(self, opt, ch):
        """Build the 1x1 projection convolutions for self-attention.

        f/g/h project ``ch`` down to ``ch // opt.division_ch`` channels and
        v projects back up to ``ch``; ``gamma`` is a learned scalar that
        blends the attention output (presumably into the residual — confirm
        against the forward pass).
        """
        super().__init__()

        weight_init = HeNormal()
        reduced_ch = ch // opt.division_ch

        # (name, in_channels, out_channels) for the four 1x1 projections.
        projections = [
            ('f_conv', ch, reduced_ch),
            ('g_conv', ch, reduced_ch),
            ('h_conv', ch, reduced_ch),
            ('v_conv', reduced_ch, ch),
        ]

        with self.init_scope():
            for name, n_in, n_out in projections:
                setattr(
                    self, name,
                    define_conv(opt)(n_in, n_out, ksize=1,
                                     initialW=weight_init))
            self.gamma = Parameter(initializer=0, shape=1, name='SA-gamma')
Example #3
0
    def __init__(self, opt):
        """Assemble the SPADE-based decoder.

        A conv head at 1/64 resolution is followed by six residual blocks,
        each paired with a 2x upsampling, then one final residual block and
        a 3-channel output convolution.
        """
        super().__init__()

        out_conv_w = Normal()
        head_conv_w = HeNormal()

        C, H, W = opt.img_shape
        ngf = opt.ngf
        label_ch = opt.class_num
        self.opt = opt

        n_down = 6
        # Starting feature map: (ngf*8) channels at 1/2**n_down resolution.
        init_shape = (ngf * 8, H // 2 ** n_down, W // 2 ** n_down)

        with self.init_scope():
            self.w1 = ConstantFCN(opt, opt.class_num)
            self.w2 = ConstantFCN(opt, opt.c_shape[0])

            self.head_reshape = lambda x: F.resize_images(x, init_shape[1:])
            self.head = define_conv(opt)(label_ch,
                                         init_shape[0],
                                         ksize=3,
                                         pad=1,
                                         initialW=head_conv_w)

            # (in multiplier, out multiplier) of ngf for the six upsampled
            # stages r1/up1 .. r6/up6; spatial size doubles at each stage
            # starting from init_shape.
            stage_mults = [(8, 8), (8, 8), (8, 8), (8, 4), (4, 2), (2, 1)]
            for idx, (m_in, m_out) in enumerate(stage_mults, start=1):
                setattr(self, 'r%d' % idx,
                        SPADEResBlock(opt, ngf * m_in, ngf * m_out))
                setattr(self, 'up%d' % idx,
                        define_upsampling(opt, ngf * m_out))

            # Final residual block at full resolution: ngf -> ngf // 2.
            self.r7 = SPADEResBlock(opt, ngf, ngf // 2)

            # (ngf // 2) channels -> 3-channel image.
            self.to_img = L.Convolution2D(ngf // 2,
                                          3,
                                          ksize=3,
                                          pad=1,
                                          initialW=out_conv_w)
Example #4
0
    def __init__(self, opt, input_ch, input_resolution=65, nf=128):
        """Atrous Spatial Pyramid Pooling (ASPP) head.

        Builds a 1x1 conv branch plus three dilated 3x3 conv branches whose
        dilation rates are rescaled from the original paper's rates
        (6, 12, 18 defined at a 65x65 feature map) to ``input_resolution``,
        and a fusion conv mapping the four concatenated branches back to
        ``input_ch`` channels.

        Args:
            opt: option object forwarded to the conv factories.
            input_ch (int): channels of the incoming feature map.
            input_resolution (int): spatial size used to rescale the
                dilation rates; 65 reproduces the paper's setting.
            nf (int): per-branch channel count. Previously hard-coded to
                128 (with ``opt.aspp_nf`` commented out); exposed as a
                backward-compatible parameter with the same default.
        """
        super().__init__()

        # Dilation rates from the original paper, scaled proportionally
        # from their reference 65x65 resolution to the actual input size.
        x65_rate = [6, 12, 18]
        rate = [round(x * input_resolution / 65) for x in x65_rate]

        he_w = HeNormal()
        with self.init_scope():
            self.x1 = define_conv(opt)(input_ch, nf, ksize=1, initialW=he_w)
            self.x1_bn = L.BatchNormalization(nf)

            self.x3_small = define_atrous_conv(opt)(input_ch,
                                                    nf,
                                                    ksize=3,
                                                    rate=rate[0],
                                                    initialW=he_w)
            self.x3_small_bn = L.BatchNormalization(nf)

            self.x3_middle = define_atrous_conv(opt)(input_ch,
                                                     nf,
                                                     ksize=3,
                                                     rate=rate[1],
                                                     initialW=he_w)
            self.x3_middle_bn = L.BatchNormalization(nf)

            self.x3_large = define_atrous_conv(opt)(input_ch,
                                                    nf,
                                                    ksize=3,
                                                    rate=rate[2],
                                                    initialW=he_w)
            self.x3_large_bn = L.BatchNormalization(nf)

            # Fuses the four concatenated branches (4 * nf channels) back
            # down to the input channel count.
            self.sum_func = define_conv(opt)(nf * 4,
                                             input_ch,
                                             ksize=3,
                                             pad=1,
                                             initialW=he_w)

        self.activation = F.leaky_relu
Example #5
0
def define_upsampling(opt, input_ch, output_ch=None):
    """Build a 2x upsampling module according to ``opt.upsampling_mode``.

    Modes:
        'bilinear' / 'nearest': resize by a factor of 2, optionally
            followed by a 3x3 conv when ``output_ch`` is given.
        'deconv': a transposed convolution mapping input_ch to output_ch
            (or input_ch when output_ch is None).
        'subpx_conv': sub-pixel convolution (PixelShuffler).

    Args:
        opt: option object carrying ``upsampling_mode``.
        input_ch (int): channels of the incoming feature map.
        output_ch (int or None): channels of the result; None keeps the
            input channel count.

    Returns:
        A link/Sequential performing the upsampling.

    Raises:
        ValueError: if ``opt.upsampling_mode`` is not one of the modes
            above (previously this silently returned None, deferring the
            failure to a confusing crash at the call site).
    """
    mode = opt.upsampling_mode

    if mode in ('bilinear', 'nearest'):
        # Interpolating resize; the two modes share one implementation and
        # differ only in the interpolation passed to resize_images.
        seq = Sequential(lambda x: F.resize_images(
            x, (x.shape[2] * 2, x.shape[3] * 2), mode=mode))

        if output_ch is not None:
            seq.append(
                define_conv(opt)(input_ch,
                                 output_ch,
                                 ksize=3,
                                 stride=1,
                                 pad=1,
                                 initialW=HeNormal()))

        return seq

    if mode == 'deconv':
        # NOTE(review): stride=1 with ksize=3/pad=1 normally preserves the
        # spatial size — presumably define_deconv supplies the 2x scaling;
        # confirm against its implementation.
        return define_deconv(opt)(input_ch,
                                  input_ch if output_ch is None else output_ch,
                                  ksize=3,
                                  stride=1,
                                  pad=1,
                                  initialW=HeNormal())

    if mode == 'subpx_conv':
        return PixelShuffler(opt, input_ch,
                             input_ch if output_ch is None else output_ch)

    raise ValueError('unknown upsampling_mode: %r' % (mode,))
Example #6
0
    def __init__(self, opt, in_ch, out_ch, out_conv_initW=None):
        """SPADE residual block: (noise -> SPADE norm -> conv) twice, with a
        projecting shortcut when the channel counts differ.

        Args:
            opt: option object forwarded to ``define_conv`` and ``SPADE``.
            in_ch (int): number of input channels.
            out_ch (int): number of output channels.
            out_conv_initW: initializer for the output-side convolutions.
                Defaults to a fresh ``HeNormal()`` per instance; a
                ``HeNormal()`` default in the signature would be evaluated
                only once and shared by every instance of this class.
        """
        super().__init__()

        # Create the initializer per instance instead of sharing a single
        # def-time default object across all instances.
        if out_conv_initW is None:
            out_conv_initW = HeNormal()

        he_w = HeNormal()
        with self.init_scope():
            self.noise1 = NoiseAdder(in_ch)
            self.norm1 = SPADE(opt, in_ch)
            self.conv1 = define_conv(opt)(in_ch,
                                          out_ch,
                                          ksize=3,
                                          pad=1,
                                          initialW=he_w)

            self.noise2 = NoiseAdder(out_ch)
            self.norm2 = SPADE(opt, out_ch)
            self.conv2 = define_conv(opt)(out_ch,
                                          out_ch,
                                          ksize=3,
                                          pad=1,
                                          initialW=out_conv_initW)

            self.activation = F.leaky_relu

            # If the input channel count is not equal to the output channel
            # count, the shortcut path converts the input to the output shape.
            if in_ch != out_ch:
                self.reshape_noise = NoiseAdder(in_ch)
                self.reshape_norm = SPADE(opt, in_ch)
                self.reshape_act = self.activation
                self.reshape_conv = define_conv(opt)(in_ch,
                                                     out_ch,
                                                     ksize=1,
                                                     initialW=out_conv_initW,
                                                     nobias=True)

            else:
                # Identity shortcut; the lambdas mirror the call signatures
                # of the real modules so the forward pass stays uniform.
                self.reshape_noise = lambda x, mean=None, ln_var=None: x
                self.reshape_norm = lambda x, y: x
                self.reshape_act = lambda x: x
                self.reshape_conv = lambda x: x
Example #7
0
    def __init__(self, opt, input_ch, output_ch=None, rate=2):
        """Sub-pixel upsampler: a 3x3 conv producing ``rate**2`` times the
        target channel count, followed by a depth-to-space rearrangement
        that trades those channel groups for a (rate x rate) spatial
        upscale."""
        super().__init__()

        target_ch = input_ch if output_ch is None else output_ch
        # depth2space consumes rate**2 channel groups per output channel.
        conv_out_ch = target_ch * rate ** 2

        with self.init_scope():
            self.c = define_conv(opt)(input_ch,
                                      conv_out_ch,
                                      ksize=3,
                                      stride=1,
                                      pad=1,
                                      initialW=HeNormal())

        self.ps_func = lambda x: F.depth2space(x, rate)
Example #8
0
    def __init__(self, opt):
        """Patch discriminator: three strided 4x4 convolutions with a
        self-attention stage, two stride-1 4x4 convolutions, three
        channel-reducing residual blocks, and a final conv emitting a
        1-channel 30x30 patch map."""
        super().__init__()

        conv_w = HeNormal()
        patch_w = Normal()
        ndf = opt.ndf

        def down_conv(n_in, n_out, stride):
            # 4x4 convolution shared by all encoder stages.
            return define_conv(opt)(n_in,
                                    n_out,
                                    ksize=4,
                                    stride=stride,
                                    pad=1,
                                    initialW=conv_w)

        with self.init_scope():
            # Total input channels, combined from the three opt shapes
            # (presumably image + class maps + condition maps — confirm
            # against the caller's concatenation).
            in_ch = opt.img_shape[0] + opt.class_num + opt.c_shape[0]

            # in_ch x 256 x 256 -> ndf x 128 x 128
            self.c1 = down_conv(in_ch, ndf, stride=2)

            # -> (ndf*2) x 64 x 64
            self.c2 = down_conv(ndf, ndf * 2, stride=2)
            self.n2 = L.BatchNormalization(size=ndf * 2)

            # -> (ndf*4) x 32 x 32, with self-attention
            self.c3 = down_conv(ndf * 2, ndf * 4, stride=2)
            self.att = SelfAttention(opt, ndf * 4)
            self.n3 = L.BatchNormalization(size=(ndf * 4))

            # stride-1 4x4 conv with pad 1 shrinks 32 -> 31:
            # (ndf*8) x 31 x 31
            self.c4 = down_conv(ndf * 4, ndf * 8, stride=1)
            self.n4 = L.BatchNormalization(size=ndf * 8)

            # (ndf*8) x 31 x 31 -> (ndf*8) x 30 x 30
            self.head = down_conv(ndf * 8, ndf * 8, stride=1)

            # Residual blocks reduce the channel count at 30x30.
            self.r1 = ResBlock(opt, ndf * 8, ndf * 4)
            self.r2 = ResBlock(opt, ndf * 4, ndf * 2)
            self.r3 = ResBlock(opt, ndf * 2, ndf)

            # ndf x 30 x 30 -> 1 x 30 x 30 patch scores
            self.to_patch = define_conv(opt)(ndf,
                                             1,
                                             ksize=3,
                                             stride=1,
                                             pad=1,
                                             initialW=patch_w)

            self.activation = F.leaky_relu