def __init__(self):
    super(ShallowConv, self).__init__()
    with self.init_scope():
        self.c_1 = L.Convolution2D(1, 3, 7, 2, 3)
        self.i_1 = InstanceNormalization(3)
        self.c_2 = L.Convolution2D(3, 6, 7, 4, 4)
        self.i_2 = InstanceNormalization(6)
        self.l_1 = L.Linear(None, 10)
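
Only the constructor is shown. A minimal forward pass consistent with these layers might look like the sketch below; this is an assumption, not part of the original listing, and it presumes `chainer.functions` is imported as `F`:

def __call__(self, x):
    # conv -> instance norm -> ReLU, twice, then the linear classifier
    h = F.relu(self.i_1(self.c_1(x)))
    h = F.relu(self.i_2(self.c_2(h)))
    return self.l_1(h)  # L.Linear(None, 10) flattens its input automatically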
Example #2
    def __init__(self, in_ch, out_ch):
        super(Resblock, self).__init__()
        w = initializers.Normal(0.02)
        with self.init_scope():
            self.c0 = L.Convolution2D(in_ch, out_ch, 3, 1, 1, initialW=w)
            self.c1 = L.Convolution2D(out_ch, out_ch, 3, 1, 1, initialW=w)

            self.bn0 = InstanceNormalization(out_ch)
            self.bn1 = InstanceNormalization(out_ch)
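
The residual connection itself is not shown in this listing. A plausible `__call__`, assuming `chainer.functions` as `F` and `in_ch == out_ch` so the skip connection adds cleanly, is:

    def __call__(self, x):
        # two 3x3 convs with instance norm; skip connection at the end
        h = F.relu(self.bn0(self.c0(x)))
        h = self.bn1(self.c1(h))
        return h + x  # requires in_ch == out_ch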
Example #3
def SepConv_BN(x,
               filters,
               prefix,
               stride=1,
               kernel_size=3,
               rate=1,
               depth_activation=False,
               epsilon=1e-3):
    """ SepConv with BN between depthwise & pointwise. Optionally add activation after BN
        Implements right "same" padding for even kernel sizes
        Args:
            x: input tensor
            filters: num of filters in pointwise convolution
            prefix: prefix before name
            stride: stride at depthwise conv
            kernel_size: kernel size for depthwise convolution
            rate: atrous rate for depthwise convolution
            depth_activation: flag to use activation between depthwise & pointwise convs
            epsilon: epsilon to use in BN layer
    """

    if stride == 1:
        depth_padding = 'same'
    else:
        kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
        pad_total = kernel_size_effective - 1
        pad_beg = pad_total // 2
        pad_end = pad_total - pad_beg
        x = ZeroPadding2D((pad_beg, pad_end))(x)
        depth_padding = 'valid'

    if not depth_activation:
        x = Activation('relu')(x)
    x = DepthwiseConv2D((kernel_size, kernel_size),
                        strides=(stride, stride),
                        dilation_rate=(rate, rate),
                        padding=depth_padding,
                        use_bias=False,
                        name=prefix + '_depthwise')(x)
    x = InstanceNormalization(name=prefix + '_depthwise_BN',
                              epsilon=epsilon)(x)
    if depth_activation:
        x = Activation('relu')(x)
    x = Conv2D(filters, (1, 1),
               padding='same',
               use_bias=False,
               name=prefix + '_pointwise')(x)
    x = InstanceNormalization(name=prefix + '_pointwise_BN',
                              epsilon=epsilon)(x)
    if depth_activation:
        x = Activation('relu')(x)

    return x
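
A hypothetical call site, assuming the usual Keras imports (`Input`, `Model`) and an arbitrary feature-map shape:

inp = Input(shape=(65, 65, 32))  # hypothetical input tensor
out = SepConv_BN(inp, filters=64, prefix='block1',
                 stride=2, rate=2, depth_activation=True)
model = Model(inp, out)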
Example #4
    def __init__(self, ch, norm='instance', activation=F.relu):
        super(ResBlock, self).__init__()
        self.activation = activation
        w = chainer.initializers.Normal(0.02)
        with self.init_scope():
            self.c0 = L.Convolution2D(ch, ch, 3, 1, 1, initialW=w)
            self.c1 = L.Convolution2D(ch, ch, 3, 1, 1, initialW=w)
            if norm == 'batch':
                self.norm0 = L.BatchNormalization(ch)
                self.norm1 = L.BatchNormalization(ch)
            elif norm == 'instance':
                self.norm0 = InstanceNormalization(ch)
                self.norm1 = InstanceNormalization(ch)
Example #5
def res_block_content(input_tensor, f):
    x = input_tensor
    x = ReflectPadding2D(x)
    x = Conv2D(f, kernel_size=3, kernel_initializer=conv_init,
               kernel_regularizer=regularizers.l2(w_l2),
               use_bias=False, padding="valid")(x)
    x = InstanceNormalization(epsilon=1e-5)(x)
    x = Activation('relu')(x)
    x = ReflectPadding2D(x)
    x = Conv2D(f, kernel_size=3, kernel_initializer=conv_init,
               kernel_regularizer=regularizers.l2(w_l2),
               use_bias=False, padding="valid")(x)
    x = InstanceNormalization(epsilon=1e-5)(x)
    x = add([x, input_tensor])
    return x
Example #6
    def __init__(self, base=32):
        super(Discriminator, self).__init__()
        w = initializers.Normal(0.02)
        with self.init_scope():
            self.c0 = L.Convolution2D(3, base, 3, 1, 1, initialW=w)
            self.c1 = L.Convolution2D(base, base * 2, 4, 2, 1, initialW=w)
            self.c2 = L.Convolution2D(base * 2, base * 4, 3, 1, 1, initialW=w)
            self.c3 = L.Convolution2D(base * 4, base * 4, 4, 2, 1, initialW=w)
            self.c4 = L.Convolution2D(base * 4, base * 8, 3, 1, 1, initialW=w)
            self.c5 = L.Convolution2D(base * 8, base * 8, 3, 1, 1, initialW=w)
            self.c6 = L.Convolution2D(base * 8, 1, 3, 1, 1, initialW=w)

            self.bn0 = InstanceNormalization(base * 4)
            self.bn1 = InstanceNormalization(base * 8)
            self.bn2 = InstanceNormalization(base * 8)
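
The forward pass is not included here. One plausible wiring, constrained only by the channel counts of the three norm layers (base * 4, base * 8, base * 8) and assuming the LeakyReLU activations typical of GAN discriminators, is the sketch below; the actual placement of the norm layers may differ in the original model:

    def __call__(self, x):
        h = F.leaky_relu(self.c0(x))
        h = F.leaky_relu(self.c1(h))
        h = F.leaky_relu(self.bn0(self.c2(h)))  # base * 4 channels
        h = F.leaky_relu(self.c3(h))
        h = F.leaky_relu(self.bn1(self.c4(h)))  # base * 8 channels
        h = F.leaky_relu(self.bn2(self.c5(h)))  # base * 8 channels
        return self.c6(h)                       # 1-channel patch output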
Example #7
    def __init__(self, base=64):
        w = initializers.Normal(0.02)
        super(Discriminator, self).__init__()
        with self.init_scope():
            self.c0 = L.Convolution2D(6, base, 4, 2, 1, initialW=w)
            self.c1 = L.Convolution2D(base, base * 2, 4, 2, 1, initialW=w)
            self.c2 = L.Convolution2D(base * 2, base * 4, 4, 2, 1, initialW=w)
            self.c3 = L.Convolution2D(base * 4, base * 8, 4, 2, 1, initialW=w)
            self.c4 = L.Linear(None, 1, initialW=w)

            self.in1 = InstanceNormalization(base * 2)
            # self.in1 = L.BatchNormalization(base * 2)
            self.in2 = InstanceNormalization(base * 4)
            # self.in2 = L.BatchNormalization(base * 4)
            self.in3 = InstanceNormalization(base * 8)
Example #8
def Encoder_content_MUNIT(nc_in=3, input_size=IMAGE_SHAPE[0], n_downscale_content=n_downscale_content, nc_base=nc_base):
    # Content encoder architecture 
    def res_block_content(input_tensor, f):
        x = input_tensor
        x = ReflectPadding2D(x)
        x = Conv2D(f, kernel_size=3, kernel_initializer=conv_init, 
                   kernel_regularizer=regularizers.l2(w_l2),
                   use_bias=False, padding="valid")(x)
        x = InstanceNormalization(epsilon=1e-5)(x)
        x = Activation('relu')(x)
        x = ReflectPadding2D(x)
        x = Conv2D(f, kernel_size=3, kernel_initializer=conv_init, 
                   kernel_regularizer=regularizers.l2(w_l2),
                   use_bias=False, padding="valid")(x)
        x = InstanceNormalization(epsilon=1e-5)(x)
        x = add([x, input_tensor])
        return x      
    
    inp = Input(shape=(input_size, input_size, nc_in))
    x = ReflectPadding2D(inp, 3)
    x = Conv2D(64, kernel_size=7, kernel_initializer=conv_init, 
               kernel_regularizer=regularizers.l2(w_l2),
               use_bias=False, padding="valid")(x)
    x = InstanceNormalization()(x)
    x = Activation('relu')(x)
    
    dim = 1
    ds = 2 ** n_downscale_content  # note: computed but unused below
    for i in range(n_downscale_content):
        dim = 4 if dim >= 4 else dim * 2
        x = conv_block(x, dim * nc_base, use_norm=True)
    for i in range(n_resblocks):
        x = res_block_content(x, dim * nc_base)
    content_code = x  # content code
    return Model(inp, content_code)
Example #9
def conv_block(input_tensor, f, k=3, strides=2, use_norm=False):
    x = input_tensor
    x = ReflectPadding2D(x)
    x = Conv2D(f, kernel_size=k, strides=strides, kernel_initializer=conv_init,
               kernel_regularizer=regularizers.l2(w_l2),
               use_bias=(not use_norm), padding="valid")(x)
    if use_norm:
        x = InstanceNormalization(epsilon=1e-5)(x)
    x = Activation("relu")(x)
    return x
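
`ReflectPadding2D`, used throughout these Keras snippets, is not a stock Keras layer and is not defined in the listings. A minimal sketch of such a helper, assuming a TensorFlow backend, could be:

import tensorflow as tf
from keras.layers import Lambda

def ReflectPadding2D(x, pad=1):
    # reflection-pad height and width by `pad` pixels on each side (NHWC)
    return Lambda(lambda t: tf.pad(
        t, [[0, 0], [pad, pad], [pad, pad], [0, 0]], mode='REFLECT'))(x)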
Example #10
    def __init__(self, in_ch, out_ch, up=False, down=False):
        w = initializers.Normal(0.02)
        self.up = up
        self.down = down
        super(CBR, self).__init__()
        with self.init_scope():
            self.cpara = L.Convolution2D(in_ch, out_ch, 3, 1, 1, initialW=w)
            self.cdown = L.Convolution2D(in_ch, out_ch, 4, 2, 1, initialW=w)

            self.bn0 = InstanceNormalization(out_ch)
Example #11
def conv_block_d(input_tensor, f, use_norm=False):
    x = input_tensor
    x = ReflectPadding2D(x, 2)
    x = Conv2D(f, kernel_size=4, strides=2, kernel_initializer=conv_init_dis,
               kernel_regularizer=regularizers.l2(w_l2),
               use_bias=(not use_norm), padding="valid")(x)
    if use_norm:
        x = InstanceNormalization(epsilon=1e-5)(x)
    x = LeakyReLU(alpha=0.2)(x)
    return x
Example #12
    def __init__(self, base=32):
        super(Generator, self).__init__()
        w = initializers.Normal(0.02)
        with self.init_scope():
            self.c0_img = L.Convolution2D(3, base, 7, 1, 3, initialW=w)
            self.cbr0_img = CBR(base, base * 2, down=True)
            self.cbr1_img = CBR(base * 2, base * 4, down=True)
            self.res0_img = ResBlock(base * 4, base * 4)
            self.res1_img = ResBlock(base * 4, base * 4)
            self.res2_img = ResBlock(base * 4, base * 4)
            self.res3_img = ResBlock(base * 4, base * 4)
            self.res4_img = ResBlock(base * 4, base * 4)
            self.res5_img = ResBlock(base * 4, base * 4)
            self.res6_img = ResBlock(base * 4, base * 4)
            self.res7_img = ResBlock(base * 4, base * 4)
            self.res8_img = ResBlock(base * 4, base * 4)
            self.cbr2_img = CBR(base * 8, base * 2, up=True)
            self.cbr3_img = CBR(base * 2, base, up=True)
            self.c1_img = L.Convolution2D(base, 3, 7, 1, 3, initialW=w)

            self.bn0_img = L.BatchNormalization(base)
            self.in0_img = InstanceNormalization(base)

            self.c0_mask = L.Convolution2D(1, base, 7, 1, 3, initialW=w)
            self.cbr0_mask = CBR(base, base * 2, down=True)
            self.cbr1_mask = CBR(base * 2, base * 4, down=True)
            self.res0_mask = ResBlock(base * 4, base * 4)
            self.res1_mask = ResBlock(base * 4, base * 4)
            self.res2_mask = ResBlock(base * 4, base * 4)
            self.res3_mask = ResBlock(base * 4, base * 4)
            self.res4_mask = ResBlock(base * 4, base * 4)
            self.res5_mask = ResBlock(base * 4, base * 4)
            self.res6_mask = ResBlock(base * 4, base * 4)
            self.res7_mask = ResBlock(base * 4, base * 4)
            self.res8_mask = ResBlock(base * 4, base * 4)
            self.cbr2_mask = CBR(base * 12, base * 2, up=True)
            self.cbr3_mask = CBR(base * 2, base, up=True)
            self.c1_mask = L.Convolution2D(base, 1, 7, 1, 3, initialW=w)

            self.bn0_mask = L.BatchNormalization(base)
            self.in0_mask = InstanceNormalization(base)
Example #13
    def __init__(self, base=64):
        w = initializers.Normal(0.02)
        super(ContentEncoder, self).__init__()

        with self.init_scope():
            self.c0 = L.Convolution2D(3, base, 7, 1, 3, initialW=w)
            self.bn0 = InstanceNormalization(base)
            self.cbr1 = CBR(base, base*2, down=True)
            self.cbr2 = CBR(base*2, base*4, down=True)
            self.cbr3 = CBR(base*4, base*8, down=True)
            self.res0 = ResBlock(base*8, base*8)
            self.res1 = ResBlock(base*8, base*8)
Example #14
    def __init__(self,
                 ch0,
                 ch1,
                 ksize=3,
                 pad=1,
                 norm='instance',
                 sample='down',
                 activation=F.relu,
                 dropout=False,
                 noise=False):
        super(CBR, self).__init__()
        self.activation = activation
        self.dropout = dropout
        self.sample = sample
        self.noise = noise
        w = chainer.initializers.Normal(0.02)

        with self.init_scope():
            if sample == 'down':
                self.c = L.Convolution2D(ch0, ch1, ksize, 2, pad, initialW=w)
            elif sample == 'none-9':
                self.c = L.Convolution2D(ch0, ch1, 9, 1, 4, initialW=w)
            elif sample == 'none-7':
                self.c = L.Convolution2D(ch0, ch1, 7, 1, 3, initialW=w)
            elif sample == 'none-5':
                self.c = L.Convolution2D(ch0, ch1, 5, 1, 2, initialW=w)
            else:
                self.c = L.Convolution2D(ch0, ch1, ksize, 1, pad, initialW=w)
            if norm == 'batch':
                if self.noise:
                    self.norm = L.BatchNormalization(ch1, use_gamma=False)
                else:
                    self.norm = L.BatchNormalization(ch1)
            elif norm == 'instance':
                if self.noise:
                    self.norm = InstanceNormalization(ch1, use_gamma=False)
                else:
                    self.norm = InstanceNormalization(ch1)
            else:
                raise ValueError('invalid norm parameter for CBR')
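
A matching `__call__` is not shown. The sketch below is one plausible forward pass under the assumption that noise injection is handled elsewhere; only the conv, norm, optional dropout, and activation steps from the constructor are covered:

    def __call__(self, x):
        # conv -> norm -> (dropout) -> activation
        h = self.norm(self.c(x))
        if self.dropout:
            h = F.dropout(h)
        if self.activation is not None:
            h = self.activation(h)
        return h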
Example #15
    def __init__(self, in_ch, out_ch, up=False, down=False, predict=False,
                 activation=F.relu):
        super(CBR, self).__init__()
        w = initializers.Normal(0.02)
        self.up = up
        self.down = down
        self.activation = activation
        self.predict = predict
        with self.init_scope():
            self.cpara = L.Convolution2D(in_ch, out_ch, 3, 1, 1, initialW=w)
            self.cdown = L.Convolution2D(in_ch, out_ch, 4, 2, 1, initialW=w)

            self.bn0 = L.BatchNormalization(out_ch)
            self.in0 = InstanceNormalization(out_ch)
Example #16
    def decoder(self, x, a, b):

        x = UpSampling2D(size=(2, 2))(x)
        x = Conv2D(64, (3, 3), strides=1, padding='same')(x)
        x = InstanceNormalization()(x)
        x = Activation('relu')(x)
        x1 = concatenate([x, b])
        x = concatenate([x, x1])

        x = UpSampling2D(size=(2, 2))(x)
        x = Conv2D(64, (3, 3), strides=1, padding='same')(x)
        x = InstanceNormalization()(x)
        x = Activation('relu')(x)
        x2 = concatenate([x, a])
        x = concatenate([x, x2])

        x = Conv2D(64, (3, 3), strides=1, padding='same')(x)
        x = InstanceNormalization()(x)
        x = Activation('relu')(x)

        x = Conv2D(1, 1, padding='same', activation='sigmoid')(x)
        return x
Example #17
def add_conv(x,
             channel,
             ksize,
             strides=(1, 1),
             padding='valid',
             dilation_rate=(1, 1)):
    x = Conv2D(channel,
               kernel_size=ksize,
               strides=strides,
               padding=padding,
               dilation_rate=dilation_rate)(x)
    x = InstanceNormalization()(x)
    x = Activation('relu')(x)
    return x
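
A hypothetical usage, assuming the standard Keras `Input` layer and arbitrary shapes:

inp = Input(shape=(128, 128, 3))  # hypothetical image input
x = add_conv(inp, channel=32, ksize=3, padding='same')
x = add_conv(x, channel=64, ksize=3, padding='same',
             dilation_rate=(2, 2))  # dilated second block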
Example #18
    def __init__(self, base=32):
        w = initializers.Normal(0.02)
        super(Local_Enhancer, self).__init__()
        with self.init_scope():
            self.c0 = L.Convolution2D(3, base, 3, 1, 1, initialW=w)
            self.down0 = Down(base, base * 2)
            self.res0 = ResBlock(base * 2, base * 2)
            self.res1 = ResBlock(base * 2, base * 2)
            self.res2 = ResBlock(base * 2, base * 2)
            self.res3 = ResBlock(base * 2, base * 2)
            self.up0 = Up(base * 2, base)
            self.c1 = L.Convolution2D(base, 3, 7, 1, 3, initialW=w)

            self.in0 = InstanceNormalization(base)
Example #19
    def __init__(self, ch, norm="instance", activation=F.relu, noise=False):
        self.use_norm = norm is not None
        self.activation = activation
        layers = {}
        w = chainer.initializers.Uniform(scale=math.sqrt(
            1 / ch / 3 / 3))  # same as PyTorch's default conv2d initialization
        layers['c0'] = L.Convolution2D(ch,
                                       ch,
                                       3,
                                       1,
                                       1,
                                       initialW=w,
                                       nobias=True)
        layers['c1'] = L.Convolution2D(ch,
                                       ch,
                                       3,
                                       1,
                                       1,
                                       initialW=w,
                                       nobias=True)
        if norm == "batch":
            layers['norm0'] = L.BatchNormalization(ch,
                                                   use_gamma=noise,
                                                   use_beta=noise)
            layers['norm1'] = L.BatchNormalization(ch,
                                                   use_gamma=noise,
                                                   use_beta=noise)
        elif norm == "instance":
            layers['norm0'] = InstanceNormalization(ch,
                                                    use_gamma=noise,
                                                    use_beta=noise)
            layers['norm1'] = InstanceNormalization(ch,
                                                    use_gamma=noise,
                                                    use_beta=noise)

        super(ResBlock, self).__init__(**layers)
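
Since the layers are registered by name through `Chain.__init__(**layers)`, a forward pass consistent with the constructor's flags might be sketched as follows (an assumption; the original `__call__` is not shown):

    def __call__(self, x):
        # conv -> (norm) -> activation -> conv -> (norm), plus the skip
        h = self.c0(x)
        if self.use_norm:
            h = self.norm0(h)
        h = self.activation(h)
        h = self.c1(h)
        if self.use_norm:
            h = self.norm1(h)
        return h + x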
Example #20
    def __init__(self, base=64):
        super(UNet, self).__init__()
        w = initializers.Normal(0.02)
        with self.init_scope():
            self.c0 = L.Convolution2D(6, base, 3, 1, 1, initialW=w)
            self.cbr0 = CBR(base, base * 2, down=True, predict=True)
            self.cbr1 = CBR(base * 2, base * 4, down=True, predict=True)
            self.cbr2 = CBR(base * 4, base * 8, down=True, predict=True)
            self.cbr3 = CBR(base * 8, base * 8, down=True, predict=True)
            self.cbr4 = CBR(base * 16, base * 8, up=True, predict=True)
            self.cbr5 = CBR(base * 16, base * 4, up=True, predict=True)
            self.cbr6 = CBR(base * 8, base * 2, up=True, predict=True)
            self.cbr7 = CBR(base * 4, base, up=True, predict=True)
            self.c1 = L.Convolution2D(base * 2, 3, 3, 1, 1, initialW=w)

            self.bn0 = InstanceNormalization(base)
Example #21
    def __init__(self, base=32):
        super(Generator, self).__init__()
        w = initializers.Normal(0.02)
        with self.init_scope():
            self.c0 = L.Convolution2D(3, base, 7, 1, 3, initialW=w)
            self.cbr0 = CBR(base, base * 2, down=True)
            self.cbr1 = CBR(base * 2, base * 4, down=True)
            self.res0 = ResBlock(base * 4, base * 4)
            self.res1 = ResBlock(base * 4, base * 4)
            self.res2 = ResBlock(base * 4, base * 4)
            self.res3 = ResBlock(base * 4, base * 4)
            self.res4 = ResBlock(base * 4, base * 4)
            self.res5 = ResBlock(base * 4, base * 4)
            self.cbr2 = CBR(base * 4, base * 2, up=True)
            self.cbr3 = CBR(base * 2, base, up=True)
            self.c1 = L.Convolution2D(base, 3, 7, 1, 3, initialW=w)

            self.bn0 = L.BatchNormalization(base)
            self.in0 = InstanceNormalization(base)
Example #22
    def __init__(self, base=64):
        super(Generator, self).__init__()
        w = initializers.Normal(0.02)
        with self.init_scope():
            self.c0 = L.Convolution2D(3, base, 7, 1, 3, initialW=w)
            self.down0 = CCBR_down(base, base * 2)
            self.down1 = CCBR_down(base * 2, base * 4)
            self.res0 = Resblock(base * 4, base * 4)
            self.res1 = Resblock(base * 4, base * 4)
            self.res2 = Resblock(base * 4, base * 4)
            self.res3 = Resblock(base * 4, base * 4)
            self.res4 = Resblock(base * 4, base * 4)
            self.res5 = Resblock(base * 4, base * 4)
            self.res6 = Resblock(base * 4, base * 4)
            self.res7 = Resblock(base * 4, base * 4)
            self.up0 = CCBR_up(base * 4, base * 2)
            self.up1 = CCBR_up(base * 2, base)
            self.c1 = L.Convolution2D(base, 3, 7, 1, 3, initialW=w)

            self.bn0 = InstanceNormalization(base)
Example #23
    def __init__(self, base=64):
        w = initializers.Normal(0.02)
        super(Global_Generator, self).__init__()
        with self.init_scope():
            self.c0 = L.Convolution2D(3, base, 7, 1, 3, initialW=w)
            self.down0 = Down(base, base * 2)
            self.down1 = Down(base * 2, base * 4)
            self.down2 = Down(base * 4, base * 8)
            self.down3 = Down(base * 8, base * 16)
            self.res0 = ResBlock(base * 16, base * 16)
            self.res1 = ResBlock(base * 16, base * 16)
            self.res2 = ResBlock(base * 16, base * 16)
            self.res3 = ResBlock(base * 16, base * 16)
            self.res4 = ResBlock(base * 16, base * 16)
            self.res5 = ResBlock(base * 16, base * 16)
            self.up0 = Up(base * 16, base * 8)
            self.up1 = Up(base * 8, base * 4)
            self.up2 = Up(base * 4, base * 2)
            self.up3 = Up(base * 2, base)
            self.c1 = L.Convolution2D(base, 3, 7, 1, 3, initialW=w)

            self.in0 = InstanceNormalization(base)