Example #1
def Generator(inputs, reuse, is_training):
    def residual_blocks(inputs, output_channel, stride):
        net = Conv2D(inputs, 3, output_channel, stride)
        net = BatchNormal(net)
        net = PReLU(net)
        net = Conv2D(net, 3, output_channel, stride)
        return net + inputs

    def B_residual_block(net, output_channel, stride, is_training):
        for i in range(config.resblock_num):
            net = residual_blocks(net, output_channel, stride)

        net = Conv2D(net, 3, 64, 1)
        net = BatchNormal(net, is_training)
        return net

    with tf.variable_scope('generator', reuse=reuse):

        net = Conv2D(inputs, 9, 64, 1)
        net = PReLU(net)

        net = net + B_residual_block(net, 64, 1, is_training)

        for i in range(config.subpixel_num):
            net = SubPixelConv2d(net, 3, 256, 1)

        net = Conv2D(net, 9, 3, 1)

        return net
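A minimal usage sketch for the generator above, assuming the project's TF 1.x helpers (Conv2D, PReLU, SubPixelConv2d, BatchNormal) and config are already in scope; the placeholder shape and variable names are illustrative assumptions, not part of the original:

import tensorflow as tf

# Hypothetical low-resolution input batch (NHWC); the spatial size is illustrative only.
lr_images = tf.placeholder(tf.float32, [None, 24, 24, 3], name='lr_images')

sr_train = Generator(lr_images, reuse=False, is_training=True)   # first call creates the 'generator' variables
sr_test  = Generator(lr_images, reuse=True,  is_training=False)  # second call reuses the same variables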
Example #2
    def __init__(self, inchannels=21, channels=21, kernel_size=(3, 3)):
        super(BoundaryRefinement, self).__init__()

        self.conv_1 = Conv2D(in_channels=inchannels,
                             out_channels=channels,
                             kernel_size=kernel_size,
                             padding='same')
        self.conv_2 = Conv2D(in_channels=inchannels,
                             out_channels=channels,
                             kernel_size=kernel_size,
                             padding='same')
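The constructor above only declares the two convolutions. Below is a hedged sketch of the residual forward pass a boundary-refinement block of this kind typically uses, assuming a torch.nn-style Conv2D wrapper; the ReLU and the skip connection are assumptions based on the standard design, and the residual add presumes inchannels == channels, as in the defaults:

import torch.nn.functional as F

def forward(self, x):
    residual = self.conv_1(x)        # first 3x3 convolution
    residual = F.relu(residual)      # assumed non-linearity between the two convolutions
    residual = self.conv_2(residual)
    return x + residual              # add the learned boundary correction back onto the input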
Example #3
    def B_residual_block(net, output_channel, stride, is_training):
        for i in range(config.resblock_num):
            net = residual_blocks(net, output_channel, stride)

        net = Conv2D(net, 3, 64, 1)
        net = BatchNormal(net, is_training)
        return net
Example #4
def Discriminator(inputs, reuse, is_training):
    def discriminator_block(inputs, output_channel, kernel_size, stride,
                            is_training):
        net = Conv2D(inputs, kernel_size, output_channel, stride)
        net = BatchNormal(net, is_training)
        net = LeakyReLU(net, 0.2)
        return net

    with tf.variable_scope('discriminator', reuse=reuse):
        net = Conv2D(inputs, 3, 64, 1)
        net = LeakyReLU(net)

        net = discriminator_block(net, 64, 3, 2, is_training)
        net = discriminator_block(net, 128, 3, 1, is_training)
        net = discriminator_block(net, 128, 3, 2, is_training)
        net = discriminator_block(net, 256, 3, 1, is_training)
        net = discriminator_block(net, 256, 3, 2, is_training)
        net = discriminator_block(net, 512, 3, 1, is_training)
        net = discriminator_block(net, 512, 3, 2, is_training)

        net = Dense(Flatten(net), 1024)
        net = LeakyReLU(net, 0.2)

        net = Dense(net, 1)

        return net
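A minimal sketch of how this discriminator is commonly paired with a generator output for an SRGAN-style adversarial loss; hr_images, sr_train, and the sigmoid cross-entropy formulation are illustrative assumptions, not taken from the snippet:

import tensorflow as tf

real_logits = Discriminator(hr_images, reuse=False, is_training=True)  # creates the 'discriminator' variables
fake_logits = Discriminator(sr_train, reuse=True, is_training=True)    # shares them for generated images

# Standard GAN losses on the raw logits returned by the final Dense layer.
d_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(logits=real_logits, labels=tf.ones_like(real_logits)) +
    tf.nn.sigmoid_cross_entropy_with_logits(logits=fake_logits, labels=tf.zeros_like(fake_logits)))
g_adv_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(logits=fake_logits, labels=tf.ones_like(fake_logits)))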
Example #5
    def __init__(self, inchannels, channels=21, k=3):
        super(GCN, self).__init__()

        self.conv_l1 = Conv2D(in_channels=inchannels,
                              out_channels=channels,
                              kernel_size=(k, 1),
                              padding='same')
        self.conv_l2 = Conv2D(in_channels=channels,
                              out_channels=channels,
                              kernel_size=(1, k),
                              padding='same')

        self.conv_r1 = Conv2D(in_channels=inchannels,
                              out_channels=channels,
                              kernel_size=(1, k),
                              padding='same')
        self.conv_r2 = Conv2D(in_channels=channels,
                              out_channels=channels,
                              kernel_size=(k, 1),
                              padding='same')
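Only the constructor is shown above; the following is a hedged sketch of the forward pass usually paired with a Global Convolutional Network block of this shape (two separable branches summed, as in "Large Kernel Matters"). The method is an assumption based on that design, not part of the snippet:

def forward(self, x):
    # Left branch: (k, 1) followed by (1, k) convolution.
    left = self.conv_l2(self.conv_l1(x))
    # Right branch: (1, k) followed by (k, 1) convolution.
    right = self.conv_r2(self.conv_r1(x))
    # Summing the two separable branches approximates a dense k x k kernel at much lower cost.
    return left + right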
Example #6
def derain_net(is_train, input_x, num_frame, reuse=False, scope='DerainNet'):
    growth_k = 12
    stride_hw = 1
    padding = 'SAME'
    nb_layers = 6
    with tf.variable_scope(scope, reuse=reuse):
        # feature extraction
        c1 = Conv2D(input_x, [7, 7, num_frame, 2*growth_k], stride_hw, padding, name=scope+'_conv1')
        
        # complex feature extraction
        c2 = DenseBlock(c1, is_train, nb_layers, 2*growth_k, growth_k, stride_hw, padding, block_name=scope+'_DenseBlock')
        
        # non-linear mapping
        c2 = BatchNorm(c2, is_train, name=scope+'_BN1')
        c2 = tf.nn.relu(c2)
        c3 = Conv2D(c2, [1, 1, growth_k, growth_k // 2], stride_hw, padding, name=scope+'_conv2')
        
        
        # residual reconstruction
        c3 = BatchNorm(c3, is_train, name=scope+'_BN2')
        c3 = tf.nn.relu(c3)
        res = Conv2D(c3, [5, 5, growth_k // 2, 1], stride_hw, padding, name=scope+'_conv3')
    
    return res
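A minimal sketch of building the network above on a stack of input frames; the placeholder shape, frame count, and the subtraction step are illustrative assumptions based on the "residual reconstruction" comment:

import tensorflow as tf

num_frame = 5  # hypothetical number of stacked input frames (channel axis)
rainy_stack = tf.placeholder(tf.float32, [None, 128, 128, num_frame], name='rainy_stack')

rain_residual = derain_net(is_train=True, input_x=rainy_stack, num_frame=num_frame)
# If the single-channel output is the estimated rain-streak residual, the derained frame
# would be obtained by subtracting it from the corresponding reference frame.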
Example #7
 def residual_blocks(inputs, output_channel, stride):
     net = Conv2D(inputs, 3, output_channel, stride)
     net = BatchNormal(net)
     net = PReLU(net)
     net = Conv2D(net, 3, output_channel, stride)
     return net + inputs
Example #8
 def discriminator_block(inputs, output_channel, kernel_size, stride,
                         is_training):
     net = Conv2D(inputs, kernel_size, output_channel, stride)
     net = BatchNormal(net, is_training)
     net = LeakyReLU(net, 0.2)
     return net
Example #9
 def inference(self, h, scope_name):
     
     def dynamic_shift(inp, pad_size):
         x1 = tf.pad(inp, [[0,0], [pad_size,0], [0,0], [0,0]], mode='CONSTANT')
         x1 = x1[:,:-pad_size,:]
         return x1
         
     with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE) as scope:
         
         F = 64

         intermediates = []

         for j in range(4):
             x1 = tf.image.rot90(h, k=j, name=None)
             
         
             if j in [0,2]:
                 with tf.variable_scope('nety', reuse=tf.AUTO_REUSE) as scope:
                     sp = [[0,0], [1,0], [0,0], [0,0]]
                     x1 = Conv2D(tf.pad(x1, sp, mode='CONSTANT'), [3,3,self.channels,F], [1,1,1,1], 'SAME', scope_name='conv_0')
                     x1 = tf.nn.leaky_relu(x1)
                     #Remove last row
                     x1 = x1[:,:-1,:]
                     
                     # 15 layers,Conv+BN+relu
                     for i in range(15):
                         x1 = Conv2D(tf.pad(x1, sp, mode='CONSTANT'), [3,3,F,F], [1,1,1,1], 'SAME', scope_name='conv_{0}'.format(i+1))
                         x1 = tf.layers.batch_normalization(x1, axis=-1,training=self.is_train,name='bn_{0}'.format(i+1))
                         x1 = tf.nn.leaky_relu(x1)
                         x1 = x1[:,:-1,:]
                      
                     # last layer, Conv
                     x1 = Conv2D(tf.pad(x1, sp, mode='CONSTANT'), [3,3,F,F], [1,1,1,1], 'SAME', scope_name='conv_last')
                     x1 = x1[:,:-1,:] 
                     
                     #Computing the shift to apply to the receptive fields
                     shift = tf.cond(tf.equal(self.shift, 1), 
                                lambda: dynamic_shift(x1,1), 
                                lambda: dynamic_shift(x1,2))
                     #Applying the computed shift only during training otherwise the canonical shift by 1 is applied
                     x1 = tf.cond(tf.equal(self.is_train, True), 
                                lambda: shift, 
                                lambda: dynamic_shift(x1,1))
                     
                     
                     #Rotating back
                     x1 = tf.image.rot90(x1,k=4-j,name=None)
                     intermediates.append(x1)
             else:
                 with tf.variable_scope('netx', reuse=tf.AUTO_REUSE) as scope:
                     sp = [[0,0], [1,0], [0,0], [0,0]]
                     x1 = Conv2D(tf.pad(x1, sp, mode='CONSTANT'), [3,3,self.channels,F], [1,1,1,1], 'SAME', scope_name='conv_0')
                     x1 = tf.nn.leaky_relu(x1)
                     #Remove last row
                     x1 = x1[:,:-1,:]
                     
                     # 15 layers, Conv+BN+relu
                     for i in range(15):
                         x1 = Conv2D(tf.pad(x1, sp, mode='CONSTANT'), [3,3,F,F], [1,1,1,1], 'SAME', scope_name='conv_{0}'.format(i+1))
                         x1 = tf.layers.batch_normalization(x1, axis=-1,training=self.is_train,name='bn_{0}'.format(i+1))
                         x1 = tf.nn.leaky_relu(x1)
                         x1 = x1[:,:-1,:]
                      
                     # last layer, Conv
                     x1 = Conv2D(tf.pad(x1, sp, mode='CONSTANT'), [3,3,F,F], [1,1,1,1], 'SAME', scope_name='conv_last')
                     x1 = x1[:,:-1,:] 
                     
                     #Applying the canonical shift for the horizontally extending receptive fields
                     x1 = dynamic_shift(x1,1)
                     
                     #Rotating back
                     x1 = tf.image.rot90(x1,k=4-j,name=None)
                     intermediates.append(x1)
                 
             
         images_to_combine = tf.stack(intermediates, axis=1)
         
         x1 = Conv3D(images_to_combine, [4,1,1,F,F], [1,1,1,1,1], 'VALID', scope_name='conv_comb_0')
         x1 = tf.nn.leaky_relu(x1)
         x1 = tf.squeeze(x1, axis=1)
         x1 = Conv2D(x1, [1,1,F,F], [1,1,1,1], 'SAME', scope_name='conv_comb_1')
         x1 = tf.nn.leaky_relu(x1)
         x1 = Conv2D(x1, [1,1,F,2], [1,1,1,1], 'SAME', scope_name='conv_comb_2')
         x1 = tf.nn.relu(x1)
     
 
     return x1
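A hedged usage sketch for the method above; the class instance, attribute values, and input shape are assumptions for illustration only:

import tensorflow as tf

# Assume `model` is an instance of the surrounding class, with model.channels == 1 and
# model.is_train / model.shift defined as tensors fed at run time.
noisy = tf.placeholder(tf.float32, [None, 64, 64, 1], name='noisy')
output = model.inference(noisy, scope_name='blindspot_net')  # two-channel map after the final ReLU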