Example #1
0
 def __call__(self, inputs, train_phase):
     """Build the SRGAN-style generator graph for *inputs*.

     Args:
         inputs: low-resolution input image tensor.
         train_phase: boolean flag/tensor forwarded to batch-norm layers.

     Returns:
         Generated image tensor squashed to [-1, 1] by tanh, upscaled
         4x overall via two x2 pixel-shuffle stages.
     """
     with tf.variable_scope(self.name):
         inputs = conv("conv1", inputs, 64, 9)
         inputs = prelu("alpha1", inputs)
         # Long skip connection around the whole residual stack.
         skip_connection = tf.identity(inputs)
         # The SRGAN paper uses 16 residual blocks; self.B parameterizes it.
         for b in range(1, self.B + 1):
             inputs = B_residual_blocks("B" + str(b), inputs, train_phase)
         inputs = conv("conv2", inputs, 64, 3)
         inputs = batchnorm(inputs, train_phase, "BN")
         inputs = inputs + skip_connection
         # Two x2 sub-pixel (pixel-shuffle) upsampling stages -> 4x upscale.
         inputs = conv("conv3", inputs, 256, 3)
         inputs = pixelshuffler(inputs, 2)
         inputs = prelu("alpha2", inputs)
         inputs = conv("conv4", inputs, 256, 3)
         inputs = pixelshuffler(inputs, 2)
         inputs = prelu("alpha3", inputs)
         inputs = conv("conv5", inputs, 3, 9)
     return tf.nn.tanh(inputs)
Example #2
0
File: parts_3.py  Project: HamSade/codes
def encoder(noisy_w, is_ref, scope, z_on=False, do_prelu=False):
    """SEGAN-style strided-convolution encoder.

    Args:
        noisy_w: input waveform tensor — assumed (batch, width, channels);
            TODO confirm against caller (an expand_dims variant is commented
            out upstream in the original).
        is_ref: creation/reference-phase flag (not used in this body).
        scope: string mixed into each layer's variable name.
        z_on: if True, concatenate a random code ``z`` onto the final
            feature map along axis 2.
        do_prelu: use PReLU activations instead of leaky ReLU.

    Returns:
        Tuple ``(h_i, skips)``: the final feature map and the list of
        pre-activation feature maps from every layer except the last,
        to be fused as skip connections by the decoder.
    """
    skips = []
    h_i = noisy_w

    for layer_idx, layer_depth in enumerate(g_enc_depths):
        # NOTE: debug print(layer_idx) removed.
        h_i = downconv(
            h_i, layer_depth, kwidth=31,
            init=tf.truncated_normal_initializer(stddev=0.02),
            bias_init=tf.constant_initializer(0.),
            name='downconv_{}_{}_{}'.format(layer_idx, layer_depth, scope))

        # Keep pre-activation features for the decoder's skip connections
        # (all layers but the last).
        if layer_idx < len(g_enc_depths) - 1:
            skips.append(h_i)

        if do_prelu:
            h_i = prelu(h_i, name='enc_prelu_{}_{}_{}'.format(
                layer_idx, layer_depth, scope))  # default: ref=False
        else:
            h_i = leakyrelu(h_i)

    # Optionally fuse a random code z with the intermediate representation.
    if z_on:
        z = make_z([batch_size, h_i.get_shape().as_list()[1],
                    g_enc_depths[-1]])
        h_i = tf.concat([z, h_i], 2)

    return h_i, skips
Example #3
0
File: parts.py  Project: HamSade/codes
def decoder(h_i, skips, z=None, z_on=False):
    """SEGAN-style transposed-convolution decoder (mirrors ``encoder``).

    Args:
        h_i: encoder output features (z fused with c when z_on was used
             on the encoder side).
        skips: list of encoder skip tensors, consumed in reverse order.
        z: the random code, passed through to the returned features.
        z_on: if True, append ``z`` to the returned feature list.

    Returns:
        ``ret_feats``: ``[wave]`` (the tanh-bounded output waveform),
        plus ``z`` when ``z_on`` is True.

    Raises:
        ValueError: if the module-level ``deconv_type`` is not one of
        'deconv' / 'nn_deconv' (previously this fell through and crashed
        later with an UnboundLocalError).
    """
    # Mirror the encoder depths and finish with a single output channel.
    g_dec_depths = g_enc_depths[:-1][::-1] + [1]

    for layer_idx, layer_depth in enumerate(g_dec_depths):
        h_i_dim = h_i.get_shape().as_list()
        # Width doubles each layer, undoing the encoder's downsampling.
        out_shape = [h_i_dim[0], h_i_dim[1] * 2, layer_depth]

        if deconv_type == 'deconv':
            h_i = deconv(h_i,
                         out_shape,
                         kwidth=kwidth,
                         dilation=2,
                         init=tf.truncated_normal_initializer(stddev=0.02),
                         bias_init=tf.constant_initializer(0.))
        elif deconv_type == 'nn_deconv':
            h_i = nn_deconv(h_i,
                            kwidth=kwidth,
                            dilation=2,
                            init=tf.truncated_normal_initializer(stddev=0.02),
                            bias_init=0.0)
        else:
            raise ValueError('Unknown deconv_type: {}'.format(deconv_type))

        if layer_idx < len(g_dec_depths) - 1:
            if do_prelu:
                h_i = prelu(h_i)
            else:
                h_i = leakyrelu(h_i)
            # BUG FIX: fuse the skip connection on BOTH activation paths.
            # Previously this block was indented under the leakyrelu else:,
            # so with do_prelu=True the encoder skips were silently dropped.
            skip_ = skips[-(layer_idx + 1)]
            h_i = tf.concat([h_i, skip_], 2)
        else:
            # Final layer: bound the output waveform to [-1, 1].
            h_i = tf.tanh(h_i)

    wave = h_i

    ret_feats = [wave]
    if z_on:
        ret_feats.append(z)

    return ret_feats