Code Example #1
File: conv_GAN.py Project: HamSade/codes
def encoder(x, comp_ratio, noise_std, activation='prelu'):
    # Relies on module-level globals: filter_width, apply_BN, mf_downconv,
    # segan, leakyrelu, prelu, make_z.
#    input_dim = x.shape[-1] #x.get_shape().as_list()[-1]
    num_filters = int(np.log2(comp_ratio))
    h = x
    # Audio may arrive as [batch, time]; add a channel axis if needed.
    if len(x.get_shape().as_list()) < 3:
        h = tf.expand_dims(h, axis=2)
    skips = []
    for i in range(num_filters):
        print('enc_layer_{}'.format(i))
        with tf.variable_scope('enc'):
            h = mf_downconv(h, output_dim=2**(i + 1), stride=2,
                            filter_width=filter_width, name='downconv_{}'.format(i))
            print('h_dim= ', h.get_shape().as_list())
            # Skip connections (all layers except the last)
            if i < num_filters - 1:
                skips.append(h)
                print('skip_dim= ', h.get_shape().as_list())
            if apply_BN:
                h = segan.vbn(h, 'ae_enc_vbn_{}'.format(i))
            if activation == 'leakyrelu':
                h = leakyrelu(h)
            else:
                with tf.variable_scope('enc'):
                    h, _ = prelu(h, name='prelu_{}'.format(i))
    # Concatenate a noise tensor z onto the bottleneck along the channel axis.
    z = make_z(h.get_shape().as_list(), std=noise_std)
#    h = h + z
    h = tf.concat([z, h], 2)
    print('h_and_z_dim', h.get_shape().as_list())
    return h, skips
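The make_z helper is defined elsewhere in the project. A minimal sketch consistent with the call above (signature and behavior assumed, not taken from the repo), sampling an i.i.d. Gaussian noise tensor in TensorFlow 1.x:

import tensorflow as tf

def make_z(shape, std=1.0, name='z'):
    # Hypothetical reconstruction: shape is h.get_shape().as_list() at the
    # call site, so every dimension (including batch) must be static.
    return tf.random_normal(shape, mean=0.0, stddev=std, name=name)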
Code Example #2
File: conv_gen.py Project: HamSade/codes
def decoder(x, comp_ratio, activation='leakyrelu', nonlin='segan'):  # alternative: 'gated_conv'
    input_dim = x.shape[-1]  #tf.shape(x)[-1]
    num_filters = int(np.log2(comp_ratio))
    h = x
    if apply_BN:
        h = batch_norm(h, 'dec_batch_norm_{}'.format(0))
#    with tf.variable_scope('g_dec'):
#        w_full_dec = tf.get_variable('w_full_dec', [input_dim,
#                                                    input_dim * comp_ratio / (2**num_filters) + 1],
#                                     initializer=tf.random_normal_initializer(stddev=0.02))
#        h = full_layer(x, w_full_dec, 'g_dec')
    for i in range(num_filters):
        print('dec_layer_number = ', i)
        print('h_dim= ', h.get_shape().as_list())
        with tf.variable_scope('g_dec'):
            if nonlin == 'segan':
                # original SEGAN upsampling
                h = nn_deconv(h, filter_width=filter_width, name='nn_deconv_{}'.format(i),
                              dilation=2, init=tf.truncated_normal_initializer(stddev=0.02))
                if apply_BN:
                    h = batch_norm(h, 'batch_norm_{}'.format(i))
                if activation == 'leakyrelu':
                    h = leakyrelu(h)
                else:
                    with tf.variable_scope('g_dec'):
                        h, _ = prelu(h, name='prelu_{}'.format(i))
            else:
                # WaveNet gated convolutions
                h = gated_deconv(h, filter_width=filter_width, name='gated_deconv_{}'.format(i),
                                 dilation=2, init=tf.truncated_normal_initializer(stddev=0.02))
    return h
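nn_deconv is also defined elsewhere in the repo; in the original SEGAN code the same name denotes nearest-neighbor upsampling followed by a 1-D convolution. A hedged sketch of that idea, with the signature inferred from the call sites above:

import tensorflow as tf

def nn_deconv(x, filter_width=31, name='nn_deconv', dilation=2,
              init=tf.truncated_normal_initializer(stddev=0.02)):
    # x: [batch, time, channels]; `dilation` acts as the upsampling factor.
    with tf.variable_scope(name):
        in_dim = x.get_shape().as_list()[-1]
        # Nearest-neighbor upsampling: repeat every time step `dilation` times.
        h = tf.expand_dims(x, axis=2)                    # [b, t, 1, c]
        h = tf.tile(h, [1, 1, dilation, 1])              # [b, t, dilation, c]
        h = tf.reshape(h, [tf.shape(x)[0], -1, in_dim])  # [b, t * dilation, c]
        # Smooth the repeated samples with a learned 1-D convolution.
        w = tf.get_variable('w', [filter_width, in_dim, in_dim], initializer=init)
        return tf.nn.conv1d(h, w, stride=1, padding='SAME')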
Code Example #3
File: conv_gen_4.py Project: HamSade/codes
def decoder(x, activation='leakyrelu', nonlin='segan'):  # alternative: 'gated_conv'
    #    input_dim = x.shape[-1]
    num_filters = int(np.log2(comp_ratio))
    h = x
    for i in range(num_filters):
        print('dec_layer_number = ', i)
        with tf.variable_scope('g_dec'):
            if nonlin == 'segan':
                #original segan
                h = nn_deconv(
                    h,
                    filter_width=filter_width,
                    name='nn_deconv_{}'.format(i),
                    dilation=2,
                    init=tf.truncated_normal_initializer(stddev=0.02))
                h = batch_norm(h, 'batch_norm_{}'.format(i))
                if activation == 'leakyrelu':
                    h = leakyrelu(h)
                else:
                    with tf.variable_scope('g_dec'):
                        h, _ = prelu(h, name='prelu_{}'.format(i))
            else:
                # Wavenet gated convolutions
                h = gated_deconv(
                    h,
                    filter_width=filter_width,
                    name='gated_deconv_{}'.format(i),
                    dilation=2,
                    init=tf.truncated_normal_initializer(stddev=0.02))
    return h
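gated_deconv is not shown in this listing either; the comment points at WaveNet's gated activation, tanh(W_f * x) * sigmoid(W_g * x). A hypothetical sketch, reusing the nn_deconv sketch after Code Example #2 for the upsampling step:

import tensorflow as tf

def gated_deconv(x, filter_width=31, name='gated_deconv', dilation=2,
                 init=tf.truncated_normal_initializer(stddev=0.02)):
    with tf.variable_scope(name):
        # Upsample first, as in the SEGAN branch.
        h = nn_deconv(x, filter_width=filter_width, name='upsample',
                      dilation=dilation, init=init)
        in_dim = h.get_shape().as_list()[-1]
        w_f = tf.get_variable('w_filter', [filter_width, in_dim, in_dim], initializer=init)
        w_g = tf.get_variable('w_gate', [filter_width, in_dim, in_dim], initializer=init)
        # WaveNet gate: a tanh "filter" path modulated by a sigmoid "gate" path.
        f = tf.nn.conv1d(h, w_f, stride=1, padding='SAME')
        g = tf.nn.conv1d(h, w_g, stride=1, padding='SAME')
        return tf.tanh(f) * tf.sigmoid(g)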
Code Example #4
File: conv_gen.py Project: HamSade/codes
def encoder(x, comp_ratio, activation='prelu'):
    input_dim = x.shape[-1]  #tf.shape(x)[-1]
    num_filters = int(np.log2(comp_ratio))
    h = x
    if apply_BN:
        h = batch_norm(h, 'enc_batch_norm_{}'.format(0))

    for i in range(num_filters):
        print('enc_layer_number = ', i)
        print('h_dim= ', h.get_shape().as_list())

        with tf.variable_scope('g_enc'):
            h = downconv(h, filter_width=filter_width, name='downconv_{}'.format(i))
            if apply_BN:
                h = batch_norm(h, 'batch_norm_{}'.format(i))
        if activation == 'leakyrelu':
            h = leakyrelu(h)
        else:
            with tf.variable_scope('g_enc'):
                h, _ = prelu(h, name='prelu_{}'.format(i))
#    with tf.variable_scope('g_enc'):
#        w_full_enc = tf.get_variable('w_full_enc', [input_dim / (2**num_filters), input_dim / comp_ratio],
#                                     initializer=tf.random_normal_initializer(stddev=0.01))
#        h = full_layer(h, w_full_enc, 'g_enc')
    return h
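downconv here takes no stride argument; since num_filters = log2(comp_ratio), each layer presumably halves the time axis. A minimal sketch under that assumption (signature inferred from the call sites, not taken from the repo):

import tensorflow as tf

def downconv(x, filter_width=31, stride=2, name='downconv',
             init=tf.truncated_normal_initializer(stddev=0.02)):
    # x: [batch, time, channels]; a strided 1-D convolution that shrinks
    # the time axis by `stride` (assumed 2 to match log2(comp_ratio) layers).
    with tf.variable_scope(name):
        in_dim = x.get_shape().as_list()[-1]
        w = tf.get_variable('w', [filter_width, in_dim, in_dim], initializer=init)
        return tf.nn.conv1d(x, w, stride=stride, padding='SAME')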
Code Example #5
File: tacotron.py Project: HamSade/codes
def conv_net(x, num_layers, activation='prelu'):
    #    input_dim = x.shape[-1] #x.get_shape().as_list()[-1]
    h = x
    if len(x.get_shape().as_list()) < 3:
        h = tf.expand_dims(h, axis=2)
#    skips = []
    for i in range(num_layers):
        print('conv_layer_{}'.format(i))
        with tf.variable_scope('conv'):
            h = mf_downconv(h,
                            output_dim=1,
                            stride=1,
                            filter_width=31,
                            name='downconv_{}'.format(i))
            print('h_dim= ', h.get_shape().as_list())
            # Skip connections
            #            if i < num_filters - 1:
            #                skips.append(h)
            #                print('skip_dim= ', h.get_shape().as_list())
            if apply_BN:
                h = segan.vbn(h, 'ae_enc_vbn_{}'.format(i))
            if activation == 'leakyrelu':
                h = leakyrelu(h)
            else:
                with tf.variable_scope('enc'):
                    h, _ = prelu(h, name='prelu_{}'.format(i))

    h = tf.squeeze(h)
    return h  #, skips
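A sketch of driving conv_net, assuming the repo's module-level helpers (mf_downconv, segan.vbn, prelu, apply_BN) are in scope; the input length 16384 is purely illustrative:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 16384])  # rank-2 input gains a channel axis
y = conv_net(x, num_layers=3, activation='leakyrelu')
# y is [batch, time]: output_dim=1 keeps a single channel, which the
# final tf.squeeze removes.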
Code Example #6
def decoder(x, activation='leakyrelu'):
    num_filters = int(np.log2(comp_ratio))
    h = x
    for i in range(num_filters):
        print('dec_layer_number = ', i)
        with tf.variable_scope('g_dec'):
            h = nn_deconv(h,
                          filter_width=filter_width,
                          name='nn_deconv_{}'.format(i),
                          dilation=2,
                          init=tf.truncated_normal_initializer(stddev=0.02))
        if activation == 'leakyrelu':
            h = leakyrelu(h)
        else:
            with tf.variable_scope('g_dec'):
                h, _ = prelu(h, name='prelu_{}'.format(i))
    return h
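This decoder unpacks two return values from prelu. A plausible sketch of that helper (hypothetical; the repo's own definition is not shown), returning the activation together with its learnable slope:

import tensorflow as tf

def prelu(x, name='prelu'):
    with tf.variable_scope(name):
        # One learnable negative slope per channel, initialized to 0
        # (i.e. the unit starts out as a plain ReLU).
        alpha = tf.get_variable('alpha', [x.get_shape().as_list()[-1]],
                                initializer=tf.constant_initializer(0.0))
        pos = tf.nn.relu(x)
        neg = alpha * (x - tf.abs(x)) * 0.5  # equals alpha * min(x, 0)
        return pos + neg, alpha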
Code Example #7
File: conv_gen_4.py Project: HamSade/codes
def encoder(x, activation='prelu'):
    num_filters = int(np.log2(comp_ratio))
    h = x
    hs = []
    for i in range(num_filters):
        print('enc_layer_number = ', i)
        hs.append(h)
        with tf.variable_scope('g_enc'):
            h = downconv(h,
                         filter_width=filter_width,
                         name='downconv_{}'.format(i))
            h = batch_norm(h, 'batch_norm_{}'.format(i))
        if activation == 'leakyrelu':
            h = leakyrelu(h)
        else:
            with tf.variable_scope('g_enc'):
                h, _ = prelu(h, name='prelu_{}'.format(i))
    return h, hs
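Unlike Code Example #1, this encoder stores the input to every layer in hs. A hypothetical consumer, wiring them U-Net style into a matching decoder (not shown in this listing):

h_enc, hs = encoder(x, activation='prelu')
# Inside a decoder loop, layer i could fuse the mirrored encoder feature:
#     h = tf.concat([h, hs[-(i + 1)]], axis=-1)
# hs[0] is the raw input, so the last decoder layer sees the original signal.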
Code Example #8
def decoder(x,
            comp_ratio,
            activation='leakyrelu',
            nonlin='segan'):  # alternative: 'gated_conv'
    input_dim = x.shape[-1]  #tf.shape(x)[-1]
    num_filters = int(np.log2(comp_ratio))
    h = x
    if apply_BN:
        h = batch_norm(h, 'dec_batch_norm_{}'.format(0))
    for i in range(num_filters):
        print('dec_layer_number = ', i)
        print('h_dim= ', h.get_shape().as_list())
        with tf.variable_scope('g_dec'):
            if nonlin == 'segan':
                #original segan
                h = mf_nn_deconv(
                    h,
                    output_dim=2**(num_filters - i - 1),
                    filter_width=filter_width,
                    name='nn_deconv_{}'.format(i),
                    dilation=4,
                    init=tf.truncated_normal_initializer(stddev=0.02))
                if apply_BN:
                    h = batch_norm(h, 'batch_norm_{}'.format(i))
                if activation == 'leakyrelu':
                    h = leakyrelu(h)
                else:
                    with tf.variable_scope('g_dec'):
                        h, _ = prelu(h, name='prelu_{}'.format(i))
            else:
                # Wavenet gated convolutions
                h = gated_deconv(
                    h,
                    filter_width=filter_width,
                    name='gated_deconv_{}'.format(i),
                    dilation=2,
                    init=tf.truncated_normal_initializer(stddev=0.02))

    # making h of size [num_batch, input_dim]
    h = tf.squeeze(h, axis=-1)
    return h
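Note how output_dim = 2**(num_filters - i - 1) halves the channel count per layer and lands on 1 at the last layer, which is what makes the final tf.squeeze(h, axis=-1) valid. For comp_ratio = 8 (an illustrative value):

# num_filters = int(log2(8)) = 3
# i = 0: output_dim = 2**(3 - 0 - 1) = 4 channels
# i = 1: output_dim = 2**(3 - 1 - 1) = 2 channels
# i = 2: output_dim = 2**(3 - 2 - 1) = 1 channel -> squeeze to [batch, time]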
Code Example #9
File: conv_GAN.py Project: HamSade/codes
def discriminator(x, comp_ratio):
#    input_dim = x.get_shape().as_list()[-1] #x.shape[-1]
    num_filters = int(np.log2(comp_ratio))
    h = x
    with tf.variable_scope('disc'):
#        if apply_BN:
#            h = segan.vbn(h, 'disc_batch_norm_{}'.format(0))
        for i in range(num_filters):
            print('disc_layer_{}'.format(i))
            h = mf_downconv(h, output_dim=2**(i + 1), stride=2,
                            filter_width=filter_width, name='disc_downconv_{}'.format(i))
            print('h_dim= ', h.get_shape().as_list())
            if apply_BN:
                h = segan.vbn(h, 'disc_vbn_{}'.format(i))
            h = leakyrelu(h)
        # Last layer
        h = flatten(h)
        h_logit_out = conv1d(h, filter_width=1, output_dim=1,
                             w_init=tf.truncated_normal_initializer(stddev=0.02),
                             name='disc_logits_conv')  # 1 x 1 convolution
        d_logit_out = tf.squeeze(h_logit_out)  # Remove the size-1 dimension coming from conv1d
        disc_output = fully_connected(d_logit_out, 1, activation_fn=None)  # tf.tanh
        return disc_output
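leakyrelu is used throughout these examples but never defined in them. A minimal sketch (the slope 0.3 is an assumption, not taken from the repo):

import tensorflow as tf

def leakyrelu(x, alpha=0.3):
    # Equivalent to tf.nn.leaky_relu(x, alpha), written out explicitly.
    return tf.maximum(alpha * x, x)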
Code Example #10
def encoder(x, comp_ratio, activation='prelu'):
    input_dim = x.shape[-1]  #x.get_shape().as_list()[-1]
    num_filters = int(np.log2(comp_ratio))
    h = x
    if apply_BN:
        h = batch_norm(h, 'enc_batch_norm_{}'.format(0))
    for i in range(num_filters):
        print('enc_layer_number = ', i)
        print('h_dim= ', h.get_shape().as_list())
        with tf.variable_scope('g_enc'):
            h = mf_downconv(h,
                            output_dim=2**i,
                            stride=4,
                            filter_width=filter_width,
                            name='downconv_{}'.format(i))
            if apply_BN:
                h = batch_norm(h, 'batch_norm_{}'.format(i))
        if activation == 'leakyrelu':
            h = leakyrelu(h)
        else:
            with tf.variable_scope('g_enc'):
                h, _ = prelu(h, name='prelu_{}'.format(i))
    return h
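Unlike the stride-2 encoders above, this one downsamples by 4 per layer while the loop count stays log2(comp_ratio), so the time axis shrinks much faster than the nominal compression ratio. A shape walkthrough for comp_ratio = 16 and an assumed input of [batch, 16384, 1]:

# num_filters = int(log2(16)) = 4
# i = 0: stride 4, output_dim 2**0 = 1 -> [batch, 4096, 1]
# i = 1: stride 4, output_dim 2**1 = 2 -> [batch, 1024, 2]
# i = 2: stride 4, output_dim 2**2 = 4 -> [batch,  256, 4]
# i = 3: stride 4, output_dim 2**3 = 8 -> [batch,   64, 8]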