Example #1
def encoder(x, comp_ratio, activation='prelu'):
    # Relies on module-level globals from the surrounding file:
    # np, tf, apply_BN, filter_width, downconv, batch_norm, prelu, leakyrelu.
    num_filters = int(np.log2(comp_ratio))  # one halving layer per factor of 2
    h = x
    if apply_BN:
        h = batch_norm(h, 'enc_batch_norm_0')
    for i in range(num_filters):
        print('enc_layer_number = ', i)
        print('h_dim= ', h.get_shape().as_list())
        with tf.variable_scope('g_enc'):
            h = downconv(h, filter_width=filter_width,
                         name='downconv_{}'.format(i))
            if apply_BN:
                h = batch_norm(h, 'batch_norm_{}'.format(i))
        if activation == 'leakyrelu':
            h = leakyrelu(h)
        else:
            with tf.variable_scope('g_enc'):
                h, _ = prelu(h, name='prelu_{}'.format(i))
    return h
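A minimal usage sketch, not from the source: apply_BN and filter_width are module-level globals the snippets never define, and downconv is assumed to take a [batch, time, channels] tensor and halve its time axis. With comp_ratio=8 the loop builds log2(8) = 3 downsampling layers:

import numpy as np
import tensorflow as tf

apply_BN = True      # hypothetical module-level config
filter_width = 31    # hypothetical value

# 1 s of 16 kHz-ish mono audio, assuming downconv expects [batch, time, channels]
x = tf.placeholder(tf.float32, [None, 16384, 1], name='wav_in')
code = encoder(x, comp_ratio=8)  # 3 downconv layers, each halving the time axis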
Example #2
def decoder(x, comp_ratio, activation='leakyrelu', nonlin='segan'):
    # nonlin selects 'segan' (nearest-neighbour deconv) or 'gated_conv'
    # (WaveNet-style gated deconvolutions).
    num_filters = int(np.log2(comp_ratio))  # one doubling layer per factor of 2
    h = x
    if apply_BN:
        h = batch_norm(h, 'dec_batch_norm_0')
    for i in range(num_filters):
        print('dec_layer_number = ', i)
        print('h_dim= ', h.get_shape().as_list())
        with tf.variable_scope('g_dec'):
            if nonlin == 'segan':
                # original SEGAN upsampling
                h = nn_deconv(h, filter_width=filter_width,
                              name='nn_deconv_{}'.format(i), dilation=2,
                              init=tf.truncated_normal_initializer(stddev=0.02))
                if apply_BN:
                    h = batch_norm(h, 'batch_norm_{}'.format(i))
                if activation == 'leakyrelu':
                    h = leakyrelu(h)
                else:
                    h, _ = prelu(h, name='prelu_{}'.format(i))
            else:
                # WaveNet gated convolutions
                h = gated_deconv(h, filter_width=filter_width,
                                 name='gated_deconv_{}'.format(i), dilation=2,
                                 init=tf.truncated_normal_initializer(stddev=0.02))
    return h
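Since each nn_deconv layer with dilation=2 doubles the time axis while each encoder downconv halves it, the two functions mirror each other for the same comp_ratio. A hedged round-trip sketch, assuming nn_deconv preserves the channel count and using the hypothetical names from the sketch above:

x = tf.placeholder(tf.float32, [None, 16384, 1], name='wav_in')
code = encoder(x, comp_ratio=8)      # time axis: 16384 -> 2048
x_hat = decoder(code, comp_ratio=8)  # time axis: 2048 -> 16384
loss = tf.reduce_mean(tf.square(x_hat - x))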
Example #3
def decoder(x, activation='leakyrelu', nonlin='segan'):
    # nonlin selects 'segan' or 'gated_conv'; comp_ratio and filter_width
    # are module-level globals in this variant.
    num_filters = int(np.log2(comp_ratio))
    h = x
    for i in range(num_filters):
        print('dec_layer_number = ', i)
        with tf.variable_scope('g_dec'):
            if nonlin == 'segan':
                # original SEGAN upsampling
                h = nn_deconv(
                    h,
                    filter_width=filter_width,
                    name='nn_deconv_{}'.format(i),
                    dilation=2,
                    init=tf.truncated_normal_initializer(stddev=0.02))
                h = batch_norm(h, 'batch_norm_{}'.format(i))
                if activation == 'leakyrelu':
                    h = leakyrelu(h)
                else:
                    h, _ = prelu(h, name='prelu_{}'.format(i))
            else:
                # WaveNet gated convolutions
                h = gated_deconv(
                    h,
                    filter_width=filter_width,
                    name='gated_deconv_{}'.format(i),
                    dilation=2,
                    init=tf.truncated_normal_initializer(stddev=0.02))
    return h
Example #4
def decoder(x,
            comp_ratio,
            activation='leakyrelu',
            nonlin='segan'):
    # nonlin selects 'segan' (multi-filter nn deconv) or 'gated_conv'.
    num_filters = int(np.log2(comp_ratio))
    h = x
    if apply_BN:
        h = batch_norm(h, 'dec_batch_norm_0')
    for i in range(num_filters):
        print('dec_layer_number = ', i)
        print('h_dim= ', h.get_shape().as_list())
        with tf.variable_scope('g_dec'):
            if nonlin == 'segan':
                # original SEGAN upsampling; halve the channel count each
                # layer so the last layer emits a single channel
                h = mf_nn_deconv(
                    h,
                    output_dim=2**(num_filters - i - 1),
                    filter_width=filter_width,
                    name='nn_deconv_{}'.format(i),
                    dilation=4,
                    init=tf.truncated_normal_initializer(stddev=0.02))
                if apply_BN:
                    h = batch_norm(h, 'batch_norm_{}'.format(i))
                if activation == 'leakyrelu':
                    h = leakyrelu(h)
                else:
                    h, _ = prelu(h, name='prelu_{}'.format(i))
            else:
                # WaveNet gated convolutions
                h = gated_deconv(
                    h,
                    filter_width=filter_width,
                    name='gated_deconv_{}'.format(i),
                    dilation=2,
                    init=tf.truncated_normal_initializer(stddev=0.02))

    # drop the now-singleton channel axis: h becomes [num_batch, input_dim]
    h = tf.squeeze(h, axis=-1)
    return h
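For concreteness, the channel schedule implied by output_dim = 2**(num_filters - i - 1) for a hypothetical comp_ratio of 8 (so num_filters = 3):

# i = 0 -> output_dim = 2**2 = 4 channels
# i = 1 -> output_dim = 2**1 = 2 channels
# i = 2 -> output_dim = 2**0 = 1 channel, removed by the final tf.squeeze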
Example #5
def encoder(x, comp_ratio, activation='prelu'):
    # Multi-filter variant: grow the channel count (output_dim = 2**i)
    # while downsampling the time axis by stride 4 at each layer.
    num_filters = int(np.log2(comp_ratio))
    h = x
    if apply_BN:
        h = batch_norm(h, 'enc_batch_norm_0')
    for i in range(num_filters):
        print('enc_layer_number = ', i)
        print('h_dim= ', h.get_shape().as_list())
        with tf.variable_scope('g_enc'):
            h = mf_downconv(h,
                            output_dim=2**i,
                            stride=4,
                            filter_width=filter_width,
                            name='downconv_{}'.format(i))
            if apply_BN:
                h = batch_norm(h, 'batch_norm_{}'.format(i))
        if activation == 'leakyrelu':
            h = leakyrelu(h)
        else:
            with tf.variable_scope('g_enc'):
                h, _ = prelu(h, name='prelu_{}'.format(i))
    return h
Example #6
def encoder(x, activation='prelu'):
    # Skip-connection variant: comp_ratio and filter_width are module-level
    # globals; hs collects the input to each layer for later reuse.
    num_filters = int(np.log2(comp_ratio))
    h = x
    hs = []
    for i in range(num_filters):
        print('enc_layer_number = ', i)
        hs.append(h)
        with tf.variable_scope('g_enc'):
            h = downconv(h,
                         filter_width=filter_width,
                         name='downconv_{}'.format(i))
            h = batch_norm(h, 'batch_norm_{}'.format(i))
        if activation == 'leakyrelu':
            h = leakyrelu(h)
        else:
            with tf.variable_scope('g_enc'):
                h, _ = prelu(h, name='prelu_{}'.format(i))
    return h, hs
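This variant also returns hs, the list of per-layer inputs, and side_net in Example #7 consumes exactly such a list. A hypothetical wiring between the two:

code, skips = encoder(x)                    # skips[i] is the input to layer i
side_codes = side_net(skips, mode='train')  # one binary code per skip tensor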
Example #7
def side_net(hs, mode):
    # Compress each encoder skip activation with a strided downconv,
    # normalize it, and binarize it into a side code.
    num_filters = len(hs)
    ps_q = []
    for i in range(num_filters):
        with tf.variable_scope('g_side_' + str(i)):
            h_temp = downconv(hs[i],
                              filter_width=filter_width // (2**(num_filters - i)),
                              stride=2**(comp_ratio - i),
                              name='downconv')
            h_temp = batch_norm(h_temp, 'batch_norm_{}'.format(i))
            ps_q.append(binary_quantizer(h_temp, mode))
    return ps_q
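binary_quantizer is not shown in these snippets. A minimal sketch of one common choice, a sign quantizer with a straight-through gradient; this is an assumption about the helper, not its actual implementation:

def binary_quantizer(x, mode):
    # Hypothetical straight-through binary quantizer, assuming mode is
    # 'train' during optimization and anything else at inference time.
    p = tf.tanh(x)  # squash activations into (-1, 1)
    if mode == 'train':
        # forward pass emits sign(p); backward pass lets the gradient
        # of p through unchanged (straight-through estimator)
        return p + tf.stop_gradient(tf.sign(p) - p)
    return tf.sign(p)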