with tf.name_scope('embedding'):
    # no pretrained_emb: learn the embedding matrix from scratch
    if pretrained_emb is False:
        embedding = tf.Variable(tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
                                trainable=True)
    # load pretrained_emb
    ## see the backup (sketched below) for how to deal with a too-large emb
    else:
        #embedding = tf.get_variable('embedding', [vocab_size, embedding_size], trainable=False)
        embedding = tf.get_variable('embedding', initializer=emb, trainable=finetune_emb)

    X_embed = tf.nn.embedding_lookup(embedding, X)  # (None, doc_s, sen_s, embed_s)
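# The "backup" mentioned above is presumably the standard trick for embedding
# matrices too large to bake into the graph as an initializer: declare the
# variable without data and load the matrix once through a feed. A minimal
# sketch; emb_ph and emb_init_op are hypothetical names, not from this project:
#
#     emb_ph = tf.placeholder(tf.float32, [vocab_size, embedding_size])
#     embedding = tf.get_variable('embedding', [vocab_size, embedding_size],
#                                 trainable=finetune_emb)
#     emb_init_op = embedding.assign(emb_ph)
#     # after sess.run(init):  sess.run(emb_init_op, feed_dict={emb_ph: emb})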

is_training = True  # build-time flag passed to the RNN layers (gates dropout)
#with tf.device('/gpu:1'):
with tf.name_scope('sen_rnn'):
    # flatten (batch, doc, sen) so each sentence runs as one RNN sequence
    X_embed_reshape = tf.reshape(X_embed, [-1, sen_len, embedding_size])
    sen_rnn_outputs, sen_rnn_states = rnn_layer.bi_rnn(X_embed_reshape,
                                                       n_hidden=n_hidden,
                                                       seq_len=tf.reshape(
                                                           sen_seq_length,
                                                           [-1]),
                                                       n_layer=n_layer,
                                                       is_train=is_training,
                                                       keep_prob=keep_prob,
                                                       scope='sen_rnn_block')
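# rnn_layer is a project-local module not shown in this snippet. For reference,
# a minimal sketch of what bi_rnn presumably wraps (a bidirectional LSTM via
# tf.nn.bidirectional_dynamic_rnn); the signature and dropout placement are
# assumptions, not the project's actual implementation:
def _bi_rnn_sketch(inputs, n_hidden, seq_len, is_train=True, keep_prob=1.0,
                   scope='bi_rnn_sketch'):
    with tf.variable_scope(scope):
        fw_cell = tf.nn.rnn_cell.LSTMCell(n_hidden)
        bw_cell = tf.nn.rnn_cell.LSTMCell(n_hidden)
        if is_train:
            # apply dropout to the RNN outputs only while training
            fw_cell = tf.nn.rnn_cell.DropoutWrapper(fw_cell, output_keep_prob=keep_prob)
            bw_cell = tf.nn.rnn_cell.DropoutWrapper(bw_cell, output_keep_prob=keep_prob)
        outputs, states = tf.nn.bidirectional_dynamic_rnn(
            fw_cell, bw_cell, inputs, sequence_length=seq_len, dtype=tf.float32)
        # concatenate forward and backward passes: (batch, time, 2*n_hidden)
        return tf.concat(outputs, 2), states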

with tf.name_scope('sen_attn'):
    # attention over word states pools each sentence into a single vector
    sen_atten_out, sen_atten_w = attn_layer.atten_layer_project(
        sen_rnn_outputs,
        atten_size,
        n_layer=n_layer,
        l2reg=l2reg,
        seq_len=tf.reshape(sen_seq_length, [-1]),
        use_mask=use_mask,
        sen_CLS=sen_CLS,
        scope='sen_attn_block')
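# attn_layer is likewise project-local. The usual "projection" (additive)
# attention it appears to implement, with masking of padded timesteps, looks
# roughly like the sketch below; all names here are illustrative assumptions:
def _atten_project_sketch(rnn_outputs, atten_size, seq_len=None,
                          scope='attn_sketch'):
    with tf.variable_scope(scope):
        # project hidden states, then score each timestep with a learned vector
        u = tf.layers.dense(rnn_outputs, atten_size, activation=tf.nn.tanh)
        scores = tf.squeeze(tf.layers.dense(u, 1, use_bias=False), -1)  # (batch, time)
        if seq_len is not None:
            # push padded positions toward -inf so the softmax ignores them
            mask = tf.sequence_mask(seq_len, tf.shape(rnn_outputs)[1])
            scores = tf.where(mask, scores, tf.fill(tf.shape(scores), -1e30))
        alphas = tf.nn.softmax(scores)
        # attention-weighted sum over time: (batch, 2*n_hidden)
        context = tf.reduce_sum(rnn_outputs * tf.expand_dims(alphas, -1), 1)
        return context, alphas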


with tf.name_scope('rnn_layer'):
    # bi-RNN over the embedded input
    # NOTE: X_embed is 4-D (batch, doc, sen, emb) per the comment above, while
    # bi_rnn presumably expects (batch, time, emb), so a reshape may be needed
    # here; the original author also questioned whether seq_length applies.
    rnn_outputs, rnn_states = rnn_layer.bi_rnn(X_embed, n_hidden=n_hidden,
                                               seq_len=seq_length, n_layer=n_layer,
                                               is_train=is_training, keep_prob=keep_prob)
    
with tf.name_scope('attention_layer'):
    atten_out, soft_atten_weights = attn_layer.atten_layer_project(
        rnn_outputs, atten_size, n_layer=n_layer, l2reg=l2reg,
        seq_len=seq_length, use_mask=use_mask)
    # dropout on the pooled attention vector before the classifier head
    atten_out_drop = tf.nn.dropout(atten_out, keep_prob)
    
    
with tf.name_scope('logits'):
    # clf_train_op builds the dense classifier head and its training ops in one call
    optimizer, logits, cost, accuracy, Y_proba = model.clf_train_op(
        atten_out_drop, y, ac_fn=tf.nn.relu, lr=lr, l2reg=l2reg, n_class=n_class)
    init, saver = model.initializer()
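
# A sketch of how these ops would typically be driven; the batch iterable and
# checkpoint path are placeholders, and X / y / seq_length are assumed to be
# the input placeholders defined earlier in the file:
def _train_sketch(sess, batches, n_epochs=1, ckpt_path='./model.ckpt'):
    sess.run(init)
    for _ in range(n_epochs):
        # batches yields (X_batch, y_batch, len_batch) tuples
        for X_batch, y_batch, len_batch in batches:
            _, loss_val, acc_val = sess.run(
                [optimizer, cost, accuracy],
                feed_dict={X: X_batch, y: y_batch, seq_length: len_batch})
    saver.save(sess, ckpt_path)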