Example #1
def senEncode_softmax(s_senEncode, w_varible, b_varible, n_feature, doc_len):
    s = tf.reshape(s_senEncode, [-1, n_feature])
    s = tf.nn.dropout(s, keep_prob=FLAGS.keep_prob2)
    w = func.get_weight_varible(w_varible, [n_feature, FLAGS.n_class])
    b = func.get_weight_varible(b_varible, [FLAGS.n_class])
    pred = tf.matmul(s, w) + b
    pred *= func.getmask(doc_len, FLAGS.max_doc_len, [-1, 1])
    pred = tf.nn.softmax(pred)
    pred = tf.reshape(pred, [-1, FLAGS.max_doc_len, FLAGS.n_class])
    reg = tf.nn.l2_loss(w) + tf.nn.l2_loss(b)
    return pred, reg
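
These snippets lean on a helper module func whose contents are not shown. Below is a minimal sketch of get_weight_varible and getmask consistent with how they are called above; the repository's actual implementations (initializer choice, variable scoping) may differ.

import tensorflow as tf

def get_weight_varible(name, shape):
    # Trainable parameter; the uniform initializer is an assumption.
    return tf.get_variable(name, shape=shape,
                           initializer=tf.random_uniform_initializer(-0.01, 0.01))

def getmask(length, max_len, out_shape):
    # 1.0 for valid positions, 0.0 for padding, reshaped so it broadcasts
    # against the tensor being masked (e.g. [-1, 1] against the logits above).
    mask = tf.cast(tf.sequence_mask(length, max_len), tf.float32)
    return tf.reshape(mask, out_shape)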
Example #2
    def get_s(inputs, name):
        with tf.name_scope('word_encode'):
            inputs = RNN(inputs, sen_len, n_hidden=FLAGS.n_hidden,
                         scope=FLAGS.scope + 'word_layer' + name)
        inputs = tf.reshape(inputs, [-1, FLAGS.max_sen_len, sh2])

        with tf.name_scope('word_attention'):
            w1 = func.get_weight_varible('word_att_w1' + name, [sh2, sh2])
            b1 = func.get_weight_varible('word_att_b1' + name, [sh2])
            w2 = func.get_weight_varible('word_att_w2' + name, [sh2, 1])
            senEncode = func.att_var(inputs, sen_len, w1, b1, w2)
        senEncode = tf.reshape(senEncode, [-1, FLAGS.max_doc_len, sh2])
        return senEncode
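
func.att_var is likewise external. Its call sites (inputs of shape [-1, max_sen_len, sh2], per-sentence lengths, and weights w1: [sh2, sh2], b1: [sh2], w2: [sh2, 1]) suggest standard word-level attention with padding masked out; a sketch under that assumption:

import tensorflow as tf

def att_var(inputs, length, w1, b1, w2):
    # inputs: [batch, max_len, n]. Score each position with a one-layer MLP,
    # renormalize over the valid positions only, and return the weighted
    # sum of inputs: [batch, n].
    max_len = inputs.get_shape().as_list()[1]
    n = w1.get_shape().as_list()[0]
    u = tf.tanh(tf.matmul(tf.reshape(inputs, [-1, n]), w1) + b1)
    scores = tf.reshape(tf.matmul(u, w2), [-1, 1, max_len])
    mask = tf.cast(tf.sequence_mask(length, max_len), tf.float32)
    scores = tf.exp(scores) * tf.reshape(mask, [-1, 1, max_len])
    alpha = scores / (tf.reduce_sum(scores, axis=2, keepdims=True) + 1e-9)
    return tf.reshape(tf.matmul(alpha, inputs), [-1, n])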
Example #3
def build_model(word_embedding,
                pos_embedding,
                x,
                word_dis,
                sen_len,
                doc_len,
                keep_prob1,
                keep_prob2,
                RNN=func.biLSTM):
    x = tf.nn.embedding_lookup(word_embedding, x)
    inputs = tf.reshape(x, [-1, FLAGS.max_sen_len, FLAGS.embedding_dim])
    word_dis = tf.nn.embedding_lookup(pos_embedding, word_dis)
    word_dis = tf.reshape(word_dis,
                          [-1, FLAGS.max_sen_len, FLAGS.embedding_dim_pos])
    inputs = tf.concat([inputs, word_dis],
                       axis=2)  # [-1, max_sen_len, dim + dim_pos]
    inputs = tf.nn.dropout(inputs, keep_prob=keep_prob1)
    sen_len = tf.reshape(sen_len, [-1])
    with tf.name_scope('word_encode'):
        lstm_wordEncode = RNN(inputs,
                              sen_len,
                              n_hidden=FLAGS.n_hidden,
                              scope=FLAGS.scope + 'word_layer')
    lstm_wordEncode = tf.reshape(lstm_wordEncode,
                                 [-1, FLAGS.max_sen_len, 2 * FLAGS.n_hidden])
    with tf.name_scope('word_attention'):
        sh2 = 2 * FLAGS.n_hidden
        w1 = func.get_weight_varible('word_att_w1', [sh2, sh2])
        b1 = func.get_weight_varible('word_att_b1', [sh2])
        w2 = func.get_weight_varible('word_att_w2', [sh2, 1])
        s_wordEncode = func.att_var(lstm_wordEncode, sen_len, w1, b1, w2)
    s_senEncode = tf.reshape(s_wordEncode,
                             [-1, FLAGS.max_doc_len, 2 * FLAGS.n_hidden])

    for i in range(FLAGS.clause_layer):
        s_senEncode = RNN(s_senEncode,
                          doc_len,
                          n_hidden=FLAGS.n_hidden,
                          scope=FLAGS.scope + 'sentence_layer' + str(i))
    n_feature = 2 * FLAGS.n_hidden

    with tf.name_scope('softmax'):
        s = tf.reshape(s_senEncode, [-1, n_feature])
        s = tf.nn.dropout(s, keep_prob=keep_prob2)
        w = func.get_weight_varible('softmax_w', [n_feature, FLAGS.n_class])
        b = func.get_weight_varible('softmax_b', [FLAGS.n_class])
        pred = tf.matmul(s, w) + b
        pred = tf.nn.softmax(pred)
        pred = tf.reshape(pred, [-1, FLAGS.max_doc_len, FLAGS.n_class],
                          name="pred")
    reg = tf.nn.l2_loss(w) + tf.nn.l2_loss(b)
    return pred, reg
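
func.biLSTM, the default RNN argument in every build_model here, is also not shown. A plausible TF1-style sketch that matches its usage (returns [batch, max_len, 2 * n_hidden]):

import tensorflow as tf

def biLSTM(inputs, length, n_hidden, scope):
    # Bidirectional dynamic LSTM; forward and backward outputs are
    # concatenated on the feature axis.
    with tf.variable_scope(scope):
        cell_fw = tf.nn.rnn_cell.LSTMCell(n_hidden)
        cell_bw = tf.nn.rnn_cell.LSTMCell(n_hidden)
        outputs, _ = tf.nn.bidirectional_dynamic_rnn(
            cell_fw, cell_bw, inputs, sequence_length=length, dtype=tf.float32)
    return tf.concat(outputs, axis=2)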
Example #4
    def get_s(inputs, name):  # the inputs here are batch-level, not document-level
        with tf.name_scope('word_encode'):
            inputs = RNN(inputs,
                         sen_len,
                         n_hidden=FLAGS.n_hidden,
                         scope=FLAGS.scope + 'word_layer' + name)  # 30 200
        with tf.name_scope('word_attention'):
            sh2 = 2 * FLAGS.n_hidden
            w1 = get_weight_varible('word_att_w1' + name, [sh2, sh2])
            b1 = get_weight_varible('word_att_b1' + name, [sh2])
            w2 = get_weight_varible('word_att_w2' + name, [sh2, 1])
            s = att_var(inputs, sen_len, w1, b1, w2)  # (?, 200)
        s = tf.reshape(
            s, [-1, FLAGS.max_doc_len, 2 * FLAGS.n_hidden])  # (?, 75, 200)
        return s
Example #5
def build_model(x,
                sen_len,
                doc_len,
                word_dis,
                word_embedding,
                pos_embedding,
                keep_prob1,
                keep_prob2,
                RNN=func.biLSTM):
    x = tf.nn.embedding_lookup(word_embedding, x)  # look up the rows of word_embedding indexed by x
    inputs = tf.reshape(x, [-1, FLAGS.max_sen_len, FLAGS.embedding_dim])
    # print("word_dis:{}".format(word_dis))
    word_dis = tf.nn.embedding_lookup(pos_embedding, word_dis)
    sh2 = 2 * FLAGS.n_hidden

    inputs = tf.nn.dropout(inputs, keep_prob=keep_prob1)
    sen_len = tf.reshape(sen_len, [-1])
    # print("sen_len:{}".format(sen_len))
    with tf.name_scope('word_encode'):
        wordEncode = RNN(inputs,
                         sen_len,
                         n_hidden=FLAGS.n_hidden,
                         scope=FLAGS.scope + 'word_layer')
    wordEncode = tf.reshape(wordEncode, [-1, FLAGS.max_sen_len, sh2])

    with tf.name_scope('attention'):
        w1 = func.get_weight_varible('word_att_w1', [sh2, sh2])
        b1 = func.get_weight_varible('word_att_b1', [sh2])
        w2 = func.get_weight_varible('word_att_w2', [sh2, 1])
        senEncode = func.att_var(wordEncode, sen_len, w1, b1, w2)
    senEncode = tf.reshape(senEncode, [-1, FLAGS.max_doc_len, sh2])

    word_dis = tf.reshape(word_dis[:, :, 0, :],
                          [-1, FLAGS.max_doc_len, FLAGS.embedding_dim_pos])

    n_feature = 2 * FLAGS.n_hidden + FLAGS.embedding_dim_pos
    out_units = 2 * FLAGS.n_hidden
    for i in range(1, FLAGS.n_layers):
        senEncode_dis = tf.concat([senEncode, word_dis], axis=2)  # concatenate the distance embedding onto each clause
        senEncode = trans_func(senEncode_dis, senEncode, n_feature, out_units,
                               'layer' + str(i))

    pred, reg = senEncode_softmax(senEncode, 'softmax_w', 'softmax_b',
                                  out_units, doc_len)
    return pred, reg
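
trans_func is not defined in any of these snippets. For shape checking, the following deliberately simplified stand-in has the same signature: a position-wise projection of the feature-augmented encoding plus a residual connection back to the previous encoding. The repository's real trans_func is presumably a transformer-style block (self-attention plus feed-forward), so treat this only as a placeholder:

def trans_func(senEncode_dis, senEncode, n_feature, out_units, scope):
    # Simplified stand-in, not the repository's implementation.
    with tf.variable_scope(scope):
        w = func.get_weight_varible('trans_w', [n_feature, out_units])
        b = func.get_weight_varible('trans_b', [out_units])
        h = tf.nn.relu(
            tf.matmul(tf.reshape(senEncode_dis, [-1, n_feature]), w) + b)
        h = tf.reshape(h, [-1, FLAGS.max_doc_len, out_units])
    return h + senEncode  # residual keeps the output at out_units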
Example #6
def build_model(word_embedding,
                pos_embedding,
                word_dis,
                x,
                sen_len,
                doc_len,
                keep_prob1,
                keep_prob2,
                RNN=func.biLSTM):
    x = tf.nn.embedding_lookup(word_embedding, x)
    inputs = tf.reshape(x, [-1, FLAGS.max_sen_len, FLAGS.embedding_dim])
    inputs = tf.nn.dropout(inputs, keep_prob=keep_prob1)
    word_dis = tf.nn.embedding_lookup(pos_embedding, word_dis)
    word_dis = tf.reshape(word_dis[:, :, 0, :],
                          [-1, FLAGS.max_doc_len, FLAGS.embedding_dim])
    sen_len = tf.reshape(sen_len, [-1])
    with tf.name_scope('word_encode'):
        lstm_wordEncode = RNN(inputs,
                              sen_len,
                              n_hidden=FLAGS.n_hidden,
                              scope=FLAGS.scope + 'word_layer')
    lstm_wordEncode = tf.reshape(lstm_wordEncode,
                                 [-1, FLAGS.max_sen_len, 2 * FLAGS.n_hidden])
    with tf.name_scope('word_attention'):
        sh2 = 2 * FLAGS.n_hidden
        w1 = func.get_weight_varible('word_att_w1', [sh2, sh2])
        b1 = func.get_weight_varible('word_att_b1', [sh2])
        w2 = func.get_weight_varible('word_att_w2', [sh2, 1])
        s_wordEncode = func.att_var(lstm_wordEncode, sen_len, w1, b1, w2)
    senEncode = tf.reshape(s_wordEncode,
                           [-1, FLAGS.max_doc_len, 2 * FLAGS.n_hidden])

    n_feature = 2 * FLAGS.n_hidden
    out_units = 2 * FLAGS.n_hidden
    for i in range(1, FLAGS.n_layers):
        # element-wise addition below assumes FLAGS.embedding_dim == 2 * FLAGS.n_hidden
        senEncode_dis = senEncode + word_dis
        senEncode = trans_func(senEncode_dis, senEncode, n_feature, out_units,
                               'layer' + str(i))

    pred, reg = senEncode_softmax(senEncode, 'softmax_w', 'softmax_b',
                                  out_units, doc_len)
    return pred, reg
Example #7
def build_model(x,
                sen_len,
                doc_len,
                word_dis,
                word_embedding,
                pos_embedding,
                keep_prob1,
                keep_prob2,
                RNN=func.biLSTM):
    x = tf.nn.embedding_lookup(word_embedding, x)
    inputs = tf.reshape(x, [-1, FLAGS.max_sen_len, FLAGS.embedding_dim])
    word_dis = tf.nn.embedding_lookup(pos_embedding, word_dis)
    sh2 = 2 * FLAGS.n_hidden

    inputs = tf.nn.dropout(inputs, keep_prob=keep_prob1)
    sen_len = tf.reshape(sen_len, [-1])
    with tf.name_scope('word_encode'):
        wordEncode = RNN(inputs,
                         sen_len,
                         n_hidden=FLAGS.n_hidden,
                         scope=FLAGS.scope + 'word_layer')
    wordEncode = tf.reshape(wordEncode, [-1, FLAGS.max_sen_len, sh2])

    with tf.name_scope('attention'):
        w1 = func.get_weight_varible('word_att_w1', [sh2, sh2])
        b1 = func.get_weight_varible('word_att_b1', [sh2])
        w2 = func.get_weight_varible('word_att_w2', [sh2, 1])
        senEncode = func.att_var(wordEncode, sen_len, w1, b1, w2)
    senEncode = tf.reshape(senEncode, [-1, FLAGS.max_doc_len, sh2])
    word_dis = tf.reshape(word_dis[:, :, 0, :],
                          [-1, FLAGS.max_doc_len, FLAGS.embedding_dim_pos])
    senEncode_dis = tf.concat([senEncode, word_dis], axis=2)  # concatenate the distance embedding onto each clause

    n_feature = 2 * FLAGS.n_hidden + FLAGS.embedding_dim_pos
    out_units = 2 * FLAGS.n_hidden
    batch = tf.shape(senEncode)[0]
    pred_zeros = tf.zeros([batch, FLAGS.max_doc_len, FLAGS.max_doc_len])
    pred_ones = tf.ones_like(pred_zeros)
    pred_two = tf.fill([batch, FLAGS.max_doc_len, FLAGS.max_doc_len], 2.)
    matrix = tf.reshape((1 - tf.eye(FLAGS.max_doc_len)),
                        [1, FLAGS.max_doc_len, FLAGS.max_doc_len]) + pred_zeros
    pred_assist_list, reg_assist_list, pred_assist_label_list = [], [], []
    if FLAGS.n_layers > 1:
        '''*******GL1******'''
        senEncode = trans_func(senEncode_dis, senEncode, n_feature, out_units,
                               'layer1')
        pred_assist, reg_assist = senEncode_softmax(senEncode,
                                                    'softmax_assist_w1',
                                                    'softmax_assist_b1',
                                                    out_units, doc_len)

        pred_assist_label = tf.cast(
            tf.reshape(tf.argmax(pred_assist, axis=2),
                       [-1, 1, FLAGS.max_doc_len]), tf.float32)
        # mask the prediction at the current position
        pred_assist_label = pred_assist_label * pred_two - pred_ones
        pred_assist_label = (pred_assist_label + pred_zeros) * matrix
        # feedforward
        w_for = func.get_weight_varible('w_for1',
                                        [FLAGS.max_doc_len, FLAGS.max_doc_len])
        b_for = func.get_weight_varible('b_for1', [FLAGS.max_doc_len])
        pred_assist_label = tf.tanh(
            tf.matmul(tf.reshape(pred_assist_label, [-1, FLAGS.max_doc_len]),
                      w_for) + b_for)
        pred_assist_label = tf.reshape(
            pred_assist_label, [batch, FLAGS.max_doc_len, FLAGS.max_doc_len])

        pred_assist_label_list.append(pred_assist_label)
        pred_assist_list.append(pred_assist)
        reg_assist_list.append(reg_assist)
    '''*******GL n******'''
    for i in range(2, FLAGS.n_layers):
        senEncode_assist = tf.concat([senEncode, pred_assist_label], axis=2)
        n_feature = out_units + FLAGS.max_doc_len
        senEncode = trans_func(senEncode_assist, senEncode, n_feature,
                               out_units, 'layer' + str(i))

        pred_assist, reg_assist = senEncode_softmax(
            senEncode, 'softmax_assist_w' + str(i),
            'softmax_assist_b' + str(i), out_units, doc_len)
        pred_assist_label = tf.cast(
            tf.reshape(tf.argmax(pred_assist, axis=2),
                       [-1, 1, FLAGS.max_doc_len]), tf.float32)
        # mask the prediction at the current position
        pred_assist_label = pred_assist_label * pred_two - pred_ones
        pred_assist_label = (pred_assist_label + pred_zeros) * matrix
        # feedforward
        w_for = func.get_weight_varible('w_for' + str(i),
                                        [FLAGS.max_doc_len, FLAGS.max_doc_len])
        b_for = func.get_weight_varible('b_for' + str(i), [FLAGS.max_doc_len])
        pred_assist_label = tf.tanh(
            tf.matmul(tf.reshape(pred_assist_label, [-1, FLAGS.max_doc_len]),
                      w_for) + b_for)
        pred_assist_label = tf.reshape(
            pred_assist_label, [batch, FLAGS.max_doc_len, FLAGS.max_doc_len])

        pred_assist_label_list.append(pred_assist_label)
        pred_assist_label = tf.divide(
            tf.reduce_sum(pred_assist_label_list, axis=0), i)

        pred_assist_list.append(pred_assist)
        reg_assist_list.append(reg_assist)
    '''*******Main******'''
    if FLAGS.n_layers > 1:
        senEncode_dis_GL = tf.concat([senEncode, pred_assist_label], axis=2)
        n_feature = out_units + FLAGS.max_doc_len
        senEncode_main = trans_func(senEncode_dis_GL, senEncode, n_feature,
                                    out_units, 'block_main')
    else:
        senEncode_main = trans_func(senEncode_dis, senEncode, n_feature,
                                    out_units, 'block_main')
    pred, reg = senEncode_softmax(senEncode_main, 'softmax_w', 'softmax_b',
                                  out_units, doc_len)
    return pred, reg, pred_assist_list, reg_assist_list
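
For orientation, a hypothetical driver wiring Example #7's build_model into a graph; the placeholder shapes, vocab_size, and n_pos below are illustrative assumptions, not the repository's code:

vocab_size, n_pos = 10000, 200  # assumed sizes
word_embedding = tf.get_variable('word_embedding',
                                 [vocab_size, FLAGS.embedding_dim])
pos_embedding = tf.get_variable('pos_embedding',
                                [n_pos, FLAGS.embedding_dim_pos])

x = tf.placeholder(tf.int32, [None, FLAGS.max_doc_len, FLAGS.max_sen_len])
sen_len = tf.placeholder(tf.int32, [None, FLAGS.max_doc_len])
doc_len = tf.placeholder(tf.int32, [None])
word_dis = tf.placeholder(tf.int32,
                          [None, FLAGS.max_doc_len, FLAGS.max_sen_len])
keep_prob1 = tf.placeholder(tf.float32)
keep_prob2 = tf.placeholder(tf.float32)

pred, reg, pred_assist_list, reg_assist_list = build_model(
    x, sen_len, doc_len, word_dis, word_embedding, pos_embedding,
    keep_prob1, keep_prob2)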
Example #8
def build_model(x, sen_len, doc_len, word_dis, word_embedding, pos_embedding, keep_prob1, keep_prob2, RNN=func.biLSTM):
    x = tf.nn.embedding_lookup(word_embedding, x)  # look up the rows of word_embedding indexed by x
    inputs = tf.reshape(x, [-1, FLAGS.max_sen_len, FLAGS.embedding_dim])
    sh2 = 2 * FLAGS.n_hidden
    inputs = tf.nn.dropout(inputs, keep_prob=keep_prob1)
    sen_len = tf.reshape(sen_len, [-1])
    # print("sen_len:{}

    def get_s(inputs, name):
        with tf.name_scope('word_encode'):
            inputs = RNN(inputs, sen_len, n_hidden=FLAGS.n_hidden,
                         scope=FLAGS.scope + 'word_layer' + name)
        inputs = tf.reshape(inputs, [-1, FLAGS.max_sen_len, sh2])

        with tf.name_scope('word_attention'):
            w1 = func.get_weight_varible('word_att_w1' + name, [sh2, sh2])
            b1 = func.get_weight_varible('word_att_b1' + name, [sh2])
            w2 = func.get_weight_varible('word_att_w2' + name, [sh2, 1])
            senEncode = func.att_var(inputs, sen_len, w1, b1, w2)
        senEncode = tf.reshape(senEncode, [-1, FLAGS.max_doc_len, sh2])
        return senEncode

    s = get_s(inputs, name='pos_word_encode')
    s = RNN(s, doc_len, n_hidden=FLAGS.n_hidden, scope=FLAGS.scope + 'pos_sentence_layer')

    with tf.name_scope('sequence_prediction'):
        s1 = tf.reshape(s, [-1, 2 * FLAGS.n_hidden])
        s1 = tf.nn.dropout(s1, keep_prob=keep_prob2)

        w_pos = func.get_weight_varible('softmax_w_pos', [2 * FLAGS.n_hidden, FLAGS.n_class])
        b_pos = func.get_weight_varible('softmax_b_pos', [FLAGS.n_class])
        pred_pos = tf.nn.softmax(tf.matmul(s1, w_pos) + b_pos)
        pred_pos = tf.reshape(pred_pos, [-1, FLAGS.max_doc_len, FLAGS.n_class])

    # build the relative-position vector
    word_dis = tf.reshape(word_dis[:, :, 0], [-1, FLAGS.max_doc_len])  # shape=(?, 75)
    pred_y_pos_op = tf.argmax(pred_pos, 2)  # shape=(?, 75)
    cla_ind = tf.argmax(pred_y_pos_op, 1)  # index of the predicted emotion clause, shape=(?,)
    cla_ind = tf.reshape(tf.to_int32(cla_ind), [-1, 1])
    cla_ind = tf.tile(cla_ind, [1, 75])  # shape=(?, 75)
    m_69 = 69 * tf.ones_like(cla_ind)  # 69 appears to shift signed distances into a non-negative index range
    cla_ind = tf.subtract(cla_ind, m_69)
    cla_ind_add_1 = tf.multiply(cla_ind, word_dis)
    i = tf.constant([x for x in range(0, FLAGS.max_doc_len)], dtype=tf.int32)
    i = tf.reshape(i, [1, 75])
    cla_ind_add_2 = tf.multiply(i, word_dis)  # shape=(?, 75)
    pos = tf.subtract(cla_ind_add_2, cla_ind_add_1)  # pos = (i - cla_ind + 69) * word_dis
    word_dis = tf.nn.embedding_lookup(pos_embedding, pos)  # look up pos_embedding rows indexed by pos

    senEncode = get_s(inputs, name='cause_word_encode')
    senEncode_dis = tf.concat([senEncode, word_dis], axis=2)  # concatenate the distance embedding onto each clause

    n_feature = 2 * FLAGS.n_hidden + FLAGS.embedding_dim_pos
    out_units = 2 * FLAGS.n_hidden
    batch = tf.shape(senEncode)[0]
    pred_zeros = tf.zeros([batch, FLAGS.max_doc_len, FLAGS.max_doc_len])
    pred_ones = tf.ones_like(pred_zeros)
    pred_two = tf.fill([batch, FLAGS.max_doc_len, FLAGS.max_doc_len], 2.)
    matrix = tf.reshape((1 - tf.eye(FLAGS.max_doc_len)), [1, FLAGS.max_doc_len, FLAGS.max_doc_len]) + pred_zeros
    pred_assist_list, reg_assist_list, pred_assist_label_list = [], [], []
    if FLAGS.n_layers > 1:
        '''*******GL1******'''
        senEncode = trans_func(senEncode_dis, senEncode, n_feature, out_units, 'layer1')
        pred_assist, reg_assist = senEncode_softmax(senEncode, 'softmax_assist_w1', 'softmax_assist_b1', out_units, doc_len)
        reg_assist += tf.nn.l2_loss(w_pos) + tf.nn.l2_loss(b_pos)

        pred_assist_label = tf.cast(tf.reshape(tf.argmax(pred_assist, axis=2), [-1, 1, FLAGS.max_doc_len]), tf.float32)
        # mask the prediction at the current position
        pred_assist_label = pred_assist_label * pred_two - pred_ones
        pred_assist_label = (pred_assist_label + pred_zeros) * matrix
        # feedforward
        w_for = func.get_weight_varible('w_for1', [FLAGS.max_doc_len, FLAGS.max_doc_len])
        b_for = func.get_weight_varible('b_for1', [FLAGS.max_doc_len])
        pred_assist_label = tf.tanh(tf.matmul(tf.reshape(pred_assist_label, [-1, FLAGS.max_doc_len]), w_for) + b_for)
        pred_assist_label = tf.reshape(pred_assist_label, [batch, FLAGS.max_doc_len, FLAGS.max_doc_len])

        pred_assist_label_list.append(pred_assist_label)
        pred_assist_list.append(pred_assist)
        reg_assist_list.append(reg_assist)
    '''*******GL n******'''
    for i in range(2, FLAGS.n_layers):
        senEncode_assist = tf.concat([senEncode, pred_assist_label], axis=2)
        n_feature = out_units + FLAGS.max_doc_len
        senEncode = trans_func(senEncode_assist, senEncode, n_feature, out_units, 'layer' + str(i))

        pred_assist, reg_assist = senEncode_softmax(senEncode, 'softmax_assist_w' + str(i), 'softmax_assist_b' + str(i), out_units, doc_len)

        pred_assist_label = tf.cast(tf.reshape(tf.argmax(pred_assist, axis=2), [-1, 1, FLAGS.max_doc_len]), tf.float32)
        # mask the prediction at the current position
        pred_assist_label = pred_assist_label * pred_two - pred_ones
        pred_assist_label = (pred_assist_label + pred_zeros) * matrix
        # feedforward
        w_for = func.get_weight_varible('w_for' + str(i), [FLAGS.max_doc_len, FLAGS.max_doc_len])
        b_for = func.get_weight_varible('b_for' + str(i), [FLAGS.max_doc_len])
        pred_assist_label = tf.tanh(tf.matmul(tf.reshape(pred_assist_label, [-1, FLAGS.max_doc_len]), w_for) + b_for)
        pred_assist_label = tf.reshape(pred_assist_label, [batch, FLAGS.max_doc_len, FLAGS.max_doc_len])

        pred_assist_label_list.append(pred_assist_label)
        pred_assist_label = tf.divide(tf.reduce_sum(pred_assist_label_list, axis=0), i)

        pred_assist_list.append(pred_assist)
        reg_assist_list.append(reg_assist)

    '''*******Main******'''
    with tf.name_scope('main'):
        if FLAGS.n_layers > 1:
            senEncode_dis_GL = tf.concat([senEncode, pred_assist_label], axis=2)
            n_feature = out_units + FLAGS.max_doc_len
            senEncode_main = trans_func(senEncode_dis_GL, senEncode, n_feature, out_units, 'block_main')
        else:
            senEncode_main = trans_func(senEncode_dis, senEncode, n_feature, out_units, 'block_main')
        pred, reg = senEncode_softmax(senEncode_main, 'softmax_w', 'softmax_b', out_units, doc_len)
        reg += tf.nn.l2_loss(w_pos) + tf.nn.l2_loss(b_pos)
    return pos, pred_pos, pred, reg, pred_assist_list, reg_assist_list
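
The relative-position arithmetic in Example #8 reduces to pos = (i - cla_ind + 69) * word_dis, where i is the clause index and cla_ind the predicted emotion-clause position; the constant 69 appears to shift signed distances into a non-negative range for the pos_embedding lookup. A tiny NumPy check of that reading (values are illustrative only):

import numpy as np

cla_ind = 3                   # predicted emotion-clause index (illustrative)
i = np.arange(5)              # clause indices of a 5-clause document
mask = np.ones(5, dtype=int)  # stand-in for the word_dis multiplier
pos = (i * mask) - ((cla_ind - 69) * mask)
print(pos)                    # [66 67 68 69 70]: clause 3 maps to 69 (distance 0)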
Example #9
File: ecjc.py  Project: LeMei/ecjd
def build_model(x,
                sen_len,
                doc_len,
                word_embedding,
                clause_position,
                embedding_pos,
                keep_prob1,
                keep_prob2,
                RNN=func.biLSTM):
    x = tf.nn.embedding_lookup(word_embedding, x)
    inputs = tf.reshape(x, [-1, FLAGS.max_sen_len, FLAGS.embedding_dim])
    n_hidden = 2 * FLAGS.n_hidden

    inputs = tf.nn.dropout(inputs, keep_prob=keep_prob1)
    sen_len = tf.reshape(sen_len, [-1])
    with tf.name_scope('word_encode'):
        wordEncode = RNN(inputs,
                         sen_len,
                         n_hidden=FLAGS.n_hidden,
                         scope=FLAGS.scope + 'word_layer')
    wordEncode = tf.reshape(wordEncode, [-1, FLAGS.max_sen_len, n_hidden])

    with tf.name_scope('attention'):
        w1 = func.get_weight_varible('word_att_w1', [n_hidden, n_hidden])
        b1 = func.get_weight_varible('word_att_b1', [n_hidden])
        w2 = func.get_weight_varible('word_att_w2', [n_hidden, 1])
        senEncode = func.att_var(wordEncode, sen_len, w1, b1, w2)
        # (32*75,200)
    senEncode = tf.reshape(senEncode,
                           [-1, FLAGS.max_doc_len, n_hidden])  #(32, 75, 200)

    n_feature = 2 * FLAGS.n_hidden
    out_units = 2 * FLAGS.n_hidden  #200
    batch = tf.shape(senEncode)[0]  #32
    pred_zeros = tf.zeros([batch, FLAGS.max_doc_len,
                           FLAGS.max_doc_len])  # (32, 75, 75)
    matrix = tf.reshape(
        (1 - tf.eye(FLAGS.max_doc_len)),
        [1, FLAGS.max_doc_len, FLAGS.max_doc_len]) + pred_zeros  # mask that zeros the diagonal (1 - identity)
    pred_emotion_assist_list, reg_emotion_assist_list, pred_emotion_assist_label_list = [], [], []
    pred_cause_assist_list, reg_cause_assist_list, pred_cause_assist_label_list = [], [], []

    if FLAGS.assist_n_layers > 1:
        '''******* emotion layer 1******'''
        emotion_senEncode = trans_func(senEncode, senEncode, n_feature,
                                       out_units,
                                       'emotion_layer1')  #(32,75,200)
        pred_emotion_assist, reg_emotion_assist = senEncode_emotion_softmax(
            emotion_senEncode, 'softmax_assist_w1', 'softmax_assist_b1',
            out_units, doc_len)
        #(32, 75,2)
        pred_emotion_assist_label = tf.cast(
            tf.reshape(tf.argmax(pred_emotion_assist, axis=2),
                       [-1, 1, FLAGS.max_doc_len]), tf.float32)
        #(32, 75, 1)=>(32, 1, 75)

        pred_emotion_assist_position = tf.cast(
            tf.reshape(tf.argmax(pred_emotion_assist_label, axis=2), [-1, 1]) +
            1, tf.int32)  # position of the emotion clause, used to assist cause-clause extraction
        pred_clause_relative_position = tf.cast(
            tf.reshape(clause_position - pred_emotion_assist_position + 69,
                       [-1, FLAGS.max_doc_len]),
            tf.float32)  # relative position w.r.t. the emotion clause (32, 1, 75)
        pred_clause_relative_position *= func.getmask(doc_len,
                                                      FLAGS.max_doc_len,
                                                      [-1, FLAGS.max_doc_len])
        pred_clause_relative_position = tf.cast(pred_clause_relative_position,
                                                tf.int32)
        pred_clause_rep_embed = tf.nn.embedding_lookup(
            embedding_pos, pred_clause_relative_position)  #(32, 75, 50)

        pred_emotion_assist_label = (pred_emotion_assist_label +
                                     pred_zeros) * matrix  # mask each clause's own prediction
        #matrix=>(32, 75, 75)
        #pred_assist_label=>(32, 75, 75)
        pred_emotion_assist_label_list.append(pred_emotion_assist_label)
        pred_emotion_assist_list.append(pred_emotion_assist)
        reg_emotion_assist_list.append(reg_emotion_assist)
        '''******* cause layer 1******'''
        cause_senEncode_assist = tf.concat([senEncode, pred_clause_rep_embed],
                                           axis=2)
        n_feature = out_units + FLAGS.embedding_dim_pos
        cause_senEncode = trans_func(cause_senEncode_assist, senEncode,
                                     n_feature, out_units, 'cause_layer')

        pred_cause_assist, reg_cause_assist = senEncode_cause_softmax(
            cause_senEncode, 'cause_softmax_assist_w1',
            'cause_softmax_assist_b1', out_units, doc_len)
        # (32, 75,2)
        pred_cause_assist_label = tf.cast(
            tf.reshape(tf.argmax(pred_cause_assist, axis=2),
                       [-1, 1, FLAGS.max_doc_len]), tf.float32)
        # (32, 75, 1)=>(32, 1, 75)
        pred_cause_assist_label = (pred_cause_assist_label +
                                   pred_zeros) * matrix  # mask each clause's own prediction
        # matrix=>(32, 75, 75)
        # pred_assist_label=>(32, 75, 75)
        pred_cause_assist_label_list.append(pred_cause_assist_label)
        pred_cause_assist_list.append(pred_cause_assist)
        reg_cause_assist_list.append(reg_cause_assist)

    for i in range(2, FLAGS.assist_n_layers):
        emotion_senEncode_assist = tf.concat([
            emotion_senEncode, pred_emotion_assist_label,
            pred_cause_assist_label
        ],
                                             axis=2)  # (32, 75, 275)
        n_feature = out_units + 2 * FLAGS.max_doc_len  # 275
        emotion_senEncode = trans_func(emotion_senEncode_assist,
                                       emotion_senEncode, n_feature, out_units,
                                       'emotion_layer' + str(i))  # (32,75,200)

        pred_emotion_assist, reg_emotion_assist = senEncode_emotion_softmax(
            emotion_senEncode, 'emotion_softmax_assist_w' + str(i),
            'emotion_softmax_assist_b' + str(i), out_units, doc_len)
        pred_emotion_assist_label = tf.cast(
            tf.reshape(tf.argmax(pred_emotion_assist, axis=2),
                       [-1, 1, FLAGS.max_doc_len]), tf.float32)

        # pred_emotion_assist_position = tf.cast(tf.reshape(tf.argmax(pred_emotion_assist_label, axis=2), [-1, 1]),
        #                                        tf.float32) + 1  # position of the emotion clause, assisting cause-clause extraction
        # pred_clause_relative_position = tf.reshape(clause_position - pred_emotion_assist_position,
        #                                            [-1, FLAGS.max_doc_len])  # relative position w.r.t. the emotion clause (32, 1, 75)
        # pred_clause_relative_position *= func.getmask(doc_len, FLAGS.max_doc_len, [-1, FLAGS.max_doc_len])
        # pred_clause_rep_embed = tf.nn.embedding_lookup(embedding_pos, pred_clause_relative_position)  # (32, 75, 50)

        pred_emotion_assist_label = (pred_emotion_assist_label +
                                     pred_zeros) * matrix
        pred_emotion_assist_label_list.append(pred_emotion_assist_label)

        pred_emotion_assist_label = tf.reduce_sum(
            pred_emotion_assist_label_list, axis=0)
        # sum over the layers so far; pred_assist_label => (32, 75, 75)

        pred_emotion_assist_list.append(pred_emotion_assist)
        reg_emotion_assist_list.append(reg_emotion_assist)

        cause_senEncode_assist = tf.concat([
            cause_senEncode, pred_cause_assist_label, pred_emotion_assist_label
        ],
                                           axis=2)  #(32, 75, 275)
        n_feature = out_units + 2 * FLAGS.max_doc_len  #275
        cause_senEncode = trans_func(cause_senEncode_assist, cause_senEncode,
                                     n_feature, out_units,
                                     'cause_layer' + str(i))  #(32,75,200)

        pred_cause_assist, reg_cause_assist = senEncode_cause_softmax(
            cause_senEncode, 'cause_softmax_assist_w' + str(i),
            'cause_softmax_assist_b' + str(i), out_units, doc_len)
        pred_cause_assist_label = tf.cast(
            tf.reshape(tf.argmax(pred_cause_assist, axis=2),
                       [-1, 1, FLAGS.max_doc_len]), tf.float32)
        pred_cause_assist_label = (pred_cause_assist_label +
                                   pred_zeros) * matrix
        pred_cause_assist_label_list.append(pred_cause_assist_label)

        pred_cause_assist_label = tf.reduce_sum(pred_cause_assist_label_list,
                                                axis=0)
        # sum over the layers so far; pred_assist_label => (32, 75, 75)

        pred_cause_assist_list.append(pred_cause_assist)
        reg_cause_assist_list.append(reg_cause_assist)
    '''*******Main******'''

    if FLAGS.main_n_layers > 1:
        senEncode_main = tf.concat([emotion_senEncode, cause_senEncode],
                                   axis=2)
        n_feature = 2 * out_units
        senEncode_main = trans_func(senEncode_main, senEncode_main, n_feature,
                                    out_units, 'main_layer1')
        senEncode_main = tf.concat([
            senEncode_main, pred_emotion_assist_label, pred_cause_assist_label
        ],
                                   axis=2)
        n_feature = out_units + 2 * FLAGS.max_doc_len
        senEncode_main = trans_func(senEncode_main, senEncode_main, n_feature,
                                    out_units, 'main_layer2')
    else:
        senEncode_main = tf.concat([emotion_senEncode, cause_senEncode],
                                   axis=2)
        n_feature = 2 * out_units
        senEncode_main = trans_func(senEncode_main, senEncode_main, n_feature,
                                    out_units, 'main_layer1')
    pred, reg = senEncode_main_softmax(senEncode_main, 'softmax_w',
                                       'softmax_b', out_units, doc_len)

    return pred, reg, pred_emotion_assist_list, reg_emotion_assist_list, pred_cause_assist_list, reg_cause_assist_list
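
Example #9 calls senEncode_emotion_softmax, senEncode_cause_softmax, and senEncode_main_softmax, none of which appear in these snippets. Their call sites match senEncode_softmax from Example #1, so a sketch under the assumption that they are per-task copies of it:

def senEncode_emotion_softmax(s_senEncode, w_name, b_name, n_feature, doc_len):
    # Assumed to mirror senEncode_softmax (Example #1) with task-specific
    # variable names: dropout, masked linear layer, softmax, plus an L2 term.
    s = tf.reshape(s_senEncode, [-1, n_feature])
    s = tf.nn.dropout(s, keep_prob=FLAGS.keep_prob2)
    w = func.get_weight_varible(w_name, [n_feature, FLAGS.n_class])
    b = func.get_weight_varible(b_name, [FLAGS.n_class])
    pred = tf.matmul(s, w) + b
    pred *= func.getmask(doc_len, FLAGS.max_doc_len, [-1, 1])
    pred = tf.nn.softmax(pred)
    pred = tf.reshape(pred, [-1, FLAGS.max_doc_len, FLAGS.n_class])
    reg = tf.nn.l2_loss(w) + tf.nn.l2_loss(b)
    return pred, reg

# senEncode_cause_softmax and senEncode_main_softmax would follow the same pattern.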