Example 1
import tensorflow as tf
import tflearn

# NOTE: G_NUM, SUBC_NUM, C_NUM, SUBS_NUM, SEC_NUM, Citation_Dim and the
# helper _cat_weighted2 are assumed to be defined elsewhere in this module.


def create_citation2(in_dim, em_dim):
    # Building 'penNet'
    # Input layout: [text | subsection | class | subclass | group | citation]
    input_layer = tflearn.input_data(
        shape=[None,
               in_dim + SUBS_NUM + C_NUM + SUBC_NUM + G_NUM + Citation_Dim],
        name='input')
    offset = in_dim
    input_p = input_layer[:, 0:in_dim]  # word ids of the text
    input_ss = input_layer[:, offset:offset + SUBS_NUM]
    offset += SUBS_NUM
    input_c = input_layer[:, offset:offset + C_NUM]
    offset += C_NUM
    input_sc = input_layer[:, offset:offset + SUBC_NUM]
    offset += SUBC_NUM
    input_g = input_layer[:, offset:offset + G_NUM]
    offset += G_NUM
    input_citation = input_layer[:, offset:]
    # One trainable embedding matrix per taxonomy level.
    group_embedding = tf.Variable(tf.random_normal([G_NUM, em_dim]),
                                  name='group_embedding')
    subclass_embedding = tf.Variable(tf.random_normal([SUBC_NUM, em_dim]),
                                     name='subclass_embedding')
    class_embedding = tf.Variable(tf.random_normal([C_NUM, em_dim]),
                                  name='class_embedding')
    subsection_embedding = tf.Variable(tf.random_normal([SUBS_NUM, em_dim]),
                                       name='subsection_embedding')
    section_embedding = tf.Variable(tf.random_normal([SEC_NUM, em_dim]),
                                    name='section_embedding')
    # Encode the text: word embedding followed by an LSTM encoder.
    textual_inf = tflearn.embedding(input_p,
                                    input_dim=110240,
                                    output_dim=128,
                                    name='word_embedding')
    textual_embedding = tflearn.lstm(textual_inf,
                                     em_dim,
                                     dropout=0.8,
                                     name='lstm_weight')
    # Concatenate the text encoding with the citation features;
    # dimension = d(t) + d(c) = 256.
    network = tf.concat([textual_embedding, input_citation], 1)
    network = tflearn.fully_connected(network, em_dim, activation='softmax')
    # network = tflearn.bidirectional_rnn(network, tflearn.BasicLSTMCell(128), tflearn.BasicLSTMCell(128),
    #                                     name='bilstm_weight')
    # network = tflearn.dropout(network, 0.8)
    # Propagate the representation up the taxonomy, one level at a time.
    G_output = _cat_weighted2(network, group_embedding, input_g)
    SC_output = _cat_weighted2(G_output, subclass_embedding, input_sc)
    C_output = _cat_weighted2(SC_output, class_embedding, input_c)
    SS_output = _cat_weighted2(C_output, subsection_embedding, input_ss)
    network = tf.matmul(SS_output,
                        tf.transpose(section_embedding),
                        name='section_weight')
    # network = tflearn.fully_connected(network, SEC_NUM, activation='softmax')  # not sure
    network = tflearn.softmax(network)
    network = tflearn.regression(network,
                                 optimizer='adam',
                                 learning_rate=0.001,
                                 loss='categorical_crossentropy')
    return network
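
The helper _cat_weighted2 is not shown in this listing. Below is a minimal sketch of its likely shape: the signature matches the calls above, but the weighting rule is an assumption, not the original implementation.

# Hypothetical reconstruction of _cat_weighted2; not the original helper.
# x: [batch, em_dim], embedding: [n_cat, em_dim], indicator: [batch, n_cat].
def _cat_weighted2(x, embedding, indicator):
    # Score the current representation against every category embedding.
    scores = tf.matmul(x, tf.transpose(embedding))  # [batch, n_cat]
    # Bias the scores by the observed category indicator, then normalize.
    weights = tf.nn.softmax(scores * indicator)     # [batch, n_cat]
    # Mix the weighted category embedding back into the representation.
    return x + tf.matmul(weights, embedding)        # [batch, em_dim]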
Example 2
    def create_critic_network(self, Scope):
        # Placeholders for a single sequence (batch size 1).
        inputs = tf.placeholder(shape=[1, self.max_lenth], dtype=tf.int32, name="inputs")
        action = tf.placeholder(shape=[1, self.max_lenth], dtype=tf.int32, name="action")
        action_pos = tf.placeholder(shape=[1, None], dtype=tf.int32, name="action_pos")
        lenth = tf.placeholder(shape=[1], dtype=tf.int32, name="lenth")
        lenth_up = tf.placeholder(shape=[1], dtype=tf.int32, name="lenth_up")

        # Lower network: pick active or target word vectors based on the scope name.
        if Scope[-1] == 'e':
            vec = tf.nn.embedding_lookup(self.wordvector, inputs)
            print("active")
        else:
            vec = tf.nn.embedding_lookup(self.target_wordvector, inputs)
            print("target")
        # LSTMCell is assumed to be imported at module level, e.g.
        # from tensorflow.contrib.rnn import LSTMCell (TF 1.x API).
        cell = LSTMCell(self.dim, initializer=self.init, state_is_tuple=False)
        self.state_size = cell.state_size
        actions = tf.to_float(action)
        h = cell.zero_state(1, tf.float32)
        embedding = []
        for step in range(self.max_lenth):
            with tf.variable_scope("Lower/" + Scope, reuse=True):
                o, h = cell(vec[:, step, :], h)
            embedding.append(o[0])
            # Reset the recurrent state wherever an action fires.
            h = h * (1.0 - actions[0, step])

        # Upper network: keep only the lower outputs at the action positions.
        embedding = tf.stack(embedding)
        embedding = tf.gather(embedding, action_pos, name="Upper_input")
        with tf.variable_scope("Upper", reuse=True):
            out, _ = tf.nn.bidirectional_dynamic_rnn(cell, cell, embedding, lenth_up, dtype=tf.float32, scope=Scope)

        if self.isAttention:
            # Additive attention over the concatenated bidirectional outputs.
            out = tf.concat(out, 2)
            out = out[0, :, :]
            tmp = tflearn.fully_connected(out, self.dim, scope=Scope, name="att")
            tmp = tflearn.tanh(tmp)
            with tf.variable_scope(Scope):
                v_T = tf.get_variable("v_T", dtype=tf.float32, shape=[self.dim, 1], trainable=True)
            a = tflearn.softmax(tf.matmul(tmp, v_T))
            # Attention-weighted sum over time, restored to batch shape.
            out = tf.reduce_sum(out * a, 0)
            out = tf.expand_dims(out, 0)
        else:
            # No attention: concatenate the last forward output with the first backward output.
            out = tf.concat((out[0][:, -1, :], out[1][:, 0, :]), 1)

        out = tflearn.dropout(out, self.keep_prob)
        out = tflearn.fully_connected(out, self.grained, scope=Scope+"/pred", name="get_pred")
        return inputs, action, action_pos, lenth, lenth_up, out
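
A rough usage sketch for the critic network above. Here agent is a hypothetical instance of the surrounding class, and the feed values are placeholders for illustration only.

# Hypothetical usage; shapes follow the placeholders returned above (TF 1.x).
import numpy as np

inputs, action, action_pos, lenth, lenth_up, out = agent.create_critic_network("active")
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    score = sess.run(out, feed_dict={
        inputs: np.zeros((1, agent.max_lenth), dtype=np.int32),  # token ids
        action: np.zeros((1, agent.max_lenth), dtype=np.int32),  # 0/1 action flags
        action_pos: [[0]],         # positions gathered for the upper network
        lenth: [agent.max_lenth],
        lenth_up: [1],
    })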
Example 3
def create_citation(in_dim, em_dim):
    # Building 'citation'
    # Same module-level assumptions as Example 1 (tf, tflearn, the *_NUM
    # constants and the _cat_weighted1 helper).
    # Input layout: [text | subsection | class | subclass | group]
    input_layer = tflearn.input_data(
        shape=[None, in_dim + SUBS_NUM + C_NUM + SUBC_NUM + G_NUM],
        name='input')
    offset = in_dim
    input_p = input_layer[:, 0:in_dim]  # word ids of the text
    input_ss = input_layer[:, offset:offset + SUBS_NUM]
    offset += SUBS_NUM
    input_c = input_layer[:, offset:offset + C_NUM]
    offset += C_NUM
    input_sc = input_layer[:, offset:offset + SUBC_NUM]
    offset += SUBC_NUM
    input_g = input_layer[:, offset:]
    group_embedding = tf.Variable(tf.random_normal([G_NUM, em_dim]),
                                  name='group_embedding')
    subclass_embedding = tf.Variable(tf.random_normal([SUBC_NUM, em_dim]),
                                     name='subclass_embedding')
    class_embedding = tf.Variable(tf.random_normal([C_NUM, em_dim]),
                                  name='class_embedding')
    subsection_embedding = tf.Variable(tf.random_normal([SUBS_NUM, em_dim]),
                                       name='subsection_embedding')
    section_embedding = tf.Variable(tf.random_normal([SEC_NUM, em_dim]),
                                    name='section_embedding')
    # Encode the text: word embedding followed by an LSTM encoder.
    network = tflearn.embedding(input_p,
                                input_dim=110240,
                                output_dim=128,
                                name='word_embedding')
    network = tflearn.lstm(network, em_dim, dropout=0.8, name='lstm_weight')
    # Propagate the representation up the taxonomy, one level at a time.
    G_output = _cat_weighted1(network, group_embedding, input_g)
    SC_output = _cat_weighted1(G_output, subclass_embedding, input_sc)
    C_output = _cat_weighted1(SC_output, class_embedding, input_c)
    SS_output = _cat_weighted1(C_output, subsection_embedding, input_ss)
    network = tf.matmul(SS_output,
                        tf.transpose(section_embedding),
                        name='section_weight')
    # network = tflearn.fully_connected(network, SEC_NUM, activation='softmax')  # not sure
    network = tflearn.softmax(network)
    network = tflearn.regression(network,
                                 optimizer='adam',
                                 learning_rate=0.001,
                                 loss='categorical_crossentropy')
    return network
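
Both create_citation and create_citation2 return a tflearn regression layer, so training follows the standard tflearn pattern. A minimal sketch, assuming X is the concatenated feature matrix described above and Y holds one-hot section labels (both hypothetical names):

# Hypothetical training call; X and Y stand in for real data arrays.
network = create_citation(in_dim=100, em_dim=128)  # example dimensions only
model = tflearn.DNN(network, tensorboard_verbose=0)
model.fit(X, Y, n_epoch=10, batch_size=32, show_metric=True)
model.save('citation_model.tfl')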