Example #1
    def test_sample(self, word_list, sess):
        # generate sequence structure and input dictionary
        state, feed_dict = rnn.sequence_rnn(self.rnn_cell, word_list, self.word_model, self.first_train)

        # project the final RNN state onto class scores and apply softmax
        H = tf.matmul(state, self.W) + self.B
        output = tf.nn.softmax(H)
        return sess.run(output, feed_dict=feed_dict)
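
A minimal usage sketch, assuming a hypothetical classifier instance (clf) that already carries the attributes used above (rnn_cell, W, B, word_model, first_train, class_num) and an open session; np.argmax simply picks the most probable class from the returned softmax row:

import numpy as np
import tensorflow as tf

sess = tf.Session()

# clf is a hypothetical instance of the class these methods belong to
probs = clf.test_sample(["this", "movie", "was", "great"], sess)  # shape (1, class_num)
predicted = int(np.argmax(probs, axis=1)[0])
print("predicted class:", predicted, "p =", probs[0, predicted])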
Example #2
    def train_sample(self, word_list, label, sess):
        # generate sequence structure and input dictionary
        state, feed_dict = rnn.sequence_rnn(self.rnn_cell, word_list, self.word_model, self.first_train)

        # one-hot encode the target label and add it to the feed dictionary
        label_vec = np.zeros((1, self.class_num))
        label_vec[0, label] = 1
        feed_dict[self.target] = label_vec

        # final layer: softmax over class scores, cross-entropy cost, SGD update
        H = tf.matmul(state, self.W) + self.B
        output = tf.nn.softmax(H)
        # the small epsilon keeps the log finite when a probability is near zero
        cost = tf.reduce_sum(-tf.log(output + 0.00001) * self.target)
        optimizer = tf.train.GradientDescentOptimizer(self.learn_rate).minimize(cost)

        if self.first_train:
            # one-time variable initialization (tf.global_variables_initializer() in newer TF)
            sess.run(tf.initialize_all_variables())
            self.first_train = False
        sess.run(optimizer, feed_dict=feed_dict)
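
A sketch of a driving loop for train_sample, again assuming the hypothetical clf instance and label indices in range(class_num); because the ops above are rebuilt on every call, the first_train flag is what restricts variable initialization to the first step:

import tensorflow as tf

# hypothetical toy data: (tokenized sentence, class index) pairs
train_data = [(["great", "acting"], 1), (["boring", "plot"], 0)]

with tf.Session() as sess:
    for epoch in range(10):
        for word_list, label in train_data:
            # one SGD step per sample; variables are initialized on the first call
            clf.train_sample(word_list, label, sess)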