Example #1
    def run(self):
        print "step 1 : load data ..."
        self.x1, self.y1, self.x2, self.y2 = load_data(self.input_file[0],
                                                       self.test_index[0],
                                                       self.n_class,
                                                       self.embedding_dim)

        # print self.x1[0]
        # print len(self.x2)
        # print len(self.y2)

        print "step 2 : scale data ..."
        self.scale()

        # # print self.x1[0]

        print "step 3 : logistic regression ..."
        lr = LogisticRegression()
        lr.fit(self.x1, self.y1)

        print "step 4 : predict ... "
        # print len(self.x2)
        y_pred_cause = lr.predict(self.x2)
        # print len(y_pred_cause)
        y_true_cause = self.y2
        # print len(self.y2)
        Emo_eval_cause(y_true_cause, y_pred_cause)
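Emo_eval_cause is not defined in this listing. A minimal sketch of the kind of positive-class precision/recall/F1 report such a helper could produce (an assumption for illustration, not the project's actual implementation):

import numpy as np

def eval_cause_sketch(y_true, y_pred):
    # Treat label 1 as "cause" and report P/R/F1 for that class (assumed
    # behaviour; the real Emo_eval_cause may report more).
    y_true, y_pred = np.asarray(y_true), np.asarray(y_pred)
    tp = np.sum((y_pred == 1) & (y_true == 1))
    fp = np.sum((y_pred == 1) & (y_true == 0))
    fn = np.sum((y_pred == 0) & (y_true == 1))
    p = tp / float(tp + fp) if tp + fp else 0.
    r = tp / float(tp + fn) if tp + fn else 0.
    f1 = 2 * p * r / (p + r) if p + r else 0.
    print('cause P={:.4f} R={:.4f} F1={:.4f}'.format(p, r, f1))
    return p, r, f1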
Example #2
    def run(self):
        #   each x_i is padded as tmp_x = np.zeros((max_doc_len, max_sen_len), dtype=np.int)
        inputs = tf.nn.embedding_lookup(self.word_embedding, self.x)
        # flatten away the max_doc_len level of the hierarchy
        # inputs contains empty (padding) sentences; they can be filtered out
        # at the doc level via doc_len
        inputs = tf.reshape(inputs,
                            [-1, self.max_sentence_len, self.embedding_dim])
        # prob_doc, prob_sen = self.model(inputs)
        prob_sen = self.model(inputs, self.aspect_id)

        prob_sen_op = prob_sen  # alias fetched by sess.run in test()

        # define the loss and the L2 regularization term
        with tf.name_scope('loss'):
            # doc-level loss
            # cost_doc = - tf.reduce_mean(self.y_doc * tf.log(prob_doc)) * 100
            # sentence-level loss
            # prob_sen  [-1, self.max_doc_len, self.n_sentence_class]
            # y_sen
            # rows that are [0, 0] must be dropped from prob_sen and y_sen

            y_sen_for_loss = tf.reshape(self.y_sen,
                                        [-1, self.n_sentence_class])
            prob_sen_for_loss = tf.reshape(prob_sen,
                                           [-1, self.n_sentence_class])

            valid_num = tf.cast(tf.reduce_sum(self.doc_len), dtype=tf.float32)

            cost_sen = -tf.reduce_sum(
                y_sen_for_loss * tf.log(prob_sen_for_loss)) / valid_num * 100
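            # Note: dividing the summed cross-entropy by sum(doc_len) averages
            # only over real sentences; padded slots carry an all-zero one-hot
            # target, so y * log(p) vanishes for them (assuming prob_sen is a
            # strictly positive softmax output). E.g. doc_len = [2, 3] gives
            # divisor 5, not 2 * max_doc_len.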

            # cost_joint = self.alpha*cost_doc + (1-self.alpha)*cost_sen

            reg, variables = tf.nn.l2_loss(self.word_embedding), []

            # variables.append('softmax_sentence')
            # variables.append('softmax_doc')
            variables.append('softmax_sen')

            for vari in variables:
                reg += tf.nn.l2_loss(self.weights[vari]) + \
                    tf.nn.l2_loss(self.biases[vari])

            # # add attention parameters
            # reg += tf.nn.l2_loss(self.weights['w_1']) + tf.nn.l2_loss(self.biases['w_1'])
            # reg += tf.nn.l2_loss(self.weights['u_1'])

            # reg += tf.nn.l2_loss(self.weights['w_2']) + tf.nn.l2_loss(self.biases['w_2'])
            # reg += tf.nn.l2_loss(self.weights['u_2'])

            # cost_joint += reg * self.l2_reg
            cost_sen += reg * self.l2_reg

        # define the optimizer, i.e. the op that minimizes cost
        with tf.name_scope('train'):
            global_step = tf.Variable(0,
                                      name="tr_global_step",
                                      trainable=False)
            optimizer = tf.train.AdamOptimizer(
                learning_rate=self.learning_rate).minimize(
                    cost_sen, global_step=global_step)

        # compute the model's prediction accuracy
        # ACC is computed inside the session
        # the two tasks' accuracies are computed separately for now
        # doc-level accuracy:  emotion classification
        # sen-level accuracy:  emotion-cause detection
        # with tf.name_scope('predict'):

        #     #  doc 层
        #     correct_pred_doc = tf.equal(tf.argmax(prob_doc, 1), tf.argmax(self.y_doc, 1))
        #     accuracy_doc = tf.reduce_mean(tf.cast(correct_pred_doc, tf.float32))
        #     correct_num_doc = tf.reduce_sum(tf.cast(correct_pred_doc, tf.int32))

        # # sentence level
        # y_sen_for_acc =  tf.reshape(self.y_sen, [-1, self.n_sentence_class])
        # prob_sen_for_acc =  tf.reshape(prob_sen, [-1, self.n_sentence_class])

        # # cast [1, 0] rows to [True, False] booleans
        # keep_row = tf.reduce_any(tf.cast(y_sen_for_acc, dtype = tf.bool), axis = 1)

        # # return the indices where keep_row is True
        # (many numpy functions cannot be used here, inside the graph)
        # indices = []

        # y_sen_for_acc = tf.gather(y_sen_for_acc, indices)
        # prob_sen_for_acc = tf.gather(prob_sen_for_acc, indices)

        # correct_pred_sen = tf.equal(tf.argmax(prob_sen_for_acc, 1), tf.argmax(y_sen_for_acc, 1))

        # accuracy_sen = tf.reduce_mean(tf.cast(correct_pred_sen, tf.float32))

        # correct_num_sen = tf.reduce_sum(tf.cast(correct_pred_sen, tf.int32))

        # # overall model ACC
        # correct_pred_joint = tf.concat([correct_pred_doc, correct_pred_sen], axis = 0)
        # accuracy_joint = tf.reduce_mean(tf.cast(correct_pred_joint, tf.float32))
        # correct_num_joint = tf.reduce_sum(tf.cast(correct_pred_joint, tf.int32))

        # enable summaries to visualize the training process
        with tf.name_scope('summary'):
            localtime = time.strftime("%X %Y-%m-%d", time.localtime())
            Summary_dir = 'Summary/' + localtime

            info = 'batch-{}, lr-{}, kb-{}, l2_reg-{}'.format(
                self.batch_size, self.learning_rate, self.keep_prob1,
                self.l2_reg)
            info = info + '\ntrain_file_path:' + self.train_file_path + '\ntest_index:' + str(
                self.test_index) + '\nembedding_type:' + str(
                    self.embedding_type) + '\nMethod: BiLSTM_ATT_dis_one-hot'

            # accuracy_doc
            # summary_acc_doc = tf.summary.scalar('ACC_doc ' + info, accuracy_doc)

            # summary_loss_doc = tf.summary.scalar('LOSS_doc ' + info, cost_doc)
            summary_loss_sen = tf.summary.scalar('LOSS_sen ' + info, cost_sen)
            # summary_loss_joint = tf.summary.scalar('LOSS_joint ' + info, cost_joint)

            # summary_op = tf.summary.merge([summary_acc_doc, summary_loss_doc, summary_loss_sen, summary_loss_joint])
            summary_op = tf.summary.merge([summary_loss_sen])

            # test_acc_doc = tf.placeholder(tf.float32)
            # test_acc_sen = tf.placeholder(tf.float32)
            # test_acc_joint = tf.placeholder(tf.float32)

            test_loss_doc = tf.placeholder(tf.float32)
            test_loss_sen = tf.placeholder(tf.float32)
            test_loss_joint = tf.placeholder(tf.float32)

            # summary_test_acc_doc = tf.summary.scalar('test_ACC_doc ' + info, test_acc_doc)
            # summary_test_acc_sen = tf.summary.scalar('test_ACC_sen ' + info, test_acc_sen)
            # summary_test_acc_joint = tf.summary.scalar('test_ACC_joint ' + info, test_acc_joint)

            # summary_test_loss_doc = tf.summary.scalar('test_LOSS_doc ' + info, test_loss_doc)
            summary_test_loss_sen = tf.summary.scalar('test_LOSS_sen ' + info,
                                                      test_loss_sen)
            # summary_test_loss_joint = tf.summary.scalar('test_LOSS_joint ' + info, test_loss_joint)

            summary_test = tf.summary.merge([summary_test_loss_sen])

            train_summary_writer = tf.summary.FileWriter(Summary_dir +
                                                         '/train')
            test_summary_writer = tf.summary.FileWriter(Summary_dir + '/test')

        with tf.name_scope('saveModel'):
            saver = tf.train.Saver(write_version=tf.train.SaverDef.V2)
            save_dir = 'Models/' + localtime + '/'
            if not os.path.exists(save_dir):
                os.makedirs(save_dir)

        with tf.name_scope('readData'):
            print '----------{}----------'.format(
                time.strftime("%Y-%m-%d %X", time.localtime()))
            tr_x, tr_y, tr_y_sen, tr_sen_len, tr_doc_len, tr_aspect_id, te_x, te_y, te_y_sen, te_sen_len, te_doc_len, te_aspect_id = load_data(
                self.train_file_path, self.word_id_mapping,
                self.max_sentence_len, self.max_doc_len, self.test_index,
                self.n_doc_class)

            print 'train docs: {}    test docs: {}'.format(
                len(tr_y), len(te_y))
            print 'training_iter:', self.training_iter
            print info
            print '----------{}----------'.format(
                time.strftime("%Y-%m-%d %X", time.localtime()))

        tf_config = tf.ConfigProto()
        tf_config.gpu_options.allow_growth = True

        with tf.Session(config=tf_config) as sess:

            sess.run(tf.initialize_all_variables())

            max_f, bestIter = 0., 0

            def test():
                feed_dict = {
                    self.x: te_x,
                    self.y_doc: te_y,
                    self.y_sen: te_y_sen,
                    self.sen_len: te_sen_len,
                    self.doc_len: te_doc_len,
                    self.aspect_id: te_aspect_id,
                    # self.keep_prob: 1.0,
                    self.keep_prob1: FLAGS.keep_prob1,
                    self.keep_prob2: FLAGS.keep_prob2,
                }

                loss_sen, prob_sen = sess.run([cost_sen, prob_sen_op],
                                              feed_dict=feed_dict)

                # doc
                # correct_pred_doc = np.equal(np.argmax(prob_doc, 1), np.argmax(te_y, 1))
                # acc_doc = np.mean(correct_pred_doc)

                # sen
                # prob_sen  [-1, self.max_doc_len, self.n_sentence_class]
                # reshape to [-1, self.n_sentence_class]
                te_pred_sen_p = np.reshape(prob_sen,
                                           [-1, self.n_sentence_class])
                te_true_sen = np.reshape(te_y_sen, [-1, self.n_sentence_class])

                # print "len te_true_sen",len(te_true_sen)

                # drop the [0, 0] padding rows
                keep_row = np.any(te_true_sen, axis=1)
                te_pred_sen_p = te_pred_sen_p[keep_row]
                te_true_sen = te_true_sen[keep_row]

                # print "after rmv [0,0] len te_true_sen",len(te_true_sen)

                # get the predicted labels
                te_pred_sen = np.argmax(te_pred_sen_p, axis=1)
                te_true_sen = np.argmax(te_true_sen, axis=1)
                te_pred_sen_p = te_pred_sen_p[:, 1]
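                # column 1 holds the positive-class (cause) probability; it is
                # computed here but only the hard labels are used below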

                correct_pred_sen = np.equal(te_pred_sen, te_true_sen)
                acc_sen = np.mean(correct_pred_sen)

                # joint
                # correct_pred_joint = np.concatenate([correct_pred_doc, correct_pred_sen], axis = 0)
                # acc_joint = np.mean(correct_pred_joint)

                y_pred_cause = te_pred_sen
                y_true_cause = te_true_sen

                class_dict = {0: 0, 1: 1}

                fscore_dict = calc_fscore(y_pred_cause, y_true_cause,
                                          class_dict)

                f_sen = fscore_dict[class_dict[1]]

                return loss_sen, acc_sen, f_sen, y_pred_cause, y_true_cause

            def new_test(y_pred_sen, y_true_sen):
                feed_dict = {
                    self.x: te_x,
                    self.y_doc: te_y,
                    self.y_sen: te_y_sen,
                    self.sen_len: te_sen_len,
                    self.doc_len: te_doc_len,
                    self.aspect_id: te_aspect_id,
                    # self.keep_prob: 1.0,
                    self.keep_prob1: FLAGS.keep_prob1,
                    self.keep_prob2: FLAGS.keep_prob2,
                }

                # # # doc level
                # # y_pred_doc_p = sess.run(prob_doc_op, feed_dict=feed_dict)

                # # y_pred_doc = prob2one_hot(y_pred_doc_p, self.n_doc_class)
                # # y_true_doc = te_y

                # # ''' test '''
                # # print "output sample of doc..."

                # # print('y_true_doc[0] : {}'.format(y_true_doc[0]))
                # # print('y_pred_doc_p[0] : {}'.format(y_pred_doc_p[0]))

                # # print('y_pred_doc[0] : {}'.format(y_pred_doc[0]))

                # # Emo_eval_emotion(y_true_doc, y_pred_doc)

                # # sentence level
                # # prob_sen  [-1, self.max_doc_len, self.n_sentence_class]
                # y_pred_sen_p = sess.run(prob_sen_op, feed_dict=feed_dict)
                # y_true_sen = te_y_sen

                # # reshape to [-1, self.n_sentence_class]
                # y_pred_sen_p = np.reshape(y_pred_sen_p, [-1, self.n_sentence_class])
                # y_true_sen = np.reshape(y_true_sen, [-1, self.n_sentence_class])

                # # print "len y_true_sen",len(y_true_sen)

                # # drop the [0, 0] padding rows
                # keep_row = np.any(y_true_sen, axis = 1)
                # y_pred_sen_p = y_pred_sen_p[keep_row]
                # y_true_sen = y_true_sen[keep_row]

                # # print "after rmv [0,0] len y_true_sen",len(y_true_sen)

                # print "\n output sample of sen..."
                # print('y_true_sen[0] : {}'.format(y_true_sen[0]))
                # print('y_pred_sen_p[0] : {}'.format(y_pred_sen_p[0]))

                # # get the predicted labels
                # y_true_sen = np.argmax(y_true_sen, axis = 1)

                # y_pred_sen = np.argmax(y_pred_sen_p, axis = 1)

                # y_pred_sen_p = y_pred_sen_p[:,1]

                # print('y_pred_sen_p1[0] : {}'.format(y_pred_sen_p[0]))

                Emo_eval_cause(y_true_sen, y_pred_sen)

                # add error analysis
                # collect the examples whose true label is 1 but were predicted 0

                error_index = []

                for i in range(len(y_true_sen)):
                    if y_true_sen[i] == 1 and y_pred_sen[i] == 0:
                        error_index.append(i)

                return np.asarray(error_index)

            error_index = []
            best_y_pred_cause = []
            best_y_true_cause = []

            for i in xrange(self.training_iter):

                # starttime = datetime.datetime.now()

                for train, _ in self.get_batch_data(tr_x,
                                                    tr_y,
                                                    tr_y_sen,
                                                    tr_sen_len,
                                                    tr_doc_len,
                                                    tr_aspect_id,
                                                    self.batch_size,
                                                    FLAGS.keep_prob1,
                                                    FLAGS.keep_prob2,
                                                    test=False):

                    _, step, summary, loss_sen = sess.run(
                        [optimizer, global_step, summary_op, cost_sen],
                        feed_dict=train)
                    train_summary_writer.add_summary(summary, step)
                    print 'Iter {}: mini-batch loss_sen={:.6f}'.format(
                        step, loss_sen)

                # endtime = datetime.datetime.now()
                # runtime = (endtime-starttime).seconds
                # print "time cost = {}".format(runtime)

                if i % self.display_step == 0:

                    loss_sen, acc_sen, f_sen, y_pred_cause, y_true_cause = test()

                    if f_sen > max_f:

                        error_index = new_test(y_pred_cause, y_true_cause)
                        best_y_pred_cause = y_pred_cause
                        best_y_true_cause = y_true_cause

                        max_f = f_sen
                        bestIter = step
                        saver.save(sess, save_dir, global_step=step)


                    summary = sess.run(summary_test,
                                       feed_dict={test_loss_sen: loss_sen})

                    test_summary_writer.add_summary(summary, step)

                    print '----------{}----------'.format(
                        time.strftime("%Y-%m-%d %X", time.localtime()))

                    print 'Iter {}: test loss_sen={:.6f}, test acc_sen={:.6f}'.format(
                        step, loss_sen, acc_sen)

                    print 'round {}: max_f_cause={} BestIter={}\n'.format(
                        i, max_f, bestIter)

            # new_test()
            # print "len error_index ",len(error_index)
            # np.savetxt('error_index_output.txt', error_index, fmt="%d", delimiter="")
            # np.savetxt('best_y_pred_cause_output.txt', best_y_pred_cause, fmt="%d", delimiter="")
            # np.savetxt('best_y_true_cause_output.txt', best_y_true_cause, fmt="%d", delimiter="")

            print "Evalue for best_y_pred_cause ..."
            fold_result = Emo_eval_cause(best_y_true_cause, best_y_pred_cause)

            print 'Optimization Finished!'

            return fold_result
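The [0, 0]-row masking inside test() above is easy to check in isolation. A self-contained NumPy sketch of the same idea (illustration only, not part of the original example):

import numpy as np

# Padded sentence slots carry an all-zero one-hot target [0, 0];
# np.any(..., axis=1) keeps only rows with a real label.
y_true = np.array([[1, 0], [0, 1], [0, 0], [0, 0]])
y_prob = np.array([[0.9, 0.1], [0.2, 0.8], [0.5, 0.5], [0.5, 0.5]])

keep_row = np.any(y_true, axis=1)              # [True, True, False, False]
y_pred = np.argmax(y_prob[keep_row], axis=1)   # [0, 1]
y_gold = np.argmax(y_true[keep_row], axis=1)   # [0, 1]
print('accuracy = {}'.format(np.mean(y_pred == y_gold)))   # 1.0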
Example #3
    def run(self):
        #   each x_i is padded as tmp_x = np.zeros((max_doc_len, max_sen_len), dtype=np.int)
        inputs = tf.nn.embedding_lookup(self.word_embedding, self.x)
        # flatten away the max_doc_len level of the hierarchy
        inputs = tf.reshape(inputs,
                            [-1, self.max_sentence_len, self.embedding_dim])
        prob_doc = self.model(inputs)

        # define the loss and the L2 regularization term
        with tf.name_scope('loss'):
            cost = -tf.reduce_mean(self.y_doc * tf.log(prob_doc))

            reg, variables = tf.nn.l2_loss(self.word_embedding), []

            # variables.append('softmax_sentence')
            variables.append('softmax_doc')

            for vari in variables:
                reg += tf.nn.l2_loss(self.weights[vari]) + \
                    tf.nn.l2_loss(self.biases[vari])

            # add attention parameters
            reg += tf.nn.l2_loss(self.weights['w_1']) + tf.nn.l2_loss(
                self.biases['w_1'])
            reg += tf.nn.l2_loss(self.weights['u_1'])

            reg += tf.nn.l2_loss(self.weights['w_2']) + tf.nn.l2_loss(
                self.biases['w_2'])
            reg += tf.nn.l2_loss(self.weights['u_2'])

            cost += reg * self.l2_reg

        # define the optimizer, i.e. the op that minimizes cost
        with tf.name_scope('train'):
            global_step = tf.Variable(0,
                                      name="tr_global_step",
                                      trainable=False)
            optimizer = tf.train.AdamOptimizer(
                learning_rate=self.learning_rate).minimize(
                    cost, global_step=global_step)

        # compute the model's prediction accuracy
        with tf.name_scope('predict'):
            correct_pred = tf.equal(tf.argmax(prob_doc, 1),
                                    tf.argmax(self.y_doc, 1))
            accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
            correct_num = tf.reduce_sum(tf.cast(correct_pred, tf.int32))

        # enable summaries to visualize the training process
        with tf.name_scope('summary'):
            localtime = time.strftime("%X %Y-%m-%d", time.localtime())
            Summary_dir = 'Summary/' + localtime

            info = 'batch-{}, lr-{}, kb-{}, l2_reg-{}'.format(
                self.batch_size, self.learning_rate, self.keep_prob1,
                self.l2_reg)
            info = info + '\ntrain_file_path:' + self.train_file_path + '\ntest_index:' + str(
                self.test_index) + '\nembedding_type:' + str(
                    self.embedding_type) + '\nMethod: Emotion_CNN_prob'
            summary_acc = tf.summary.scalar('ACC ' + info, accuracy)
            summary_loss = tf.summary.scalar('LOSS ' + info, cost)
            summary_op = tf.summary.merge([summary_loss, summary_acc])

            test_acc = tf.placeholder(tf.float32)
            test_loss = tf.placeholder(tf.float32)
            summary_test_acc = tf.summary.scalar('ACC ' + info, test_acc)
            summary_test_loss = tf.summary.scalar('LOSS ' + info, test_loss)
            summary_test = tf.summary.merge(
                [summary_test_loss, summary_test_acc])

            train_summary_writer = tf.summary.FileWriter(Summary_dir +
                                                         '/train')
            test_summary_writer = tf.summary.FileWriter(Summary_dir + '/test')

        with tf.name_scope('saveModel'):
            saver = tf.train.Saver(write_version=tf.train.SaverDef.V2)
            save_dir = 'Models/' + localtime + '/'
            if not os.path.exists(save_dir):
                os.makedirs(save_dir)

        with tf.name_scope('readData'):
            print '----------{}----------'.format(
                time.strftime("%Y-%m-%d %X", time.localtime()))
            tr_x, tr_y, tr_sen_len, tr_doc_len, te_x, te_y, te_sen_len, te_doc_len = load_data(
                self.train_file_path, self.word_id_mapping,
                self.max_sentence_len, self.max_doc_len, self.test_index,
                self.n_doc_class)
            print 'train docs: {}    test docs: {}'.format(
                len(tr_y), len(te_y))
            print 'training_iter:', self.training_iter
            print info
            print '----------{}----------'.format(
                time.strftime("%Y-%m-%d %X", time.localtime()))

        tf_config = tf.ConfigProto()
        tf_config.gpu_options.allow_growth = True

        with tf.Session(config=tf_config) as sess:
            sess.run(tf.initialize_all_variables())
            max_acc, bestIter = 0., 0

            def test():
                feed_dict = {
                    self.x: te_x,
                    self.y_doc: te_y,
                    self.sen_len: te_sen_len,
                    self.doc_len: te_doc_len,
                    # self.keep_prob: 1.0,
                    self.keep_prob1: FLAGS.keep_prob1,
                    self.keep_prob2: FLAGS.keep_prob2,
                }
                loss, acc = sess.run([cost, accuracy], feed_dict=feed_dict)
                return loss, acc

            def new_test():
                feed_dict = {
                    self.x: te_x,
                    self.y_doc: te_y,
                    self.sen_len: te_sen_len,
                    self.doc_len: te_doc_len,
                    # self.keep_prob: 1.0,
                    self.keep_prob1: FLAGS.keep_prob1,
                    self.keep_prob2: FLAGS.keep_prob2,
                }

                y_pred_doc_p = sess.run(prob_doc, feed_dict=feed_dict)

                y_pred_doc = prob2one_hot(y_pred_doc_p, self.n_doc_class)

                y_true_doc = te_y
                ''' test '''

                print('y_true_doc[0] : {}'.format(y_true_doc[0]))
                print('y_pred_doc_p[0] : {}'.format(y_pred_doc_p[0]))

                print('y_pred_doc[0] : {}'.format(y_pred_doc[0]))

                Emo_eval_emotion(y_true_doc, y_pred_doc)

            for i in xrange(self.training_iter):

                # starttime = datetime.datetime.now()

                for train, _ in self.get_batch_data(tr_x,
                                                    tr_y,
                                                    tr_sen_len,
                                                    tr_doc_len,
                                                    self.batch_size,
                                                    FLAGS.keep_prob1,
                                                    FLAGS.keep_prob2,
                                                    test=False):
                    # print train
                    _, step, summary, loss, acc = sess.run(
                        [optimizer, global_step, summary_op, cost, accuracy],
                        feed_dict=train)
                    train_summary_writer.add_summary(summary, step)
                    print 'Iter {}: mini-batch loss={:.6f}, acc={:.6f}'.format(
                        step, loss, acc)

                # endtime = datetime.datetime.now()
                # runtime = (endtime-starttime).seconds
                # print "time cost = {}".format(runtime)

                if i % self.display_step == 0:

                    loss, acc = test()

                    if acc > max_acc:
                        new_test()
                        max_acc = acc
                        bestIter = step
                        saver.save(sess, save_dir, global_step=step)

                    summary = sess.run(summary_test,
                                       feed_dict={
                                           test_loss: loss,
                                           test_acc: acc
                                       })
                    test_summary_writer.add_summary(summary, step)
                    print '----------{}----------'.format(
                        time.strftime("%Y-%m-%d %X", time.localtime()))
                    print 'Iter {}: test loss={:.6f}, test acc={:.6f}'.format(
                        step, loss, acc)
                    print 'round {}: max_acc={} BestIter={}\n'.format(
                        i, max_acc, bestIter)

            # new_test()

            print 'Optimization Finished!'
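prob2one_hot, used by new_test() above, is not shown in this listing either. A plausible minimal version, assuming it simply one-hot encodes the argmax of each probability row:

import numpy as np

def prob2one_hot(prob, n_class):
    # pick the most probable class per row and one-hot encode it
    prob = np.asarray(prob)
    one_hot = np.zeros((len(prob), n_class))
    one_hot[np.arange(len(prob)), np.argmax(prob, axis=1)] = 1
    return one_hot

print(prob2one_hot([[0.1, 0.7, 0.2]], 3))  # [[0. 1. 0.]]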
Example #4
    def run(self):
        inputs = tf.nn.embedding_lookup(self.word_embedding, self.x)
        prob = self.model(inputs)

        with tf.name_scope('loss'):
            cost = -tf.reduce_mean(self.y * tf.log(prob))
            reg, variables = tf.nn.l2_loss(self.word_embedding), ['softmax']
            for vari in variables:
                reg += tf.nn.l2_loss(self.weights[vari]) + \
                    tf.nn.l2_loss(self.biases[vari])
            # add attention parameters
            reg += tf.nn.l2_loss(self.weights['w_1']) + tf.nn.l2_loss(
                self.biases['w_1'])
            reg += tf.nn.l2_loss(self.weights['u_1'])
            cost += reg * self.l2_reg

        with tf.name_scope('train'):
            global_step = tf.Variable(0,
                                      name="tr_global_step",
                                      trainable=False)
            optimizer = tf.train.AdamOptimizer(
                learning_rate=self.learning_rate).minimize(
                    cost, global_step=global_step)

        with tf.name_scope('predict'):
            correct_pred = tf.equal(tf.argmax(prob, 1), tf.argmax(self.y, 1))
            accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
            correct_num = tf.reduce_sum(tf.cast(correct_pred, tf.int32))

        with tf.name_scope('summary'):
            localtime = time.strftime("%X %Y-%m-%d", time.localtime())
            Summary_dir = 'Summary/' + localtime

            info = 'batch-{}, lr-{}, kb-{}, l2_reg-{}'.format(
                self.batch_size, self.learning_rate, self.Keep_Prob,
                self.l2_reg)
            info = info + '\ntrain_file_path:' + self.train_file_path + '\ntest_index:' + str(
                self.test_index) + '\nembedding_type:' + str(
                    self.embedding_type) + '\nMethod: Emotion_GRU'
            summary_acc = tf.summary.scalar('ACC ' + info, accuracy)
            summary_loss = tf.summary.scalar('LOSS ' + info, cost)
            summary_op = tf.summary.merge([summary_loss, summary_acc])

            test_acc = tf.placeholder(tf.float32)
            test_loss = tf.placeholder(tf.float32)
            summary_test_acc = tf.summary.scalar('ACC ' + info, test_acc)
            summary_test_loss = tf.summary.scalar('LOSS ' + info, test_loss)
            summary_test = tf.summary.merge(
                [summary_test_loss, summary_test_acc])

            train_summary_writer = tf.summary.FileWriter(Summary_dir +
                                                         '/train')
            test_summary_writer = tf.summary.FileWriter(Summary_dir + '/test')

        with tf.name_scope('saveModel'):
            saver = tf.train.Saver(write_version=tf.train.SaverDef.V2)
            save_dir = 'Models/' + localtime + '/'
            if not os.path.exists(save_dir):
                os.makedirs(save_dir)

        with tf.name_scope('readData'):
            print '----------{}----------'.format(
                time.strftime("%Y-%m-%d %X", time.localtime()))
            tr_x, tr_y, tr_doc_len, te_x, te_y, te_doc_len = load_data(
                self.train_file_path,
                self.word_id_mapping,
                self.max_doc_len,
                self.test_index,
                self.n_class,
            )
            print 'train docs: {}    test docs: {}'.format(
                len(tr_y), len(te_y))
            print 'training_iter:', self.training_iter
            print info
            print '----------{}----------'.format(
                time.strftime("%Y-%m-%d %X", time.localtime()))

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True

        with tf.Session(config=config) as sess:
            sess.run(tf.initialize_all_variables())
            max_acc, bestIter = 0., 0
            max_AP = 0.

            def test():
                feed_dict = {
                    self.x: te_x,
                    self.doc_len: te_doc_len,
                    self.keep_prob: 1.0,
                }
                y_true = te_y
                y_pred_p = sess.run(prob, feed_dict=feed_dict)
                AP = label_ranking_average_precision_score(y_true, y_pred_p)
                return AP
                # acc, loss, cnt = 0., 0., 0
                # for test, num in self.get_batch_data(te_x, te_y, te_doc_len, 20, keep_prob=1.0, test=True):
                #     _loss, _acc = sess.run([cost, correct_num], feed_dict=test)
                #     acc += _acc
                #     loss += _loss * num
                #     cnt += num
                # loss = loss / cnt
                # acc = acc / cnt
                # return loss, acc

            def new_test():
                feed_dict = {
                    self.x: te_x,
                    self.doc_len: te_doc_len,
                    self.keep_prob: 1.0,
                }
                y_true = te_y
                y_pred_p = sess.run(prob, feed_dict=feed_dict)
                # y_pred = calibrated_label_ranking(y_pred_p, For_calibrated_B)
                # hard labels: predict an emotion when its probability exceeds 1/9
                y_pred = np.ceil(y_pred_p - 1.0 / 9)
                Emotion_eval(y_true, y_pred, y_pred_p)

            # if self.training_iter==0:
            #     # saver.restore(sess, 'Models/10:01:44 2017-03-11/-856')
            #     # loss, acc=test()
            #     AP = test()
            #     # print loss,acc
            #     print "AP : ",AP
            #     new_test()

            # For_calibrated_B = np.loadtxt('For_calibrated_B'+str(self.test_index)+'.txt', delimiter=',')

            for i in xrange(self.training_iter):

                starttime = datetime.datetime.now()

                for train, _ in self.get_batch_data(tr_x,
                                                    tr_y,
                                                    tr_doc_len,
                                                    self.batch_size,
                                                    self.Keep_Prob,
                                                    test=False):
                    _, step, summary, loss, acc = sess.run(
                        [optimizer, global_step, summary_op, cost, accuracy],
                        feed_dict=train)
                    train_summary_writer.add_summary(summary, step)
                    print 'Iter {}: mini-batch loss={:.6f}, acc={:.6f}'.format(
                        step, loss, acc)

                endtime = datetime.datetime.now()
                runtime = (endtime - starttime).seconds
                print "time cost = {}".format(runtime)
                # break  # debug leftover; uncomment to stop after timing one epoch

                if i % self.display_step == 0:
                    # loss, acc=test()
                    AP = test()

                    # if acc > max_acc:
                    #     max_acc = acc
                    #     bestIter = step
                    #     saver.save(sess, save_dir, global_step=step)
                    #     new_test()
                    if AP > max_AP:
                        max_AP = AP
                        bestIter = step
                        saver.save(sess, save_dir, global_step=step)
                        new_test()

                    summary = sess.run(summary_test,
                                       feed_dict={
                                           test_loss: loss,
                                           test_acc: acc
                                       })
                    test_summary_writer.add_summary(summary, step)
                    print '----------{}----------'.format(
                        time.strftime("%Y-%m-%d %X", time.localtime()))
                    # print 'Iter {}: test loss={:.6f}, test acc={:.6f}'.format(step, loss, acc)
                    # print 'round {}: max_acc={} BestIter={}\n'.format(i, max_acc, bestIter)
                    print 'Iter {}: test AP={:.6f}'.format(step, AP)
                    print 'round {}: max_AP={} BestIter={}\n'.format(
                        i, max_AP, bestIter)

            print 'Optimization Finished!'
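The hard-labeling line y_pred = np.ceil(y_pred_p - 1.0 / 9) above is a compact fixed threshold: for probabilities in [0, 1] it yields 1 exactly when p > 1/9 (one over the nine emotion classes). A quick NumPy check of that equivalence:

import numpy as np

p = np.array([[0.05, 0.30, 1.0 / 9, 0.12]])
via_ceil = np.ceil(p - 1.0 / 9)            # the trick used in new_test()
via_compare = (p > 1.0 / 9).astype(float)  # the same threshold, spelled out
print(np.array_equal(via_ceil, via_compare))  # True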
Example #5
    def run(self):
        inputs = tf.nn.embedding_lookup(self.word_embedding, self.x)
        prob = self.model(inputs)

        with tf.name_scope('loss'):
            cost = -tf.reduce_mean(self.y * tf.log(prob))
            # alternative: binary cross-entropy loss
            # cost = -tf.reduce_mean(self.y * tf.log(prob) + (1 - self.y) * tf.log(1 - prob))


            reg, variables = tf.nn.l2_loss(self.word_embedding), []

            for i in range(1, 10):
                variables.append('softmax' + str(i))

            for vari in variables:
                reg += tf.nn.l2_loss(self.weights[vari]) + \
                    tf.nn.l2_loss(self.biases[vari])

            # add attention parameters
            reg += tf.nn.l2_loss(self.weights['w_1']) + tf.nn.l2_loss(
                self.biases['w_1'])
            reg += tf.nn.l2_loss(self.weights['u_1'])
            cost += reg * self.l2_reg

        with tf.name_scope('train'):
            global_step = tf.Variable(
                0, name="tr_global_step", trainable=False)
            optimizer = tf.train.AdamOptimizer(
                learning_rate=self.learning_rate).minimize(cost, global_step=global_step)

        def get9b(predict):
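            # Each row of `predict` concatenates nine (negative, positive)
            # logit pairs; taking the argmax within each pair yields nine
            # binary decisions per document.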
            ret = tf.reshape(predict, [-1, 2])
            ret = tf.argmax(ret, 1)
            ret = tf.reshape(ret, [-1, 9])
            return ret
            
        with tf.name_scope('predict'):
            correct_pred = tf.equal(get9b(prob), get9b(self.y))
            accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
            correct_num = tf.reduce_sum(tf.cast(correct_pred, tf.int32))

        with tf.name_scope('summary'):
            localtime = time.strftime("%X %Y-%m-%d", time.localtime())
            Summary_dir = 'Summary/' + localtime

            info = 'batch-{}, lr-{}, kb-{}, l2_reg-{}'.format(
                self.batch_size, self.learning_rate, self.Keep_Prob,
                self.l2_reg)
            info = info + '\ntrain_file_path:' + self.train_file_path + '\ntest_index:' + str(
                self.test_index) + '\nembedding_type:' + str(
                    self.embedding_type) + '\nMethod: Emotion_CNN_prob'
            summary_acc = tf.summary.scalar('ACC ' + info, accuracy)
            summary_loss = tf.summary.scalar('LOSS ' + info, cost)
            summary_op = tf.summary.merge([summary_loss, summary_acc])

            test_acc = tf.placeholder(tf.float32)
            test_loss = tf.placeholder(tf.float32)
            summary_test_acc = tf.summary.scalar('ACC ' + info, test_acc)
            summary_test_loss = tf.summary.scalar('LOSS ' + info, test_loss)
            summary_test = tf.summary.merge(
                [summary_test_loss, summary_test_acc])

            train_summary_writer = tf.summary.FileWriter(
                Summary_dir + '/train')
            test_summary_writer = tf.summary.FileWriter(Summary_dir + '/test')

        with tf.name_scope('saveModel'):
            saver = tf.train.Saver(write_version=tf.train.SaverDef.V2)
            save_dir = 'Models/' + localtime + '/'
            if not os.path.exists(save_dir):
                os.makedirs(save_dir)

        with tf.name_scope('readData'):
            print '----------{}----------'.format(time.strftime("%Y-%m-%d %X", time.localtime()))
            tr_x, tr_y, tr_doc_len, te_x, te_y, te_doc_len = load_data(
                self.train_file_path,
                self.word_id_mapping,
                self.max_doc_len,
                self.test_index,
                self.n_class
            )
            print 'train docs: {}    test docs: {}'.format(len(tr_y), len(te_y))
            print 'training_iter:', self.training_iter
            print info
            print '----------{}----------'.format(time.strftime("%Y-%m-%d %X", time.localtime()))

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True

        with tf.Session(config=config) as sess:
            sess.run(tf.initialize_all_variables())
            max_acc, bestIter = 0., 0
            max_AP = 0.


            def test():
                feed_dict = {
                    self.x: te_x,
                    self.y: te_y,
                    self.doc_len: te_doc_len,
                    self.keep_prob: 1.0,
                }
                # loss, acc = sess.run([cost, accuracy], feed_dict=feed_dict)
                # return loss, acc
                y_true = te_y[:, 1::2]

                y_pred_p = sess.run(prob, feed_dict=feed_dict)[:, 1::2]

                # hard labels at threshold 1/2 (unused by the AP metric below)
                y_pred = np.ceil(y_pred_p - 1.0 / 2)
                AP = label_ranking_average_precision_score(y_true, y_pred_p)
                return AP

            def new_test():
                feed_dict = {
                    self.x: te_x,
                    self.doc_len: te_doc_len,
                    self.keep_prob: 1.0,
                }
                y_true = te_y[:, 1::2]

                y_pred_p = sess.run(prob, feed_dict=feed_dict)[:, 1::2]

                # threshold each emotion's probability at 1/2
                y_pred = np.ceil(y_pred_p - 1.0 / 2)

                ''' test '''

                # print('y_true[0] : {}'.format(y_true[0]))
                # print('y_pred_p[0] : {}'.format(y_pred_p[0]))
                # print('y_pred[0] : {}'.format(y_pred[0]))

                Emotion_eval(y_true, y_pred, y_pred_p)




            for i in xrange(self.training_iter):
                
                starttime = datetime.datetime.now()

                for train, _ in self.get_batch_data(tr_x, tr_y, tr_doc_len,
                                                    self.batch_size,
                                                    self.Keep_Prob,
                                                    test=False):
                    _, step, summary, loss, acc = sess.run(
                        [optimizer, global_step, summary_op, cost, accuracy],
                        feed_dict=train)
                    # train_summary_writer.add_summary(summary, step)
                    print 'Iter {}: mini-batch loss={:.6f}, acc={:.6f}'.format(
                        step, loss, acc)
                
                endtime = datetime.datetime.now()
                runtime = (endtime - starttime).seconds
                print "time cost = {}".format(runtime)

                if i % self.display_step == 0:
                    
                    # loss, acc= test()
                    AP = test()

                    # if acc > max_acc:
                    #     new_test()
                    #     max_acc = acc
                    #     bestIter = step
                    #     saver.save(sess, save_dir, global_step=step)
                    if AP > max_AP:
                        max_AP = AP
                        bestIter = step
                        saver.save(sess, save_dir, global_step=step)
                        new_test()                        

                    summary = sess.run(summary_test, feed_dict={
                                       test_loss: loss, test_acc: acc})
                    test_summary_writer.add_summary(summary, step)
                    print '----------{}----------'.format(
                        time.strftime("%Y-%m-%d %X", time.localtime()))
                    # print 'Iter {}: test loss={:.6f}, test acc={:.6f}'.format(step, loss, acc)
                    print 'Iter {}: test AP={:.6f}'.format(step, AP)
                    print 'round {}: max_AP={} BestIter={}\n'.format(
                        i, max_AP, bestIter)
            
            # new_test()

            print 'Optimization Finished!'
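get9b above assumes each output row concatenates nine (negative, positive) pairs, which is also what the te_y[:, 1::2] slicing in test() and new_test() relies on. A NumPy mirror of the op (an illustration under that layout assumption):

import numpy as np

def get9b_np(predict):
    # argmax within each (neg, pos) pair -> 9 binary labels per row
    return np.argmax(np.reshape(predict, (-1, 2)), axis=1).reshape(-1, 9)

y = np.zeros((1, 18))
y[0, 1] = 1.0          # pair 0 votes "positive"
print(get9b_np(y))     # [[1 0 0 0 0 0 0 0 0]]
print(y[:, 1::2])      # the positive column of every pair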