Code Example #1
File: predict.py Project: hanghang2333/scoreclassify
def main(_):
    out = codecs.open('out1.csv', 'w', 'utf8')
    f = h5py.File('../datautils/charTest50.hdf5', 'r')
    X = f['X'].value
    d = shelve.open('../datautils/charData50.data')
    Vocab, vocabulary_index2word, vocabulary_word2index = d['Vocab'], d[
        'id2c'], d['c2id']
    d = shelve.open('../datautils/idTest50')
    idx = d['id']
    vocab_size = len(vocabulary_word2index) + 1
    with tf.Session() as sess:
        #Instantiate Model
        textCNN = TextCNN(filter_sizes, FLAGS.num_filters, FLAGS.num_classes,
                          FLAGS.learning_rate, FLAGS.batch_size,
                          FLAGS.decay_steps, FLAGS.decay_rate,
                          FLAGS.sentence_len, vocab_size, FLAGS.embed_size,
                          FLAGS.is_training)
        #Initialize Saver
        saver = tf.train.Saver()
        saver.restore(sess, 'checkpoint/model.ckpt-17')
        for index, i in enumerate(X):
            pred = sess.run([textCNN.predictions],
                            feed_dict={
                                textCNN.input_x: [i],
                                textCNN.dropout_keep_prob: 1
                            })
            out.write(str(idx[index]) + ',' + str(pred[0][0]) + '\n')
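Note: all of the examples on this page read their hyper-parameters from tf.app.flags and a module-level filter_sizes list defined elsewhere in each project. A hedged sketch of the kind of flag definitions the code above assumes; the flag names match the snippets, but the default values here are purely illustrative.

import tensorflow as tf

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer("num_classes", 1999, "number of target labels")
tf.app.flags.DEFINE_float("learning_rate", 0.001, "initial learning rate")
tf.app.flags.DEFINE_integer("batch_size", 128, "examples per batch")
tf.app.flags.DEFINE_integer("decay_steps", 6000, "steps between learning-rate decays")
tf.app.flags.DEFINE_float("decay_rate", 0.65, "learning-rate decay factor")
tf.app.flags.DEFINE_integer("sentence_len", 100, "sentence length after padding")
tf.app.flags.DEFINE_integer("embed_size", 100, "word embedding dimension")
tf.app.flags.DEFINE_integer("num_filters", 128, "filters per filter size")
tf.app.flags.DEFINE_boolean("is_training", False, "True for training, False for prediction")
filter_sizes = [3, 4, 5]  # convolution window widths passed to TextCNN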
Code Example #2
def main(_):
    # 1.load data with vocabulary of words and labels
    vocabulary_word2index, vocabulary_index2word = create_voabulary(simple='simple',word2vec_model_path=FLAGS.word2vec_model_path,name_scope="cnn2")
    vocab_size = len(vocabulary_word2index)
    vocabulary_word2index_label, vocabulary_index2word_label = create_voabulary_label(name_scope="cnn2")
    questionid_question_lists=load_final_test_data(FLAGS.predict_source_file)
    test= load_data_predict(vocabulary_word2index,vocabulary_word2index_label,questionid_question_lists)
    testX=[]
    question_id_list=[]
    for question_id, question_string_list in test:
        question_id_list.append(question_id)
        testX.append(question_string_list)
    # 2.Data preprocessing: Sequence padding
    print("start padding....")
    testX2 = pad_sequences(testX, maxlen=FLAGS.sentence_len, value=0.)  # padding to max length
    print("end padding...")
   # 3.create session.
    config=tf.ConfigProto()
    config.gpu_options.allow_growth=True
    with tf.Session(config=config) as sess:
        # 4.Instantiate Model
        textCNN=TextCNN(filter_sizes,FLAGS.num_filters,FLAGS.num_classes, FLAGS.learning_rate, FLAGS.batch_size, FLAGS.decay_steps,FLAGS.decay_rate,
                        FLAGS.sentence_len,vocab_size,FLAGS.embed_size,FLAGS.is_training)
        saver=tf.train.Saver()
        if os.path.exists(FLAGS.ckpt_dir+"checkpoint"):
            print("Restoring Variables from Checkpoint")
            saver.restore(sess,tf.train.latest_checkpoint(FLAGS.ckpt_dir))
        else:
            print("Can't find the checkpoint.going to stop")
            return
        # 5.feed data, to get logits
        number_of_training_data = len(testX2)
        print("number_of_training_data:", number_of_training_data)
        index=0
        predict_target_file_f = codecs.open(FLAGS.predict_target_file, 'a', 'utf8')
        for start, end in zip(range(0, number_of_training_data, FLAGS.batch_size),range(FLAGS.batch_size, number_of_training_data+1, FLAGS.batch_size)):
            logits=sess.run(textCNN.logits,feed_dict={textCNN.input_x:testX2[start:end],textCNN.dropout_keep_prob:1}) #'shape of logits:', ( 1, 1999)
            # 6. get label using logits
            predicted_labels=get_label_using_logits(logits[0],vocabulary_index2word_label)
            # 7. write question id and labels to file system.
            write_question_id_with_labels(question_id_list[index],predicted_labels,predict_target_file_f)
            index=index+1
        predict_target_file_f.close()
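get_label_using_logits and write_question_id_with_labels are project helpers that are not shown in this snippet. A hedged sketch of a typical top-k implementation of the former; top_number=5 and the dict-style index-to-label lookup are illustrative assumptions, not the project's actual code.

import numpy as np

def get_label_using_logits(logits, vocabulary_index2word_label, top_number=5):
    # logits: 1-D array of scores over all labels for a single question
    top_indices = np.argsort(logits)[-top_number:][::-1]  # highest-scoring labels first
    return [vocabulary_index2word_label[int(i)] for i in top_indices]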
Code Example #3
def main(_):
    #trainX, trainY, testX, testY = None, None, None, None
    #vocabulary_word2index, vocabulary_index2word, vocabulary_label2index, _= create_vocabulary(FLAGS.traning_data_path,FLAGS.vocab_size,name_scope=FLAGS.name_scope)
    word2index, label2index, trainX, trainY, vaildX, vaildY, testX, testY = load_data(
        FLAGS.cache_file_h5py, FLAGS.cache_file_pickle)
    vocab_size = len(word2index)
    print("cnn_model.vocab_size:", vocab_size)
    num_classes = len(label2index)
    print("num_classes:", num_classes)
    num_examples, FLAGS.sentence_len = trainX.shape
    print("num_examples of training:", num_examples, ";sentence_len:",
          FLAGS.sentence_len)
    #train, test= load_data_multilabel(FLAGS.traning_data_path,vocabulary_word2index, vocabulary_label2index,FLAGS.sentence_len)
    #trainX, trainY = train;testX, testY = test
    #print some message for debug purpose
    print("trainX[0:10]:", trainX[0:10])
    print("trainY[0]:", trainY[0:10])
    train_y_short = get_target_label_short(trainY[0])
    print("train_y_short:", train_y_short)

    #2.create session.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        #Instantiate Model
        textCNN = TextCNN(filter_sizes,
                          FLAGS.num_filters,
                          num_classes,
                          FLAGS.learning_rate,
                          FLAGS.batch_size,
                          FLAGS.decay_steps,
                          FLAGS.decay_rate,
                          FLAGS.sentence_len,
                          vocab_size,
                          FLAGS.embed_size,
                          multi_label_flag=FLAGS.multi_label_flag)
        #Initialize Saver
        saver = tf.train.Saver()
        if os.path.exists(FLAGS.ckpt_dir + "checkpoint"):
            print("Restoring Variables from Checkpoint.")
            saver.restore(sess, tf.train.latest_checkpoint(FLAGS.ckpt_dir))
            #for i in range(3): #decay learning rate if necessary.
            #    print(i,"Going to decay learning rate by half.")
            #    sess.run(textCNN.learning_rate_decay_half_op)
        else:
            print('Initializing Variables')
            sess.run(tf.global_variables_initializer())
            if FLAGS.use_embedding:  #load pre-trained word embedding
                index2word = {v: k for k, v in word2index.items()}
                assign_pretrained_word_embedding(sess, index2word, vocab_size,
                                                 textCNN,
                                                 FLAGS.word2vec_model_path)
        curr_epoch = sess.run(textCNN.epoch_step)
        #3.feed data & training
        number_of_training_data = len(trainX)
        batch_size = FLAGS.batch_size
        iteration = 0
        for epoch in range(curr_epoch, FLAGS.num_epochs):
            loss, counter = 0.0, 0
            for start, end in zip(
                    range(0, number_of_training_data, batch_size),
                    range(batch_size, number_of_training_data, batch_size)):
                iteration = iteration + 1
                if epoch == 0 and counter == 0:
                    print("trainX[start:end]:", trainX[start:end])
                feed_dict = {
                    textCNN.input_x: trainX[start:end],
                    textCNN.dropout_keep_prob: 0.8,
                    textCNN.is_training_flag: FLAGS.is_training_flag
                }
                if not FLAGS.multi_label_flag:
                    feed_dict[textCNN.input_y] = trainY[start:end]
                else:
                    feed_dict[textCNN.input_y_multilabel] = trainY[start:end]
                curr_loss, lr, _ = sess.run([
                    textCNN.loss_val, textCNN.learning_rate, textCNN.train_op
                ], feed_dict)
                loss, counter = loss + curr_loss, counter + 1
                if counter % 50 == 0:
                    print(
                        "Epoch %d\tBatch %d\tTrain Loss:%.3f\tLearning rate:%.5f"
                        % (epoch, counter, loss / float(counter), lr))

                ########################################################################################################
                if start % (3000 *
                            FLAGS.batch_size) == 0:  # eval every 3000 steps.
                    eval_loss, f1_score, f1_micro, f1_macro = do_eval(
                        sess, textCNN, vaildX, vaildY, num_classes)
                    print(
                        "Epoch %d Validation Loss:%.3f\tF1 Score:%.3f\tF1_micro:%.3f\tF1_macro:%.3f"
                        % (epoch, eval_loss, f1_score, f1_micro, f1_macro))
                    # save model to checkpoint
                    save_path = FLAGS.ckpt_dir + "model.ckpt"
                    print("Going to save model..")
                    saver.save(sess, save_path, global_step=epoch)
                ########################################################################################################
            #epoch increment
            print("going to increment epoch counter....")
            sess.run(textCNN.epoch_increment)

            # 4.validation
            print(epoch, FLAGS.validate_every,
                  (epoch % FLAGS.validate_every == 0))
            if epoch % FLAGS.validate_every == 0:
                eval_loss, f1_score, f1_micro, f1_macro = do_eval(
                    sess, textCNN, testX, testY, num_classes)
                print(
                    "Epoch %d Validation Loss:%.3f\tF1 Score:%.3f\tF1_micro:%.3f\tF1_macro:%.3f"
                    % (epoch, eval_loss, f1_score, f1_micro, f1_macro))
                #save model to checkpoint
                save_path = FLAGS.ckpt_dir + "model.ckpt"
                saver.save(sess, save_path, global_step=epoch)

        # 5. Finally, evaluate on the test set and report test accuracy
        test_loss, f1_score, f1_micro, f1_macro = do_eval(
            sess, textCNN, testX, testY, num_classes)
        print("Test Loss:%.3f\tF1 Score:%.3f\tF1_micro:%.3f\tF1_macro:%.3f" %
              (test_loss, f1_score, f1_micro, f1_macro))
    pass
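do_eval is called above but not shown. A hedged sketch of what a multi-label version could look like, scoring with scikit-learn; the 0.5 sigmoid threshold and reporting the mean of micro and macro F1 as the second return value are assumptions for illustration, not taken from the original project.

import numpy as np
from sklearn.metrics import f1_score as sk_f1_score

def do_eval(sess, textCNN, evalX, evalY, num_classes, batch_size=64):
    # num_classes is kept to match the call above; evalY rows are multi-hot vectors of that width.
    losses, y_true, y_pred = [], [], []
    for start in range(0, len(evalX), batch_size):
        end = start + batch_size
        feed_dict = {textCNN.input_x: evalX[start:end],
                     textCNN.input_y_multilabel: evalY[start:end],
                     textCNN.dropout_keep_prob: 1.0,
                     textCNN.is_training_flag: False}
        loss, logits = sess.run([textCNN.loss_val, textCNN.logits], feed_dict)
        losses.append(loss)
        y_true.append(np.asarray(evalY[start:end]))
        y_pred.append((1.0 / (1.0 + np.exp(-logits)) > 0.5).astype(int))  # assumed threshold
    y_true, y_pred = np.concatenate(y_true), np.concatenate(y_pred)
    f1_micro = sk_f1_score(y_true, y_pred, average='micro')
    f1_macro = sk_f1_score(y_true, y_pred, average='macro')
    return float(np.mean(losses)), (f1_micro + f1_macro) / 2.0, f1_micro, f1_macro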
Code Example #4
def main(_):
    #1.load data (X: list of int, y: int).
    #if os.path.exists(FLAGS.cache_path):  # if it exists on the file system, load the cached, vocabulary-indexed data
    #    with open(FLAGS.cache_path, 'r') as data_f:
    #        trainX, trainY, testX, testY, vocabulary_index2word=pickle.load(data_f)
    #        vocab_size=len(vocabulary_index2word)
    #else:
    if 1 == 1:
        trainX, trainY, testX, testY = None, None, None, None
        vocabulary_word2index, vocabulary_index2word = create_voabulary(
            word2vec_model_path=FLAGS.word2vec_model_path,
            name_scope="cnn2")  #simple='simple'
        vocab_size = len(vocabulary_word2index)
        print("cnn_model.vocab_size:", vocab_size)
        vocabulary_word2index_label, vocabulary_index2word_label = create_voabulary_label(
            name_scope="cnn2")
        if FLAGS.multi_label_flag:
            FLAGS.traning_data_path = 'training-data/train-zhihu6-title-desc.txt'  #test-zhihu5-only-title-multilabel.txt
        train, test, _ = load_data_multilabel_new(
            vocabulary_word2index,
            vocabulary_word2index_label,
            multi_label_flag=FLAGS.multi_label_flag,
            traning_data_path=FLAGS.traning_data_path
        )  #,traning_data_path=FLAGS.traning_data_path
        trainX, trainY = train
        testX, testY = test
        # 2.Data preprocessing.Sequence padding
        print("start padding & transform to one hot...")
        trainX = pad_sequences(trainX, maxlen=FLAGS.sentence_len,
                               value=0.)  # padding to max length
        testX = pad_sequences(testX, maxlen=FLAGS.sentence_len,
                              value=0.)  # padding to max length
        #with open(FLAGS.cache_path, 'w') as data_f: #save data to cache file, so we can use it next time quickly.
        #    pickle.dump((trainX,trainY,testX,testY,vocabulary_index2word),data_f)
        print("trainX[0]:", trainX[0])  #;print("trainY[0]:", trainY[0])
        # Converting labels to binary vectors
        print("end padding & transform to one hot...")
    #2.create session.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        #Instantiate Model
        textCNN = TextCNN(filter_sizes,
                          FLAGS.num_filters,
                          FLAGS.num_classes,
                          FLAGS.learning_rate,
                          FLAGS.batch_size,
                          FLAGS.decay_steps,
                          FLAGS.decay_rate,
                          FLAGS.sentence_len,
                          vocab_size,
                          FLAGS.embed_size,
                          FLAGS.is_training,
                          multi_label_flag=FLAGS.multi_label_flag)
        #Initialize Saver
        saver = tf.train.Saver()
        if os.path.exists(FLAGS.ckpt_dir + "checkpoint"):
            print("Restoring Variables from Checkpoint")
            saver.restore(sess, tf.train.latest_checkpoint(FLAGS.ckpt_dir))
        else:
            print('Initializing Variables')
            sess.run(tf.global_variables_initializer())
            if FLAGS.use_embedding:  #load pre-trained word embedding
                assign_pretrained_word_embedding(
                    sess,
                    vocabulary_index2word,
                    vocab_size,
                    textCNN,
                    word2vec_model_path=FLAGS.word2vec_model_path)
        curr_epoch = sess.run(textCNN.epoch_step)
        #3.feed data & training
        number_of_training_data = len(trainX)
        batch_size = FLAGS.batch_size
        for epoch in range(curr_epoch, FLAGS.num_epochs):
            loss, acc, counter = 0.0, 0.0, 0
            for start, end in zip(
                    range(0, number_of_training_data, batch_size),
                    range(batch_size, number_of_training_data, batch_size)):
                if epoch == 0 and counter == 0:
                    print("trainX[start:end]:", trainX[start:end]
                          )  #;print("trainY[start:end]:",trainY[start:end])
                feed_dict = {
                    textCNN.input_x: trainX[start:end],
                    textCNN.dropout_keep_prob: 0.5
                }
                if not FLAGS.multi_label_flag:
                    feed_dict[textCNN.input_y] = trainY[start:end]
                else:
                    feed_dict[textCNN.input_y_multilabel] = trainY[start:end]
                curr_loss, curr_acc, _ = sess.run(
                    [textCNN.loss_val, textCNN.accuracy, textCNN.train_op],
                    feed_dict)  #curr_acc--->TextCNN.accuracy
                loss, counter, acc = loss + curr_loss, counter + 1, acc + curr_acc
                if counter % 50 == 0:
                    print(
                        "Epoch %d\tBatch %d\tTrain Loss:%.3f\tTrain Accuracy:%.3f"
                        % (epoch, counter, loss / float(counter),
                           acc / float(counter))
                    )  # Train Accuracy = acc / float(counter)

            #epoch increment
            print("going to increment epoch counter....")
            sess.run(textCNN.epoch_increment)

            # 4.validation
            print(epoch, FLAGS.validate_every,
                  (epoch % FLAGS.validate_every == 0))
            if epoch % FLAGS.validate_every == 0:
                eval_loss, eval_acc = do_eval(sess, textCNN, testX, testY,
                                              batch_size,
                                              vocabulary_index2word_label)
                print(
                    "Epoch %d Validation Loss:%.3f\tValidation Accuracy: %.3f"
                    % (epoch, eval_loss, eval_acc))
                #save model to checkpoint
                save_path = FLAGS.ckpt_dir + "model.ckpt"
                saver.save(sess, save_path, global_step=epoch)

        # 5. Finally, evaluate on the test set and report test accuracy
        test_loss, test_acc = do_eval(sess, textCNN, testX, testY, batch_size,
                                      vocabulary_index2word_label)
        print("Test Loss:%.3f\tTest Accuracy:%.3f" % (test_loss, test_acc))
    pass
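pad_sequences in this example is imported elsewhere in the project (typically from tflearn.data_utils or Keras). A minimal stand-in for illustration, using post-truncation and post-padding; the library's exact defaults may differ.

def pad_to_length(sequences, maxlen, value=0):
    padded = []
    for seq in sequences:
        seq = list(seq)[:maxlen]                            # cut sentences longer than maxlen
        padded.append(seq + [value] * (maxlen - len(seq)))  # right-pad the short ones
    return padded

# e.g. pad_to_length([[5, 8, 2], [7]], maxlen=4) -> [[5, 8, 2, 0], [7, 0, 0, 0]]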
Code Example #5
def main(_):
    # 1.load data with vocabulary of words and labels
    vocabulary_word2index, vocabulary_index2word = create_voabulary(word2vec_model_path=FLAGS.word2vec_model_path,name_scope="dynamic_memory_network")
    vocab_size = len(vocabulary_word2index)
    vocabulary_word2index_label, vocabulary_index2word_label = create_voabulary_label(name_scope="dynamic_memory_network")
    questionid_question_lists=load_final_test_data(FLAGS.predict_source_file)
    test= load_data_predict(vocabulary_word2index,vocabulary_word2index_label,questionid_question_lists)
    testX=[]
    question_id_list=[]
    for question_id, question_string_list in test:
        question_id_list.append(question_id)
        testX.append(question_string_list)
    # 2.Data preprocessing: Sequence padding
    print("start padding....")
    testX2 = pad_sequences(testX, maxlen=FLAGS.sequence_length, value=0.)  # padding to max length
    testX2_cnn = pad_sequences(testX, maxlen=FLAGS.sentence_len, value=0.)  # padding to max length, for CNN
    print("end padding...")
   # 3.create session.
    config=tf.ConfigProto()
    config.gpu_options.allow_growth=True
    graph1 = tf.Graph()
    graph2 = tf.Graph()
    graph3 = tf.Graph()
    graph4 = tf.Graph()
    graph5 = tf.Graph()
    global sess_dmn
    global sess_entity
    global sess_cnn
    global sess_rcnn
    with graph1.as_default():  # DynamicMemoryNetwork
        sess_dmn = tf.Session(config=config)
        model_dmn = DynamicMemoryNetwork(FLAGS.num_classes, FLAGS.learning_rate, FLAGS.batch_size, FLAGS.decay_steps, FLAGS.decay_rate, FLAGS.sequence_length,
                                     FLAGS.story_length,vocab_size, FLAGS.embed_size, FLAGS.hidden_size, FLAGS.is_training,num_pass=FLAGS.num_pass,
                                     use_gated_gru=FLAGS.use_gated_gru,decode_with_sequences=FLAGS.decode_with_sequences,multi_label_flag=FLAGS.multi_label_flag,l2_lambda=FLAGS.l2_lambda)
        saver_dmn = tf.train.Saver()
        if os.path.exists(FLAGS.ckpt_dir_dmn + "checkpoint"):
            print("Restoring Variables from Checkpoint of DMN.")
            saver_dmn.restore(sess_dmn, tf.train.latest_checkpoint(FLAGS.ckpt_dir_dmn))
        else:
            print("Can't find the checkpoint.going to stop.DMN")
            return
    with graph2.as_default():  # EntityNet
        sess_entity = tf.Session(config=config)
        model_entity = EntityNetwork(FLAGS.num_classes, FLAGS.learning_rate, FLAGS.batch_size, FLAGS.decay_steps, FLAGS.decay_rate, FLAGS.sequence_length,
                              FLAGS.story_length,vocab_size, FLAGS.embed_size, FLAGS.hidden_size, FLAGS.is_training,
                              multi_label_flag=True, block_size=FLAGS.block_size,use_bi_lstm=FLAGS.use_bi_lstm)
        saver_entity = tf.train.Saver()
        if os.path.exists(FLAGS.ckpt_dir_entity + "checkpoint"):
            print("Restoring Variables from Checkpoint of EntityNet.")
            saver_entity.restore(sess_entity, tf.train.latest_checkpoint(FLAGS.ckpt_dir_entity))
        else:
            print("Can't find the checkpoint.going to stop.EntityNet.")
            return
    with graph3.as_default():  # TextCNN
        sess_cnn=tf.Session(config=config)
        model_cnn = TextCNN(filter_sizes, FLAGS.num_filters, FLAGS.num_classes, FLAGS.learning_rate, FLAGS.batch_size,
                          FLAGS.decay_steps, FLAGS.decay_rate,FLAGS.sentence_len, vocab_size, FLAGS.embed_size, FLAGS.is_training)
        saver_cnn = tf.train.Saver()
        if os.path.exists(FLAGS.ckpt_dir_cnn + "checkpoint"):
            print("Restoring Variables from Checkpoint.TextCNN.")
            saver_cnn.restore(sess_cnn, tf.train.latest_checkpoint(FLAGS.ckpt_dir_cnn))
        else:
            print("Can't find the checkpoint.going to stop.TextCNN.")
            return
    with graph5.as_default():  # TextCNN_256embedding
        sess_cnn_256_embedding = tf.Session(config=config)
        model_cnn_256_embedding = TextCNN(filter_sizes_256_embedding, FLAGS.num_filters_256_embedding, FLAGS.num_classes, FLAGS.learning_rate,
                                FLAGS.batch_size,FLAGS.decay_steps, FLAGS.decay_rate, FLAGS.sentence_len, vocab_size,
                                FLAGS.embed_size_256_embedding, FLAGS.is_training)
        saver_cnn_256_embedding = tf.train.Saver()
        if os.path.exists(FLAGS.ckpt_dir_cnn_256_embedding + "checkpoint"):
            print("Restoring Variables from Checkpoint.TextCNN_256_embedding")
            saver_cnn_256_embedding.restore(sess_cnn_256_embedding, tf.train.latest_checkpoint(FLAGS.ckpt_dir_cnn_256_embedding))
        else:
            print("Can't find the checkpoint.going to stop.TextCNN_256_embedding.")
            return
    #with graph4:#RCNN
    #    sess_rcnn=tf.Session(config=config)
    #    model_rcnn=TextRCNN(FLAGS.num_classes, FLAGS.learning_rate, FLAGS.decay_steps, FLAGS.decay_rate,FLAGS.sentence_len,
    #            vocab_size,FLAGS.embed_size,FLAGS.is_training,FLAGS.batch_size,multi_label_flag=FLAGS.multi_label_flag)
    #    saver_rcnn = tf.train.Saver()
    #    if os.path.exists(FLAGS.ckpt_dir_rcnn + "checkpoint"):
    #        print("Restoring Variables from Checkpoint.TextRCNN.")
    #        saver_rcnn.restore(sess_rcnn, tf.train.latest_checkpoint(FLAGS.ckpt_dir_rcnn))
    #    else:
    #        print("Can't find the checkpoint.going to stop.TextRCNN.")
    #        return

        # 5.feed data, to get logits
        number_of_training_data = len(testX2)
        print("number_of_training_data:", number_of_training_data)
        index=0
        predict_target_file_f = codecs.open(FLAGS.predict_target_file, 'a', 'utf8')
        global sess_dmn
        global sess_entity
        for start, end in zip(range(0, number_of_training_data, FLAGS.batch_size),range(FLAGS.batch_size, number_of_training_data+1, FLAGS.batch_size)):
            #1.DMN
            logits_dmn=sess_dmn.run(model_dmn.logits,feed_dict={model_dmn.query:testX2[start:end],model_dmn.story: np.expand_dims(testX2[start:end],axis=1),
                                                        model_dmn.dropout_keep_prob:1.0})
            #2.EntityNet
            logits_entity=sess_entity.run(model_entity.logits,feed_dict={model_entity.query:testX2[start:end],model_entity.story: np.expand_dims(testX2[start:end],axis=1),
                                                        model_entity.dropout_keep_prob:1.0})
            #3.CNN
            logits_cnn = sess_cnn.run(model_cnn.logits,feed_dict={model_cnn.input_x: testX2_cnn[start:end], model_cnn.dropout_keep_prob: 1})
            #4.RCNN
            #logits_rcnn = sess_rcnn.run(model_rcnn.logits, feed_dict={model_rcnn.input_x: testX2_cnn[start:end],model_rcnn.dropout_keep_prob: 1})  # 'shape of logits:', ( 1, 1999)
            # 5. CNN_256_original_embedding
            logits_cnn_256_embedding =sess_cnn_256_embedding.run(model_cnn_256_embedding.logits,feed_dict={model_cnn_256_embedding.input_x: testX2_cnn[start:end],
                                                                 model_cnn_256_embedding.dropout_keep_prob: 1})
            # combine the logits: weighted average over the models
            logits = logits_cnn*0.3 + logits_cnn_256_embedding*0.3 + logits_entity*0.2 + logits_dmn*0.2  # + logits_rcnn*0.15
            question_id_sublist=question_id_list[start:end]
            get_label_using_logits_batch(question_id_sublist, logits, vocabulary_index2word_label, predict_target_file_f)
            index=index+1
        predict_target_file_f.close()
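The loop above combines the four models with a weighted average of their logits (weights 0.3/0.3/0.2/0.2, summing to 1.0). A tiny stand-alone illustration of the same idea with dummy arrays; the shapes are examples only.

import numpy as np

logits_list = [np.random.randn(8, 1999) for _ in range(4)]  # 4 models, batch of 8, 1999 classes
weights = [0.3, 0.3, 0.2, 0.2]
combined = sum(w * l for w, l in zip(weights, logits_list))  # same shape (8, 1999)
top5 = np.argsort(combined, axis=1)[:, -5:][:, ::-1]         # top-5 label indices per question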
Code Example #6
def main(_):
    training_data_path = '/Users/liyangyang/Downloads/bdci/train.txt'
    vocabulary_word2index, vocabulary_index2word, vocabulary_label2index, vocabulary_index2label = \
        data_util.create_vocabulary(training_data_path, 17259, name_scope='cnn')
    vocab_size = len(vocabulary_word2index) + 1
    print("cnn_model.vocab_size:", vocab_size)
    num_classes = len(vocabulary_index2label)
    print("num_classes:", num_classes)
    print(vocabulary_index2label)
    train, test = data_util.load_data_multilabel(training_data_path,
                                                 vocabulary_word2index,
                                                 vocabulary_label2index, 200)
    trainX, trainY = train
    testX, testY = test
    # trainX = trainX[0:8000]
    # trainY = trainY[0:8000]
    # testX = testX[0:500]
    # testY = testY[0:500]
    # print some message for debug purpose
    print("length of training data:", len(trainX),
          ";length of validation data:", len(testX))
    print("trainX.shape", np.array(trainX).shape)
    print("trainY.shape", np.array(trainY).shape)
    print("trainX[0]:", trainX[1])
    print("trainY[0]:", trainY[1])

    print("end padding & transform to one hot...")

    # 2.create session.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        # Instantiate Model
        textCNN = TextCNN(filter_sizes, FLAGS.num_filters, FLAGS.num_classes,
                          FLAGS.learning_rate, FLAGS.batch_size,
                          FLAGS.decay_steps, FLAGS.decay_rate,
                          FLAGS.sentence_len, vocab_size, FLAGS.embed_size,
                          FLAGS.is_training)
        # Initialize Saver
        saver = tf.train.Saver()
        if os.path.exists(FLAGS.ckpt_dir + "checkpoint"):
            print("Restoring Variables from Checkpoint.")
            saver.restore(sess, tf.train.latest_checkpoint(FLAGS.ckpt_dir))
            # for i in range(3): #decay learning rate if necessary.
            #    print(i,"Going to decay learning rate by half.")
            #    sess.run(textCNN.learning_rate_decay_half_op)
        else:
            print('Initializing Variables')
            sess.run(tf.global_variables_initializer())
            if FLAGS.use_embedding:  # load pre-trained word embedding
                assign_pretrained_word_embedding(sess, vocabulary_index2word,
                                                 vocab_size, textCNN)
        curr_epoch = sess.run(textCNN.epoch_step)
        # 3.feed data & training
        number_of_training_data = len(trainX)
        batch_size = FLAGS.batch_size
        iteration = 0
        for epoch in range(curr_epoch, FLAGS.num_epochs):
            loss, acc, counter = 0.0, 0.0, 0
            for start, end in zip(
                    range(0, number_of_training_data, batch_size),
                    range(batch_size, number_of_training_data, batch_size)):
                iteration = iteration + 1
                if epoch == 0 and counter == 0:
                    print("trainX[start:end]:", trainX[start:end])
                    print("trainY[start:end]:", trainY[start:end])
                feed_dict = {
                    textCNN.input_x: trainX[start:end],
                    textCNN.dropout_keep_prob: 0.5,
                    textCNN.iter: iteration,
                    textCNN.tst: not FLAGS.is_training
                }
                if not FLAGS.multi_label_flag:
                    feed_dict[textCNN.input_y] = trainY[start:end]
                else:
                    feed_dict[textCNN.input_y_multilabel] = trainY[start:end]
                curr_loss, lr, curr_acc, _ = sess.run([
                    textCNN.loss_val, textCNN.learning_rate, textCNN.accuracy,
                    textCNN.train_op
                ], feed_dict)
                loss, counter, acc = loss + curr_loss, counter + 1, acc + curr_acc
                if counter % 2 == 0:
                    print(
                        "Epoch %d\tBatch %d\tTrain Loss:%.3f\tLearning rate:%.5f\tTrain Accuracy:%.3f"
                        % (epoch, counter, loss / float(counter), lr,
                           acc / float(counter)))

                ########################################################################################################
                # if start % (2000 * FLAGS.batch_size) == 0:  # eval every 3000 steps.
                #     eval_loss, f1_score, precision, recall = do_eval(sess, textCNN, testX, testY, iteration)
                #     print("Epoch %d Validation Loss:%.3f\tF1 Score:%.3f\tPrecision:%.3f\tRecall:%.3f" % (
                #         epoch, eval_loss, f1_score, precision, recall))
                #     # save model to checkpoint
                #     save_path = FLAGS.ckpt_dir + "model.ckpt"
                #     saver.save(sess, save_path, global_step=epoch)
                ########################################################################################################
            # epoch increment
            print("going to increment epoch counter....")
            sess.run(textCNN.epoch_increment)

            # 4.validation
            print(epoch, FLAGS.validate_every,
                  (epoch % FLAGS.validate_every == 0))
            if epoch % FLAGS.validate_every == 0:
                # save model to checkpoint
                save_path = FLAGS.ckpt_dir + "model.ckpt"
                saver.save(sess, save_path, global_step=epoch)

                eval_loss, eval_acc = do_eval(sess, textCNN, testX, testY,
                                              iteration, batch_size)
                print(
                    "Epoch %d Validation Loss:%.3f\tValidation Accuracy: %.3f"
                    % (epoch, eval_loss, eval_acc))

        # 5. Finally, evaluate on the test set and report test accuracy
        eval_loss, eval_acc = do_eval(sess, textCNN, testX, testY, iteration,
                                      batch_size)
        print("Test Loss:%.3f" % (eval_loss))
    pass
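assign_pretrained_word_embedding is referenced above but not shown. A hedged sketch of what such a helper could do: load word vectors (gensim is used here only as an illustrative loader) and copy them into the model's embedding matrix. The textCNN.Embedding variable name, the embed_size default, and the random initialization for out-of-vocabulary words are assumptions, not taken from the source project.

import numpy as np
import tensorflow as tf
from gensim.models import KeyedVectors

def assign_pretrained_word_embedding(sess, vocabulary_index2word, vocab_size, textCNN,
                                     word2vec_model_path=None, embed_size=100):
    # the real scripts pass FLAGS.word2vec_model_path; embed_size must match the vectors' dimension
    kv = KeyedVectors.load_word2vec_format(word2vec_model_path, binary=True)
    embedding = np.random.uniform(-0.1, 0.1, (vocab_size, embed_size)).astype(np.float32)
    for i in range(vocab_size):
        word = vocabulary_index2word.get(i)
        if word is not None and word in kv:
            embedding[i] = kv[word]                    # reuse the pre-trained vector
    sess.run(tf.assign(textCNN.Embedding, embedding))  # textCNN.Embedding is a hypothetical attribute name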
Code Example #7
for question_id, question_string_list in test:
    question_id_list.append(question_id)
    testX.append(question_string_list)
# 2.Data preprocessing: Sequence padding
print("start padding....")
testX2 = pad_sequences(testX, maxlen=FLAGS.sentence_len,
                       value=0.)  # padding to max length
print("end padding...")
# 3.create session.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
# 4.Instantiate Model
textCNN = TextCNN(filter_sizes, FLAGS.num_filters, FLAGS.num_classes,
                  FLAGS.learning_rate, FLAGS.batch_size, FLAGS.decay_steps,
                  FLAGS.decay_rate, FLAGS.sentence_len, vocab_size,
                  FLAGS.embed_size, FLAGS.is_training)
saver = tf.train.Saver()
if os.path.exists(FLAGS.ckpt_dir + "checkpoint"):
    print("Restoring Variables from Checkpoint")
    saver.restore(sess, tf.train.latest_checkpoint(FLAGS.ckpt_dir))
else:
    print("Can't find the checkpoint.going to stop")
    #return
# 5.feed data, to get logits
number_of_training_data = len(testX2)
print("number_of_training_data:", number_of_training_data)


#index = 0
#predict_target_file_f = codecs.open(FLAGS.predict_target_file, 'a', 'utf8')
Code Example #8
def main(_):
    if 1 == 1:
        f = h5py.File('../datautils/charData50.hdf5', 'r')
        X, Y = f['X'].value, f['Y'].value
        Y = Y - 1
        d = shelve.open('../datautils/charData50.data')
        Vocab, vocabulary_index2word, vocabulary_word2index = d['Vocab'], d[
            'id2c'], d['c2id']
        lentrain = int(len(X) * 0.9)
        trainX, trainY, testX, testY = X[0:lentrain], Y[0:lentrain], X[
            lentrain:], Y[lentrain:]
        vocab_size = len(vocabulary_word2index) + 1
        print("cnn_model.vocab_size:", vocab_size)

        print("trainX[0]:", trainX[0])  #;print("trainY[0]:", trainY[0])

    #2.create session.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        #Instantiate Model
        textCNN = TextCNN(filter_sizes, FLAGS.num_filters, FLAGS.num_classes,
                          FLAGS.learning_rate, FLAGS.batch_size,
                          FLAGS.decay_steps, FLAGS.decay_rate,
                          FLAGS.sentence_len, vocab_size, FLAGS.embed_size,
                          FLAGS.is_training)
        #Initialize Saver
        saver = tf.train.Saver()
        if os.path.exists(FLAGS.ckpt_dir + "checkpoint") and 0:
            print("Restoring Variables from Checkpoint")
            saver.restore(sess, tf.train.latest_checkpoint(FLAGS.ckpt_dir))
        else:
            print('Initializing Variables')
            sess.run(tf.global_variables_initializer())
            if FLAGS.use_embedding:  #load pre-trained word embedding
                assign_pretrained_word_embedding(
                    sess,
                    vocabulary_index2word,
                    vocab_size,
                    textCNN,
                    word2vec_model_path=FLAGS.word2vec_model_path)
        curr_epoch = sess.run(textCNN.epoch_step)
        #3.feed data & training
        number_of_training_data = len(trainX)
        batch_size = FLAGS.batch_size
        for epoch in range(curr_epoch, FLAGS.num_epochs):
            print('epoch', epoch)
            loss, acc, counter = 0.0, 0.0, 0
            for start, end in zip(
                    range(0, number_of_training_data, batch_size),
                    range(batch_size, number_of_training_data, batch_size)):
                if epoch == 0 and counter == 0:
                    print("trainX[start:end]:", trainX[start:end]
                          )  #;print("trainY[start:end]:",trainY[start:end])
                feed_dict = {
                    textCNN.input_x: trainX[start:end],
                    textCNN.dropout_keep_prob: 0.5
                }
                feed_dict[textCNN.input_y] = trainY[start:end]
                curr_loss, curr_acc, _ = sess.run(
                    [textCNN.loss_val, textCNN.accuracy, textCNN.train_op],
                    feed_dict)  #curr_acc--->TextCNN.accuracy
                loss, counter, acc = loss + curr_loss, counter + 1, acc + curr_acc
                if counter % 50 == 0:
                    print(
                        "Epoch %d\tBatch %d\tTrain Loss:%.3f\tTrain Accuracy:%.3f"
                        % (epoch, counter, loss / float(counter),
                           acc / float(counter))
                    )  # Train Accuracy = acc / float(counter)

            #epoch increment
            print("going to increment epoch counter....")
            sess.run(textCNN.epoch_increment)

            # 4.validation
            print(epoch, FLAGS.validate_every,
                  (epoch % FLAGS.validate_every == 0))
            if epoch % FLAGS.validate_every == 0:
                eval_loss, eval_acc = do_eval(sess, textCNN, testX, testY,
                                              batch_size)
                print(
                    "Epoch %d Validation Loss:%.3f\tValidation Accuracy: %.3f"
                    % (epoch, eval_loss, eval_acc))
                #save model to checkpoint
                save_path = FLAGS.ckpt_dir + "model.ckpt"
                saver.save(sess, save_path, global_step=epoch)

        # 5. Finally, evaluate on the test set and report test accuracy
        test_loss, test_acc = do_eval(sess, textCNN, testX, testY, batch_size)
        print("Test Loss:%.3f\tTest Accuracy:%.3f" % (test_loss, test_acc))
    pass
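Examples #4, #6 and #8 call a do_eval that returns (eval_loss, eval_acc) rather than F1 scores, and each passes slightly different arguments. A hedged sketch matching the call in this example (single-label, accuracy-based); the batching and averaging scheme is an illustrative assumption.

def do_eval(sess, textCNN, evalX, evalY, batch_size):
    # mirrors the training loop above: loss_val and accuracy, with dropout disabled
    losses, accs, n_batches = 0.0, 0.0, 0
    for start in range(0, len(evalX), batch_size):
        end = start + batch_size
        feed_dict = {textCNN.input_x: evalX[start:end],
                     textCNN.input_y: evalY[start:end],
                     textCNN.dropout_keep_prob: 1.0}
        loss, acc = sess.run([textCNN.loss_val, textCNN.accuracy], feed_dict)
        losses, accs, n_batches = losses + loss, accs + acc, n_batches + 1
    return losses / float(n_batches), accs / float(n_batches)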
Code Example #9
File: p7_TextCNN_train.py Project: zyq11223/NLP-1
def main(_):
    trainX, trainY, testX, testY = None, None, None, None
    vocabulary_word2index, vocabulary_index2word, vocabulary_label2index, vocabulary_index2label = create_vocabulary(
        FLAGS.traning_data_path, FLAGS.vocab_size, name_scope=FLAGS.name_scope)
    vocab_size = len(vocabulary_word2index)
    print("cnn_model.vocab_size:", vocab_size)
    num_classes = len(vocabulary_index2label)
    train, test = load_data_multilabel(FLAGS.traning_data_path,
                                       vocabulary_word2index,
                                       vocabulary_label2index,
                                       FLAGS.sentence_len)
    trainX, trainY = train
    testX, testY = test
    #print some message for debug purpose
    print("length of training data:", len(trainX),
          ";length of validation data:", len(testX))
    print("trainX[0]:", trainX[0])
    print("trainY[0]:", trainY[0])
    train_y_short = get_target_label_short(trainY[0])
    print("train_y_short:", train_y_short)

    #2.create session.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        #Instantiate Model
        textCNN = TextCNN(filter_sizes,
                          FLAGS.num_filters,
                          num_classes,
                          FLAGS.learning_rate,
                          FLAGS.batch_size,
                          FLAGS.decay_steps,
                          FLAGS.decay_rate,
                          FLAGS.sentence_len,
                          vocab_size,
                          FLAGS.embed_size,
                          FLAGS.is_training,
                          multi_label_flag=FLAGS.multi_label_flag)
        #Initialize Saver
        saver = tf.train.Saver()
        if os.path.exists(FLAGS.ckpt_dir + "checkpoint"):
            print("Restoring Variables from Checkpoint")
            saver.restore(sess, tf.train.latest_checkpoint(FLAGS.ckpt_dir))
        else:
            print('Initializing Variables')
            sess.run(tf.global_variables_initializer())
            if FLAGS.use_embedding:  #load pre-trained word embedding
                assign_pretrained_word_embedding(sess, vocabulary_index2word,
                                                 vocab_size, textCNN,
                                                 FLAGS.word2vec_model_path)
        curr_epoch = sess.run(textCNN.epoch_step)
        #3.feed data & training
        number_of_training_data = len(trainX)
        batch_size = FLAGS.batch_size
        for epoch in range(curr_epoch, FLAGS.num_epochs):
            loss, counter = 0.0, 0
            for start, end in zip(
                    range(0, number_of_training_data, batch_size),
                    range(batch_size, number_of_training_data, batch_size)):
                if epoch == 0 and counter == 0:
                    print("trainX[start:end]:", trainX[start:end])
                feed_dict = {
                    textCNN.input_x: trainX[start:end],
                    textCNN.dropout_keep_prob: 0.5
                }
                if not FLAGS.multi_label_flag:
                    feed_dict[textCNN.input_y] = trainY[start:end]
                else:
                    feed_dict[textCNN.input_y_multilabel] = trainY[start:end]
                curr_loss, lr, _ = sess.run([
                    textCNN.loss_val, textCNN.learning_rate, textCNN.train_op
                ], feed_dict)
                loss, counter = loss + curr_loss, counter + 1
                if counter % 50 == 0:
                    print(
                        "Epoch %d\tBatch %d\tTrain Loss:%.3f\tLearning rate:%.5f"
                        % (epoch, counter, loss / float(counter), lr))

            #epoch increment
            print("going to increment epoch counter....")
            sess.run(textCNN.epoch_increment)

            # 4.validation
            print(epoch, FLAGS.validate_every,
                  (epoch % FLAGS.validate_every == 0))
            if epoch % FLAGS.validate_every == 0:
                eval_loss, f1_score, precision, recall = do_eval(
                    sess, textCNN, testX, testY)
                print(
                    "Epoch %d Validation Loss:%.3f\tF1 Score:%.3f\tPrecision:%.3f\tRecall:%.3f"
                    % (epoch, eval_loss, f1_score, precision, recall))
                #save model to checkpoint
                save_path = FLAGS.ckpt_dir + "model.ckpt"
                saver.save(sess, save_path, global_step=epoch)

        # 5. Finally, evaluate on the test set and report test accuracy
        test_loss, _, _, _ = do_eval(sess, textCNN, testX, testY)
        print("Test Loss:%.3f" % (test_loss))
    pass
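A closing note on the batching pattern used throughout these training and prediction loops: zip(range(0, n, batch_size), range(batch_size, n, batch_size)) silently drops the final partial batch when n is not a multiple of batch_size. A quick check:

n, batch_size = 10, 4
print(list(zip(range(0, n, batch_size), range(batch_size, n, batch_size))))
# [(0, 4), (4, 8)]  -- the last two examples (indices 8 and 9) are never fed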