Example #1
def get_batch_data(mode='train'):
    '''Makes batch queues from the data.
    
    Args:
      mode: A string. Either 'train', 'val', or 'test' 
    Returns:
      A tuple of X_batch (Tensor), Y_batch (Tensor), and the number of batches (int).
      X_batch and Y_batch have the shape [batch_size, maxlen].
    '''
    # Load data
    X, Y = load_data(mode)

    # Create Queues
    input_queues = tf.train.slice_input_producer(
        [tf.convert_to_tensor(X, tf.int32),
         tf.convert_to_tensor(Y, tf.int32)])

    # create batch queues
    X_batch, Y_batch = tf.train.shuffle_batch(
        input_queues,
        num_threads=8,
        batch_size=Hyperparams.batch_size,
        capacity=Hyperparams.batch_size * 64,
        min_after_dequeue=Hyperparams.batch_size * 32,
        allow_smaller_final_batch=False)
    # calc total batch count
    num_batch = len(X) // Hyperparams.batch_size

    return X_batch, Y_batch, num_batch
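
Usage note: tf.train.slice_input_producer and tf.train.shuffle_batch belong to the TF 1.x queue API, so the returned tensors only produce values once queue runners have been started. A minimal consumption sketch (assuming get_batch_data above is importable) might look like this:

import tensorflow as tf

X_batch, Y_batch, num_batch = get_batch_data('train')
with tf.Session() as sess:
    # Queue-based inputs are fed by background threads managed by a Coordinator.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        for _ in range(num_batch):
            x, y = sess.run([X_batch, Y_batch])  # each has shape [batch_size, maxlen]
    finally:
        coord.request_stop()
        coord.join(threads)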
Example #2
def main():
    testing_file = "./new_data/test.ann.json"
    trained_model = "./checkpoints/model.ckpt"
    embedding_file = "D:/DataMining/QASystem/wiki/wiki.zh.text.vector"
    # embedding_file = "./wiki.zh.text.vector"
    embedding_size = 60  # Word embedding dimension
    batch_size = 64  # Batch data size
    sequence_length = 150  # Sentence length
    learning_rate = 0.01
    gpu_mem_usage = 0.75
    gpu_device = "/gpu:0"
    cpu_device = "/cpu:0"

    config = get_config()  # Not used yet
    embeddings, word2idx = load_embedding(embedding_file)
    questions, evidences, y1, y2 = load_data(testing_file, word2idx,
                                             sequence_length)
    with tf.Graph().as_default(), tf.device(gpu_device):
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=gpu_mem_usage)
        session_conf = tf.ConfigProto(allow_soft_placement=True,
                                      gpu_options=gpu_options)
        with tf.variable_scope('Model'):
            model = DGCNN(config, embeddings, sequence_length, embedding_size)
            with tf.Session(config=session_conf).as_default() as sess:
                saver = tf.train.Saver()
                print("Start loading the model")
                saver.restore(sess, trained_model)
                print("The model is loaded")
                acc1, acc2 = [], []
                for batch_questions, batch_evidences, batch_y1, batch_y2 in next_batch(
                        questions, evidences, y1, y2, batch_size):
                    feed_dict = {
                        model.e: batch_evidences,
                        model.q: batch_questions,
                        model.y1: batch_y1,
                        model.y2: batch_y2,
                        model.is_train: False
                    }
                    acc1_, acc2_ = sess.run([model.acc1, model.acc2],
                                            feed_dict)
                    acc1.append(acc1_)
                    acc2.append(acc2_)
                    print('Acc1 %2.3f\tAcc2 %2.3f' % (acc1_, acc2_))
                print('Average: Acc1 %2.3f\tAcc2 %2.3f' %
                      (np.mean(acc1), np.mean(acc2)))
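
Note: next_batch is used by several of these examples (#2, #5, #7, #8) but is not defined in any of the snippets. The following generator is only a guess at its behavior, inferred from the call sites; the body is an assumption, not the original helper:

def next_batch(questions, evidences, y1, y2, batch_size):
    # Hypothetical reconstruction: walk the arrays in order and
    # yield one slice of batch_size rows at a time.
    for start in range(0, len(questions), batch_size):
        end = start + batch_size
        yield (questions[start:end], evidences[start:end],
               y1[start:end], y2[start:end])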
Example #3
def main():
    graph = ModelGraph("test")

    with tf.Session() as sess:
        tf.sg_init(sess)

        # restore parameters
        saver = tf.train.Saver()
        saver.restore(sess, tf.train.latest_checkpoint('asset/train/ckpt'))

        X, Y = load_data("test")
        idx2chr = load_charmaps()[0]

        with codecs.open('results.txt', 'w', 'utf-8') as fout:
            results = []
            for step in range(len(X) // Hyperparams.batch_size - 1):
                X_batch = X[step * Hyperparams.batch_size:(step + 1) *
                            Hyperparams.batch_size]
                Y_batch = Y[step * Hyperparams.batch_size:(step + 1) *
                            Hyperparams.batch_size]

                # predict characters
                logits = sess.run(graph.logits, {graph.X_batch: X_batch})
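                # logits presumably has shape [batch_size, maxlen, num_classes];
                # argmax over the last axis picks one class id per character.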
                preds = np.squeeze(np.argmax(logits, -1))

                for x, y, p in zip(X_batch, Y_batch, preds):  # sentence-wise
                    ground_truth = ''
                    predicted = ''
                    for xx, yy, pp in zip(x, y, p):  # character-wise
                        if xx == 0:  # 0 is the padding index: end of sentence
                            break
                        predicted += idx2chr.get(xx, "*")
                        ground_truth += idx2chr.get(xx, "*")
                        if pp == 1:
                            predicted += " "
                        if yy == 1:
                            ground_truth += " "
                        results.append(1 if pp == yy else 0)

                    fout.write(u"▌Expected: " + ground_truth + "\n")
                    fout.write(u"▌Got: " + predicted + "\n\n")
            fout.write(u"Final Accuracy = %d/%d=%.2f" %
                       (sum(results), len(results),
                        float(sum(results)) / len(results)))
Example #4
def get_batch_data():
    '''Makes batch queues from the data.

    Returns:
      A tuple of x (Tensor) and y (Tensor).
      x and y have the shape [batch_size, maxlen].
    '''
    # Load data
    X, Y = load_data()
    
    # Create Queues
    input_queues = tf.train.slice_input_producer([tf.convert_to_tensor(X, tf.int32), 
                                                  tf.convert_to_tensor(Y, tf.int32)])

    # create batch queues
    x, y = tf.train.shuffle_batch(input_queues,
                                  num_threads=8,
                                  batch_size=Hyperparams.batch_size, 
                                  capacity=Hyperparams.batch_size*64,
                                  min_after_dequeue=Hyperparams.batch_size*32, 
                                  allow_smaller_final_batch=False) 
    
    return x, y # (16, 100), (16, 100)
Example #5
def main():
    # testing_file = "D:/DataMining/QASystem/new_data/test.ann.json"
    testing_file = "D:/DataMining/QASystem/new_data/validation.ann.json"
    # testing_file = "D:/DataMining/QASystem/new_data/training.json"
    trained_model = "checkpoints/model.ckpt"
    embedding_file = "D:/DataMining/QASystem/wiki/wiki.zh.text.vector"
    embedding_size = 60  # Word embedding dimension
    hidden_size = 100  # Number of hidden layer neurons
    keep_prob = 1  # dropout disabled for evaluation (0.8 during training)
    batch_size = 60  # Batch data size

    max_quelen, max_evilen = get_max_length(testing_file)
    embeddings, word2idx = load_embedding(embedding_file)
    questions, evidences, y1, y2 = load_data(testing_file, word2idx,
                                             max_quelen, max_evilen)
    with tf.Graph().as_default():
        with tf.variable_scope('Model'):
            model = BiDAF(embeddings, max_quelen, max_evilen, embedding_size,
                          hidden_size, keep_prob)
            with tf.Session().as_default() as sess:
                saver = tf.train.Saver()
                print("开始加载模型")
                saver.restore(sess, trained_model)
                print("加载模型完毕")
                # sess.run(tf.global_variables_initializer()) 前面已经使用restore恢复变量了,如果再使用global_variables_initializer,会导致所有学习到的东西清零
                for batch_questions, batch_evidences, batch_y1, batch_y2 in next_batch(
                        questions, evidences, y1, y2, batch_size):
                    feed_dict = {
                        model.x: batch_evidences,
                        model.q: batch_questions,
                        model.y1: batch_y1,
                        model.y2: batch_y2
                    }
                    acc_s, acc_e = sess.run([model.acc_s, model.acc_e],
                                            feed_dict)
                    print('ACC_S: %s\t\tACC_E: %s' % (acc_s, acc_e))
Example #6
def main():
    
    f = open('results.txt', 'w')

    f.write("Preprocessing data...\n\n")
    # pre-process data
    train_X, train_Y, train_idx, _, test_X, test_idx = load_data(config.data_path, config.test_path)
    names = list(train_X)
    types = train_X.dtypes
    floats = (types == np.float64)

    new_X_GAM, new_test_GAM = construct_features(train_X, train_Y, test_X, have_poly=False)
    
    # feature selection
    f.write("Feature Selection\n")
    ridge_scores, ridge_X, ridge_test, ridge_names = select_features(train_X, train_Y, test_X, config.ridge_select, config.ridge_feats)
    lasso_scores, lasso_X, lasso_test, lasso_names = select_features(train_X, train_Y, test_X, config.lasso_select, config.lasso_feats)
    knn_scores, knn_X, knn_test, knn_names = select_features(train_X, train_Y, test_X, config.knn_select, config.knn_feats)
    rf_scores, rf_X, rf_test, rf_names = select_features(train_X, train_Y, test_X, config.rf_select, config.rf_feats)
    est_scores, est_X, est_test, est_names = select_features(train_X, train_Y, test_X, config.est_select, config.est_feats)
    write_selection_results(f, 'Ridge Regression', config.ridge_feats, ridge_scores, ridge_names)
    write_selection_results(f, 'LASSO Regression', config.lasso_feats, lasso_scores, lasso_names)
    write_selection_results(f, 'K-Nearest Neighbours', config.knn_feats, knn_scores, knn_names)
    write_selection_results(f, 'Random Forest', config.rf_feats, rf_scores, rf_names)
    write_selection_results(f, 'Gradient Boosting', config.est_feats, est_scores, est_names)
    f.write('\n#######################################\n\n')

    # model selection
    f.write("Model Selection\n")
    ridge_scores = cross_valid(config.ridge_models, ridge_X, train_Y)
    lasso_scores = cross_valid(config.lasso_models, lasso_X, train_Y)
    knn_scores = cross_valid(config.knn_models, knn_X, train_Y)
    rf_scores = cross_valid(config.rf_models, rf_X, train_Y)
    est_scores = cross_valid(config.est_models, est_X, train_Y)
    write_model_results(f, 'Ridge Regression', config.ridge_models, ridge_scores)
    write_model_results(f, 'LASSO Regression', config.lasso_models, lasso_scores)
    write_model_results(f, 'K-Nearest Neighbours', config.knn_models, knn_scores)
    write_model_results(f, 'Random Forest', config.rf_models, rf_scores)
    write_model_results(f, 'Gradient Boosting', config.est_models, est_scores)
    f.write('\n#######################################\n\n')

    best_reg = config.lasso3
    best_tree = config.est3
    best_reg.fit(lasso_X, train_Y)
    predictions_reg = best_reg.predict(lasso_test)
    best_tree.fit(est_X, train_Y)
    predictions_tree = best_tree.predict(est_test)
    write_test_file(predictions_reg, test_idx, 'results_reg.csv')
    write_test_file(predictions_tree, test_idx, 'results_tree.csv')

#    valid_X = new_X[:200]
    valid_Y = train_Y[:200] 
#    new_X = new_X[200:]
    train1_Y = train_Y[200:]

#    est.fit(new_X, train_Y)
#    preds = est.predict(new_test)
    err = []
    for i in range(90, 100, 10):
        sel = SelectPercentile(mutual_info_regression, percentile=i)
        # NOTE: the original snippet referenced an undefined `new_X`; new_X_GAM
        # (built by construct_features above) is assumed in its place.
        new1_X = sel.fit_transform(new_X_GAM, train_Y)
        valid_X = new1_X[:200]
        train_X = new1_X[200:]

        # NOTE: `est` is also undefined here; best_tree (the selected
        # gradient-boosting model) is assumed.
        best_tree.fit(train_X, train1_Y)
        predictions = best_tree.predict(valid_X)
#    preds = np.exp(predictions)
#    print(predictions)
#    print(preds)
#    write_test_file(preds, test_idx)
        err.append(np.sqrt(mean_squared_error(valid_Y, predictions)))
        print(explained_variance_score(valid_Y, predictions))
        print(r2_score(valid_Y, predictions))
        plt.scatter(valid_Y, predictions)
        x = [10.5, 11, 11.5, 12, 12.5, 13, 13.5]
        y = [10.5, 11, 11.5, 12, 12.5, 13, 13.5]
        plt.plot(x,y,'--')
        plt.ylabel("Predictions")
        plt.xlabel("Actual Y-values")
        plt.show()
#    plt.plot([10,20,30,40,50,60,70,80,90],err)
#    plt.xlabel("Percentage of Feature")
#    plt.ylabel("Validation MSE")
#    plt.show()
#    preds = np.exp(preds)
#    write_test_file(preds, test_idx)
#    new2_X = rfe2.fit_transform(new_X, train_Y)
#    print(new2_X.shape)
#    new3_X = rfe3.fit_transform(new_X, train_Y)
#    print(new3_X.shape)
#    new4_X = rfe4.fit_transform(new_X, train_Y)
#    print(new4_X.shape)
#    new5_X = rfe5.fit_transform(new_X, train_Y)
#    print(new5_X.shape)
#    new2_X = rfe2.fit_transform(new_X, train_Y)
#    new1_X = rfe1.fit_transform(new_X, train_Y)
#    new2_X = rfe2.fit_transform(new_X, train_Y)
#    new3_X = rfe3.fit_transform(new_X, train_Y)
#    new4_X = rfe4.fit_transform(new_X, train_Y)
#    pca1.fit(train_X, train_Y)
#    sel3.fit(train_X, train_Y)
#    new4_X = pca2.fit_transform(new_X, train_Y)

#    names1 = [new_names[i] for i in np.where(rfe1.support_ == True)[0]]
#    names2 = [new_names[i] for i in np.where(rfe2.support_ == True)[0]]

#    scores1 = cross_valid(models, new_X, train_Y)
#    scores2 = cross_valid([lasso2], new2_X, train_Y)
#    scores3 = cross_valid([lasso3], new3_X, train_Y)
#    scores4 = cross_valid([lasso4], new4_X, train_Y)
#    scores5 = cross_valid([lasso5], new5_X, train_Y)
#    scores5 = cross_valid(models, new3_X, train_Y)
#    scores5 = cross_valid(models, new3_X, train_Y)
#    scores6 = cross_valid(models, new4_X, train_Y)
#    print(sel_names)
#    print(new1_X.shape)
#    print(new2_X.shape)
#    print(new_X.shape)
#    print(scores1)
#    print(scores2)
#    print(scores3)
#    print(scores4)
#    print(scores5)
#    valid_X = new_X[:200]
    valid_Y = train_Y[:200] 
#    train_X = new_X[200:]
    train_Y = train_Y[200:]
#    new_train = sel3.transform(train_X)
#    new_valid = sel3.transform(valid_X)
#    print(new_valid.shape)
    err = []
    for i in range(80, 90, 10):
        pca = PCA(n_components=i)
        # NOTE: `new_X` is undefined in this snippet; new_X_GAM is assumed.
        new1_X = pca.fit_transform(new_X_GAM)
        valid_X = new1_X[:200]
 
        train_X = new1_X[200:]

        gam = LinearGAM(n_splines=8).gridsearch(train_X, train_Y)
        predictions = gam.predict(valid_X)
#    preds = np.exp(predictions)
#    print(predictions)
#    print(preds)
#    write_test_file(preds, test_idx)
        err.append(np.sqrt(mean_squared_error(valid_Y, predictions)))
        print(explained_variance_score(valid_Y, predictions))
        print(r2_score(valid_Y, predictions))
        plt.scatter(valid_Y, predictions)
        x = [10.5, 11, 11.5, 12, 12.5, 13, 13.5]
        y = [10.5, 11, 11.5, 12, 12.5, 13, 13.5]
        plt.plot(x,y,'--')
        plt.ylabel("Predictions")
        plt.xlabel("Actual Y-values")
        plt.show()
Example #7
    trained_model = "checkpoints/model.ckpt"
    embedding_file = "D:/DataMining/QASystem/wiki/wiki.zh.text.vector"
    embedding_size = 60  # Word embedding dimension
    epochs = 30  # 20
    batch_size = 60  # Batch data size
    hidden_size = 100  # Number of hidden layer neurons
    keep_prob = 0.8  # 0.8
    learning_rate = 0.01  # 0.001
    lrdown_rate = 0.9  # 0.8
    gpu_mem_usage = 0.75
    gpu_device = "/gpu:0"
    cpu_device = "/cpu:0"

    max_quelen, max_evilen = get_max_length(training_file)
    embeddings, word2idx = load_embedding(embedding_file)
    questions, evidences, y1, y2 = load_data(
        training_file, word2idx, max_quelen, max_evilen)
    with tf.Graph().as_default(), tf.device(cpu_device):
        # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_mem_usage)
        # session_conf = tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options)
        with tf.variable_scope('Model'):
            model = BiDAF(embeddings, max_quelen, max_evilen, embedding_size, hidden_size, keep_prob)
            with tf.Session().as_default() as sess:  # config=session_conf
                saver = tf.train.Saver()
                print("开始训练")
                sess.run(tf.global_variables_initializer())
                for i in range(epochs):
                    print("正在进行第%s次迭代训练" % (i+1))
                    for batch_questions, batch_evidences, batch_y1, batch_y2 in next_batch(questions, evidences, y1, y2, batch_size):
                        feed_dict = {
                            model.x: batch_evidences,
                            model.q: batch_questions,
Example #8
def main():
    training_file = "./new_data/training.json"
    validation_file = "./new_data/validation.ann.json"
    trained_model = "./checkpoints/model.ckpt"
    embedding_file = "D:/DataMining/QASystem/wiki/wiki.zh.text.vector"
    # embedding_file = "./wiki.zh.text.vector"
    embedding_size = 60  # Word embedding dimension
    epochs = 100
    batch_size = 64  # Batch data size
    sequence_length = 150  # Sentence length
    learning_rate = 0.0001
    lrdown_rate = 1
    gpu_mem_usage = 0.75
    gpu_device = "/gpu:0"
    cpu_device = "/cpu:0"

    config = get_config()  # Not used yet
    embeddings, word2idx = load_embedding(embedding_file)
    questions, evidences, y1, y2 = load_data(training_file, word2idx,
                                             sequence_length)
    questions_vali, evidences_vali, y1_vali, y2_vali = load_data(
        validation_file, word2idx, sequence_length)
    data_size = len(questions)
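    # Shuffle the whole training set once up front so mini-batches are drawn in random order.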
    permutation = np.random.permutation(data_size)
    questions = questions[permutation, :]
    evidences = evidences[permutation, :]
    y1 = y1[permutation]
    y2 = y2[permutation]
    with tf.Graph().as_default(), tf.device(gpu_device):
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=gpu_mem_usage)
        session_conf = tf.ConfigProto(allow_soft_placement=True,
                                      gpu_options=gpu_options)
        with tf.variable_scope('Model'):
            model = DGCNN(config, embeddings, sequence_length, embedding_size)
            with tf.Session(config=session_conf).as_default() as sess:
                saver = tf.train.Saver()
                print("Start training")
                sess.run(tf.global_variables_initializer())
                for i in range(epochs):
                    batch_number = 1
                    for batch_questions, batch_evidences, batch_y1, batch_y2 in next_batch(
                            questions, evidences, y1, y2, batch_size):
                        start_time = time.time()
                        feed_dict = {
                            model.e: batch_evidences,
                            model.q: batch_questions,
                            model.y1: batch_y1,
                            model.y2: batch_y2,
                            model.lr: learning_rate,
                            model.is_train: True
                        }
                        _, loss, acc1, acc2 = sess.run(
                            [model.train, model.loss, model.acc1, model.acc2],
                            feed_dict)
                        duration = time.time() - start_time
                        print(
                            'Epoch: [%d][%d/%d]\tTime %.3f\tLoss %2.3f\tAcc1 %2.3f\tAcc2 %2.3f'
                            % (i + 1, batch_number * batch_size, data_size,
                               duration, loss, acc1, acc2))
                        batch_number += 1
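                    # Decay the learning rate once per epoch (lrdown_rate = 1 leaves it unchanged).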
                    learning_rate *= lrdown_rate

                    # validation
                    start_time = time.time()
                    feed_dict = {
                        model.e: evidences_vali,
                        model.q: questions_vali,
                        model.y1: y1_vali,
                        model.y2: y2_vali,
                        model.is_train: False
                    }
                    loss, acc1, acc2 = sess.run(
                        [model.loss, model.acc1, model.acc2], feed_dict)
                    duration = time.time() - start_time
                    print(
                        'Validation: Time %.3f\tLoss %2.3f\tAcc1 %2.3f\tAcc2 %2.3f'
                        % (duration, loss, acc1, acc2))

                    saver.save(sess, trained_model)

                print("End of the training")
Example #9
    trained_model = "save/model.ckpt"
    embedding_file = "D:/DataMining/QASystem/wiki/wiki.zh.text.vector"
    embedding_size = 60  # Word embedding dimension
    epochs = 30
    batch_size = 128  # Batch data size
    hidden_size = 100  # Number of hidden layer neurons
    time_step = 100  # Sentence length
    keep_prob = 0.8
    learning_rate = 0.01
    lrdown_rate = 0.9
    gpu_mem_usage = 0.75
    gpu_device = "/gpu:0"

    time_step = get_max_length(training_file)
    embeddings, word2idx = load_embedding(embedding_file)
    questions, evidences, y1, y2 = load_data(training_file, word2idx,
                                             time_step)
    with tf.Graph().as_default(), tf.device(gpu_device):
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=gpu_mem_usage)
        session_conf = tf.ConfigProto(allow_soft_placement=True,
                                      gpu_options=gpu_options)
        with tf.variable_scope('Model'):
            model = DGCNN(embeddings, time_step, embedding_size, hidden_size)
            with tf.Session(config=session_conf).as_default() as sess:
                saver = tf.train.Saver()
                print("Start training")
                sess.run(tf.global_variables_initializer())
                for i in range(epochs):
                    print("The training of the %s iteration is underway" %
                          (i + 1))
                    for batch_questions, batch_evidences, batch_y1, batch_y2 in next_batch(