Example #1
def main():
    # Load Data
    data = np.loadtxt("zipcombo.dat")
    X, y = data_split(data, y_col=0)

    # Investigate effect of k values
    k_list = [1, 3, 7, 9, 27]
    xval_k_val = validate_k(X, y, k_list)
    np.savetxt('knn_variable_selection.csv',
               xval_k_val,
               delimiter=',',
               fmt='%10.20f')
    # Solve question 1
    q1_errors = q1_regularised(X, y, k_list)
    np.savetxt('knn_q1.csv', q1_errors, delimiter=',', fmt='%10.20f')

    n = 20
    xval_k_stats, k_max_index = q2_regularised(X, y, n, k_list)
    np.savetxt('knn_variable_selection_n_{}.csv'.format(n),
               xval_k_stats,
               delimiter=',',
               fmt='%10.20f')
    np.savetxt('knn_k_max_index_n_{}.csv'.format(n),
               k_max_index,
               delimiter=',',
               fmt='%10.20f')
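Neither data_split nor validate_k is shown in this example. The sketch below is one plausible reading, assuming data_split peels the label column off the combined array and validate_k returns one cross-validated k-NN error per candidate k; both signatures are assumptions, not the original helpers.

import numpy as np

def data_split(data, y_col=0):
    # Peel the label column off the combined array (assumed helper).
    y = data[:, y_col]
    X = np.delete(data, y_col, axis=1)
    return X, y

def validate_k(X, y, k_list, folds=5, seed=0):
    # k-fold cross-validated error of a brute-force k-NN classifier for
    # each candidate k (assumed helper; the pairwise distance matrix is
    # memory-hungry on the full zipcombo set).
    rng = np.random.default_rng(seed)
    fold_idx = np.array_split(rng.permutation(len(X)), folds)
    errors = np.zeros(len(k_list))
    for ki, k in enumerate(k_list):
        fold_errors = []
        for f in range(folds):
            val = fold_idx[f]
            train = np.concatenate([fold_idx[g] for g in range(folds) if g != f])
            dists = np.linalg.norm(X[val][:, None, :] - X[train][None, :, :], axis=2)
            nearest = np.argsort(dists, axis=1)[:, :k]
            votes = y[train][nearest].astype(int)
            preds = np.array([np.bincount(v).argmax() for v in votes])
            fold_errors.append(np.mean(preds != y[val]))
        errors[ki] = np.mean(fold_errors)
    return errors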
Example #2
def main():
    # Load Data
    data = np.loadtxt("zipcombo.dat")
    X, y = data_split(data, y_col=0)
    gauss_c = [3**i for i in range(-6, 3)]  # Gaussian kernel parameters c = 3**-6 ... 3**2

    d_errors, full_errors = kernel_d_selection(VectorisedKernelPerceptron,
                                               X,
                                               y,
                                               k=5,
                                               d_vals=gauss_c,
                                               epochs=8)
    q1_train, q1_test = q1(VectorisedKernelPerceptron, X, y, gauss_c,
                           percentage=0.2, epochs=10, seed=1, runs=20)
    np.savetxt('q1_train_errors_gauss.csv',
               q1_train,
               delimiter=',',
               fmt='%10.20f')
    np.savetxt('q1_test_errors_gauss.csv',
               q1_test,
               delimiter=',',
               fmt='%10.20f')

    d_prime_errors, confusion = d_hyperparameter_selection(
        VectorisedKernelPerceptron,
        X,
        y,
        gauss_c,
        k=5,
        epochs=10,
        seed=1,
        runs=20)
    np.savetxt('confusion_gauss.csv', confusion, delimiter=',', fmt='%i')
    np.savetxt('d_prime_errors_gauss.csv',
               d_prime_errors,
               delimiter=',',
               fmt='%10.20f')

    np.savetxt('d_errors_gauss.csv', d_errors, delimiter=',', fmt='%10.20f')
    np.savetxt('full_errors_gauss.csv',
               full_errors,
               delimiter=',',
               fmt='%10.20f')
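VectorisedKernelPerceptron itself is not defined here. Assuming it is a one-vs-all kernel perceptron over a Gaussian kernel K(x, z) = exp(-c * ||x - z||^2), with c swept over the 3**i grid above, its core might look like the sketch below; the kernel form and the training rule are assumptions, not the original class.

import numpy as np

def gaussian_gram(X1, X2, c):
    # Gram matrix for K(x, z) = exp(-c * ||x - z||^2) (assumed kernel form).
    sq = (np.sum(X1**2, axis=1)[:, None]
          + np.sum(X2**2, axis=1)[None, :]
          - 2.0 * X1 @ X2.T)
    return np.exp(-c * sq)

def train_dual_perceptron(K, y, n_classes=10, epochs=8):
    # Mistake-driven one-vs-all kernel perceptron in dual form:
    # alpha[c, t] accumulates signed mistakes of class c on example t.
    n = K.shape[0]
    alpha = np.zeros((n_classes, n))
    for _ in range(epochs):
        for t in range(n):
            pred = int(np.argmax(alpha @ K[:, t]))
            true = int(y[t])
            if pred != true:
                alpha[true, t] += 1.0
                alpha[pred, t] -= 1.0
    return alpha

Prediction on held-out points is then np.argmax(alpha @ gaussian_gram(X_train, X_test, c), axis=0), one class label per test column.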
Example #3
def main():
    # Load Data
    data = np.loadtxt("zipcombo.dat")
    X, y = data_split(data, y_col=0)
    poly_d = list(range(1, 8))

    d_errors, full_errors = kernel_d_selection(onevsonePerceptron,
                                               X,
                                               y,
                                               k=5,
                                               d_vals=poly_d,
                                               epochs=10)
    q1_train, q1_test = q1(onevsonePerceptron,
                           X,
                           y,
                           poly_d,
                           percentage=0.2,
                           epochs=10,
                           seed=0,
                           runs=20)
    np.savetxt('q1_train_errors_1v1.csv',
               q1_train,
               delimiter=',',
               fmt='%10.20f')
    np.savetxt('q1_test_errors_1v1.csv', q1_test, delimiter=',', fmt='%10.20f')

    d_prime_errors = d_hyperparameter_selection(onevsonePerceptron,
                                                X,
                                                y,
                                                d_vals=poly_d,
                                                k=5,
                                                epochs=10,
                                                seed=0,
                                                runs=20)
    np.savetxt('d_prime_errors_1v1.csv',
               d_prime_errors,
               delimiter=',',
               fmt='%10.20f')

    np.savetxt('d_errors_1v1.csv', d_errors, delimiter=',', fmt='%10.20f')
    np.savetxt('full_errors_1v1.csv',
               full_errors,
               delimiter=',',
               fmt='%10.20f')
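onevsonePerceptron is likewise not defined. Assuming it trains one binary perceptron per digit pair on the polynomial kernel K(x, z) = (x . z)**d, with the d values swept above, and predicts by majority vote, a minimal sketch of that scheme follows (all names and signatures here are assumptions):

import numpy as np
from itertools import combinations

def train_one_vs_one(X, y, d, epochs=10, n_classes=10):
    # One dual perceptron per (a, b) digit pair; class a is mapped to +1
    # and class b to -1 on that pair's subset of the data.
    models = {}
    for a, b in combinations(range(n_classes), 2):
        mask = (y == a) | (y == b)
        Xp, yp = X[mask], np.where(y[mask] == a, 1.0, -1.0)
        K = (Xp @ Xp.T) ** d
        alpha = np.zeros(len(Xp))
        for _ in range(epochs):
            for t in range(len(Xp)):
                # Standard dual perceptron: update on a sign mistake.
                if yp[t] * ((alpha * yp) @ K[:, t]) <= 0:
                    alpha[t] += 1.0
        models[(a, b)] = (Xp, yp, alpha)
    return models

def predict_one_vs_one(models, X, d, n_classes=10):
    # Each pairwise perceptron casts one vote per test point.
    votes = np.zeros((len(X), n_classes))
    for (a, b), (Xp, yp, alpha) in models.items():
        scores = ((X @ Xp.T) ** d) @ (alpha * yp)
        votes[scores > 0, a] += 1
        votes[scores <= 0, b] += 1
    return np.argmax(votes, axis=1)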
Example #4
    def train_model(self,
                    epochs=32,
                    val_ratio=0.1,
                    train_batch_size=256,
                    val_batch_size=None):
        # Default the validation batch size to twice the training batch size.
        if val_batch_size is None:
            val_batch_size = 2 * train_batch_size
        x_train, y_train, x_val, y_val = data_split(self.data,
                                                    val_ratio=val_ratio)
        train_len = x_train.shape[0]
        steps_per_epoch = math.ceil(train_len / train_batch_size)

        input_x = self.inputParams['input_ids']
        input_x_mask = self.inputParams['input_mask']
        input_x_seg = self.inputParams['segment_ids']
        input_y = self.inputParams['input_labels']
        keep_prob = self.inputParams['keep_prob']
        # learning_rate   = self.inputParams['learning_rate']
        num_train_steps = self.inputParams['num_train_steps']

        loss = self.outputParams['loss']
        accuracy = self.outputParams['accuracy']
        train_op = self.outputParams['train_op']

        merged_summary = self.summaryParams['merged_summary']

        # Collect all trainable variables in the model.
        tvars = tf.trainable_variables()
        # Load pretrained weights from the BERT checkpoint.
        (assignment_map, initialized_variable_names
         ) = modeling.get_assignment_map_from_checkpoint(tvars, BERT_CKPT)
        tf.train.init_from_checkpoint(BERT_CKPT, assignment_map)

        gen_train_batch = gen_batch_data(x_train, y_train, train_batch_size)
        gen_val_batch = gen_batch_data(x_val, y_val, val_batch_size)

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            max_acc = 0
            noChangeEpoches = 0
            save_saver = tf.train.Saver()
            lr = 0.01
            for epoch in range(epochs):
                print("epoch:{0}".format(epoch))
                noChangedSteps = 0
                for j in range(steps_per_epoch):
                    x_train_batch, y_train_batch = next(gen_train_batch)
                    # processor_x returns an np.array built from lists
                    x_train_batch = self.data.processor_x(x_train_batch)
                    y_train_batch = self.data.processor_y(y_train_batch)

                    input_ids_batch, input_mask_batch, segment_ids_batch \
                        = conver2Input(x_train_batch, max_seq_len=256)

                    fetches = [loss, accuracy, train_op]

                    feed_dict = {
                        input_x: input_ids_batch,
                        input_x_mask: input_mask_batch,
                        input_x_seg: segment_ids_batch,
                        input_y: y_train_batch,
                        keep_prob: 0.8,
                        # learning_rate:lr,
                        num_train_steps: epochs * steps_per_epoch,
                    }

                    loss_, accuracy_, _ = sess.run(fetches,
                                                   feed_dict=feed_dict)
                    print('batch {}/{} | epoch {} | train loss: {} | train accuracy: {}'.format(
                        j,
                        steps_per_epoch,
                        epoch,
                        loss_,
                        accuracy_,
                    ))

                    if j % 100 == 0 or j == self.data.get_step() - 1:
                        x_val_batch, y_val_batch = next(gen_val_batch)
                        x_val_batch = self.data.processor_x(x_val_batch)
                        y_val_batch = self.data.processor_y(y_val_batch)
                        input_ids_val_batch, input_mask_val_batch, segment_ids_val_batch \
                            = conver2Input(x_val_batch, max_seq_len=256)

                        val_loss, val_acc = sess.run(
                            [loss, accuracy],
                            feed_dict={
                                input_x: input_ids_val_batch,
                                input_x_mask: input_mask_val_batch,
                                input_x_seg: segment_ids_val_batch,
                                input_y: y_val_batch,
                                keep_prob: 1,
                            })
                        print('batch {}/{} | epoch {} | val loss: {} | val accuracy: {}'.
                              format(
                                  j,
                                  steps_per_epoch,
                                  epoch,
                                  val_loss,
                                  val_acc,
                              ))
                        if val_acc > max_acc:
                            noChangedSteps = 0
                            max_acc = val_acc
                            if not os.path.exists(MODEL_PATH):
                                os.makedirs(MODEL_PATH)
                            save_path = save_saver.save(sess, self.model_path)
                            print("Model saved in path: %s" % save_path)
                        else:
                            noChangedSteps += 100

                # lr = 0.9*lr
                if noChangedSteps >= steps_per_epoch:
                    noChangeEpoches += 1
                else:
                    noChangeEpoches = 0

                if noChangeEpoches >= 3:
                    break
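gen_batch_data is consumed with next() once per step but never defined in this snippet; the loop above only works if it is an endless generator. A sketch of that assumed behaviour, reshuffling on every pass over the data:

import numpy as np

def gen_batch_data(x, y, batch_size):
    # Endless shuffled mini-batch generator: next() never raises
    # StopIteration, matching how the training loop calls it (assumed
    # helper; x and y are taken to be indexable arrays).
    n = len(x)
    while True:
        order = np.random.permutation(n)
        for start in range(0, n, batch_size):
            sel = order[start:start + batch_size]
            yield x[sel], y[sel]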
Example #5
'''
Data-handling helpers provided by the flyai library:
pass in the number of epochs over the full dataset and the batch size per step.
'''
dataset = Dataset(epochs=args.EPOCHS,
                  batch=args.BATCH,
                  val_batch=args.VAL_BATCH)
mymodel = Model(dataset)

print("number of train examples:%d" % dataset.get_train_length())
print("number of validation examples:%d" % dataset.get_validation_length())

QA_model = mymodel.QA_model
print(QA_model)

x_train, y_train, x_val, y_val = data_split(dataset, val_ratio=0.1)
train_len = x_train.shape[0]
val_len = x_val.shape[0]

test_x_data = x_val[-args.BATCH:]
test_y_data = y_val[-args.BATCH:]


def show_result(model, que_data, ans_data):
    score = 0.0
    for i, que in enumerate(que_data):
        predict = model.predict(**que)
        print("预测结果:%s" % predict)
        print("实际答案:%s" % ans_data[i]["ans_text"])
        score += sentence_bleu([jieba.lcut(predict)],
                               jieba.lcut(ans_data[i]["ans_text"]))
    print("average BLEU: %.4f" % (score / len(que_data)))
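sentence_bleu here is NLTK's, scoring jieba-tokenised sentences. Note that by NLTK's convention the reference list comes first and the hypothesis second; show_result above passes the prediction in the reference slot, which inverts that convention (BLEU is not symmetric). Also, on short answers the default 4-gram precision is often zero, which collapses the whole score; NLTK's SmoothingFunction avoids that. A minimal usage sketch (the two strings are hypothetical):

import jieba
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction

reference = "今天天气很好"   # hypothetical ground-truth answer
hypothesis = "今天天气不错"  # hypothetical model prediction
smooth = SmoothingFunction().method1  # rescues zero n-gram counts
bleu = sentence_bleu([jieba.lcut(reference)], jieba.lcut(hypothesis),
                     smoothing_function=smooth)
print("BLEU: %.4f" % bleu)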