Code example #1
def test(test_dir):
    print("Loading test data...")
    start_time = time.time()
    x_test, y_test = process_file(test_dir, word_to_id, cat_to_id,
                                  config.seq_length)

    session = tf.Session(config=tfconfig)
    session.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    saver.restore(sess=session, save_path=save_path)  # restore the saved model

    print('Testing...')
    loss_test, acc_test = evaluate(session, x_test, y_test)
    msg = 'Test Loss: {0:>6.2}, Test Acc: {1:>7.2%}'
    print(msg.format(loss_test, acc_test))

    batch_size = 128
    data_len = len(x_test)
    num_batch = int((data_len - 1) / batch_size) + 1

    y_pred_cls = np.zeros(shape=len(x_test), dtype=np.int32)  # holds the predicted classes
    for i in range(num_batch):  # process batch by batch
        start_id = i * batch_size
        end_id = min((i + 1) * batch_size, data_len)
        feed_dict = {
            model.input_x: x_test[start_id:end_id],
            model.keep_prob: 1.0
        }
        y_pred_cls[start_id:end_id] = session.run(model.y_pred_cls,
                                                  feed_dict=feed_dict)

    time_dif = get_time_dif(start_time)
    print("Time usage:", time_dif)
    return y_pred_cls
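
Every snippet in this collection calls a get_time_dif helper that is never shown. A minimal sketch of what it presumably does, judging from how its result is printed (treat this as an assumption, not the authors' code):

import time
from datetime import timedelta

def get_time_dif(start_time):
    # elapsed wall-clock time, rounded to whole seconds
    return timedelta(seconds=int(round(time.time() - start_time)))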
Code example #2
def train():
    print("Configuring TensorBoard and Saver...")
    # Configure TensorBoard; delete the tensorboard folder before retraining, otherwise the new graph overlaps the old one
    tensorboard_dir = 'tensorboard/rnn'
    if not os.path.exists(tensorboard_dir):
        os.makedirs(tensorboard_dir)
    merged = tf.summary.merge_all()
    writer = tf.summary.FileWriter(tensorboard_dir)
    # configure the Saver
    saver = tf.train.Saver()
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    # create the session
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    writer.add_graph(sess.graph)

    # load the data
    print("Loading training data: %.1f.csv" % f)
    train_dir = os.path.join(base_dir, data_dir)
    x_train, y_train = process_file(train_dir, config)
    x_normalize = x_train
    y_normalize = y_train

    # start training
    print('Training and evaluating...')
    for epoch in range(config.epoches):
        x_batch, y_batch = batch_iter(x_normalize, y_normalize, config)
        for iteration in range(config.num_iterations):
            # print('Epoch: ', epoch + 1)
            # batch_train = batch_iter(x_train, y_train, config.batch_size)
            feed_dict = {
                model.xs: x_batch,
                model.ys: y_batch,
                # create initial state
            }
            _, cost, state, pred = sess.run([
                model.train_op, model.cost, model.cell_final_state, model.pred
            ],
                                            feed_dict=feed_dict)
            # plotting
            # if v % 20 == 0:
            #     plt.figure(v)
            #     plt.plot(t[0, :], (v/10)*y_batch[0].flatten(), 'r', t[0, :], (v/10)*pred.flatten()[:config.time_steps], 'b--',
            #              t[0, :], (v/10) *x_batch[0].flatten(), 'k-.')
            #     plt.ylim((-16, 16))
            #     plt.draw()
            #     plt.pause(0.3)
            if iteration % 20 == 0:
                print('cost: ', round(cost, 4))
                result = sess.run(merged, feed_dict)
                # use a global step so later epochs do not overwrite earlier summaries
                writer.add_summary(result, epoch * config.num_iterations + iteration)

    # save the network
    saver.save(sess=sess, save_path=save_path)
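
batch_iter(x, y, config) is another helper these regression snippets assume. A hypothetical sketch, consistent with how examples #2 and #6 index its result (random aligned windows from one long series; names and shapes are guesses):

import numpy as np

def batch_iter(x, y, config):
    # sample config.batch_size windows of length config.time_steps
    starts = np.random.randint(0, len(x) - config.time_steps,
                               size=config.batch_size)
    x_batch = np.stack([x[s:s + config.time_steps] for s in starts])
    y_batch = np.stack([y[s:s + config.time_steps] for s in starts])
    return x_batch, y_batch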
Code example #3
def test():
    print("Loading test data...")
    start_time = time.time()
    x_test, y_test = process_file(test_dir, cat_to_id, config.seq_length)

    session = tf.Session()
    session.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    print('save_path', save_path)
    saver.restore(sess=session, save_path=save_path)

    print('Testing...')
    loss_test, acc_test = evaluate(session, x_test, y_test)
    msg = 'Test Loss: {0:>6.4}, Test Acc: {1:>7.4%}'
    print(msg.format(loss_test, acc_test))

    batch_size = 128
    data_len = len(x_test)
    num_batch = int((data_len - 1) / batch_size) + 1

    y_test_cls = np.argmax(y_test, 1)
    y_pred_cls = np.zeros(shape=len(x_test), dtype=np.int32)

    auc_score = []
    for i in range(num_batch):
        start_id = i * batch_size
        end_id = min((i + 1) * batch_size, data_len)
        feed_dict = {
            model.input_x: x_test[start_id:end_id],
            model.keep_prob: 1.0
        }
        y_pred_cls[start_id:end_id] = session.run(model.y_pred_cls,
                                                  feed_dict=feed_dict)
        # collect the positive-class probability for AUC
        softmax_score = session.run(model.softmax_score, feed_dict=feed_dict)
        for line in softmax_score:
            auc_score.append(line[1])

    auc_score = np.array(auc_score)
    #auc = round(metrics.auc(y_test_cls,auc_score),4)
    auc = round(roc_auc_score(y_test_cls, auc_score), 4)
    print('Test Auc:', str(auc * 100) + '%')

    brier_score = brier_score_loss(y_test_cls, auc_score)
    print('Brierscore', brier_score)

    print("Precision, Recall and F1-Score...")
    print(
        metrics.classification_report(y_test_cls,
                                      y_pred_cls,
                                      target_names=categories))

    print("Confusion Matrix...")
    cm = metrics.confusion_matrix(y_test_cls, y_pred_cls)
    print(cm)
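
For reference, roc_auc_score and brier_score_loss both expect the probability of the class labeled 1, which is why the loop above keeps column 1 of the softmax output. A tiny self-contained check:

import numpy as np
from sklearn.metrics import roc_auc_score, brier_score_loss

y_true = np.array([0, 1, 1, 0, 1])             # true class ids
scores = np.array([0.2, 0.9, 0.6, 0.4, 0.8])   # P(class == 1), softmax column 1
print(roc_auc_score(y_true, scores))     # 1.0: every positive outranks every negative
print(brier_score_loss(y_true, scores))  # 0.082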
Code example #4
def test():
    """
    测试模型
    :return:
    """
    print("Loading test data...")
    start_time = time.time()
    x_test, y_test = process_file(test_dir, word_to_id, cat_to_id,
                                  config.seq_length)

    session = tf.Session()
    session.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    saver.restore(sess=session, save_path=save_path)  # restore the saved model

    print('Testing...')
    loss_test, acc_test = evaluate(session, x_test, y_test)
    msg = 'Test Loss: {0:>6.2}, Test Acc: {1:>7.2%}'
    print(msg.format(loss_test, acc_test))

    batch_size = 128
    data_len = len(x_test)
    num_batch = int((data_len - 1) / batch_size) + 1

    y_test_cls = np.argmax(y_test, 1)
    y_pred_cls = np.zeros(shape=len(x_test), dtype=np.int32)  # holds the predicted classes
    for i in range(num_batch):  # process batch by batch
        start_id = i * batch_size
        end_id = min((i + 1) * batch_size, data_len)
        feed_dict = {
            model.input_x: x_test[start_id:end_id],
            model.keep_prob: 1.0
        }
        y_pred_cls[start_id:end_id] = session.run(model.y_pred_cls,
                                                  feed_dict=feed_dict)

    # evaluation
    print("Precision, Recall and F1-Score...")
    print(
        metrics.classification_report(y_test_cls,
                                      y_pred_cls,
                                      target_names=categories))

    # confusion matrix
    print("Confusion Matrix...")
    cm = metrics.confusion_matrix(y_test_cls, y_pred_cls)
    print(cm)

    time_dif = get_time_dif(start_time)
    print("Time usage:", time_dif)
Code example #5
def test():
    print("Loading test data...")
    start_time = time.time()
    x_test, y_test = process_file(test_dir, word_to_id, cat_to_id, config.seq_length)

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1) 
    session = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    
    session.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    saver.restore(sess=session, save_path=save_path)  # restore the saved model

    print('Testing...')
    loss_test, acc_test = evaluate(session, x_test, y_test)
    msg = 'Test Loss: {0:>6.2}, Test Acc: {1:>7.2%}'
    print(msg.format(loss_test, acc_test))

    batch_size = 128
    data_len = len(x_test)
    num_batch = int((data_len - 1) / batch_size) + 1

    y_test_cls = np.argmax(y_test, 1)
    y_pred_cls = np.zeros(shape=len(x_test), dtype=np.int32)  # holds the predicted classes
    for i in range(num_batch):  # process batch by batch
        start_id = i * batch_size
        end_id = min((i + 1) * batch_size, data_len)
        feed_dict = {
            model.input_x: x_test[start_id:end_id],
            model.keep_prob: 1.0
        }
        y_pred_cls[start_id:end_id] = session.run(model.y_pred_cls, feed_dict=feed_dict)

    # print the misclassified samples
    for i in range(len(y_pred_cls)):
        if y_pred_cls[i] != y_test_cls[i]:
            print('Predict:', categories[y_pred_cls[i]], 'True:', categories[y_test_cls[i]])
            print('Sentence:', ''.join([words[id] for id in x_test[i] if id != 0]))
            
    # evaluation
    print("Precision, Recall and F1-Score...")
    print(metrics.classification_report(y_test_cls, y_pred_cls, target_names=categories))

    # confusion matrix
    print("Confusion Matrix...")
    cm = metrics.confusion_matrix(y_test_cls, y_pred_cls)
    print(cm)

    time_dif = get_time_dif(start_time)
    print("Time usage:", time_dif)
Code example #6
def test():
    # v = random.randint(start, end)
    # load the data
    print("Loading test data: ")

    # train_data = os.path.join(base_dir, dir)
    train_dir = os.path.join(base_dir, data_dir)
    x_data, y_data = process_file(train_dir, config)
    # x_max = max(abs(x_test))
    # y_max = max(y_train)
    # x_max = max(x_test)
    x_normalize = x_data
    y_normalize = y_data

    # create the session
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    saver.restore(sess=sess, save_path=save_path)  # restore the saved model

    print('Testing...')
    x_batch, y_batch = batch_iter(x_normalize, y_normalize, config)
    x_test = x_batch[1].reshape(1, 1000, 1)
    y_test = y_batch[1].reshape(1, 1000, 1)
    feed_dict = {
        model.xs: x_test,
        model.ys: y_test,
        # create initial state
    }

    state, pred = sess.run([model.cell_final_state, model.pred],
                           feed_dict=feed_dict)

    # plotting: the original passed state.flatten() as the third curve, but the
    # LSTM state does not have the series' shape; pred is the intended curve
    # plt.figure(v)
    plt.plot(x_test.flatten(), 'r', y_test.flatten(), 'b--', pred.flatten(),
             'k-.')
    plt.ylim((-16, 16))
    plt.draw()
    plt.pause(0.3)
    os.system("pause")
Code example #7
import numpy as np  # used below in evaluate()
import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import Variable

# check whether a GPU is available
print(torch.cuda.is_available())

categories, cat_to_id = read_category()
print(categories)

words, word_to_id = read_vocab('cnews_vocab.txt')
print(words)
# load the training set
x_train, y_train = process_file('cnews_small_sample.txt', word_to_id,
                                cat_to_id, 600)
print('x_train=', x_train)
# load the validation set
x_val, y_val = process_file('cnews_val.txt', word_to_id, cat_to_id, 600)


# evaluate accuracy on the validation set
def evaluate(model, Loss, optimizer, x_val, y_val):

    batch_val = data_load(x_val, y_val, 32)
    acc = 0
    los = 0
    for x_batch, y_batch in batch_val:
        size = len(x_batch)
        x = np.array(x_batch)
        y = np.array(y_batch)
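        # The original snippet is truncated here; the lines below are a
        # plausible completion, assuming `model` returns logits of shape
        # (batch, num_classes) and the labels arrive one-hot encoded
        x = torch.LongTensor(x)
        y = torch.LongTensor(np.argmax(y, axis=1))
        with torch.no_grad():
            out = model(x)
        los += Loss(out, y).item() * size
        acc += (out.argmax(dim=1) == y).sum().item()
    return los / len(x_val), acc / len(x_val)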
Code example #8
def train():
    print("Configuring TensorBoard and Saver...")
    # Configure TensorBoard; delete the tensorboard folder before retraining, otherwise the new graph overlaps the old one
    tensorboard_dir = 'tensorboard/textcnn'
    if not os.path.exists(tensorboard_dir):
        os.makedirs(tensorboard_dir)

    tf.summary.scalar("loss", model.loss)
    tf.summary.scalar("accuracy", model.acc)
    merged_summary = tf.summary.merge_all()
    writer = tf.summary.FileWriter(tensorboard_dir)

    # configure the Saver
    saver = tf.train.Saver()
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    if not os.path.exists(save_dir_bak):
        os.makedirs(save_dir_bak)
    print("Loading training and validation data...")
    # load the training and test sets
    start_time = time.time()
    x_train, y_train = process_file(train_dir, word_to_id, cat_to_id, config.seq_length)
    # the validation set is not needed here
    # x_val, y_val = process_file(val_dir, word_to_id, cat_to_id, config.seq_length)
    x_test, y_test = process_file(test_dir, word_to_id, cat_to_id, config.seq_length)
    time_dif = get_time_dif(start_time)
    print("Time usage:", time_dif)

    # create the session
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1) 
    session = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    session.run(tf.global_variables_initializer())
    writer.add_graph(session.graph)

    print('Training and evaluating...')
    start_time = time.time()
    total_batch = 0  # total number of batches processed so far
    # best_acc_val = 0.0  # best validation accuracy
    best_acc_test = 0.0
    last_improved = 0  # batch at which the last improvement happened
    require_improvement = 1500  # stop early after 1500 batches without improvement

    flag = False
    for epoch in range(config.num_epochs):
        print('Epoch:', epoch + 1)
        batch_train = batch_iter(x_train, y_train, config.batch_size)
        for x_batch, y_batch in batch_train:
            feed_dict = feed_data(x_batch, y_batch, config.dropout_keep_prob)

            if total_batch % config.save_per_batch == 0:
                # write training summaries to TensorBoard every save_per_batch batches
                s = session.run(merged_summary, feed_dict=feed_dict)
                writer.add_summary(s, total_batch)

            if total_batch % config.print_per_batch == 0:
                # report performance on the training and test sets every print_per_batch batches
                feed_dict[model.keep_prob] = 1.0
                loss_train, acc_train = session.run([model.loss, model.acc], feed_dict=feed_dict)
                # loss_val, acc_val = evaluate(session, x_val, y_val)  # todo
                loss_test, acc_test = evaluate(session, x_test, y_test)
                if acc_test > best_acc_test:
                    # save the best result
                    best_acc_test = acc_test
                    last_improved = total_batch
                    saver.save(sess=session, save_path=save_path_bak)
                    improved_str = '*'
                else:
                    improved_str = ''

                time_dif = get_time_dif(start_time)
                msg = 'Iter: {0:>6}, Train Loss: {1:>6.2}, Train Acc: {2:>7.2%},' \
                      + ' Test Loss: {3:>6.2}, Test Acc: {4:>7.2%}, Time: {5} {6}'
                print(msg.format(total_batch, loss_train, acc_train, loss_test, acc_test, time_dif, improved_str))

            feed_dict[model.keep_prob] = config.dropout_keep_prob
            session.run(model.optim, feed_dict=feed_dict)  # run the optimization step
            total_batch += 1

            if total_batch - last_improved > require_improvement:
                # accuracy has not improved for a long time; stop training early
                print("No optimization for a long time, auto-stopping...")
                flag = True
                break  # leave the inner loop
        if flag:  # likewise leave the epoch loop
            break
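
feed_data is one more assumed helper in these training loops. Given how it is called above, a plausible sketch (the placeholder names mirror the snippets; note that example #12 passes an extra training flag, so the exact signature varies by project):

def feed_data(x_batch, y_batch, keep_prob):
    return {
        model.input_x: x_batch,
        model.input_y: y_batch,
        model.keep_prob: keep_prob,
    }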
Code example #9
    for i in normal_num:
        get_train_data(i)
        config = TCNNConfig()
        if not os.path.exists(vocab_dir):  # rebuild the vocabulary if it does not exist
            build_vocab(train_dir, vocab_dir, config.vocab_size)
        categories, cat_to_id = read_category(train_dir)
        words, word_to_id = read_vocab(vocab_dir)
        config.vocab_size = len(words)
        config.num_classes = len(categories)
        model = TextCNN(config)
        # train the model and save it to the bak path
        train()

        print("Loading test data...")
        start_time = time.time()
        x_test, y_test = process_file(test_dir, word_to_id, cat_to_id, config.seq_length)

        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1) 
        session = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
        
        session.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        saver.restore(sess=session, save_path=save_path_bak)  # restore the saved model

        print('Testing...')
        loss_test, acc_test = evaluate(session, x_test, y_test)
        if acc_test > max_acc and loss_test < 5:
            print('GET GREATEST MODEL!')
            max_acc = acc_test
            greatest_normal_num = i
            saver.save(sess=session, save_path=save_path)
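
One caveat with this selection loop: each pass constructs a fresh TextCNN in the same TF1 default graph, so variables pile up across iterations. The usual fix, as a minimal sketch (assuming nothing outside the loop holds references into the old graph):

    for i in normal_num:
        tf.reset_default_graph()  # drop the previous iteration's graph
        config = TCNNConfig()
        model = TextCNN(config)
        # ... train and evaluate as above ...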
Code example #10
def train():
    print("Configuring TensorBoard and Saver...")
    # Configure TensorBoard; delete the tensorboard folder before retraining, otherwise the new graph overlaps the old one
    tensorboard_dir = 'tensorboard/tb30641000v01/'
    if not os.path.exists(tensorboard_dir):
        os.makedirs(tensorboard_dir)

    train_loss_summ = tf.summary.scalar("train_loss", model.loss)
    train_acc_summ = tf.summary.scalar("train_accuracy", model.acc)

    val_loss_summ = tf.summary.scalar("validation_loss", model.loss)
    val_acc_summ = tf.summary.scalar("validation_accuracy", model.acc)
    writer = tf.summary.FileWriter(tensorboard_dir)

    # configure the Saver
    saver = tf.train.Saver()
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    print("Loading training and validation data...")

    # load the training and validation sets
    start_time = time.time()
    x_train, y_train = process_file(train_txt_dirs, seq_length, word_to_id, cat_to_id, config.seq_length)
    print("The total number of training samples is {}".format(len(x_train)))

    x_val, y_val = process_file(test_txt_dirs, seq_length, word_to_id, cat_to_id, config.seq_length)
    print("The total number of test samples is {}".format(len(x_val)))
    time_dif = get_time_dif(start_time)
    print("Time usage:", time_dif)

    # create the session
    session = tf.Session()
    session.run(tf.global_variables_initializer())
    writer.add_graph(session.graph)

    print('Training and evaluating...')
    start_time = time.time()
    total_batch = 0  # total number of batches processed so far
    best_acc_val = 0.0  # best validation accuracy
    last_improved = 0  # batch at which the last improvement happened
    require_improvement = 1000  # stop early after 1000 batches without improvement
    # flag = False
    for epoch in range(config.num_epochs):
        print('Epoch:', epoch + 1)
        batch_train = batch_iter(x_train, y_train, config.batch_size)
        for x_batch, y_batch in batch_train:
            feed_dict = feed_data(x_batch, y_batch, config.dropout_keep_prob)

            if total_batch % config.print_per_batch == 0:
                # report performance on the training and validation sets every print_per_batch batches
                # feed_dict[model.keep_prob] = 1.0
                loss_train, acc_train = session.run([model.loss, model.acc], feed_dict=feed_dict)
                train_loss_summ_, train_acc_summ_ = session.run([train_loss_summ, train_acc_summ], feed_dict=feed_dict)
                writer.add_summary(train_loss_summ_, total_batch)
                writer.add_summary(train_acc_summ_, total_batch)

                loss_val, acc_val = evaluate(session, x_val, y_val)  # todo
                val_loss_summ_, val_acc_summ_ = session.run([val_loss_summ, val_acc_summ],
                                                            feed_dict={model.input_x: x_val,
                                                                       model.input_y: y_val,
                                                                       model.keep_prob: 1.0})
                writer.add_summary(val_loss_summ_, total_batch)
                writer.add_summary(val_acc_summ_, total_batch)

                if acc_val > best_acc_val:
                    # save the best result
                    best_acc_val = acc_val
                    last_improved = total_batch
                    saver.save(sess=session, save_path=save_path)
                    improved_str = '*'
                else:
                    improved_str = ''

                time_dif = get_time_dif(start_time)
                msg = 'Iter: {0:>6}, Train Loss: {1:>6.2}, Train Acc: {2:>7.2%},' \
                      + ' Val Loss: {3:>6.2}, Val Acc: {4:>7.2%}, Time: {5} {6}'
                print(msg.format(total_batch, loss_train, acc_train, loss_val, acc_val, time_dif, improved_str))

            session.run(model.optim, feed_dict=feed_dict)  # run the optimization step
            total_batch += 1
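
Note that example #10 feeds the entire validation set in a single session.run to produce its summaries, which can exhaust memory on a large set. A hedged alternative is to reuse the batched evaluate result and write the scalars by hand with TF1's summary protobuf:

def write_scalar(writer, tag, value, step):
    # build a scalar summary directly instead of re-running the graph
    summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
    writer.add_summary(summary, step)

# loss_val, acc_val = evaluate(session, x_val, y_val)
# write_scalar(writer, 'validation_loss', loss_val, total_batch)
# write_scalar(writer, 'validation_accuracy', acc_val, total_batch)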
Code example #11
def test():
    print("Loading test data...")
    start_time = time.time()
    contents = read_file(test_dir)[0]
    x_test, y_test = process_file(test_dir, word_to_id, cat_to_id,
                                  config.seq_length)

    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    session = tf.Session(config=tf_config)
    session.run(tf.global_variables_initializer())
    saver = tf.train.Saver(tf.global_variables())
    saver.restore(sess=session, save_path=save_path)  # restore the saved model

    print('Testing...')
    loss_test, acc_test = evaluate(session, x_test, y_test)
    msg = 'Test Loss: {0:>6.2}, Test Acc: {1:>7.2%}'
    print(msg.format(loss_test, acc_test))

    batch_size = 128
    data_len = len(x_test)
    num_batch = int((data_len - 1) / batch_size) + 1

    y_test_cls, y_pred_cls = [], []
    for r_y in y_test:
        m = []
        for k in range(len(r_y)):
            if r_y[k] >= 0.5:
                m.append(k)
        y_test_cls.append(m)
    #y_test_cls = sorted(y_test_cls,reverse=True)

    #y_test_cls = np.argmax(y_test, 1)
    #y_pred_cls = np.zeros(shape=len(x_test), dtype=np.int32)  # holds the predicted classes
    pred_prob = np.zeros(shape=[len(x_test), config.num_classes],
                         dtype=np.float32)
    #sorted_pred_prob = np.zeros(shape=[len(x_test),config.num_classes],dtype=np.float32)
    for i in range(num_batch):  # process batch by batch
        start_id = i * batch_size
        end_id = min((i + 1) * batch_size, data_len)
        feed_dict = {
            model.input_x: x_test[start_id:end_id],
            model.keep_prob: 1.0,
            model.batch_size: len(x_test[start_id:end_id]),
            model.real_length: get_real_seq_length(x_test[start_id:end_id])
        }
        pred_prob[start_id:end_id] = session.run(model.pred_prob,
                                                 feed_dict=feed_dict)
        #sorted_pred_prob[start_id:end_id]=[np.argsort(pred_prob[i]) for i in range(start_id,end_id)]
        '''
        for i in range(start_id,end_id):
            if pred_prob[i][int(sorted_pred_prob[i][-1])]>0.85:
                y_pred_cls[i] = sorted_pred_prob[i][-1]
            else:
                y_pred_cls[i] = sorted_pred_prob[i][-2]
        '''
        #print('a')
    for p_y in pred_prob:
        m = []
        for k in range(len(p_y)):
            if p_y[k] >= 0.5:
                m.append(k)
        if len(m) < 1:
            m.append(np.argmax(p_y))
        y_pred_cls.append(m)
    res = []
    res_indexs = []
    for i in range(len(y_test_cls)):
        res.append([
            contents[i], '#'.join([id_to_cat[_] for _ in y_test_cls[i]]),
            '#'.join([id_to_cat[_] for _ in y_pred_cls[i]])
        ])
    res = pd.DataFrame(res, columns=['content', 'real_label', 'pred_label'])
    res.to_excel('result.xlsx', index=False)
    for i in range(len(y_test_cls)):
        res_indexs.append([
            contents[i], '#'.join([str(_) for _ in y_test_cls[i]]),
            '#'.join([str(_) for _ in y_pred_cls[i]])
        ])
    res_indexs = pd.DataFrame(res_indexs,
                              columns=['content', 'real_label', 'pred_label'])
    res_indexs.to_excel('result_index.xlsx', index=False)
    #y_pred_cls[start_id:end_id] = session.run(model.y_pred_cls, feed_dict=feed_dict)
    # TP = defaultdict(int)
    # FP =  defaultdict(int)
    # TN = defaultdict(int)
    # FN = defaultdict(int)
    # for i in range(len(y_test_cls)):
    #     for j in range(len(y_test_cls[i])):
    #         for n in range(len(cat_to_id)):
    #             if y_pred_cls[i][j] == n  and y_test_cls[i][j] == n:
    #                 TP[n] += 1
    #             elif y_pred_cls[i][j] == n  and y_test_cls[i][j] != n:
    #                 FP[n] += 1
    #             elif  y_pred_cls[i][j] != n  and y_test_cls[i][j] == n:
    #                 FN[n] += 1
    #             else:
    #                 TN[n] += 1

    # evaluation: y_test_cls / y_pred_cls hold a list of label ids per sample,
    # so binarize them into indicator matrices before calling sklearn metrics
    from sklearn.preprocessing import MultiLabelBinarizer
    mlb = MultiLabelBinarizer(classes=list(range(config.num_classes)))
    y_test_bin = mlb.fit_transform(y_test_cls)
    y_pred_bin = mlb.transform(y_pred_cls)

    print("Precision, Recall and F1-Score...")
    # average=None returns per-label arrays; 'micro' would return scalars and
    # break the per-label DataFrame built below
    p, r, f1, s = metrics.precision_recall_fscore_support(y_test_bin,
                                                          y_pred_bin,
                                                          average=None)
    report = pd.DataFrame(
        [[mlb.classes_[i], p[i], r[i], f1[i], s[i]]
         for i in range(len(mlb.classes_))],
        columns=['label', 'precision', 'recall', 'f1', 'support'])
    report.to_csv('../data/temp/confuse_report.csv', index=False)
    print(
        metrics.classification_report(y_test_bin,
                                      y_pred_bin,
                                      target_names=categories))

    # confusion matrix: metrics.confusion_matrix does not accept multilabel
    # input, so use the per-label 2x2 matrices instead
    print("Confusion Matrix...")
    cm = metrics.multilabel_confusion_matrix(y_test_bin, y_pred_bin)
    df = pd.DataFrame(cm.reshape(len(cm), -1), columns=['tn', 'fp', 'fn', 'tp'])
    df.to_csv('../data/temp/cm.csv', encoding='utf-8')
    '''
    wb = xlwt.Workbook()
    ws = wb.add_sheet('Sheet1')
    for i in range(len(cm)):
        for j in range(len(cm[i])):
            ws.write(i,j,cm[i][j])
    wb.save('../data/temp/cm.xls')
    '''
    print(cm)

    time_dif = get_time_dif(start_time)
    print("Time usage:", time_dif)
Code example #12
def train():
    print("Configuring TensorBoard and Saver...")
    # Configure TensorBoard; delete the tensorboard folder before retraining, otherwise the new graph overlaps the old one
    tensorboard_dir = 'tensorboard/textcnn'
    if not os.path.exists(tensorboard_dir):
        os.makedirs(tensorboard_dir)

    tf.summary.scalar("loss", model.loss)
    #tf.summary.scalar("accuracy", model.acc)
    merged_summary = tf.summary.merge_all()
    writer = tf.summary.FileWriter(tensorboard_dir)

    print("Loading training and validation data...")
    # load the training and validation sets
    start_time = time.time()
    x_train, y_train = process_file(train_dir, word_to_id, cat_to_id,
                                    config.seq_length)
    x_val, y_val = process_file(val_dir, word_to_id, cat_to_id,
                                config.seq_length)
    time_dif = get_time_dif(start_time)
    print("Time usage:", time_dif)

    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    # create the session
    session = tf.Session(config=tf_config)
    session.run(tf.global_variables_initializer())
    writer.add_graph(session.graph)
    # configure the Saver
    saver = tf.train.Saver(tf.global_variables())
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    else:
        saver.restore(session, save_path=save_path)

    print('Training and evaluating...')
    start_time = time.time()
    total_batch = 0  # total number of batches processed so far
    best_acc_val = 0.0  # best validation accuracy
    last_improved = 0  # batch at which the last improvement happened
    require_improvement = 1000  # stop early after 1000 batches without improvement

    flag = False
    for epoch in range(config.num_epochs):
        print('Epoch:', epoch + 1)
        batch_train = batch_iter(x_train, y_train, config.batch_size)
        for x_batch, y_batch in batch_train:
            feed_dict = feed_data(x_batch, y_batch, config.dropout_keep_prob,
                                  True)
            session.run(model.optim, feed_dict=feed_dict)  # run the optimization step
            #ot =  session.run(model.rnn_output,feed_dict=feed_dict)
            #aot = session.run(model.att_out,feed_dict=feed_dict)
            #pools = session.run(model.pools,feed_dict=feed_dict)
            #pools2 = session.run(model.pool2, feed_dict=feed_dict)
            #alls = session.run(model.all,feed_dict=feed_dict)
            #convs = session.run(model.convs,feed_dict=feed_dict)
            if total_batch % config.save_per_batch == 0:
                # write training summaries to TensorBoard every save_per_batch batches
                s = session.run(merged_summary, feed_dict=feed_dict)
                writer.add_summary(s, total_batch)

            if total_batch % config.print_per_batch == 0:
                # report performance on the training and validation sets every print_per_batch batches
                feed_dict[model.keep_prob] = 1.0
                # feed_dict[model.istraining] = False

                loss_train, pred_prob_train = session.run(
                    [model.loss, model.pred_prob], feed_dict=feed_dict)
                train_acc = get_acc(pred_prob_train, y_batch)

                loss_val, acc_val = evaluate(session, x_val, y_val)  # todo

                if acc_val > best_acc_val:
                    # save the best result
                    best_acc_val = acc_val
                    last_improved = total_batch
                    saver.save(sess=session, save_path=save_path)
                    improved_str = '*'
                else:
                    improved_str = ''

                time_dif = get_time_dif(start_time)
                msg = 'Iter: {0:>6}, Train Loss: {1:>6.2}, Train Acc: {2:>7.2%},' \
                      + ' Val Loss: {3:>6.2}, Val Acc: {4:>7.2%}, Time: {5} {6}'
                print(
                    msg.format(total_batch, loss_train, train_acc, loss_val,
                               acc_val, time_dif, improved_str))

            total_batch += 1

            if total_batch - last_improved > require_improvement:
                config.decay_steps = total_batch + 1
                config.decay_rate = config.decay_rate * 0.1
Code example #13
def train(net='cnn', epoch=20):
    # load the training set
    x_train, y_train = None, None

    for i in range(1, 6):
        x, y = process_file('data_batch_%d' % i)
        if x_train is None:
            x_train = x
            y_train = y
        else:
            x_train = np.append(x_train, x, axis=0)
            y_train = np.append(y_train, y, axis=0)
        del x, y

    x_dev, y_dev = process_file('test_batch')

    x = tf.placeholder(tf.float32, [
        None, config.image_width * config.image_height * config.image_channel
    ])
    x_reshape = tf.reshape(
        x, [-1, config.image_channel, config.image_height, config.image_width])
    # [batch, depth, height, width] => [batch, height, width, depth]
    x_reshape = tf.transpose(x_reshape, [0, 2, 3, 1])

    if net == 'cnn':
        model = CNN()
    elif net == 'vgg':
        model = VGG()
    else:
        raise ValueError('unknown net: %s' % net)  # otherwise `model` would be undefined below

    out = model.output(input=x_reshape)
    y_ = tf.placeholder(tf.float32, [None, config.classes])
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=out, labels=y_))
    tf.summary.scalar('loss', loss)

    correct_prediction = tf.equal(tf.argmax(out, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.summary.scalar('accuracy', accuracy)

    optimizer = tf.train.AdamOptimizer(config.learning_rate).minimize(loss)

    with tf.Session() as sess:
        writer = tf.summary.FileWriter("logs/", sess.graph)
        merged = tf.summary.merge_all()
        sess.run(tf.global_variables_initializer())
        '''
        variable_names = [v.name for v in tf.trainable_variables()]
        values = sess.run(variable_names)
        for k, v in zip(variable_names, values):
            print("Variable: ", k)
            print("Shape: ", v.shape)

        print_num_of_total_parameters()
        '''
        step = 1

        best_acc = 0.
        start_time = time.time()
        for e in range(1, epoch + 1):
            for x_batch, y_batch in batch_iter(x=x_train,
                                               y=y_train,
                                               batch_size=config.batch_size):
                step = step + 1
                _, trainloss, train_acc = sess.run(
                    [optimizer, loss, accuracy],
                    feed_dict={
                        x: x_batch,
                        y_: y_batch,
                        model.keep_prob: 0.5,
                        model.is_training: True
                    })

                if step % 20 == 0:
                    pass
                    #  print('Iterator:%d loss:%f train acc:%f' % (step, trainloss, train_acc))

                if step % 781 == 0:
                    train_acc, summary = sess.run(
                        [accuracy, merged],
                        feed_dict={
                            x: x_train[:10000],
                            y_: y_train[:10000],
                            model.keep_prob: 1.,
                            model.is_training: False
                        })
                    writer.add_summary(summary, e)

                    acc = sess.run(accuracy,
                                   feed_dict={
                                       x: x_dev,
                                       y_: y_dev,
                                       model.keep_prob: 1.,
                                       model.is_training: False
                                   })
                    print('Iterator:%d loss:%f train acc:%f' %
                          (step, trainloss, train_acc))
                    elapsed_time = time.time() - start_time
                    print('\033[1;32mepoch:%d/%d' % (e, epoch))  # epoch is the function argument
                    print('\033[1;32mvalidation accuracy:%f\033[0m' % acc,
                          end='')
                    if acc > best_acc:
                        best_acc = acc
                        print('\033[1;35m(new best acc!)\033[0m')
                    else:
                        print('')
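
Example #13 tracks best_acc but never persists the winning weights. A minimal addition if you want to keep the best checkpoint (the saver creation belongs before the loop; the path is illustrative):

    saver = tf.train.Saver()  # create once, next to the FileWriter

    # inside the `if acc > best_acc:` branch:
    #     best_acc = acc
    #     saver.save(sess=sess, save_path='logs/cifar_best')  # hypothetical path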
Code example #14
def train():
    print("Configuring TensorBoard and Saver...")
    # Configure TensorBoard; delete the tensorboard folder before retraining, otherwise the new graph overlaps the old one
    tensorboard_dir = 'tensorboard/cnn'
    if not os.path.exists(tensorboard_dir):
        os.makedirs(tensorboard_dir)

    tf.summary.scalar("loss", model.loss)
    tf.summary.scalar("accuracy", model.acc)
    merged_summary = tf.summary.merge_all()
    writer = tf.summary.FileWriter(tensorboard_dir)

    # configure the Saver
    saver = tf.train.Saver()
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    print("Loading training and validation data...")
    # load the training and validation sets
    start_time = time.time()
    x_train, y_train = process_file(train_dir, cat_to_id, config.seq_length)
    x_val, y_val = process_file(val_dir, cat_to_id, config.seq_length)

    # create the session
    session = tf.Session()
    session.run(tf.global_variables_initializer())
    writer.add_graph(session.graph)

    print('Training and evaluating...')
    start_time = time.time()
    total_batch = 0  # total number of batches processed so far
    best_acc_val = 0.0  # best validation accuracy
    last_improved = 0  # batch at which the last improvement happened
    require_improvement = 10000  # stop early after 10000 batches without improvement

    flag = False
    for epoch in range(config.num_epochs):
        print('Epoch:', epoch + 1)
        batch_train = batch_iter(x_train, y_train, config.batch_size)
        for x_batch, y_batch in batch_train:
            feed_dict = feed_data(x_batch, y_batch, config.dropout_keep_prob)

            if total_batch % config.save_per_batch == 0:
                # write training summaries to TensorBoard every save_per_batch batches
                #print(feed_dict)
                s = session.run(merged_summary, feed_dict=feed_dict)
                writer.add_summary(s, total_batch)

            if total_batch % config.print_per_batch == 0:
                # report performance on the training and validation sets every print_per_batch batches
                feed_dict[model.keep_prob] = 1.0
                loss_train, acc_train = session.run([model.loss, model.acc],
                                                    feed_dict=feed_dict)
                loss_val, acc_val = evaluate(session, x_val, y_val)  # todo

                if acc_val > best_acc_val:
                    # save the best result
                    best_acc_val = acc_val
                    last_improved = total_batch
                    saver.save(sess=session, save_path=save_path)
                    improved_str = '*'
                else:
                    improved_str = ''

                time_dif = get_time_dif(start_time)
                msg = 'Iter: {0:>6}, Train Loss: {1:>6.2}, Train Acc: {2:>7.2%},' \
                      + ' Val Loss: {3:>6.2}, Val Acc: {4:>7.2%}, Time: {5} {6}'
                print(
                    msg.format(total_batch, loss_train, acc_train, loss_val,
                               acc_val, time_dif, improved_str))

            session.run(model.optim, feed_dict=feed_dict)
            total_batch += 1

            if total_batch - last_improved > require_improvement:
                # accuracy has not improved for a long time; stop training early
                print("No optimization for a long time, auto-stopping...")
                flag = True
                break  # leave the inner loop
        if flag:  # likewise leave the epoch loop
            break
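
Several of these train() functions warn, in their comments, to delete the tensorboard folder before retraining so that graphs do not overlap. A small convenience helper that automates this (not part of the original code):

import os
import shutil

def fresh_dir(path):
    # remove `path` if it exists, then recreate it empty
    if os.path.exists(path):
        shutil.rmtree(path)
    os.makedirs(path)

# e.g. fresh_dir('tensorboard/cnn') right before creating the FileWriter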