# Example No. 1
# (score: 0)
def predict_valid(sess, model, logger):
    """Evaluate the model on the validation data.

    Runs inference over all ``n_va_batches`` validation batches, then scores
    the predictions with ``cail_evaluator_least`` and reports the result to
    stdout and the logger.

    Args:
        sess: TensorFlow session holding the trained variables.
        model: model object exposing ``y_pred``, ``X_inputs``, ``batch_size``,
            ``tst``, ``keep_prob`` and ``global_step`` tensors/placeholders.
        logger: logger that receives the evaluation summary.
    """
    time0 = time.time()
    predict_labels_list = list()  # all predicted labels
    marked_labels_list = list()   # all ground-truth labels
    for i in tqdm(range(int(n_va_batches))):
        [X_batch, y_batch] = get_batch(data_valid_path, i)
        marked_labels_list.extend(y_batch)
        _batch_size = len(X_batch)
        fetches = [model.y_pred]
        # tst=True / keep_prob=1.0 switch the model into inference mode.
        feed_dict = {
            model.X_inputs: X_batch,
            model.batch_size: _batch_size,
            model.tst: True,
            model.keep_prob: 1.0
        }
        predict_labels = sess.run(fetches, feed_dict)[0]
        predict_labels_list.extend(predict_labels)

    f1_micro, f1_macro, score12 = cail_evaluator_least(predict_labels_list,
                                                       marked_labels_list)
    # BUG FIX: the message previously labelled these values
    # "precision_micro"/"recall_micro", but they are f1_micro/f1_macro.
    print('f1_micro=%g, f1_macro=%g, score12=%g, time=%g s' %
          (f1_micro, f1_macro, score12, time.time() - time0))
    # BUG FIX: the old template mixed %-style ('time=%g s') with .format(),
    # so the elapsed time was dropped and a literal '%g' was logged.
    logger.info(
        '\nValid predicting...\nEND:Global_step={}: f1_micro={}, f1_macro={}, score12={}, time={} s'
        .format(sess.run(model.global_step), f1_micro, f1_macro, score12,
                time.time() - time0))
# Example No. 2
# (score: 0)
def predict(sess, model, logger):
    """Evaluate the model on the test data and persist the raw scores.

    Runs inference over every batch file found in ``data_test_path``, saves
    the predicted and ground-truth label arrays as ``.npy`` files under
    ``scores_path + model_name``, then scores the predictions with
    ``cail_evaluator_least`` and reports the result.

    Args:
        sess: TensorFlow session holding the trained variables.
        model: model object exposing ``y_pred``, ``X_inputs``, ``batch_size``,
            ``tst``, ``keep_prob`` and ``global_step`` tensors/placeholders.
        logger: logger that receives the evaluation summary.
    """
    time0 = time.time()
    te_batches = os.listdir(data_test_path)
    n_te_batches = len(te_batches)
    predict_labels_list = list()  # all predicted labels
    marked_labels_list = list()   # all ground-truth labels
    for i in tqdm(range(n_te_batches)):
        X_batch, y_batch = get_batch(data_test_path, i)
        _batch_size = len(X_batch)
        marked_labels_list.extend(y_batch)
        fetches = [model.y_pred]
        # tst=True / keep_prob=1.0 switch the model into inference mode.
        feed_dict = {
            model.X_inputs: X_batch,
            model.batch_size: _batch_size,
            model.tst: True,
            model.keep_prob: 1.0
        }
        predict_labels = sess.run(fetches, feed_dict)[0]
        predict_labels_list.extend(predict_labels)
    # ROBUSTNESS: np.save raises FileNotFoundError if the target directory
    # does not exist, so create it first.
    os.makedirs(scores_path + model_name, exist_ok=True)
    predict_scores_file = scores_path + model_name + '/' + 'predict.npy'
    marked_scores_file = scores_path + model_name + '/' + 'origin.npy'
    np.save(predict_scores_file, predict_labels_list)
    np.save(marked_scores_file, marked_labels_list)
    f1_micro, f1_macro, score12 = cail_evaluator_least(predict_labels_list,
                                                       marked_labels_list)
    print('f1_micro=%g, f1_macro=%g, score12=%g, time=%g s' %
          (f1_micro, f1_macro, score12, time.time() - time0))
    # BUG FIX: the old template mixed %-style ('time=%g s') with .format(),
    # so the elapsed time was dropped and a literal '%g' was logged.
    logger.info(
        '\nTest predicting...\nEND:Global_step={}: f1_micro={}, f1_macro={}, score12={}, time={} s'
        .format(sess.run(model.global_step), f1_micro, f1_macro, score12,
                time.time() - time0))