# Shared imports for the validation helpers below. `prepare_data`,
# `prepare_minibatch`, `convert_to_doc_labels`, `concept_to_cui`, and the
# f1_score / precision_score / recall_score helpers (project functions that
# return (micro, macro) pairs) are assumed to be defined elsewhere in this
# module.
import json
import os

import numpy as np
import torch

# `device` is assumed to be set at module level, e.g.:
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


def validate(model, data, cui_to_idx, tokenizer, threshold, test=False):
    model.eval()
    X, Xt, Y, M, Mt, doc_idx, concepts = prepare_data(data,
                                                      cui_to_idx,
                                                      tokenizer,
                                                      for_test=test)
    # print ("Entered test function ...")
    # print ("X shape: ", X.shape, " Xt Shape: ", Xt.shape, " Y Shape: ", Y.shape, " M Shape: ", M.shape, " Mt shape: ", Mt.shape)

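    # Score the encoded examples in fixed batches of 4 and collect the
    # thresholded predictions in O.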
    O = []
    i = 0
    while i < X.shape[0]:
        x = torch.tensor(X[i:i + 4]).to(device, dtype=torch.long)
        m = torch.tensor(M[i:i + 4]).to(device, dtype=torch.long)
        xt = torch.tensor(Xt[i:i + 4]).to(device, dtype=torch.long)
        mt = torch.tensor(Mt[i:i + 4]).to(device, dtype=torch.long)

        with torch.no_grad():
            o = model(x, m, xt, mt)
        o = torch.sigmoid(o)

        o[o >= threshold] = 1
        o[o < threshold] = 0

        o = o.cpu().numpy().flatten()

        O.append(o)
        i += 4
        print(i, "/", X.shape[0])

    O = np.concatenate(O)

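    # Fold the per-chunk predictions back into document-level label
    # matrices; `concept_to_cui` is a module-level mapping assumed to be
    # defined alongside this function.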
    Y, O = convert_to_doc_labels(Y, O, doc_idx, concepts, concept_to_cui,
                                 cui_to_idx)

    results = {}
    f1_score_micro, f1_score_macro = f1_score(Y, O)
    pr_score_micro, pr_score_macro = precision_score(Y, O)
    re_score_micro, re_score_macro = recall_score(Y, O)
    results['f1_score_micro'] = f1_score_micro
    results['f1_score_macro'] = f1_score_macro
    results['pr_score_micro'] = pr_score_micro
    results['pr_score_macro'] = pr_score_macro
    results['re_score_micro'] = re_score_micro
    results['re_score_macro'] = re_score_macro

    return f1_score_micro, results
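
# A minimal usage sketch for the validator above. `net`, `val_data`, and
# `bert_tokenizer` are hypothetical stand-ins for objects built elsewhere:
#
#     micro_f1, metrics = validate(net, val_data, cui_to_idx,
#                                  bert_tokenizer, threshold=0.5)
#     print(metrics['f1_score_macro'])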


# Example 2
def validate(model, mesh_to_idx, mesh_vocab, tokenizer, threshold):
    model.eval()
    path = '../data/bioasq_dataset/val_data'
    list_files = os.listdir(path)
    print(list_files)

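    # Accumulate gold and predicted MeSH term lists over every file in
    # the validation set.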
    true_labels = []
    pred_labels = []
    for file in list_files:
        with open(os.path.join(path, file)) as f:
            file_content = json.load(f)
        i = 0
        while i < len(file_content):
            input_idx_seq, input_mask, target, true_labels_batch = prepare_minibatch(
                file_content[i:i + 4], mesh_to_idx, tokenizer)
            input_idx_seq = torch.tensor(input_idx_seq).to(device,
                                                           dtype=torch.long)
            input_mask = torch.tensor(input_mask).to(device, dtype=torch.long)
            with torch.no_grad():
                predict, _ = model(input_idx_seq, input_mask)
            predict = torch.sigmoid(predict)
            predict[predict >= threshold] = 1
            predict[predict < threshold] = 0
            predict = predict.cpu().numpy()

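            # Map each row's nonzero columns back to MeSH term strings.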
            for j in range(predict.shape[0]):
                nnz_idx = np.nonzero(predict[j, :])[0]
                pred_labels_article = [mesh_vocab[idx] for idx in nnz_idx]
                pred_labels.append(pred_labels_article)

            true_labels.extend(true_labels_batch)
            i += 4

    f1_score_micro, f1_score_macro = f1_score(true_labels, pred_labels)
    print("f1 score micro: ", f1_score_micro, " f1 score macro: ",
          f1_score_macro)

    return f1_score_micro
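
# A hypothetical helper (not part of the original code) that names the
# index-to-label decoding step used in the loop above:
def decode_predictions(pred_row, vocab):
    """Return the vocabulary entries whose columns in `pred_row` are nonzero."""
    return [vocab[idx] for idx in np.nonzero(pred_row)[0]]


# Example 3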
def validate(model, data, cui_to_idx, tokenizer, threshold):
    model.eval()
    X, Mask, Y_p, Y_i, Y_o = prepare_data(data, cui_to_idx, tokenizer)

    pred_labels_mat_p = []
    pred_labels_mat_i = []
    pred_labels_mat_o = []

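    # Score in fixed batches of 4, thresholding each of the three
    # prediction heads (p, i, o) independently.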
    i = 0
    while i < X.shape[0]:
        input_idx_seq = torch.tensor(X[i:i + 4]).to(device, dtype=torch.long)
        input_mask = torch.tensor(Mask[i:i + 4]).to(device, dtype=torch.long)
        with torch.no_grad():
            predict_p, predict_i, predict_o = model(input_idx_seq, input_mask)

        predict_p = torch.sigmoid(predict_p)
        predict_i = torch.sigmoid(predict_i)
        predict_o = torch.sigmoid(predict_o)

        predict_p[predict_p >= threshold] = 1
        predict_p[predict_p < threshold] = 0
        predict_i[predict_i >= threshold] = 1
        predict_i[predict_i < threshold] = 0
        predict_o[predict_o >= threshold] = 1
        predict_o[predict_o < threshold] = 0

        predict_p = predict_p.cpu().numpy()
        predict_i = predict_i.cpu().numpy()
        predict_o = predict_o.cpu().numpy()

        pred_labels_mat_p.append(predict_p)
        pred_labels_mat_i.append(predict_i)
        pred_labels_mat_o.append(predict_o)

        i += 4

    pred_labels_mat_p = np.vstack(pred_labels_mat_p)
    pred_labels_mat_i = np.vstack(pred_labels_mat_i)
    pred_labels_mat_o = np.vstack(pred_labels_mat_o)

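    # Micro/macro F1, precision, and recall for each head, plus the
    # average micro-F1 across the three heads.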
    results = {}
    f1_score_micro_p, f1_score_macro_p = f1_score(Y_p, pred_labels_mat_p)
    pr_score_micro_p, pr_score_macro_p = precision_score(
        Y_p, pred_labels_mat_p)
    re_score_micro_p, re_score_macro_p = recall_score(Y_p, pred_labels_mat_p)
    results['f1_score_micro_p'] = f1_score_micro_p
    results['f1_score_macro_p'] = f1_score_macro_p
    results['pr_score_micro_p'] = pr_score_micro_p
    results['pr_score_macro_p'] = pr_score_macro_p
    results['re_score_micro_p'] = re_score_micro_p
    results['re_score_macro_p'] = re_score_macro_p

    f1_score_micro_i, f1_score_macro_i = f1_score(Y_i, pred_labels_mat_i)
    pr_score_micro_i, pr_score_macro_i = precision_score(
        Y_i, pred_labels_mat_i)
    re_score_micro_i, re_score_macro_i = recall_score(Y_i, pred_labels_mat_i)
    results['f1_score_micro_i'] = f1_score_micro_i
    results['f1_score_macro_i'] = f1_score_macro_i
    results['pr_score_micro_i'] = pr_score_micro_i
    results['pr_score_macro_i'] = pr_score_macro_i
    results['re_score_micro_i'] = re_score_micro_i
    results['re_score_macro_i'] = re_score_macro_i

    f1_score_micro_o, f1_score_macro_o = f1_score(Y_o, pred_labels_mat_o)
    pr_score_micro_o, pr_score_macro_o = precision_score(
        Y_o, pred_labels_mat_o)
    re_score_micro_o, re_score_macro_o = recall_score(Y_o, pred_labels_mat_o)
    results['f1_score_micro_o'] = f1_score_micro_o
    results['f1_score_macro_o'] = f1_score_macro_o
    results['pr_score_micro_o'] = pr_score_micro_o
    results['pr_score_macro_o'] = pr_score_macro_o
    results['re_score_micro_o'] = re_score_micro_o
    results['re_score_macro_o'] = re_score_macro_o
    results['avg_micro_f1_score'] = (f1_score_micro_p + f1_score_micro_i +
                                     f1_score_micro_o) / 3.0

    return results['avg_micro_f1_score'], results
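
# All three validators repeat the same sigmoid -> threshold -> numpy step.
# A sketch of a shared helper (an illustrative refactor, not part of the
# original code) that could replace those blocks:
def binarize(logits, threshold):
    """Sigmoid-activate `logits` and binarize at `threshold` as a float array."""
    probs = torch.sigmoid(logits)
    return (probs >= threshold).to(torch.float32).cpu().numpy()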