Example 1
def run_reweight():
    d_ids: List[int] = list(load_dev_claim_ids())
    claims = get_claims_from_ids(d_ids)
    top_k = 7
    param = {'k1': 0.5}
    pred = predict_by_reweighter(get_bm25_module(), claims, top_k, param)
    print(evaluate(pred))
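A note on the reweighting parameter: the 'k1' entry presumably maps onto the k1 term-frequency saturation constant of Okapi BM25. Purely as a generic illustration (this is not the project's get_bm25_module), a single-term BM25 contribution can be computed as:

import math

def bm25_term_score(tf, df, dl, avdl, n_docs, k1=0.5, b=0.75):
    # Okapi BM25 score of one query term: an idf weight times a saturated tf weight.
    # tf: term freq in the document, df: document freq of the term,
    # dl: document length, avdl: average document length, n_docs: collection size.
    idf = math.log((n_docs - df + 0.5) / (df + 0.5) + 1)
    return idf * (tf * (k1 + 1)) / (tf + k1 * (1 - b + b * dl / avdl))

Larger k1 lets raw term frequency matter more before saturating; a small value like the 0.5 used here saturates quickly.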
Example 2
def eval_from_score_d(score_d: Dict[CPID, float], top_k):
    candidate: List[Tuple[Dict, List[Dict]]] = load_dev_candiate()
    dp_not_found = 0

    def get_predictions(
            claim_and_candidate: Tuple[Dict, List[Dict]]
    ) -> Tuple[str, List[Dict]]:
        claim_info, candidates = claim_and_candidate
        nonlocal dp_not_found
        for candi in candidates:
            cid = candi['cid']
            pid = candi['pid']
            cpid = CPID("{}_{}".format(cid, pid))

            if cpid in score_d:
                candi['new_score'] = score_d[cpid]
            else:
                dp_not_found += 1
                candi['new_score'] = 0.01

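            # Combine the looked-up score with the original Lucene score, down-weighted by 100.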
            candi['final_score'] = candi['new_score'] + candi['score'] / 100
            candi['rationale'] = "final_score={}  cls_score={}  lucene_score={}".format(
                candi['final_score'], candi['new_score'], candi['score'])

        candidates.sort(key=lambda c: c['final_score'], reverse=True)
        return claim_info['cId'], candidates[:top_k]

    predictions = lmap(get_predictions, candidate)
    print("{} data points are not found in predictions".format(dp_not_found))
    r = evaluate(predictions, debug=False)
    print(r)
    return r
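The lookup above relies on score_d being keyed by CPID values built as "{cid}_{pid}" strings. A hypothetical sketch of assembling such a dictionary from per-pair model scores (the field names here are assumptions, not the project's schema):

def build_score_d(scored_pairs):
    # scored_pairs: iterable of dicts with 'cid', 'pid', and a model score (assumed names).
    score_d = {}
    for e in scored_pairs:
        cpid = "{}_{}".format(e['cid'], e['pid'])  # same key format eval_from_score_d expects
        score_d[cpid] = float(e['model_score'])
    return score_d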
Example 3
def run_bm25_rm():
    d_ids: List[int] = list(load_dev_claim_ids())
    claims = get_claims_from_ids(d_ids)
    rm_info = load_from_pickle("perspective_dev_claim_rm")
    top_k = 7
    pred = predict_by_bm25_rm(get_bm25_module(), rm_info, claims, top_k)
    print(evaluate(pred))
Example 4
def run_reweight():
    top_k = 7
    claims, val = train_split()
    param = {'k1': 1}
    target = claims[:50]
    pred = predict_by_reweighter(get_bm25_module(), target, top_k, param)
    print(param)
    print(evaluate(pred))
Example 5
def run_rel_scorer():
    claims, val = train_split()
    top_k = 6
    target = filter_avail(val)
    print("targets", len(target))
    pc_score_d = load_from_pickle("pc_rel_based_score_train")
    pred = predict_from_dict(pc_score_d, target, top_k)
    print(evaluate(pred))
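predict_from_dict itself is not shown here; presumably it ranks each claim's candidate perspectives by the precomputed score and keeps the top_k, roughly along the lines of the sketch below (the key format and field names are assumptions, not the project's code):

def predict_from_dict_sketch(score_d, claims, top_k):
    # score_d assumed to map (claim_id, perspective_id) pairs to scores.
    predictions = []
    for claim in claims:
        cid = claim['cId']
        pids = [pid for (c, pid) in score_d if c == cid]
        pids.sort(key=lambda pid: score_d[(cid, pid)], reverse=True)
        predictions.append((cid, pids[:top_k]))
    return predictions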
Example 6
def run_gold_lm():
    claims, val = train_split()
    top_k = 5
    print("Building lms")
    claim_lms: List[ClaimLM] = build_gold_claim_lm_train()
    print("Predicting")
    pred = predict_by_lm(claim_lms, claims, top_k)
    print(evaluate(pred))
Example 7
def run_bert_baseline():
    claims, val = train_split()
    top_k = 50
    target = filter_avail(val)
    print("targets", len(target))
    pc_score_d = load_from_pickle("pc_bert_baseline_score_d_train")
    pred = predict_from_dict(pc_score_d, target, top_k)
    print(evaluate(pred))
Example 8
def run_eval_with_dict(pickle_name):
    d_ids: List[int] = list(load_dev_claim_ids())
    claims = get_claims_from_ids(d_ids)
    print("targets", len(claims))
    top_k = 8
    pc_score_d = load_from_pickle(pickle_name)
    pred = predict_from_dict(pc_score_d, claims, top_k)
    print(evaluate(pred))
Example 9
def run_eval_with_two_dict():
    d_ids: List[int] = list(load_dev_claim_ids())
    claims = get_claims_from_ids(d_ids)
    print("targets", len(claims))
    top_k = 7
    pc_score_d = load_from_pickle("pc_bert_baseline_score_d")
    pc_score_d2 = load_from_pickle("pc_random_walk_based_score_d")
    pred = predict_from_two_dict(pc_score_d, pc_score_d2, claims, top_k)
    print(evaluate(pred))
Example 10
def run_random_walk_score_with_weight():
    d_ids: List[int] = list(load_dev_claim_ids())
    claims = get_claims_from_ids(d_ids)
    top_k = 7
    q_tf_replace = dict(load_from_pickle("random_walk_score_100"))
    q_tf_replace = dict_key_map(lambda x: int(x), q_tf_replace)
    bm25 = get_bm25_module()
    pred = pc_predict_vector_query_and_reweight(bm25, q_tf_replace, claims,
                                                top_k, {'k1': 0.5})
    print(evaluate(pred))
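dict_key_map is presumably a small utility that rewrites dictionary keys (here, casting the pickled string keys back to int claim ids). A minimal sketch of the assumed behavior:

def dict_key_map(f, d):
    # Apply f to every key, leaving values untouched.
    return {f(k): v for k, v in d.items()}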
Example 11
def run_lm():
    d_ids: List[int] = list(load_dev_claim_ids())
    claims = get_claims_from_ids(d_ids)
    top_k = 5
    q_tf_replace = dict(load_from_pickle("pc_dev_par_tf"))
    q_tf_replace = dict(load_from_pickle("random_walk_score_100"))
    bm25 = get_bm25_module()
    ctf = load_collection_tf()
    pred = predict_by_lm(q_tf_replace, ctf, bm25, claims, top_k)
    print(evaluate(pred))
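Here predict_by_lm receives per-claim term frequencies plus a collection term-frequency table (ctf). As a generic illustration of language-model scoring with collection smoothing (not necessarily the project's formulation):

import math

def lm_log_prob(term, claim_tf, ctf, alpha=0.1):
    # Jelinek-Mercer style mixture of the claim model and the collection model.
    p_claim = claim_tf.get(term, 0) / max(sum(claim_tf.values()), 1)
    p_coll = ctf.get(term, 0) / max(sum(ctf.values()), 1)
    return math.log((1 - alpha) * p_claim + alpha * p_coll + 1e-12)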
Example 12
def run_para_scorer():
    claims, val = train_split()
    top_k = 6

    target = filter_avail(val)
    print("targets", len(target))
    score_pred_file: FileName = FileName("pc_para_D_pred")
    cpid_resolute_file: FileName = FileName("resolute_dict_580_606")
    pred = predict_by_para_scorer(score_pred_file, cpid_resolute_file, target,
                                  top_k)
    print(evaluate(pred))
Example 13
def run_random_walk_score():
    d_ids: List[int] = list(load_dev_claim_ids())
    claims = get_claims_from_ids(d_ids)
    top_k = 7
    q_tf_replace = dict(load_from_pickle("random_walk_score_100"))
    q_tf_replace = dict_key_map(lambda x: int(x), q_tf_replace)
    #q_tf_replace = dict(load_from_pickle("pc_dev_par_tf"))
    #q_tf_replace = dict(load_from_pickle("bias_random_walk_dev_plus_all"))
    bm25 = get_bm25_module()
    pred = pc_predict_from_vector_query(bm25, q_tf_replace, claims, top_k)
    print(evaluate(pred))
Example 14
def run_baseline():
    d_ids: List[int] = list(load_dev_claim_ids())
    claims = get_claims_from_ids(d_ids)
    print("targets", len(claims))
    top_k = 5
    score_pred_file: FileName = FileName("pc_para_D_pred_dev_11")
    cpid_resolute_file: FileName = FileName("resolute_dict_dev_11")
    # score_pred_file: FileName = FileName("pc_para_D_pred_dev")
    # cpid_resolute_file: FileName = FileName("resolute_dict_dev")
    pred = predict_by_para_scorer(score_pred_file, cpid_resolute_file, claims,
                                  top_k)
    print(evaluate(pred))
Example 15
def run_lm2():
    d_ids: List[int] = list(load_dev_claim_ids())
    claims = get_claims_from_ids(d_ids)
    top_k = 5
    tokenizer = PCTokenizer()  # unused here: term counting below relies on nltk.tokenize.word_tokenize
    tf_d = {
        c['cId']: Counter(nltk.tokenize.word_tokenize(c['text']))
        for c in claims
    }
    bm25 = get_bm25_module()
    ctf = get_perspective_tf()
    pred = predict_by_lm(tf_d, ctf, bm25, claims, top_k)
    print(evaluate(pred))
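One practical note: nltk.tokenize.word_tokenize depends on NLTK's "punkt" tokenizer data, so a one-time download is needed in a fresh environment:

import nltk
nltk.download('punkt')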
Example 16
def run_bm25():
    claims, val = train_split()
    top_k = 20
    pred = predict_by_bm25(get_bm25_module(), claims, top_k)
    print(evaluate(pred))
Example 17
def run_oracle_on_candidate():
    claims, val = train_split()
    top_k = 5
    pred = predict_by_oracle_on_candidate(claims, top_k)
    print(evaluate(pred))
Example 18
def run_baseline():
    claims, val = train_split()
    top_k = 50
    pred = predict_by_elastic_search(claims, top_k)
    print(evaluate(pred))
Example 19
def run_bm25():
    d_ids: List[int] = list(load_dev_claim_ids())
    claims = get_claims_from_ids(d_ids)
    top_k = 7
    pred = predict_by_bm25(get_bm25_module(), claims, top_k)
    print(evaluate(pred))
Example 20
def run_next_sent():
    d_ids: List[int] = list(load_dev_claim_ids())
    claims = get_claims_from_ids(d_ids)[:10]
    top_k = 7
    pred = pc_predict_by_bert_next_sent(get_bm25_module(), claims, top_k)
    print(evaluate(pred))