Example no. 1
def test_calculating_ndcg_test_index():
    case1 = [4, 3, 2, 0, 1]
    dcg_score_1 = ndcg_at_k(case1, 3)
    dcg_score_2 = ndcg_at_k(case1, 4)
    dcg_score_3 = ndcg_at_k(case1, 5)
    # Expect NDCG@3 >= NDCG@5 >= NDCG@4 for this relevance list.
    assert [dcg_score_1, dcg_score_3, dcg_score_2] == sorted(
        [dcg_score_2, dcg_score_1, dcg_score_3], reverse=True)
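The examples call ndcg_at_k but never show its definition. A minimal sketch, assuming the common formulation in which each relevance score is discounted by log2 of its 1-based rank plus one and normalised by the DCG of the ideally ordered list (the behaviour is inferred from the tests, not taken from the source):

import math

def ndcg_at_k(r, k):
    # Hedged sketch: standard NDCG, not necessarily the implementation under test.
    def dcg(scores):
        # Discount each score by log2(rank + 1), with ranks counted from 1.
        return sum(s / math.log2(i + 2) for i, s in enumerate(scores[:k]))
    ideal = dcg(sorted(r, reverse=True))
    return dcg(r) / ideal if ideal else 0.0

With this formulation the ideally ordered list scores exactly 1.0 and a k larger than the list length simply truncates, which matches the assertions in the surrounding tests.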
Example no. 2
def test_calculating_ndcg_non_ideal_case():
    case1 = [4, 3, 2, 0, 1]
    case2 = [4, 2, 3, 0, 1]
    case3 = [0, 1, 2, 3, 4]
    dcg_score_1 = ndcg_at_k(case1, 5)
    dcg_score_2 = ndcg_at_k(case2, 5)
    dcg_score_3 = ndcg_at_k(case3, 5)
    # NDCG should drop as the list moves further from the ideal ordering:
    # case1 (tail swap) > case2 (tail and middle swaps) > case3 (fully reversed).
    assert [dcg_score_1, dcg_score_2, dcg_score_3] == sorted(
        [dcg_score_2, dcg_score_1, dcg_score_3], reverse=True)
Example no. 3
import logging

def calculate_relevancy(ground_truth, search_results):
    if not (ground_truth and search_results):
        logging.warning("Parameters are empty")
        return [(0.0, 0)]

    # Later ground-truth entries are less relevant, so reverse the list and
    # assign increasing scores: the most relevant item gets the highest score.
    relevancy_map = {x: i + 1 for i, x in enumerate(reversed(ground_truth))}
    logging.debug("Found relevancy map {}".format(relevancy_map))

    # Map each search result to its relevance score; unknown results score 0.
    rel = [relevancy_map.get(result, 0) for result in search_results]
    logging.debug("Found relevancy {}".format(rel))

    # Score the results at several cut-offs, starting at min(3, len(ground_truth)).
    starting_value = min(3, len(ground_truth))
    ranges = [starting_value] + [i for i in [3, 5, 10, 15, 25, 50, 100] if i > starting_value]
    scores = [(ndcg_at_k(rel, i), i) for i in ranges]
    logging.debug("Found relevancy score {}".format(scores))
    return scores
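For illustration, a hypothetical call (the document ids below are invented) shows how the relevance map is derived from the ground truth and applied to the search results:

# Hypothetical inputs, most relevant ground-truth item first.
ground_truth = ["doc_a", "doc_b", "doc_c"]
search_results = ["doc_b", "doc_x", "doc_a"]

# relevancy_map becomes {"doc_c": 1, "doc_b": 2, "doc_a": 3}
# rel becomes [2, 0, 3]  ("doc_x" is absent from the ground truth)
scores = calculate_relevancy(ground_truth, search_results)
# scores is a list of (NDCG, k) pairs for k in [3, 5, 10, 15, 25, 50, 100]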
Example no. 4
def test_calculating_ndcg_extra_k():
    # k larger than the number of scores should not raise; the value is only printed.
    case1 = [4, 3, 2, 0]
    dcg_score_1 = ndcg_at_k(case1, 5)
    print(dcg_score_1)
Example no. 5
def test_calculating_ndcg():
    # An already ideally ordered list should give a perfect NDCG of 1.0.
    ground_truth = [4, 3, 2, 1, 0]
    dcg_score_ideal = ndcg_at_k(ground_truth, 5)
    assert dcg_score_ideal == 1.0