Example #1
import numpy as np

# `eval_at` and `expected_first` come from a @pytest.mark.parametrize
# decorator that is not shown in this snippet; PrecisionEvaluator is
# assumed to be imported from the library under test.
def test_precision_evaluator_average(eval_at, expected_first):
    matches_ids = [[0, 1, 2, 3, 4], [-1, -1, -1, -1, -1], [-1, -1, -1, -1, -1]]

    desired_ids = [[1, 0, 20, 30, 40], [1, 0, 20, 30, 40], [1, 0, 20, 30, 40]]

    evaluator = PrecisionEvaluator(eval_at=eval_at)
    # Only the first query overlaps the desired ids; the other two score
    # 0.0 and pull the running mean down to expected_first / 3.
    assert evaluator.evaluate(actual=matches_ids[0],
                              desired=desired_ids[0]) == expected_first
    assert evaluator.evaluate(actual=matches_ids[1],
                              desired=desired_ids[1]) == 0.0
    assert evaluator.evaluate(actual=matches_ids[2],
                              desired=desired_ids[2]) == 0.0
    # The private running-stats counter tracks how many queries were seen.
    assert evaluator._running_stats._n == 3
    np.testing.assert_almost_equal(evaluator.mean, expected_first / 3)
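
These snippets drop the @pytest.mark.parametrize decorator that supplies eval_at and expected_first. A plausible reconstruction with illustrative values only: against desired ids [1, 0, 20, 30, 40] the relevant matches are 0 and 1, so, assuming evaluate computes plain precision at k, the score at k=2 is 1.0 and at k=5 is 0.4.

import pytest

# Illustrative parametrization, not the original test's values.
@pytest.mark.parametrize('eval_at, expected_first', [(2, 1.0), (5, 0.4)])
def test_precision_evaluator_average(eval_at, expected_first):
    ...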
Example #2
import numpy as np

# Single-query variant: `eval_at` and `expected` are parametrized, so
# after one evaluate() call the mean equals the one evaluated score.
def test_precision_evaluator(eval_at, expected):
    matches_ids = [0, 1, 2, 3, 4]

    desired_ids = [1, 0, 20, 30, 40]

    evaluator = PrecisionEvaluator(eval_at=eval_at)
    assert evaluator.evaluate(actual=matches_ids, desired=desired_ids) == expected
    np.testing.assert_almost_equal(evaluator.mean, expected)
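
For reference, the quantity these assertions pin down is ordinary precision at k. A minimal stand-alone sketch of that computation, under the assumption that evaluate intersects the first eval_at matches with the desired ids; this mirrors the tests, not the library's actual code:

def precision_at_k(actual, desired, eval_at=None):
    """Fraction of the first `eval_at` retrieved ids that are relevant."""
    if eval_at == 0:
        return 0.0
    actual_at_k = actual[:eval_at] if eval_at else actual
    relevant = set(actual_at_k).intersection(desired)
    return len(relevant) / len(actual_at_k) if actual_at_k else 0.0

# precision_at_k([0, 1, 2, 3, 4], [1, 0, 20, 30, 40], 2) -> 1.0
# precision_at_k([0, 1, 2, 3, 4], [], 2)                 -> 0.0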
Example #3
# Same averaging scenario as Example #1, but against a keyword API that
# takes `matches_ids`/`groundtruth_ids` and exposes explicit
# `num_documents`, `sum`, and `avg` accumulators.
def test_precision_evaluator_average(eval_at, expected_first):
    matches_ids = [[0, 1, 2, 3, 4], [-1, -1, -1, -1, -1], [-1, -1, -1, -1, -1]]

    groundtruth_ids = [[1, 0, 20, 30, 40], [1, 0, 20, 30, 40],
                       [1, 0, 20, 30, 40]]

    evaluator = PrecisionEvaluator(eval_at=eval_at)
    assert evaluator.evaluate(
        matches_ids=matches_ids[0],
        groundtruth_ids=groundtruth_ids[0]) == expected_first
    assert evaluator.evaluate(matches_ids=matches_ids[1],
                              groundtruth_ids=groundtruth_ids[1]) == 0.0
    assert evaluator.evaluate(matches_ids=matches_ids[2],
                              groundtruth_ids=groundtruth_ids[2]) == 0.0
    assert evaluator.num_documents == 3
    assert evaluator.sum == expected_first
    assert evaluator.avg == expected_first / 3
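
Examples #1, #2, and #4 read the aggregate through evaluator.mean and a private _running_stats object, while this variant exposes num_documents, sum, and avg directly. The bookkeeping both styles rely on amounts to a running average; a minimal sketch with hypothetical names, not the library's internals:

class RunningAverage:
    """Hypothetical accumulator mirroring the assertions above."""

    def __init__(self):
        self.sum = 0.0
        self.num_documents = 0

    def add(self, value):
        # Fold one per-query score into the running totals.
        self.sum += value
        self.num_documents += 1

    @property
    def avg(self):
        return self.sum / self.num_documents if self.num_documents else 0.0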
Example #4
import numpy as np

# Edge case: empty ground truth. The score is defined as 0.0 here, and
# the query still counts toward the running statistics.
def test_precision_evaluator_no_groundtruth():
    matches_ids = [0, 1, 2, 3, 4]

    desired_ids = []

    evaluator = PrecisionEvaluator(eval_at=2)
    assert evaluator.evaluate(actual=matches_ids, desired=desired_ids) == 0.0
    assert evaluator._running_stats._n == 1
    np.testing.assert_almost_equal(evaluator.mean, 0.0)
Example #5
# Single-query variant of the keyword API from Example #3: after one
# evaluate() call, sum and avg both equal the expected score.
def test_precision_evaluator(eval_at, expected):
    matches_ids = [0, 1, 2, 3, 4]

    groundtruth_ids = [1, 0, 20, 30, 40]

    evaluator = PrecisionEvaluator(eval_at=eval_at)
    assert evaluator.evaluate(matches_ids=matches_ids,
                              groundtruth_ids=groundtruth_ids) == expected
    assert evaluator.num_documents == 1
    assert evaluator.sum == expected
    assert evaluator.avg == expected
Example #6
# Empty-ground-truth edge case for the keyword API: the score is 0.0
# and the accumulators reflect a single evaluated query.
def test_precision_evaluator_no_groundtruth():
    matches_ids = [0, 1, 2, 3, 4]

    groundtruth_ids = []

    evaluator = PrecisionEvaluator(eval_at=2)
    assert evaluator.evaluate(matches_ids=matches_ids,
                              groundtruth_ids=groundtruth_ids) == 0.0
    assert evaluator.num_documents == 1
    assert evaluator.sum == 0.0
    assert evaluator.avg == 0.0
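
Taken together, the snippets suggest the intended usage: one evaluator per metric, one evaluate() call per query, then read the aggregate at the end. A hedged end-to-end sketch against the keyword-argument API of Examples #3, #5, and #6, assuming the precision-at-k semantics sketched earlier:

evaluator = PrecisionEvaluator(eval_at=2)
queries = [
    ([0, 1, 2, 3, 4], [1, 0, 20, 30, 40]),       # both top-2 matches relevant -> 1.0
    ([-1, -1, -1, -1, -1], [1, 0, 20, 30, 40]),  # nothing relevant -> 0.0
]
for matches, groundtruth in queries:
    evaluator.evaluate(matches_ids=matches, groundtruth_ids=groundtruth)
print(evaluator.avg)  # mean precision@2 over both queries: 0.5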