Code example #1
from fairsearchcore import fair


def test_mtable_generation(k, p, alpha):
    f = fair.Fair(k, p, alpha)

    # create an adjusted mtable, starting from the original (unadjusted) alpha
    mtable = f.create_adjusted_mtable()

    # get alpha adjusted
    alpha_adjusted = f.adjust_alpha()

    # create a new unadjusted mtable with the new alpha
    f_adjusted = fair.Fair(k, p, alpha_adjusted)
    mtable_adjusted = f_adjusted.create_unadjusted_mtable()

    assert mtable == mtable_adjusted  # the two mtables should be identical
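
The k, p and alpha arguments in these tests come from parametrization that is not shown in the snippets. A minimal sketch of how the same check could be driven with pytest.mark.parametrize; the parameter values and the fairsearchcore import are assumptions for illustration only:

import pytest
from fairsearchcore import fair


# hypothetical (k, p, alpha) combinations, purely for illustration
@pytest.mark.parametrize("k, p, alpha", [(10, 0.25, 0.10), (20, 0.30, 0.05)])
def test_mtable_generation(k, p, alpha):
    f = fair.Fair(k, p, alpha)
    # adjusted mtable from the original alpha vs. unadjusted mtable from the adjusted alpha
    assert f.create_adjusted_mtable() == \
        fair.Fair(k, p, f.adjust_alpha()).create_unadjusted_mtable()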
Code example #2
def test_adjust_alpha(k, p, alpha, result):
    f = fair.Fair(k, p, alpha)

    adjusted_alpha = f.adjust_alpha()

    assert abs(adjusted_alpha - result) < 0.0001  # should agree to within 1e-4
Code example #3
def test_compute_fail_probability(k, p, alpha, result):
    f = fair.Fair(k, p, alpha)

    adjusted_mtable = f.create_adjusted_mtable()

    prob = f.compute_fail_probability(adjusted_mtable)

    assert abs(prob - result) < 0.0001  # should agree to within 1e-4
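
Examples #2 and #3 exercise adjust_alpha() and compute_fail_probability() against precomputed expected values. A minimal direct-usage sketch; the parameter values are made up for illustration and the fairsearchcore import is assumed:

from fairsearchcore import fair

k, p, alpha = 20, 0.25, 0.1  # hypothetical values, for illustration only
f = fair.Fair(k, p, alpha)

# significance level corrected for testing all k ranking positions at once
adjusted_alpha = f.adjust_alpha()

# probability that a truly fair ranking still fails the adjusted mtable
mtable = f.create_adjusted_mtable()
fail_probability = f.compute_fail_probability(mtable)

print(adjusted_alpha, fail_probability)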
Code example #4
def test_re_rank(k, p, alpha, ranking):
    f = fair.Fair(k, p, alpha)

    re_ranked = f.re_rank(ranking)

    # input should not be fair
    assert not f.is_fair(ranking)

    # check length
    assert len(ranking) == len(re_ranked)

    # check content
    assert {r.id for r in ranking} == {r.id for r in re_ranked}

    # output should be fair
    assert f.is_fair(re_ranked)
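
The ranking fixture used in examples #4 and #7 is not shown. Assuming the FairScoreDoc model from fairsearchcore.models with an (id, score, is_protected) constructor, a hand-built ranking could be checked and re-ranked roughly like this; the documents and parameters are made up for illustration:

from fairsearchcore import fair
from fairsearchcore.models import FairScoreDoc

k, p, alpha = 10, 0.3, 0.1  # hypothetical values, for illustration only
f = fair.Fair(k, p, alpha)

# made-up ranking: ids 0..9, descending scores, protected candidates at the bottom
# (id, score, is_protected) argument order is assumed
ranking = [FairScoreDoc(i, k - i, i >= 7) for i in range(k)]

if not f.is_fair(ranking):
    re_ranked = f.re_rank(ranking)
    assert len(re_ranked) == len(ranking)
    assert f.is_fair(re_ranked)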
Code example #5
from fairsearchcore import fair
from fairsearchcore import simulator


def test_fail_probability_calculators():
    Ms = [1000, 10000]
    ks = [10, 20, 50, 100, 200]
    ps = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
    alphas = [0.01, 0.05, 0.1, 0.15]

    allowed_offset = 0.02  # we tolerate an absolute difference in probability of 0.02

    for M in Ms:
        for k in ks:
            for p in ps:
                rankings = simulator.generate_rankings(M, k, p)
                for alpha in alphas:
                    f = fair.Fair(k, p, alpha)

                    mtable = f.create_adjusted_mtable()

                    experimental = simulator.compute_fail_probability(
                        rankings, mtable)
                    analytical = f.compute_fail_probability(mtable)

                    # Not pretty, but all the parameters go into the assert so the failure output shows which combination fails
                    assert M > 0 and k > 0 and p > 0 and alpha > 0 \
                           and abs(experimental - analytical) < (allowed_offset + alpha * 0.01 / allowed_offset)
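
In example #5, packing M, k, p and alpha into the assert is only a trick to make pytest print the failing combination. A single-combination sketch of the same simulator-vs-analytical comparison, with made-up parameter values and an assumed fairsearchcore import:

from fairsearchcore import fair, simulator

M, k, p, alpha = 1000, 20, 0.3, 0.1  # hypothetical values, for illustration only
f = fair.Fair(k, p, alpha)
mtable = f.create_adjusted_mtable()

# empirical failure rate over M simulated fair rankings ...
rankings = simulator.generate_rankings(M, k, p)
experimental = simulator.compute_fail_probability(rankings, mtable)

# ... compared against the analytically computed probability
analytical = f.compute_fail_probability(mtable)

print(abs(experimental - analytical))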
Code example #6
def test_create_adjusted_mtable(k, p, alpha, result):
    f = fair.Fair(k, p, alpha)

    mtable = f.create_adjusted_mtable()

    assert mtable == result
Code example #7
def test_is_fair(k, p, alpha, ranking):
    f = fair.Fair(k, p, alpha)

    # the fixture ranking must contain exactly k documents
    assert len(ranking) == k

    # and must already satisfy the fairness criterion
    assert f.is_fair(ranking)