def test_interleave(self):
    """Smoke-test: interleaving two disjoint 100-document rankings runs cleanly."""
    first = list(range(100))
    second = list(range(100, 200))
    for _ in range(1000):
        interleaved = il.Probabilistic([first, second]).interleave()
        print(list(interleaved))
def test_round_robin(self):
    """Three duplicate-document rankers must each contribute exactly one document."""
    pm = il.Probabilistic([[0, 0, 0], [1, 1, 1], [2, 2, 2]])
    for _ in range(self.n):
        interleaved = pm.interleave()
        # Order is random, so compare the sorted contents.
        assert sorted(interleaved) == [0, 1, 2]
def test_compute_scores(self):
    """Credits inferred from simulated clicks should prefer rankers 1 > 2 > 0.

    Clicks are simulated with probability increasing in the document id,
    so the ranker that places high-id documents earlier should accumulate
    more credit over `self.n` trials.
    """
    rankings = [[0, 1, 2], [2, 1, 0], [1, 0, 2]]
    pm = il.Probabilistic(rankings)
    prefs = defaultdict(int)
    credits = defaultdict(float)
    for _ in range(self.n):  # renamed from `i` so the prefs loop below can't shadow it
        r = pm.interleave()
        clicks = []
        for idx, d in enumerate(r):
            # Click probability grows with the document id.
            if np.random.rand() < (d + 0.1) / 3.3:
                # BUG FIX: the original did `clicks = [idx]`, which discarded
                # every click except the last; accumulate all clicked positions.
                clicks.append(idx)
        res = pm.compute_scores(r, clicks)
        for idx in res:
            credits[idx] += res[idx]
        # Count, per trial, which ranker leads on cumulative credit.
        for i in range(len(credits)):
            for j in range(i + 1, len(credits)):
                if credits[i] > credits[j]:
                    prefs[(i, j)] += 1
                elif credits[j] > credits[i]:
                    prefs[(j, i)] += 1
    print(credits)
    print(prefs)
    assert prefs[(1, 0)] > prefs[(0, 1)]
    assert prefs[(1, 2)] > prefs[(2, 1)]
    assert prefs[(2, 0)] > prefs[(0, 2)]
def test_uniqueness(self):
    """An interleaved ranking must never contain the same document twice."""
    pm = il.Probabilistic([[0, 1, 2], [1, 2, 0]])
    for _ in range(self.n):
        interleaved = pm.interleave()
        # Deduplicating must not change the (sorted) contents.
        assert sorted(interleaved) == sorted(set(interleaved))
def test_memorylessness(self):
    """Over many trials, every input document should appear at least once."""
    pm = il.Probabilistic([[0, 1], [2, 3]])
    seen = []
    for _ in range(self.n):
        seen.extend(pm.interleave())
    assert sorted(set(seen)) == [0, 1, 2, 3]
def test_uniform(self):
    """With two single-document rankers, each should win the first slot ~50%."""
    ideal = 0.5
    pm = il.Probabilistic([[0], [1]])
    counts = [0.0, 0.0]
    for _ in range(self.n):
        counts[pm.interleave()[0]] += 1
    for doc in (0, 1):
        self.assert_almost_equal(ideal, counts[doc] / self.n)
def test_uniqueness(self):
    """With max_length=2, interleaved rankings must still be duplicate-free."""
    rankings = [[0, 1, 2], [1, 2, 0], [2, 0, 1]]
    pm = il.Probabilistic(rankings, max_length=2)
    for _ in range(self.n):
        interleaved = pm.interleave()
        # Deduplicating must not change the (sorted) contents.
        assert sorted(interleaved) == sorted(set(interleaved))
def test_interleave(self):
    """Smoke-test interleaving and evaluation with four overlapping rankings."""
    rankings = [
        list(range(100)),
        list(range(50, 150)),
        list(range(100, 200)),
        list(range(150, 250)),
    ]
    for _ in range(100):
        method = il.Probabilistic(rankings)
        method.evaluate(method.interleave(), [0, 1, 2])
def test_uniform(self):
    """With three single-document rankers, each should win the first slot ~1/3."""
    rankings = [[0], [1], [2]]
    num_rankers = len(rankings)
    ideal = 1.0 / num_rankers
    counts = [0.0] * num_rankers
    pm = il.Probabilistic(rankings)
    for _ in range(self.n):
        counts[pm.interleave()[0]] += 1
    for doc in range(num_rankers):
        self.assert_almost_equal(ideal, counts[doc] / self.n)
def test_selected_ranker(self):
    """The first slot should go to each ranker uniformly (docs encode ranker via mod 3)."""
    rankings = [[0, 3, 6], [1, 4, 7], [2, 5, 8]]
    # Case 1: one multileaver reused across all trials.
    pm = il.Probabilistic(rankings)
    tallies = defaultdict(int)
    for _ in range(self.n):
        tallies[pm.interleave()[0] % 3] += 1
    for ranker in range(3):
        self.assert_almost_equal(tallies[ranker] / self.n, 1 / 3)
    # Case 2: a fresh multileaver constructed for every trial.
    tallies = defaultdict(int)
    for _ in range(self.n):
        pm = il.Probabilistic(rankings)
        tallies[pm.interleave()[0] % 3] += 1
    for ranker in range(3):
        self.assert_almost_equal(tallies[ranker] / self.n, 1 / 3)
def test_interaction(self):
    """First-slot frequencies over [[0,1],[1,2]] should match the analytic values."""
    ideals = {0: 0.44444, 1: 0.50000, 2: 0.05556}
    counts = {d: 0.0 for d in ideals}
    pm = il.Probabilistic([[0, 1], [1, 2]])
    for _ in range(self.n):
        counts[pm.interleave()[0]] += 1
    for d, ideal in ideals.items():
        self.assert_almost_equal(ideal, counts[d] / self.n)
def test_softmax(self):
    """First-slot frequencies should follow the softmax over document ranks."""
    ideals = {0: 0.86056, 1: 0.10757, 2: 0.03187}
    counts = {d: 0.0 for d in ideals}
    pm = il.Probabilistic([[0, 1, 2], [0, 1, 2]])
    for _ in range(self.n):
        counts[pm.interleave()[0]] += 1
    for d, ideal in ideals.items():
        self.assert_almost_equal(ideal, counts[d] / self.n)
def test_init_sampling(self):
    """Sampling without replacement should recover all six rankings and their probabilities."""
    p = il.Probabilistic([[1, 2], [1, 3]], sample_num=20000, replace=False)
    rankings, probabilities = zip(*p.ranking_distribution)
    ideal = {(1, 3), (1, 2), (2, 1), (2, 3), (3, 1), (3, 2)}
    assert ideal == {tuple(r) for r in rankings}
    # Analytic probability of each ranking under the probabilistic model.
    ideal_prob = {
        (1, 2): 0.444444444,
        (1, 3): 0.444444444,
        (2, 1): 0.049382716,
        (2, 3): 0.00617284,
        (3, 1): 0.049382716,
        (3, 2): 0.00617284,
    }
    for ranking, prob in zip(rankings, probabilities):
        self.assert_almost_equal(prob, ideal_prob[tuple(ranking)])
    # interleave() must draw from the same support.
    assert tuple(p.interleave()) in ideal
def test_dump(self, tmpdir):
    """dump_rankings must serialize keys, ranking lists, and team lists faithfully."""
    tmpfile = str(tmpdir) + '/probabilistic.json'
    p = il.Probabilistic([[1, 2], [1, 3]], sample_num=10, replace=False)
    p.dump_rankings(tmpfile)
    with open(tmpfile, 'r') as f:
        obj = json.load(f)
    # JSON keys are the string hashes of the sampled rankings.
    assert {str(hash(r)) for r in p._rankings} == set(obj.keys())
    # The ranking lists round-trip (order-insensitive).
    dumped_rankings = sorted(v['ranking']['ranking_list'] for v in obj.values())
    assert sorted(p._rankings) == dumped_rankings
    # The per-ranker team lists round-trip.
    dumped_lists = [v['ranking']['lists'] for v in obj.values()]
    assert [r.lists for r in p._rankings] == dumped_lists
def test_ranking_with_teams(self):
    """Two 3-document rankings should produce exactly six distinct interleavings."""
    pm = il.Probabilistic([[1, 2, 3], [2, 3, 1]])
    observed = defaultdict(int)
    for _ in range(self.n):
        # Rankings are hashable, so they can be counted directly.
        observed[pm.interleave()] += 1
    assert len(observed) == 6
def test_no_shortage(self):
    """The interleaved length is limited by the shortest input ranking."""
    pm = il.Probabilistic([[0, 1], [0, 1, 2]])
    assert len(pm.interleave()) == 2
def test_sanity(self):
    """Two identical single-document rankings interleave to that one document."""
    result = il.Probabilistic([[0], [0]]).interleave()
    assert result == [0]
def test_sanity(self):
    """A single one-document ranking interleaves to itself."""
    pm = il.Probabilistic([[0]])
    assert pm.interleave() == [0]