Example #1
    def decide_on_players(
            bot_ids: Iterable[BotID], rank_sys: RankingSystem,
            ticket_sys: TicketSystem) -> Tuple[List[BotID], List[BotID]]:
        """
        Find two balanced teams. Uses the TicketSystem and the RankingSystem
        to find a fair matchup between some bots that haven't played for a while.
        """
        limit = 200

        tries_left = limit
        while tries_left > 0:
            tries_left -= 1

            # Pick some bots that haven't played for a while
            picked = ticket_sys.pick_bots(bot_ids)
            shuffle(picked)
            ratings = [rank_sys.get(bot) for bot in picked]

            blue = tuple(ratings[0:3])
            orange = tuple(ratings[3:6])

            # Is this a fair match?
            required_fairness = min(tries_left / limit, MIN_REQ_FAIRNESS)
            if trueskill.quality([blue, orange]) >= required_fairness:
                print(
                    f"Match: {picked[0:3]} vs {picked[3:6]}\nMatch quality: {trueskill.quality([blue, orange])}"
                )
                ticket_sys.choose(picked, bot_ids)
                return picked[0:3], picked[3:6]

        raise Exception("Failed to find a fair match")
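Note that the fairness bar drops as attempts run out: required_fairness is capped at MIN_REQ_FAIRNESS, then falls linearly with tries_left and reaches 0 on the final iteration, so the loop always returns some match and the trailing raise is unreachable in practice.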
Example #2
def match_quality(player_skills: {int: trueskill.Rating}, team1: [Player],
                  team2: [Player]) -> float:
    teams = (
        [player_skills[player.player_id] for player in team1],
        [player_skills[player.player_id] for player in team2],
    )
    return trueskill.quality(teams)
Example #3
def suggest_match(players=p, team_size=3):
    teams = itertools.combinations(players, team_size)

    for player in players:
        if player not in p:
            p[player] = Player(player)

    elo_pred = []
    ts_pred = []

    for team in teams:
        t1_n = list(team)
        t2_n_comb = [x for x in players if x not in team]

        for t2_n in itertools.combinations(t2_n_comb, team_size):
            t2_n = list(t2_n)
            # Rate this particular split: use t2_n, not the full complement of team
            t1_elo = [p[x].last_rating(ELO_MODEL) for x in t1_n]
            t2_elo = [p[x].last_rating(ELO_MODEL) for x in t2_n]

            t1_ts = [p[x].last_rating(TRUESKILL_MODEL) for x in t1_n]
            t2_ts = [p[x].last_rating(TRUESKILL_MODEL) for x in t2_n]

            t1_p, t2_p = elo.predict_winner(t1_elo, t2_elo)
            elo_pred.append((abs(t1_p - t2_p), t1_n, t2_n))

            ts_pred.append((ts.quality([t1_ts, t2_ts]), t1_n, t2_n))

    # Most balanced pairing: smallest Elo win-probability gap, highest TrueSkill draw quality
    return (min(elo_pred), max(ts_pred))
Example #4
def matchmake(ratings, players):
    rated_a = []
    rated_b = []
    ties = [0]
    matches_a = []
    matches_b = []
    attempt = 0
    for team_a in list(itertools.combinations(players, 5)):
        team_b = [x for x in players if x not in team_a]
        rated_a = []
        rated_b = []

        for player in team_a:
            rated_a.append(ratings[player])
        for player in team_b:
            rated_b.append(ratings[player])

        quality = trueskill.quality([rated_a, rated_b])
        if quality > ties[0]:
            matches_a.insert(0, team_a)
            matches_b.insert(0, team_b)
            ties.insert(0, quality)
            debug("{}: {:.1%} chance to draw".format(attempt, ties[0]))

        attempt += 1

    return matches_a, matches_b, ties
Example #5
    def quality_with(self, other: 'Search') -> float:
        assert all(other.raw_ratings)
        assert other.players

        team1 = [Rating(*rating) for rating in self.ratings]
        team2 = [Rating(*rating) for rating in other.ratings]

        return quality([team1, team2])
Example #6
    def calculate_quality_of_game_teams(
            team1: Iterable['Skill'], team2: Iterable['Skill']) -> Tuple[int, int]:
        """
        Calculate the quality of the game, which is the likelihood of the game being a draw (evenly balanced).

        Returns a tuple of ints -- percentages,
          - The first being the quality if team1 has their best roster on and team2 has their worst,
          - The second being the quality if team1 has their worst roster on and team2 has their best.
        """
        favouring_team1 = trueskill.quality(rating_groups=[
            Skill._get_maximum_clout_team_rating_group(team1),
            Skill._get_minimum_clout_team_rating_group(team2)
        ])
        favouring_team2 = trueskill.quality(rating_groups=[
            Skill._get_minimum_clout_team_rating_group(team1),
            Skill._get_maximum_clout_team_rating_group(team2)
        ])
        return int(favouring_team1 * 100), int(favouring_team2 * 100)
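The two roster helpers are not shown in this snippet. A minimal sketch of what they might look like, assuming each Skill exposes a trueskill.Rating as .rating and a roster is the N strongest (or weakest) players by mu (ROSTER_SIZE, .rating, and the sort rule are all assumptions):

ROSTER_SIZE = 4  # assumed roster size

def _get_maximum_clout_team_rating_group(team):
    # Hypothetical helper: rating group for the team's strongest possible roster
    ratings = sorted((s.rating for s in team), key=lambda r: r.mu, reverse=True)
    return tuple(ratings[:ROSTER_SIZE])

def _get_minimum_clout_team_rating_group(team):
    # Hypothetical helper: rating group for the team's weakest possible roster
    ratings = sorted((s.rating for s in team), key=lambda r: r.mu)
    return tuple(ratings[:ROSTER_SIZE])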
Example #7
    def decide_on_players_2(bot_ids: Iterable[BotID], rank_sys: RankingSystem,
                            ticket_sys: TicketSystem) -> Tuple[List[BotID], List[BotID]]:
        """
        Find two balanced teams. Uses the TicketSystem and the RankingSystem
        to find a fair matchup between some bots that haven't played for a while.
        """

        # Composing a team of the best player + the worst two players will likely yield a balanced match (0, 4, 5).
        # These represent a few arrangements like that which seem reasonable to try, they will be checked against
        # the trueskill system.
        likely_balances = [(0, 4, 5), (0, 3, 5), (0, 2, 5), (0, 3, 4)]

        # Experimental average quality based on limit:
        # 1000: 0.4615
        # 400:  0.460
        # 100:  0.457
        # 10:   0.448
        num_bot_groups_to_test = 400

        # How much we value the tightness of rating distribution in a given match.
        # A higher number will yield matches with similarly skilled bots, but potentially lower probability of a draw.
        tightness_weight = 1.0

        tries_left = num_bot_groups_to_test
        best_quality_found = 0
        best_score_found = 0
        best_match = None
        chosen_balance = None

        while tries_left > 0:
            tries_left -= 1

            # Pick some bots that haven't played for a while
            picked = ticket_sys.pick_bots(bot_ids)
            candidates = [Candidate(bot, rank_sys.get(bot)) for bot in picked]
            candidates.sort(key=lambda c: float(c.rating), reverse=True)
            tightness = 1 / (numpy.std([float(c.rating) for c in candidates]) + 1)

            for balance in likely_balances:
                blue_candidates = candidates[balance[0]], candidates[balance[1]], candidates[balance[2]]
                orange_candidates = [c for c in candidates if c not in blue_candidates]
                quality = trueskill.quality([[c.rating for c in blue_candidates], [c.rating for c in orange_candidates]])
                score = quality + tightness * tightness_weight
                if score > best_score_found:
                    best_score_found = score
                    best_quality_found = quality
                    best_match = (blue_candidates, orange_candidates)
                    chosen_balance = balance

        blue_ids = [c.bot_id for c in best_match[0]]
        orange_ids = [c.bot_id for c in best_match[1]]
        tickets_consumed = sum([ticket_sys.get_ensured(b) for b in blue_ids + orange_ids])
        print(f"Match: {blue_ids} vs {orange_ids}\nMatch quality: {best_quality_found}  score: {best_score_found}  "
              f"Rank pattern: {chosen_balance}")
        ticket_sys.choose(blue_ids + orange_ids, bot_ids)
        return blue_ids, orange_ids
Example #8
    def quality(self, team_format):
        setup = []
        for n, team in enumerate(team_format):
            setup.append([])
            for m in team:
                player = self.players[m]
                setup[n].append(
                    trueskill.Rating(mu=player.mu, sigma=player.sigma))

        return trueskill.quality(setup)
Example #9
def test_compatibility_with_another_rating_systems():
    """All rating system modules should implement ``rate_1vs1`` and
    ``quality_1vs1`` to provide shortcuts for 1 vs 1 simple competition games.
    """
    r1, r2 = Rating(30, 3), Rating(20, 2)
    assert quality_1vs1(r1, r2) == quality([(r1, ), (r2, )])
    rated = rate([(r1, ), (r2, )])
    assert rate_1vs1(r1, r2) == (rated[0][0], rated[1][0])
    rated = rate([(r1, ), (r2, )], [0, 0])
    assert rate_1vs1(r1, r2, drawn=True) == (rated[0][0], rated[1][0])
Example #10
def test_deprecated_individual_rating_groups():
    r1, r2, r3 = Rating(50, 1), Rating(10, 5), Rating(15, 5)
    with raises(TypeError):
        deprecated_call(rate, [r1, r2, r3])
    with raises(TypeError):
        deprecated_call(quality, [r1, r2, r3])
    assert t.transform_ratings([r1, r2, r3]) == rate([(r1,), (r2,), (r3,)])
    assert t.match_quality([r1, r2, r3]) == quality([(r1,), (r2,), (r3,)])
    deprecated_call(t.transform_ratings, [r1, r2, r3])
    deprecated_call(t.match_quality, [r1, r2, r3])
Example #11
    def save_to_database(self, winner_team, db, update=False):
        if update is False:
            db.add_game(self)

        # Load each player's rating (mu and sigma can also be passed explicitly)
        ratings = [
            player.rating for player in
            [self.player1, self.player2, self.player3, self.player4]
        ]

        print("Initial player ratings:")
        for rating in ratings:
            print(rating)

        # Assign teams
        # TODO: make team assignment configurable
        team1 = ratings[0:2]
        team2 = ratings[2:4]

        match_quality = trueskill.quality([team1, team2])
        print('{:.1%} chance to draw'.format(match_quality))
        if match_quality < 0.50:
            print('This match seems to be not so fair')

        # compute the new ratings from the result
        if winner_team == 1:
            (self.player1.rating,
             self.player2.rating), (self.player3.rating,
                                    self.player4.rating) = trueskill.rate(
                                        [team1, team2],
                                        ranks=[0,
                                               1])  # Team1 wins (rank lower)
        elif winner_team == 2:
            (self.player1.rating,
             self.player2.rating), (self.player3.rating,
                                    self.player4.rating) = trueskill.rate(
                                        [team1, team2],
                                        ranks=[1,
                                               0])  # Team2 wins (rank lower)

        # save updated skills to database
        for player in [self.player1, self.player2, self.player3, self.player4]:
            db.update_player_skill(player)
Example #14
def balance(queue):
    size = len(queue) // 2
    best_score = 0
    best_teams = None
    for team1 in itertools.combinations(queue[1:], size - 1):
        team1 = queue[:1] + list(team1)
        team2 = [x for x in queue if x not in team1]
        team1_rating = list(map(lambda id: state.get_rating(id), team1))
        team2_rating = list(map(lambda id: state.get_rating(id), team2))
        score = trueskill.quality([team1_rating, team2_rating])
        if score > best_score:
            best_score = score
            best_teams = (team1, team2)
    return best_teams
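Seeding team1 with queue[:1] pins the first queued player to one side, which halves the enumeration: trueskill.quality is symmetric in its two rating groups, so every split would otherwise be visited twice, once per side.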
Example #15
def skill_is_match_fair(team_1_elo, team_2_elo, fairness_threshold):
    # Ensure order is enforced before stripping values from keys
    team_1_elo_sort = OrderedDict(sorted(team_1_elo.items(), key=lambda t: t[1]))
    team_2_elo_sort = OrderedDict(sorted(team_2_elo.items(), key=lambda t: t[1]))
    # Holds stripped ratings
    team_1_ratings = skill_to_rating_array(team_1_elo_sort)
    team_2_ratings = skill_to_rating_array(team_2_elo_sort)

    fairness = quality([team_1_ratings, team_2_ratings])

    return fairness >= fairness_threshold
Example #16
  def get_match_quality(self, match):
    team_a, team_b = match[0], match[1]
    # Two teams in one match, number of rounds ignored
    if len(match[0]) == len(match[1]):
      team_a_skills = [self.get_player_skill(p) for p in team_a]
      team_a_skills = self.__modify_team_sigmas_by_mu_ratio(team_a_skills)
      team_b_skills = [self.get_player_skill(p) for p in team_b]
      team_b_skills = self.__modify_team_sigmas_by_mu_ratio(team_b_skills)
      return quality([team_a_skills, team_b_skills])
    else:
      # Unmatched teams, aggregate team skill and rate as 1vs1
      skill_team_a = self.get_team_skill(team_a)
      skill_team_b = self.get_team_skill(team_b)
      return quality_1vs1(skill_team_a, skill_team_b)
Example #17
def create_match(available_players):
    if len(available_players) >= 4:
        games_list = []
        # compute all possible pairings
        for player_1 in available_players:
            for player_2 in available_players:
                if player_2 == player_1:
                    continue
                for player_3 in available_players:
                    if player_3 == player_1 or player_3 == player_2:
                        continue
                    for player_4 in available_players:
                        if player_4 in (player_1, player_2, player_3):
                            continue
                        games_list.append([[player_1, player_2], [player_3, player_4]])
        best_game = games_list[0]
        for game in games_list:
            if quality(game) > quality(best_game):
                best_game = game
        # keep all games whose quality deviates 0% from the best game's
        best_games = []
        for game in games_list:
            if quality(game) >= quality(best_game):
                best_games.append(game)
        # print(len(games_list), len(best_games))
        # among those, prefer games whose players have played the fewest games in total
        best_number = sum(p.number_games for team in best_game for p in team)
        for game in best_games:
            game_number = sum(p.number_games for team in game for p in team)
            if game_number < best_number:
                best_game = game
                best_number = game_number
        games_list = [best_game]
        for game in best_games:
            game_number = sum(p.number_games for team in game for p in team)
            if game_number <= best_number and game is not best_game:
                games_list.append(game)
        return games_list
Example #18
  def extract_team_from_combinations_of_team(self):
    """
    From all 4v4 team combinations, keep those where team1's win rate is 30-70%.
    """
    for t1, t2 in zip(self.c_team1, self.c_team2):
      t1_rate = []
      t2_rate = []
      for name1, name2 in zip(t1, t2):
        t1_rate.append(Rating(self.player_d[name1]))
        t2_rate.append(Rating(self.player_d[name2]))

      # evaluate the full rosters, not each partial roster inside the loop
      win_rate = float(win_probability(t1_rate, t2_rate))
      draw_rate = float(quality([t1_rate, t2_rate]))

      if (win_rate >= 0.3) and (win_rate <= 0.7):
        self.possible_team1.append(list(t1))
        self.possible_team2.append(list(t2))
        self.possible_team1_win_rate.append(win_rate)
Example #19
def calculate_ratings(ratings, player_list, games):

    for i, row in games.iterrows():
        team_a = []
        team_b = []
        result = 2  # 2 is tie in the csv
        for j, column in row.items():
            if j.startswith("TeamA"):
                team_a.append(column)
            elif j.startswith("TeamB"):
                team_b.append(column)
            elif j == "Winner":
                result = column
            else:
                raise Exception("unexpected column: {}".format(j))

        rated_a = []
        rated_b = []

        for player in team_a:
            rated_a.append(ratings[player])
        for player in team_b:
            rated_b.append(ratings[player])

        debug('{:.1%} chance to draw'.format(
            trueskill.quality([rated_a, rated_b])))

        if result == 0:
            rated_a, rated_b = trueskill.rate([rated_a, rated_b], ranks=[0, 1])
        elif result == 1:
            rated_a, rated_b = trueskill.rate([rated_a, rated_b], ranks=[1, 0])
        else:
            rated_a, rated_b = trueskill.rate([rated_a, rated_b], ranks=[0, 0])

        for player, rating in zip(team_a, rated_a):
            ratings[player] = rating

        for player, rating in zip(team_b, rated_b):
            ratings[player] = rating

    return ratings
Example #20
def make_teams(players, guildid, pool=10):
    '''
    Make teams based on rating.
    :param players: list of userid of participating players
    :param pool: number of matches to generate from which the best is chosen
    :return: t (list of userids), ct (list of userids), predicted quality of match
    '''
    player_ratings = {id: get_skill(id, guildid) for id in players}
    t, ct = [], []
    best_quality = 0.0
    for i in range(pool):
        random.shuffle(players)
        team_size = len(players) // 2
        t1 = {id: player_ratings[id] for id in players[:team_size]}
        t2 = {id: player_ratings[id] for id in players[team_size:]}
        quality = ts.quality([t1, t2])
        if quality > best_quality:
            t = list(t1.keys())
            ct = list(t2.keys())
            best_quality = quality
    return t, ct, best_quality
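Note that t1 and t2 are dicts here: trueskill.quality (like trueskill.rate) accepts dict rating groups, so player ids can stay attached to their ratings instead of being split into parallel lists.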
Example #21
def suggest_teams(player_skills: {int: trueskill.Rating}):
    players = frozenset(player_skills.keys())
    max_team_size = min(len(players) // 2, MAX_PLAYERS_PER_TEAM)
    min_team_size = max(1, len(players) - MAX_PLAYERS_PER_TEAM)
    teams_seen = set()

    for r in range(min_team_size, max_team_size + 1):
        for team1 in itertools.combinations(players, r):
            team2 = tuple(players - frozenset(team1))

            if team1 in teams_seen or team2 in teams_seen:
                continue
            teams_seen.add(team1)
            teams_seen.add(team2)

            team1_skills = [player_skills[player_id] for player_id in team1]
            team2_skills = [player_skills[player_id] for player_id in team2]
            quality = trueskill.quality((team1_skills, team2_skills))
            p_win = win_probability(trueskill.global_env(), team1_skills,
                                    team2_skills)
            yield team1, team2, quality, p_win
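The win_probability helper called above is not part of this snippet; Example #36 further down computes the same quantity from dicts of ratings. A list-based sketch of that formula, assuming env is a trueskill.TrueSkill environment:

import itertools
import math

def win_probability(env, team1, team2):
    # P(team1 beats team2) = Phi(delta_mu / sqrt(n * beta^2 + sum of sigma^2))
    delta_mu = sum(r.mu for r in team1) - sum(r.mu for r in team2)
    sum_sigma = sum(r.sigma ** 2 for r in itertools.chain(team1, team2))
    denom = math.sqrt((len(team1) + len(team2)) * env.beta ** 2 + sum_sigma)
    return env.cdf(delta_mu / denom)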
Example #22
    def get_match_qualities(self, players_present):
        players_per_team = int(len(players_present) / 2)
        teams = list(itertools.combinations(players_present, players_per_team))

        seen_matches = set()
        match_qualities = []

        # TODO(edgard): Instead of hardcoding IDs, we should support setting
        # a cvar (e.g. "seta qlx_oloraculoDontMix 1234:5678,1234:9987"). This
        # can be done in the client and doesn't require restarting.
        # toro = 76561198282206581
        # mandiok = 76561198257902041

        for team_a in teams:
            for team_b in teams:
                match_key = repr(sorted((sorted(team_a), sorted(team_b))))

                if list(set(team_a)
                        & set(team_b)) or match_key in seen_matches:
                    continue

                # if ((toro in team_a and mandiok in team_a) or
                #     (toro in team_b and mandiok in team_b)):
                #   continue

                seen_matches.add(match_key)

                team_a_ratings = [
                    self.get_player_ratings(player_id) for player_id in team_a
                ]
                team_b_ratings = [
                    self.get_player_ratings(player_id) for player_id in team_b
                ]

                quality = trueskill.quality([team_a_ratings, team_b_ratings])
                match_qualities.append([quality, [team_a, team_b]])

        return match_qualities
Example #23
def match_quality(team1, team2):
    t1_elo, t2_elo, t1_ts, t2_ts = [], [], [], []
    for player in team1:
        if player not in p:
            t1_elo.append(ELO_DEFAULT_RATING)
            t1_ts.append(Rating())
        else:
            t1_elo.append(p[player].last_rating(ELO_MODEL))
            t1_ts.append(p[player].last_rating(TRUESKILL_MODEL))

    for player in team2:
        if player not in p:
            t2_elo.append(ELO_DEFAULT_RATING)
            t2_ts.append(Rating())
        else:
            t2_elo.append(p[player].last_rating(ELO_MODEL))
            t2_ts.append(p[player].last_rating(TRUESKILL_MODEL))

    elo_pred = elo.predict_winner(t1_elo, t2_elo)
    elo_quality = 1 - abs(elo_pred[0] - elo_pred[1])
    ts_quality = ts.quality([t1_ts, t2_ts])

    return (elo_quality, ts_quality)
Example #24
def trueskill(df):
    """
    creates trueskill ratings
    ...

    Parameters
    ----------
    df: pd.DataFrame
        games dataframe

    Returns
    -------
    df: pd.DataFrame
        with trueskill ratings added
    """
    ratings = {}
    for x in df.home_team.unique():
        ratings[x] = Rating(25)
    for x in df.away_team.unique():
        ratings[x] = Rating(25)
    for x in df.home_goalie_id.unique():
        ratings[x] = Rating(25)
    for x in df.away_goalie_id.unique():
        ratings[x] = Rating(25)

    ts_quality = []
    goalie_ts_diff = []
    team_ts_diff = []
    home_goalie_ts = []
    away_goalie_ts = []
    home_team_ts = []
    away_team_ts = []
    df = df.sort_values(by='date').copy()

    for i, r in df.iterrows():
        # get pre-match trueskill ratings from dict
        match = [(ratings[r.home_team], ratings[r.home_goalie_id]),
                 (ratings[r.away_team], ratings[r.away_goalie_id])]
        ts_quality.append(quality(match))
        goalie_ts_diff.append(ratings[r.home_goalie_id].mu - ratings[r.away_goalie_id].mu)
        team_ts_diff.append(ratings[r.home_team].mu - ratings[r.away_team].mu)
        home_goalie_ts.append(ratings[r.home_goalie_id].mu)
        away_goalie_ts.append(ratings[r.away_goalie_id].mu)
        home_team_ts.append(ratings[r.home_team].mu)
        away_team_ts.append(ratings[r.away_team].mu)

        if r.date < df.date.max():
            # update ratings dictionary with post-match ratings
            if r.home_team_win:
                match = [(ratings[r.home_team], ratings[r.home_goalie_id]),
                         (ratings[r.away_team], ratings[r.away_goalie_id])]
                [(ratings[r.home_team], ratings[r.home_goalie_id]),
                 (ratings[r.away_team], ratings[r.away_goalie_id])] = rate(match)
            else:
                match = [(ratings[r.away_team], ratings[r.away_goalie_id]),
                         (ratings[r.home_team], ratings[r.home_goalie_id])]
                [(ratings[r.away_team], ratings[r.away_goalie_id]),
                 (ratings[r.home_team], ratings[r.home_goalie_id])] = rate(match)

    df['ts_game_quality'] = ts_quality
    df['goalie_ts_diff'] = goalie_ts_diff
    df['team_ts_diff'] = team_ts_diff
    df['home_goalie_ts'] = home_goalie_ts
    df['away_goalie_ts'] = away_goalie_ts
    df['home_team_ts'] = home_team_ts
    df['away_team_ts'] = away_team_ts

    return df
Example #25
alice, bob = Rating(25), Rating(30)  # assign Alice and Bob's ratings
if quality_1vs1(alice, bob) < 0.50:
    print('This match seems to be not so fair')
alice, bob = rate_1vs1(alice, bob)  # update the ratings after the match

print('\n\nHey there')

r1 = Rating()
r2 = Rating()

print('{:.1%} chance to draw'.format(quality_1vs1(r1, r2)))

new_r1, new_r2 = rate_1vs1(r1, r2, drawn=True)
print(new_r1)
print(new_r2)

r1 = Rating()  # 1P's skill
r2 = Rating()  # 2P's skill
r3 = Rating()  # 3P's skill
t1 = [r1]  # Team A contains just 1P
t2 = [r2, r3]  # Team B contains 2P and 3P

print('{:.1%} chance to draw'.format(quality([t1, t2])))
#13.5% chance to draw
(new_r1, ), (new_r2, new_r3) = rate([t1, t2], ranks=[0, 1])
print(new_r1)
#trueskill.Rating(mu=33.731, sigma=7.317)
print(new_r2)
#trueskill.Rating(mu=16.269, sigma=7.317)
print(new_r3)
Example #26
def truelearn_model(records,
                    init_skill=0.,
                    def_var=None,
                    tau=0.,
                    beta_sqr=0.,
                    threshold=0.5,
                    engage_func=None,
                    draw_probability="static",
                    draw_factor=.1,
                    positive_only=True):
    """This model calculates trueskill given all positive skill using the real trueskill factorgraph.
    Args:
        records [[val]]: list of vectors for each event of the user. Format of vector
            [session, time, timeframe_id, topic_id, topic_cov ..., label]

    Returns:
        accuracy, precision, recall, f1 (float): summary statistics over all observations
        num_records (int): number of events for this user
        stats (dict): auxiliary counters and model statistics
    """

    num_records = float(len(records))

    if num_records <= 1:
        return 0., 0., 0., 0., int(num_records), {}

    user_model = {"mean": {}, "variance": {}}

    topics_covered = set()

    actual = [1 / 1000000000]  # the draw probability can't be zero
    predicted = []

    stats = defaultdict(int)

    prev_label = None

    for idx, event in enumerate(records):

        #  calculate if the user is going to engage with this resource
        topic_vec = event[1:-1]
        topic_dict = get_topic_dict(topic_vec)

        # setup trueskill environment
        if draw_probability == "static":
            # _draw_probability = float(0.5932538086581619)  # population success rate
            _draw_probability = 1.
        else:
            # compute novelty prob
            _draw_probability = float(np.mean(actual))
            # _draw_probability = float(novel_prob)  # individual.. majority model

        _draw_probability *= draw_factor
        _draw_probability = 1 - 1 / 1000000000 if _draw_probability == 1. else _draw_probability

        trueskill.setup(mu=0.0,
                        sigma=1 / 1000000000,
                        beta=float(np.sqrt(beta_sqr)),
                        tau=tau,
                        draw_probability=_draw_probability,
                        backend="mpmath")

        # track unique topic encountered
        topics_covered |= set(topic_dict.keys())

        # create_teams
        team_learner = tuple()
        team_mean_learner = []

        team_content = tuple()
        team_mean_content = []

        topic_seq = []

        for topic, coverage in topic_dict.items():
            topic_seq.append(topic)
            # get user skill rating
            tmp_learner_skill = user_model["mean"].get(topic, init_skill)
            learner_skill = trueskill.Rating(
                mu=tmp_learner_skill,
                sigma=np.sqrt(user_model["variance"].get(topic, def_var)))

            team_learner += (learner_skill, )
            team_mean_learner.append(tmp_learner_skill)

            # get skill coverage
            tmp_content_topic = coverage
            topic_cov = trueskill.Rating(mu=tmp_content_topic,
                                         sigma=1 / 1000000000)
            team_content += (topic_cov, )
            team_mean_content.append(tmp_content_topic)

        # check if user engages
        pred_prob = trueskill.quality([team_learner, team_content])
        prediction = int(pred_prob >= threshold)

        # if user engages, update the model
        label = event[-1]

        # if label is negative and setting is positive only, skip updating
        if positive_only and label != 1:
            pass
        else:
            # if positive
            if label == 1:
                # learner wins
                new_team_learner, _ = trueskill.rate(
                    [team_learner, team_content], ranks=[0, 0])
            else:  # if the person is not engaged...
                # check if the winner is learner or content
                difference = np.sum(team_mean_learner) - np.sum(
                    team_mean_content)

                if difference > 0.:  # learner wins
                    new_team_learner, _ = trueskill.rate(
                        [team_learner, team_content], ranks=[0, 1])
                elif difference < 0.:  # learner loses
                    _, new_team_learner = trueskill.rate(
                        [team_content, team_learner], ranks=[0, 1])
                else:
                    new_team_learner = team_learner

            for _idx, topic in enumerate(topic_seq):
                user_model["mean"][topic], user_model["variance"][
                    topic] = new_team_learner[_idx].mu, new_team_learner[
                        _idx].sigma**2

        # if not first element, calculate accuracy
        if idx != 0:
            if label != prev_label:
                stats["change_label"] += 1

            actual.append(label)
            predicted.append(prediction)

        prev_label = label

    stats = dict(stats)

    stats["num_topics"] = len(topics_covered)

    accuracy, precision, recall, f1, stats = get_summary_stats(
        actual[1:], predicted, num_records, stats=stats, user_model=user_model)

    return accuracy, precision, recall, f1, int(num_records), stats
Example #27
    def draw_probability(self, team1, team2):
        r1 = [p.get_rating() for p in team1]
        r2 = [p.get_rating() for p in team2]
        return trueskill.quality([r1, r2],
                                 weights=self.team_weights(team1, team2))
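For reference, the weights argument takes one weight per player in each rating group (partial play). A minimal standalone sketch with made-up weights:

import trueskill

r = trueskill.Rating()
teams = [(r, r), (r, r)]
# the second player on each team only played half the match
print(trueskill.quality(teams, weights=[[1.0, 0.5], [1.0, 0.5]]))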
Example #28
    def decide_on_players_3(bot_ids: Iterable[BotID], rank_sys: RankingSystem,
                            ticket_sys: TicketSystem) -> Tuple[List[BotID], List[BotID]]:
        """
        Find two balanced teams. Uses the TicketSystem and the RankingSystem
        to find a fair matchup between some bots that haven't played for a while.
        """
        # Higher ticket strength produces a more uniform distribution of matches played, adjust by increments of 0.1
        TICKET_STRENGTH = 1
        # Higher MMR tolerance allows accurately rated bots to play in more "distant" MMR matches, adjust by increments of 1
        MMR_TOLERANCE = 4
        # Max attempts to build match of quality >= MIN_QUALITY
        MAX_ITERATIONS = 20
        MIN_QUALITY = 0.4

        rank_sys.ensure_all(bot_ids)
        ticket_sys.ensure(bot_ids)

        best_quality = 0
        best_match = None

        max_tickets = max([ticket_sys.get(bot_id) for bot_id in bot_ids])

        for i in range(MAX_ITERATIONS):
            # Get Leader Bot (choose randomly between bots with highest tickets)
            possible_leaders = [bot_id for bot_id, tickets in ticket_sys.tickets.items() if tickets == max_tickets and bot_id in bot_ids]
            leader = numpy.random.choice(possible_leaders)

            # Get MU for Leader bot, that will be the match mmr
            match_mmr = rank_sys.get(leader).mu

            # Score all bots based on probability to perform at target mmr, scaled by amount of tickets
            candidates = [Candidate(bot_id, rank_sys.get(bot_id)) for bot_id in bot_ids if bot_id != leader]
            scores = []

            for c in candidates:
                # Calculate probability to perform at desired mmr
                performance_prob = pdf(match_mmr, mu=c.rating.mu, sigma=math.sqrt(c.rating.sigma**2 + MMR_TOLERANCE**2))

                # Calculate weighting factor based on tickets
                tickets = ticket_sys.get(c.bot_id)
                tickets_weight = tickets ** TICKET_STRENGTH

                # Calculate candidate score
                scores.append(performance_prob * tickets_weight)

            # Pick 5 bots randomly based on their score
            probs = numpy.asarray(scores) / sum(scores)
            players = list(numpy.random.choice(candidates, size=5, p=probs, replace=False))
            players.append(Candidate(leader, rank_sys.get(leader)))

            # Get the highest quality match with the 6 chosen bots
            combinations = list(itertools.combinations(players, 3))
            possible_matches = len(combinations) // 2
            blue_combs = combinations[:possible_matches]
            # reversed tail: orange_combs[i] is the complement of blue_combs[i]
            orange_combs = combinations[:possible_matches-1:-1]

            for m in range(possible_matches):
                blue_team = blue_combs[m]
                orange_team = orange_combs[m]
                quality = trueskill.quality([[c.rating for c in blue_team], [c.rating for c in orange_team]])
                if quality > best_quality:
                    best_quality = quality
                    best_match = (blue_team, orange_team)

            if best_quality >= MIN_QUALITY:
                break

        # We sort by get_mmr() because it considers sigma
        blue_ids = sorted([c.bot_id for c in best_match[0]], key=lambda id: rank_sys.get_mmr(id), reverse=True)
        orange_ids = sorted([c.bot_id for c in best_match[1]], key=lambda id: rank_sys.get_mmr(id), reverse=True)
        
        tickets_consumed = sum([ticket_sys.get_ensured(b) for b in blue_ids + orange_ids])
        print(f"Match: {blue_ids} vs {orange_ids}\nMatch quality: {best_quality}  Tickets consumed: {tickets_consumed}")
        ticket_sys.choose(blue_ids + orange_ids, bot_ids)
        return blue_ids, orange_ids
Example #29
def test_list_instead_of_tuple():
    r1, r2 = Rating(), Rating()
    assert rate([[r1], [r2]]) == rate([(r1, ), (r2, )])
    assert quality([[r1], [r2]]) == quality([(r1, ), (r2, )])
Example #30
def skill_calculate_match_quality(party_1_roster, party_2_roster):
    """
    Calculates quality of match between 2 rosters
        - Takes 2 arrays of Rating objects
    """
    return quality([tuple(party_1_roster), tuple(party_2_roster)])
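The tuple(...) conversion is purely defensive: as the library's own test in Example #29 shows, quality treats lists and tuples of ratings identically.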
Example #31
def get_quality():
    (ranked, _) = build_ranked()
    return {'quality': trueskill.quality(ranked) * 100}
Example #32
    def update(self, message, client):
        '''
        Updates this item from the message dictionary supplied
        '''
        self.client  = client

        self.title = message['title']
        self.host = message['host']

        # Maps integral team numbers (from 2, with 1 "none") to lists of names.
        teams_map = dict.copy(message['teams'])
        self.password_protected = message.get('password_protected', False)
        self.mod = message['featured_mod']
        self.modVersion = message.get('featured_mod_versions', [])
        self.mods = message.get('sim_mods', {})
        self.options = message.get('options', [])
        num_players = message.get('num_players', 0)
        self.slots = message.get('max_players', 12)
        
        oldstate = self.state
        self.state  = message['state']

        # Assemble a players & teams lists
        self.teamlist = []
        self.observerlist = []

        self.setHidden((self.state != 'open') or (self.mod in mod_invisible))        

        # Clear the status for all involved players (url may change, or players may have left, or game closed)        
        for player in self.players:
            if player.login in client.urls:
                del client.urls[player.login]

        # Just jump out if we've left the game, but tell the client that all players need their states updated
        if self.state == "closed":
            client.usersUpdated.emit(self.players)
            return

        # Used to differentiate between newly added / removed and previously present players
        oldplayers = set(map(lambda p: p.login, self.players))

        # Following the convention used by the game, a team value of 1 represents "No team". Let's
        # desugar those into "real" teams now (and convert the dict to a list)
        # Also, turn the lists of names into lists of players, and build a player name list.
        self.players = []
        teams = []
        for team_index, team in teams_map.iteritems():
            if team_index == 1:
                for ffa_player in team:
                    if ffa_player in self.client.players:
                        self.players.append(self.client.players[ffa_player])
                        teams.append([self.client.players[ffa_player]])
            else:
                real_team = []
                for name in team:
                    if name in self.client.players:
                        self.players.append(self.client.players[name])
                        real_team.append(self.client.players[name])
                teams.append(real_team)

        # Tuples for feeding into trueskill.
        rating_tuples = []
        for team in teams:
            ratings_for_team = map(lambda player: Rating(player.rating_mean, player.rating_deviation), team)
            rating_tuples.append(tuple(ratings_for_team))

        try:
            self.gamequality = 100*round(trueskill.quality(rating_tuples), 2)
        except ValueError:
            self.gamequality = 0
        self.nTeams = len(teams)

        # Map preview code
        if self.mapname != message['mapname']:
            self.mapname = message['mapname']
            self.mapdisplayname = maps.getDisplayName(self.mapname)
            refresh_icon = True
        else:
            refresh_icon = False

        #Alternate icon: If private game, use game_locked icon. Otherwise, use preview icon from map library.
        if refresh_icon:
            if self.password_protected:
                icon = util.icon("games/private_game.png")
            else:            
                icon = maps.preview(self.mapname)
                if not icon:
                    self.client.downloader.downloadMap(self.mapname, self)
                    icon = util.icon("games/unknown_map.png")
                             
            self.setIcon(icon)

        strQuality = ""
        
        if self.gamequality == 0 :
            strQuality = "? %"
        else :
            strQuality = str(self.gamequality)+" %"

        if num_players == 1:
            playerstring = "player"
        else:
            playerstring = "players"

        color = client.players.getUserColor(self.host)

        self.editTooltip(teams)

        self.setText(self.FORMATTER_FAF.format(color=color, mapslots = self.slots, mapdisplayname=self.mapdisplayname, title=self.title, host=self.host, players=num_players, playerstring=playerstring, gamequality = strQuality))

        #Spawn announcers: IF we had a gamestate change, show replay and hosting announcements 
        if (oldstate != self.state):            
            if (self.state == "playing"):
                QtCore.QTimer.singleShot(5*60000, self.announceReplay) # delayed because the livereplay server lags by 5 minutes
            elif (self.state == "open"):
                QtCore.QTimer.singleShot(35000, self.announceHosting)   # delayed because the host currently needs time to choose a map

        # Update player URLs
        for player in self.players:
            client.urls[player.login] = self.url(player.id)

        # Determine which players are affected by this game's state change            
        newplayers = set(map(lambda p: p.login, self.players))
        affectedplayers = oldplayers | newplayers
        client.usersUpdated.emit(list(affectedplayers))
Example #34
def predict_matchup_quality(team_1, team_2):
    predicted_quality = quality([team_1.ratings, team_2.ratings])
    return predicted_quality
Example #36
# The opening lines of this example are missing from the page; by symmetry
# with Bcomp below, team A's composition was presumably built as (assumed):
Acomp = rating[Ateam]
Bcomp = rating[Bteam]

# Awin prob
def win_probability(Acomp, Bcomp, env = trueskill.global_env()):
    delta_mu = sum(Acomp[r].mu for r in Acomp.keys()) - sum(Bcomp[r].mu for r in Bcomp.keys())
    sum_sigma = sum(Acomp[r].sigma ** 2 for r in Acomp.keys()) + sum(Bcomp[r].sigma ** 2 for r in Bcomp.keys())
    size = len(Acomp) + len(Bcomp)
    denom = math.sqrt(size * (env.beta * env.beta) + sum_sigma)
    return env.cdf(delta_mu / denom)
win_prob = win_probability(Acomp, Bcomp, env)
print(Ateam + ' winning probability is ' + str(round(win_prob * 100, 2)) + "%")
print(Bteam + ' winning probability is ' + str(round((1 - win_prob) * 100, 2)) + "%")

# draw prob
print('{:0.1%} chance to draw'.format(trueskill.quality([Acomp, Bcomp])))

# plot
height = [win_prob, 1 - win_prob]
bars = (Ateam, Bteam)
y_pos = np.arange(len(bars))
plt.bar(y_pos, height, color = ("#DE7A22", "#6AB187"))
plt.title('Match-up winning probabilities based on TrueSkill rating')
plt.xlabel('team')
plt.ylabel('prob')
plt.ylim(0, 1)
plt.xticks(y_pos, bars)
plt.show()


Example #37
import sys, json
import trueskill
from itertools import combinations
players = []

for i in xrange(1, len(sys.argv), 2):
    players.append(trueskill.Rating(mu=float(sys.argv[i]), sigma=float(sys.argv[i + 1])))

if len(players) <= 2:
    result = [0, 1]
    print result
    sys.exit(0)  # nothing to balance with two or fewer players

indices = range(len(players))
halfLength = len(players) / 2
bestQuality = trueskill.quality([players[:halfLength], players[halfLength:]])
bestTeams = indices

for combination in combinations(indices[1:], halfLength):
    team1Indices =  list(combination)
    team2Indices = list(set(indices) - set(team1Indices))
    team1 = [players[i] for i in team1Indices]
    team2 = [players[i] for i in team2Indices]
    quality = trueskill.quality([team1, team2])
    if quality > bestQuality:
        bestQuality = quality
        bestTeams = team1Indices + team2Indices

print bestTeams
Example #38
def get_trueskill_quality(players: Tuple[List[BotID], List[BotID]],
                          rank_sys: RankingSystem) -> float:
    blue_ratings = [rank_sys.get(bot) for bot in players[0]]
    orange_ratings = [rank_sys.get(bot) for bot in players[1]]
    return trueskill.quality([blue_ratings, orange_ratings])
Example #39
confiance_joueurs = [float(x)/100 for x in table_joueurs["sigma"].tolist()]
rating_joueurs = [ts.Rating(x,y) for x,y in zip(niveau_joueurs, confiance_joueurs)]
Caracteristiques = dict(zip(pool_joueurs, rating_joueurs))

#In tagpro, maximum of 924 combinations, computation is quick.

Best_quality = 0
Best_team_1 = []
Best_team_2 = []
for subset in itertools.combinations(pool_joueurs, len(pool_joueurs)/2):
    Joueurs_team_1 = list(subset)
    Joueurs_team_2 = [joueur for joueur in pool_joueurs if joueur not in subset]
    Ratings_team_1 = [Caracteristiques[x] for x in Joueurs_team_1]
    Ratings_team_2 = [Caracteristiques[x] for x in Joueurs_team_2]
    match_quality = ts.quality([Ratings_team_1, Ratings_team_2])
    if match_quality > Best_quality:
        Best_quality = match_quality
        Best_team_1 = Joueurs_team_1
        Best_team_2 = Joueurs_team_2
        

conn.commit()
cur.close()
conn.close()


print Best_team_1
print Best_team_2
print Best_quality