Example #1
    def __init__(self,
                 mu: float = 2500.0,
                 sigma: float = 2500.0 / 3.0,
                 beta: float = 2500.0 / 2.0,
                 tau: float = 25.0 / 3.0,
                 draw_probability: float = 0.06,
                 **kws) -> None:
        super().__init__(**kws)

        self.mu = mu
        self.sigma = sigma
        self.beta = beta
        self.tau = tau
        self.draw_probability = draw_probability

        self.env_drawable = TrueSkill(mu=mu,
                                      sigma=sigma,
                                      beta=beta,
                                      tau=tau,
                                      draw_probability=draw_probability)
        self.env_undrawable = TrueSkill(mu=mu,
                                        sigma=sigma,
                                        beta=beta,
                                        tau=tau,
                                        draw_probability=0.0)
        self.ratings = self._create_rating_jar()
Example #2
def test_backend():
    env = TrueSkill(backend=(NotImplemented, NotImplemented, NotImplemented))
    with raises(TypeError):
        env.rate_1vs1(Rating(), Rating())
    with raises(ValueError):
        # '__not_defined__' backend is not defined
        TrueSkill(backend='__not_defined__')
Example #3
def test_deprecated_methods():
    env = TrueSkill()
    r1, r2, r3 = Rating(), Rating(), Rating()
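    # 't' here is presumably the trueskill module itself (e.g. 'import trueskill as t');
    # deprecated_call is pytest's helper for asserting that a DeprecationWarning is raised.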
    deprecated_call(t.transform_ratings, [(r1, ), (r2, ), (r3, )])
    deprecated_call(t.match_quality, [(r1, ), (r2, ), (r3, )])
    deprecated_call(env.Rating)
    deprecated_call(env.transform_ratings, [(r1, ), (r2, ), (r3, )])
    deprecated_call(env.match_quality, [(r1, ), (r2, ), (r3, )])
    deprecated_call(env.rate_1vs1, r1, r2)
    deprecated_call(env.quality_1vs1, r1, r2)
    deprecated_call(lambda: Rating().exposure)
    dyn = TrueSkill(draw_probability=t.dynamic_draw_probability)
    deprecated_call(dyn.rate, [(r1, ), (r2, )])
Example #4
File: elo.py Project: Taxane/dppl
def rank(player1, player2):
    """ This function will calculate the TrueSkill ranking of each player. It
    accepts two players arguments. The first is the winning player, the second
    is the losing player. Both should be Profile objects. This will not save
    the players.

    Args:
        player1: The winning user profile
        player2: The losing user profile

    Returns:
        A tuple of (player1, player2)
    """
    DRAW_PROBABILITY = 0  # It's impossible to draw 1 on 1
    approx = lambda f: round(f, 6)  # Round all floats to 6 decimals

    ts = TrueSkill(draw_probability=DRAW_PROBABILITY)
    t1 = (ts.Rating(mu=player1.mu, sigma=player1.sigma), )
    t2 = (ts.Rating(mu=player2.mu, sigma=player2.sigma), )
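    # transform_ratings is the deprecated older name for TrueSkill.rate
    # (see the deprecation tests in Example #3)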
    r1, r2 = tuple(x[0] for x in ts.transform_ratings(rating_groups=(t1, t2)))

    player1.mu = approx(r1.mu)
    player1.sigma = approx(r1.sigma)
    player1.exposure = approx(r1.exposure)

    player2.mu = approx(r2.mu)
    player2.sigma = approx(r2.sigma)
    player2.exposure = approx(r2.exposure)

    player1.save()
    player2.save()

    return player1, player2
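A hypothetical call, assuming two Profile objects with mu, sigma, and exposure fields as used above:

winner, loser = rank(winner_profile, loser_profile)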
Example #5
def analyze_teams(player_blue_offense, player_blue_defense, player_red_offense,
                  player_red_defense):
    ts = TrueSkill(mu=MU,
                   sigma=SIGMA,
                   beta=BETA,
                   tau=TAU,
                   draw_probability=DRAW_PROB)

    player_blue_offense_rating = Rating(
        mu=player_blue_offense.rating_mu,
        sigma=player_blue_offense.rating_sigma
    ) if player_blue_offense is not None else None
    player_blue_defense_rating = Rating(
        mu=player_blue_defense.rating_mu,
        sigma=player_blue_defense.rating_sigma
    ) if player_blue_defense is not None else None
    player_red_offense_rating = Rating(
        mu=player_red_offense.rating_mu, sigma=player_red_offense.rating_sigma
    ) if player_red_offense is not None else None
    player_red_defense_rating = Rating(
        mu=player_red_defense.rating_mu, sigma=player_red_defense.rating_sigma
    ) if player_red_defense is not None else None

    blue_team = _build_team(player_blue_offense_rating,
                            player_blue_defense_rating)
    red_team = _build_team(player_red_offense_rating,
                           player_red_defense_rating)

    match_balance = ts.quality([blue_team, red_team])
    win_prob = win_probability(ts, blue_team, red_team)

    return {
        "match_balance": match_balance,
        "predicted_win_prob_for_blue": win_prob
    }
Example #6
 def __init__(self):
     self.trueskill_env = TrueSkill(mu=1700.0,
                                    sigma=1700.0 / 3.0,
                                    beta=1700.0 / 6.0,
                                    tau=1700.0 / 300.0,
                                    draw_probability=0.0)
     super(ReplayDatabase, self).__init__()
Example #7
def rate_race(race):
    entrants = race.ordered_entrants
    users = [UserRating(entrant, race) for entrant in entrants]
    if not race.team_race:
        groups = [(user, ) for user in users]
    else:
        groups = defaultdict(list)
        for user in users:
            groups[user.entrant.team_id].append(user)
        groups = list(groups.values())
        groups.sort(key=_sort_key)

    rating_groups = []
    ranks = []
    current_rank = 0
    for group in groups:
        sort_key = _sort_key(group)
        rating_groups.append(tuple(user.rating for user in group))
        ranks.append(current_rank)
        if sort_key < timedelta.max:
            current_rank += 1

    env = TrueSkill(backend='mpmath')
    rated = env.rate(rating_groups, ranks)

    with atomic():
        for ratings, group in zip(rated, groups):
            for rating, user in zip(ratings, group):
                user.set_rating(rating)
Example #8
def convergence():
    ENV = TrueSkill(mu=25, sigma=8 + 1 / 3, draw_probability=0)
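    # NOTE: rate_1vs1 below is the module-level function, so it uses the global
    # environment (draw_probability=0.10 by default) rather than ENV defined here.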
    p1 = ENV.create_rating()
    p2 = ENV.create_rating()
    p1_log = []
    p2_log = []
    for _ in range(400):
        p1, p2 = rate_1vs1(p1, p2, drawn=False)
        p1_log.append(p1.mu - 3 * p1.sigma)
        p2_log.append(p2.mu - 3 * p2.sigma)
    colors = ["blue", "green", "red", "cyan", "magenta", "yellow", "black"]
    legend = []
    player = 1
    for i in [p1_log, p2_log]:
        color_line = colors.pop()
        plt.plot(i, color=color_line, linewidth=2)
        color_patch = mpatches.Patch(color=color_line,
                                     label="player" + str(player))
        legend.append(color_patch)
        player += 1
    plt.ylabel('TrueSkill Value')
    plt.xlabel('Number of games played')
    plt.title("Covergence of trueskill value for each player")
    plt.legend(handles=legend)
    plt.savefig("figures/perfect_convergence.pdf")
    plt.close()
Example #9
def calculate_scores():
    env = TrueSkill(draw_probability=0.14)
    # trueskill.setup(draw_probability=0.14)
    scores = load_previous_scores()
    vote_count = 0

    with open(FLAGS.votes_file, 'r') as votes_file:
        for line in votes_file:
            img_1, img_2, winner = line.strip().split(",")

            img_1_ts = scores[img_1]
            img_2_ts = scores[img_2]

            if winner == "0":
                #winner left
                img_1_ts_new, img_2_ts_new = rate_1vs1(img_1_ts, img_2_ts)
            else:
                #winner right
                img_2_ts_new, img_1_ts_new = rate_1vs1(img_2_ts, img_1_ts)

            scores[img_1] = img_1_ts_new
            scores[img_2] = img_2_ts_new

            vote_count += 1

            if vote_count % 1000 == 0:
                print("Processed {} votes".format(vote_count))

    get_highest_rated(scores)

    save_scores(scores)
Example #10
class RankPreviewer:
    env = TrueSkill(mu=MU, sigma=SIGMA, beta=BETA, tau=TAU, draw_probability=0.01)

    @classmethod
    def get_current_rank_for_players(cls, report) -> List[Rating]:
        return [DB.db.get_s1_player_stats(pl).get_rating() for pl in
                report.players]

    @classmethod
    def calc_new_ranks(cls, report: DB.RankedMatch, old_ranks: List[Rating]) -> List[Rating]:
        try:
            if report.scrapped:
                return [Rating(mu=i.mu-20, sigma=i.sigma) for i in old_ranks]
            new_ranks = cls.to_1d(
                cls.env.rate([(i,) for i in old_ranks], ranks=[pos for pos in report.players_pos.values()])
            )
        except ValueError as e:
            logger.error(f"{type(e).__name__}: {e}")
            return old_ranks
        return new_ranks

    @classmethod
    def get_ranks_preview(cls, report) -> Dict[int, float]:
        old_ranks = cls.get_current_rank_for_players(report)
        new_ranks = cls.calc_new_ranks(report, old_ranks)
        return dict(zip(report.players, [SKILL(new) - SKILL(old) for new, old in zip(new_ranks, old_ranks)]))

    @staticmethod
    def to_1d(ls: List[Iterable]) -> List:
        return sum((list(i) for i in ls), [])
Example #11
def win_probability(team1, team2):
    delta_mu = team1.mu - team2.mu
    sum_sigma = (team1.sigma**2) + (team2.sigma**2)
    size = 2
    # denom = math.sqrt(size * (0.05 * 0.05) + sum_sigma)
    denom = math.sqrt(sum_sigma)
    # ts = TrueSkill()
    return TrueSkill().cdf(delta_mu / denom)
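The active denom above drops the performance-noise term that the standard TrueSkill win-probability formula includes (the commented-out line hints at it). A minimal sketch of the usual form, assuming a default environment; the function name is illustrative:

import math

from trueskill import Rating, TrueSkill

def win_probability_with_beta(env: TrueSkill, r1: Rating, r2: Rating) -> float:
    # Under TrueSkill, the performance difference in a 1v1 match is Gaussian
    # with variance 2 * beta**2 + sigma1**2 + sigma2**2.
    delta_mu = r1.mu - r2.mu
    denom = math.sqrt(2 * env.beta ** 2 + r1.sigma ** 2 + r2.sigma ** 2)
    return env.cdf(delta_mu / denom)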
Example #12
 def game(self):
     # Presumably a cheap availability check for the mpmath backend;
     # the attribute access itself has no side effect.
     TrueSkill(backend='mpmath').cdf
     players = self.choose_players()
     if players is not None:
         winner = self.play(players)
         if self.ranking_game:
             self.update_score(players, winner)
             for p in self.participants.all():
                 print(p.rating)
Example #13
class HexPlayer(ABC):
    ENV = TrueSkill(mu=25, sigma=8.333, draw_probability=0.00)

    def __init__(self):
        self.rating = HexPlayer.ENV.create_rating()

    @abstractmethod
    def get_move(self, board, colour, renders):
        pass
Example #14
 def __init__(self, size, players):
     self.size = size
     self.players = players
     self.durations = [0 for _ in range(len(players))]
     self.ratings = [
         TrueSkill(mu=25, sigma=8 + 1 / 3,
                   draw_probability=0).create_rating()
         for _ in range(len(players))
     ]
     self.ratings_log = [[0] for _ in range(len(players))]
Example #15
    def __init__(self):
        self.env = TrueSkill(draw_probability=0.02)
        self.trueskills = {}
        self.events = {}
        self.nicknames = {}
        self.processed_matches = set()

        self.session = requests.Session()
        self.session.headers.update(self.HEADERS)

        self.get_previous_matches()
Example #16
 def findRanks(self, pairs):
     from trueskill import TrueSkill, Rating
     env = TrueSkill(draw_probability=0.0)
     d = defaultdict(Rating)
     for p in pairs:
         d[p["winner"]], d[p["loser"]] = env.rate_1vs1(
             d[p["winner"]], d[p["loser"]])
     for i in d:
         d[i] = round(env.expose(d[i]), 2)
     return d
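A hypothetical input, assuming each pair is a dict with "winner" and "loser" keys as read above:

pairs = [{"winner": "alice", "loser": "bob"},
         {"winner": "alice", "loser": "carol"}]
exposures = obj.findRanks(pairs)  # 'obj' stands in for an instance of the enclosing class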
Example #17
def compute_trueskill(player_names, rankings):
    env = TrueSkill()
    ratings = {name: env.create_rating() for name in player_names}

    for game_ranking in rankings:
        game_player_names, ranks = zip(*[(player_name, rank) for player_name, (_, _, rank) in game_ranking.items()])
        rating_groups = env.rate([(ratings[player_name],) for player_name in game_player_names], ranks)
        for player_name, (rating,) in zip(game_player_names, rating_groups):
            ratings[player_name] = rating

    return ratings
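A hypothetical call, assuming each game ranking maps a player name to a 3-tuple whose last element is that player's rank (0 = winner):

ratings = compute_trueskill(
    ["alice", "bob"],
    [{"alice": (None, None, 0), "bob": (None, None, 1)}],
)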
Example #18
def generate_scores(voteFile):
    # function to generate scores using TrueSkill using a vote file given in the format:
    # image1 image2 winning-image
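    # A hypothetical line (file names are illustrative):
    #   img_0001.jpg img_0002.jpg img_0001.jpg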

    f1 = open(voteFile)

    mu0 = 25
    var0 = (mu0 / 3.0)**2
    beta2 = var0 / 4.0
    tau2 = var0 / 100.0
    prob_draw = 0.1333
    ts = TrueSkill(beta2, tau2, prob_draw, var0)
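    # NOTE: this constructor signature and the update_rating() calls below do not
    # match the PyPI 'trueskill' package, so TrueSkill here is presumably a
    # project-local implementation.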
    params = {}
    vals = [[], [], [], [], []]
    c = 0

    for vote in f1:
        vote = vote.strip().split()
        win = 0
        if vote[1] == vote[2]: win = 1
        if vote[0] not in params: params[vote[0]] = (mu0, var0)
        if vote[1] not in params: params[vote[1]] = (mu0, var0)
        if not win:
            params[vote[0]], params[vote[1]] = ts.update_rating(
                params[vote[0]], params[vote[1]], False)
        else:
            params[vote[1]], params[vote[0]] = ts.update_rating(
                params[vote[1]], params[vote[0]], False)
        if c % 10000 == 0:
            mus, sigmas = [], []
            for value in params.values():
                mus.append(value[0])
                sigmas.append(value[1])

            mu_mu = np.mean(mus)
            sigma_mu = np.mean(sigmas)
            mu_sigma = np.std(np.array(mus))
            sigma_sigma = np.std(np.array(sigmas))

            vals[0].append(mu_mu)
            vals[1].append(sigma_mu)
            vals[2].append(mu_sigma)
            vals[3].append(sigma_sigma)
            vals[4].append(c)

        c += 1

    f1.close()

    return params, ts, vals
Example #20
 def __init__(self,
              mu=DEFAULT_MU,
              sigma=DEFAULT_SIGMA,
              beta=DEFAULT_BETA,
              tau=DEFAULT_TAU,
              draw_probability=DEFAULT_DRAW):
     """ mu - the initial mean of ratings
         sigma - the initial standard deviation of ratings
         beta - the distance that gives about a 76% chance of winning
         tau - the dynamic factor
         draw_probability - the draw probability of the game"""
     self._ts = TrueSkill(mu, sigma, beta, tau, draw_probability)
     self._ratings = self._create_ratings()
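For reference, the trueskill library's stock defaults tie these parameters together as sigma = mu/3, beta = sigma/2, and tau = sigma/100; the DEFAULT_* constants above presumably mirror that convention. A quick check against the default environment:

from trueskill import TrueSkill

env = TrueSkill()
# Library defaults: mu=25.0, sigma=25/3, beta=25/6, tau=25/300, draw_probability=0.10
print(env.mu, env.sigma, env.beta, env.tau, env.draw_probability)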
Example #21
 def calculate(cls, game: Game):
     env = TrueSkill()
     team_won = {
         nick: cls.from_model_rating(game.rating_before(nick))
         for nick in game.nicks_won
     }
     team_lost = {
         nick: cls.from_model_rating(game.rating_before(nick))
         for nick in game.nicks_lost
     }
     rated_rating_groups = env.rate([team_won, team_lost], ranks=[0, 1])
     for team in rated_rating_groups:
         for nick in team:
             game.set_rating_after(nick, cls.to_model_rating(team[nick]))
Example #22
    def __init__(self, save_path: Path = DEFAULT_SAVE_PATH) -> None:
        """
        Construct statistics object from save file stored on disk.

        :param save_path: Path to file that contains saved games.
        """
        assert isinstance(save_path, Path)
        self.save_path = save_path
        self._load_data()

        # Use the same TrueSkill parameters in the entire class
        self.env = TrueSkill()

        # Check that the scipy backend of TrueSkill is importable
        # (choose_backend returns the backend functions; the result is unused here)
        backends.choose_backend("scipy")
Example #23
def test_invalid_rating_groups():
    env = TrueSkill()
    with raises(ValueError):
        env.validate_rating_groups([])
    with raises(ValueError):
        env.validate_rating_groups([()])
    # need multiple groups not just one
    with raises(ValueError):
        env.validate_rating_groups([(Rating(), )])
    # empty group is not allowed
    with raises(ValueError):
        env.validate_rating_groups([(Rating(), ), ()])
    # all groups should be same structure
    with raises(TypeError):
        env.validate_rating_groups([(Rating(), ), {0: Rating()}])
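For contrast, a minimal sketch of a call these checks accept: at least two groups, each non-empty, all with the same structure:

env = TrueSkill()
rated = env.rate([(Rating(), ), (Rating(), )])  # two one-player tuple groups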
Example #24
    def calculate_rating_change(self):
        """
        Calculate each player's rating change by rating the match with
        TrueSkill, using the recorded player placements as ranks.
        """
        player_records = sorted(self.player_records,
                                key=lambda r: r.player_placement)
        teams = [{
            r.player.name: r.player.trueskill_rating
        } for r in player_records]

        ranks = [r.player_placement for r in player_records]

        env = TrueSkill(mu=DEFAULT_RANK_POINTS,
                        sigma=DEFAULT_RANK_ELASTICITY,
                        draw_probability=0.0)
        rated_rating_groups = env.rate(teams, ranks=ranks)
        self._update_ranked_data(player_records, rated_rating_groups)
Example #25
def createTrueskillRating():
    ts = TrueSkill(draw_probability=0.01)  # 0.01 is an arbitrary small number
    beta = 25 / 6  # default value

    def win_probability(p1, p2):
        delta_mu = p1.mu - p2.mu
        sum_sigma = p1.sigma * p1.sigma + p2.sigma * p2.sigma
        denom = np.sqrt(2 * (beta * beta) + sum_sigma)
        return ts.cdf(delta_mu / denom)

    submit = sample_sub_pd
    submit[['Season', 'Team1', 'Team2']] = submit.apply(lambda r:pd.Series([int(t) for t in r.ID.split('_')]), axis=1)

    df_tour = reg_season_compact_pd
    teamIds = np.unique(np.concatenate([df_tour.WTeamID.values, df_tour.LTeamID.values]))
    ratings = { tid:ts.Rating() for tid in teamIds }

    def feed_season_results(season):
        print("season = {}".format(season))
        df1 = df_tour[df_tour.Season == season]
        for r in df1.itertuples():
            ratings[r.WTeamID], ratings[r.LTeamID] = rate_1vs1(ratings[r.WTeamID], ratings[r.LTeamID])

    def update_pred(season):
        # NOTE: this 'beta' is local to update_pred; win_probability above still
        # closes over the outer beta = 25 / 6
        beta = np.std([r.mu for r in ratings.values()])
        print("beta = {}".format(beta))
        submit.loc[submit.Season==season, 'Pred'] = submit[submit.Season==season].apply(lambda r:win_probability(ratings[r.Team1], ratings[r.Team2]), axis=1)

    for season in sorted(df_tour.Season.unique()[:-1]): # exclude last 4 years [:-4]/ last 1 year [:-1]
        feed_season_results(season)

#    update_pred(2014)
#    feed_season_results(2014)
#    update_pred(2015)
#    feed_season_results(2015)
#    update_pred(2016)
#    feed_season_results(2016)
#    update_pred(2017)
    feed_season_results(2017)
    update_pred(2018)

    submit.drop(['Season', 'Team1', 'Team2'], axis=1, inplace=True)
    submit.to_csv('Data/Predictions/trueskill_results2018.csv', index=None)
Example #26
    def rateGameTrueSkill1v1(self, results):

        # Establish TrueSkill environment
        env = TrueSkill()

        # Rate the FFA game
        rating_groups = []
        for i in range(0, len(results)):
            mu = self.getPlayerMu(results[i][1])
            sigma = self.getPlayerSigma(results[i][1])
            rating = env.create_rating(mu, sigma)
            rating_groups.append((rating, ))
        rated_rating_groups = env.rate(rating_groups)

        # Record the results in the database
        for i in range(0, len(rated_rating_groups)):
            mu = rated_rating_groups[i][0].mu
            sigma = rated_rating_groups[i][0].sigma
            rating = mu - (3 * sigma)
            self.setPlayerStats(results[i][1], [rating, mu, sigma, 0])
Example #27
def extract(game):
    data = json.loads(game)
    i = 0
    positions = ['team1def', 'team1of', 'team2def', 'team2of']
    games = []
    for item in data:
        game_rep = []
        for position in positions:
            id_tmp = item.get(position).get('player').get('id')
            nickname_tmp = item.get(position).get('player').get('nickname')
            playerposition_tmp = item.get(position).get('playerposition')
            mu_tmp = item.get(position).get('player').get('mu')
            sigma_tmp = item.get(position).get('player').get('sigma')
            # create_rating() returns a Rating; assigning the TrueSkill
            # environment object itself as the player's rating would be a bug
            rating_tmp = TrueSkill(mu=mu_tmp, sigma=sigma_tmp).create_rating()
            p_tmp = Player(nickname_tmp, playerposition_tmp, 25, id_tmp)
            p_tmp.rating = rating_tmp
            p_tmp.winner_flag = item.get(position).get('winner')
            game_rep.append(p_tmp)
        games.append(game_rep)
    return games
Example #28
def compute_trueskill_ranking(matches):
    # positional TrueSkill arguments: mu=0, sigma=0.5 (beta and tau are overridden below)
    env = TrueSkill(0, 0.5)
    env.beta = 0.025 * (0.5 ** 2) * len(matches)
    env.tau = 0
    player_objects = defaultdict(lambda: env.create_rating())
    for match in matches:
        system_name0, system_name1, win0, win1 = match
        player0 = player_objects[system_name0]
        player1 = player_objects[system_name1]

        if win0 == 1 and win1 == 0:
            new_player0, new_player1 = rate_1vs1(player0, player1)
        elif win1 == 1 and win0 == 0:
            new_player1, new_player0 = rate_1vs1(player1, player0)
        else:
            new_player0, new_player1 = rate_1vs1(player0, player1, drawn=True)

        player_objects[system_name0] = new_player0
        player_objects[system_name1] = new_player1
    ranking = sorted(player_objects.items(), key=lambda x: x[1].mu, reverse=True)
    return ranking
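A hypothetical input, assuming each match is a (system0, system1, win0, win1) tuple as unpacked above, where anything other than a clean 1-0 or 0-1 is treated as a draw:

matches = [
    ("sysA", "sysB", 1, 0),  # sysA beat sysB
    ("sysB", "sysC", 0, 0),  # treated as a draw
]
ranking = compute_trueskill_ranking(matches)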
Example #29
def main():
    # Presumably verifies that the scipy backend can be constructed;
    # the environment itself is discarded
    TrueSkill(backend='scipy')
    args = parseArgs()
    misspelled = loadMisspellings(args.misspelled)

    # map player name to rating object
    players = defaultdict(Rating)
    with open(args.record, 'r') as f:
        r = csv.reader(f)
        for i, row in enumerate(r):
            if i == 0:
                # header
                continue
            winner, loser, wasDraw = row
            # bool() of a non-empty string is always True; assuming the column
            # holds 'True'/'False' text, compare against the literal instead
            wasDraw = wasDraw.strip().lower() == 'true'
            winner = misspelled.get(winner, winner).lower()
            loser = misspelled.get(loser, loser).lower()

            w_new, l_new = rate_1vs1(players[winner], players[loser], drawn=wasDraw)
            players[winner] = w_new
            players[loser] = l_new

    # for p in sorted(players.items(), key=lambda p: (-p[1].mu, p[1].sigma)):
    for p in sorted(players.items(),
                    key=lambda p: (p[1].mu - (3 * p[1].sigma)),
                    reverse=True):
        print(p, p[1].mu - (3 * p[1].sigma))

    ratings = players.values()
    mus = np.asarray([r.mu for r in ratings])
    sigmas = np.asarray([r.sigma for r in ratings])

    coeffs = np.polyfit(mus, sigmas, 2)
    xp = np.linspace(np.min(mus), np.max(mus), 100)
    plt.plot(xp, np.poly1d(coeffs)(xp))
    plt.scatter(mus, sigmas)
    plt.title('NorCal Store Champs 2016 TrueSkill')
    plt.xlabel('Player skill')
    plt.ylabel('Skill confidence')
    plt.savefig('trueskill')
Example #30
def make_match(teams, smash):
    # Ranks are 0, 1, 2, ... in finishing order; 0 = first place (the win)
    results = list(range(len(teams)))

    # Extract rank object of each player
    rating_teams = extract_ranks(teams)

    env = TrueSkill()
    results_teams = env.rate(rating_teams, ranks=results)

    # Map results back to teams list
    for rank_team, team in zip(results_teams, teams):
        for rank, player in zip(rank_team, team):
            player.last_mu = player.rank.mu
            player.rank = rank

    save_ranks(teams, smash)
    match_to_confirm = None
    return "Partida registrada"  # Spanish: "Match registered"