Example #1
File: elo.py Project: Taxane/dppl
def rank(player1, player2):
    """ This function will calculate the TrueSkill ranking of each player. It
    accepts two players arguments. The first is the winning player, the second
    is the losing player. Both should be Profile objects. This will not save
    the players.

    Args:
        player1: The winning user profile
        player2: The losing user profile

    Returns:
        A tuple of (player1, player2)
    """
    DRAW_PROBABILITY = 0  # It's impossible to draw 1 on 1
    approx = lambda f: round(f, 6)  # Round all floats to 6 decimals

    ts = TrueSkill(draw_probability=DRAW_PROBABILITY)
    t1 = (ts.Rating(mu=player1.mu, sigma=player1.sigma), )
    t2 = (ts.Rating(mu=player2.mu, sigma=player2.sigma), )
    r1, r2 = tuple(x[0] for x in ts.transform_ratings(rating_groups=(t1, t2)))

    player1.mu = approx(r1.mu)
    player1.sigma = approx(r1.sigma)
    player1.exposure = approx(r1.exposure)

    player2.mu = approx(r2.mu)
    player2.sigma = approx(r2.sigma)
    player2.exposure = approx(r2.exposure)

    player1.save()
    player2.save()

    return player1, player2
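transform_ratings is the deprecated spelling of rate, and the winner/loser pattern above maps directly onto rate_1vs1. A minimal sketch of the same update with the current trueskill API, using plain Rating objects instead of the Profile model:

from trueskill import TrueSkill

env = TrueSkill(draw_probability=0)  # draws are impossible 1-on-1
winner = env.create_rating(mu=25.0, sigma=25.0 / 3)
loser = env.create_rating(mu=25.0, sigma=25.0 / 3)
winner, loser = env.rate_1vs1(winner, loser)  # returns (new_winner, new_loser)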
Example #2
def test_backend():
    env = TrueSkill(backend=(NotImplemented, NotImplemented, NotImplemented))
    with raises(TypeError):
        env.rate_1vs1(Rating(), Rating())
    with raises(ValueError):
        # '__not_defined__' backend is not defined
        TrueSkill(backend='__not_defined__')
Example #4
File: elo.py Project: blenderbox/dppl
def rank(player1, player2):
    """ This function will calculate the TrueSkill ranking of each player. It
    accepts two players arguments. The first is the winning player, the second
    is the losing player. Both should be Profile objects. This will not save
    the players.

    Args:
        player1: The winning user profile
        player2: The losing user profile

    Returns:
        A tuple of (player1, player2)
    """
    DRAW_PROBABILITY = 0  # It's impossible to draw 1 on 1
    approx = lambda f: round(f, 6)  # Round all floats to 6 decimals

    ts = TrueSkill(draw_probability=DRAW_PROBABILITY)
    t1 = (ts.Rating(mu=player1.mu, sigma=player1.sigma),)
    t2 = (ts.Rating(mu=player2.mu, sigma=player2.sigma),)
    r1, r2 = tuple(x[0] for x in ts.transform_ratings(rating_groups=(t1, t2)))

    player1.mu = approx(r1.mu)
    player1.sigma = approx(r1.sigma)
    player1.exposure = approx(r1.exposure)

    player2.mu = approx(r2.mu)
    player2.sigma = approx(r2.sigma)
    player2.exposure = approx(r2.exposure)

    player1.save()
    player2.save()

    return player1, player2
Example #5
def rate_race(race):
    entrants = race.ordered_entrants
    users = [UserRating(entrant, race) for entrant in entrants]
    if not race.team_race:
        groups = [(user, ) for user in users]
    else:
        groups = defaultdict(list)
        for user in users:
            groups[user.entrant.team_id].append(user)
        groups = list(groups.values())
        groups.sort(key=_sort_key)

    rating_groups = []
    ranks = []
    current_rank = 0
    for group in groups:
        sort_key = _sort_key(group)
        rating_groups.append(tuple(user.rating for user in group))
        ranks.append(current_rank)
        if sort_key < timedelta.max:
            current_rank += 1

    env = TrueSkill(backend='mpmath')
    rated = env.rate(rating_groups, ranks)

    with atomic():
        for ratings, group in zip(rated, groups):
            for rating, user in zip(ratings, group):
                user.set_rating(rating)
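In rate_race above, every did-not-finish group keeps the same current_rank, and trueskill treats equal values in ranks as a draw between those groups. A minimal sketch of that convention, independent of the race models:

from trueskill import TrueSkill

env = TrueSkill()
g1 = (env.create_rating(),)
g2 = (env.create_rating(),)
g3 = (env.create_rating(),)
# group 1 wins; groups 2 and 3 share rank 1, i.e. they drew with each other
rated = env.rate([g1, g2, g3], ranks=[0, 1, 1])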
Example #6
def convergence():
    ENV = TrueSkill(mu=25, sigma=8 + 1 / 3, draw_probability=0)
    p1 = ENV.create_rating()
    p2 = ENV.create_rating()
    p1_log = []
    p2_log = []
    for _ in range(400):
        p1, p2 = ENV.rate_1vs1(p1, p2, drawn=False)  # use the zero-draw environment
        p1_log.append(p1.mu - 3 * p1.sigma)
        p2_log.append(p2.mu - 3 * p2.sigma)
    colors = ["blue", "green", "red", "cyan", "magenta", "yellow", "black"]
    legend = []
    player = 1
    for i in [p1_log, p2_log]:
        color_line = colors.pop()
        plt.plot(i, color=color_line, linewidth=2)
        color_patch = mpatches.Patch(color=color_line,
                                     label="player" + str(player))
        legend.append(color_patch)
        player += 1
    plt.ylabel('TrueSkill Value')
    plt.xlabel('Number of games played')
    plt.title("Covergence of trueskill value for each player")
    plt.legend(handles=legend)
    plt.savefig("figures/perfect_convergence.pdf")
    plt.close()
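The plotted quantity mu - 3 * sigma is the conservative skill estimate. Under these parameters (mu = 25, sigma = 25/3) it matches env.expose, which subtracts (env.mu / env.sigma) * sigma; a quick check:

from trueskill import TrueSkill

env = TrueSkill(mu=25, sigma=25 / 3, draw_probability=0)
r = env.create_rating()
# expose(r) == r.mu - (env.mu / env.sigma) * r.sigma == r.mu - 3 * r.sigma
assert abs(env.expose(r) - (r.mu - 3 * r.sigma)) < 1e-9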
Example #7
File: utils.py Project: bchruszc/rook
def update_trueskill(scores, ratings):
    env = TrueSkill()

    # Parallel arrays!  All of these are for the given game
    players = []
    teams = []
    ranks = []
    expose_before = {}

    # Sanity check, some bad data in test systems
    if len(scores) < 4:
        return

    for s in scores:
        if s.player_id not in ratings.keys():
            ratings[s.player_id] = Rating()  # Default mu=25, sigma=8.333

        r = ratings[s.player_id]
        expose_before[s.player_id] = env.expose(r)
        players.append(s.player_id)
        teams.append([r])
        ranks.append(s.rank)

    # Crunch the numbers
    new_ratings = rate(teams, ranks)

    for i in range(0, len(new_ratings)):
        ratings[players[i]] = new_ratings[i][0]

    for s in scores:
        s.trueskill = ratings[s.player_id].mu
        s.trueskill_confidence = ratings[s.player_id].sigma
        s.trueskill_change = round(env.expose(ratings[s.player_id]) - expose_before[s.player_id], 1)
        s.save()
Example #8
File: awards.py Project: bchruszc/rook
    def add_game(self, game, all_players):
        # Load everything in to this dict
        matches_dict = {}
        for at in self.award_totals:
            matches_dict[at.player_id] = at

        env = TrueSkill()
        # Ensure that an award_totals exists for every player in this game
        for s in game.scores.all():
            if s.player_id not in matches_dict.keys():
                at = AwardTotals()
                at.player_id = s.player_id
                at.numerator = 0
                at.denominator = 0
                at.type = self.name

                matches_dict[s.player_id] = at

            at = matches_dict[s.player_id]

            at.numerator = s.rating

            if s.trueskill > 0:
                at.denominator = 10 * round(env.expose(Rating(mu=s.trueskill, sigma=s.trueskill_confidence)), 1)
            else:
                at.denominator = 0

        # Reset from the modified dict
        self.award_totals = []
        for at in matches_dict.values():
            self.award_totals.append(at)
Example #9
    def __init__(self,
                 mu: float = 2500.0,
                 sigma: float = 2500.0 / 3.0,
                 beta: float = 2500.0 / 2.0,
                 tau: float = 25.0 / 3.0,
                 draw_probability: float = 0.06,
                 **kws) -> None:
        super().__init__(**kws)

        self.mu = mu
        self.sigma = sigma
        self.beta = beta
        self.tau = tau
        self.draw_probability = draw_probability

        self.env_drawable = TrueSkill(mu=mu,
                                      sigma=sigma,
                                      beta=beta,
                                      tau=tau,
                                      draw_probability=draw_probability)
        self.env_undrawable = TrueSkill(mu=mu,
                                        sigma=sigma,
                                        beta=beta,
                                        tau=tau,
                                        draw_probability=0.0)
        self.ratings = self._create_rating_jar()
Example #10
 def __init__(self):
     self.trueskill_env = TrueSkill(mu=1700.0,
                                    sigma=1700.0 / 3.0,
                                    beta=1700.0 / 6.0,
                                    tau=1700.0 / 300.0,
                                    draw_probability=0.0)
     super(ReplayDatabase, self).__init__()
Example #11
def analyze_teams(player_blue_offense, player_blue_defense, player_red_offense,
                  player_red_defense):
    ts = TrueSkill(mu=MU,
                   sigma=SIGMA,
                   beta=BETA,
                   tau=TAU,
                   draw_probability=DRAW_PROB)

    player_blue_offense_rating = Rating(
        mu=player_blue_offense.rating_mu,
        sigma=player_blue_offense.rating_sigma
    ) if player_blue_offense is not None else None
    player_blue_defense_rating = Rating(
        mu=player_blue_defense.rating_mu,
        sigma=player_blue_defense.rating_sigma
    ) if player_blue_defense is not None else None
    player_red_offense_rating = Rating(
        mu=player_red_offense.rating_mu, sigma=player_red_offense.rating_sigma
    ) if player_red_offense is not None else None
    player_red_defense_rating = Rating(
        mu=player_red_defense.rating_mu, sigma=player_red_defense.rating_sigma
    ) if player_red_defense is not None else None

    blue_team = _build_team(player_blue_offense_rating,
                            player_blue_defense_rating)
    red_team = _build_team(player_red_offense_rating,
                           player_red_defense_rating)

    match_balance = ts.quality([blue_team, red_team])
    win_prob = win_probability(ts, blue_team, red_team)

    return {
        "match_balance": match_balance,
        "predicted_win_prob_for_blue": win_prob
    }
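win_probability and _build_team are defined elsewhere in that project. As an illustration only (not the project's actual helper), a conventional win_probability for this call signature, assuming each team is a sequence of Rating objects:

import math

def win_probability(ts, team1, team2):
    # cdf of the mean-skill gap over the combined uncertainty,
    # including per-player performance noise beta
    delta_mu = sum(r.mu for r in team1) - sum(r.mu for r in team2)
    sum_sigma = sum(r.sigma ** 2 for r in list(team1) + list(team2))
    n = len(team1) + len(team2)
    return ts.cdf(delta_mu / math.sqrt(n * ts.beta ** 2 + sum_sigma))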
Example #12
 def findRanks(self, pairs):
     from trueskill import TrueSkill, Rating
     env = TrueSkill(draw_probability=0.0)
     d = defaultdict(Rating)
     for p in pairs:
         d[p["winner"]], d[p["loser"]] = env.rate_1vs1(
             d[p["winner"]], d[p["loser"]])
     for i in d:
         d[i] = round(env.expose(d[i]), 2)
     return d
Example #13
def compute_trueskill(player_names, rankings):
    env = TrueSkill()
    ratings = {name: env.create_rating() for name in player_names}

    for game_ranking in rankings:
        game_player_names, ranks = zip(*[(player_name, rank) for player_name, (_, _, rank) in game_ranking.items()])
        rating_groups = env.rate([(ratings[player_name],) for player_name in game_player_names], ranks)
        for player_name, (rating,) in zip(game_player_names, rating_groups):
            ratings[player_name] = rating

    return ratings
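The shape of rankings is implicit in the unpacking above: each game is a dict mapping a player name to a 3-tuple whose last element is that player's rank (lower is better). A hypothetical call showing the expected shape:

# Two games among three players; the first two tuple slots are unused here.
rankings = [
    {'alice': (None, None, 0), 'bob': (None, None, 1)},
    {'carol': (None, None, 0), 'alice': (None, None, 1)},
]
ratings = compute_trueskill(['alice', 'bob', 'carol'], rankings)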
Example #14
def generate_scores(voteFile):
    # function to generate scores using TrueSkill using a vote file given in the format:
    # image1 image2 winning-image

    f1 = open(voteFile)

    mu0 = 25
    var0 = (mu0 / 3.0)**2
    beta2 = var0 / 4.0
    tau2 = var0 / 100.0
    prob_draw = 0.1333
    ts = TrueSkill(beta2, tau2, prob_draw, var0)
    params = {}
    vals = [[], [], [], [], []]
    c = 0

    for vote in f1:
        vote = vote.strip().split()
        win = 0
        if vote[1] == vote[2]: win = 1
        if vote[0] not in params: params[vote[0]] = (mu0, var0)
        if vote[1] not in params: params[vote[1]] = (mu0, var0)
        if not win:
            params[vote[0]], params[vote[1]] = ts.update_rating(
                params[vote[0]], params[vote[1]], False)
        else:
            params[vote[1]], params[vote[0]] = ts.update_rating(
                params[vote[1]], params[vote[0]], False)
        if c % 10000 == 0:
            # Snapshot the population statistics of the current ratings
            mus = [value[0] for value in params.values()]
            sigmas = [value[1] for value in params.values()]

            mu_mu = np.mean(mus)
            sigma_mu = np.mean(sigmas)
            mu_sigma = np.std(np.array(mus))
            sigma_sigma = np.std(np.array(sigmas))

            vals[0].append(mu_mu)
            vals[1].append(sigma_mu)
            vals[2].append(mu_sigma)
            vals[3].append(sigma_sigma)
            vals[4].append(c)

        c += 1

    f1.close()

    return params, ts, vals
Example #15
    def __init__(self):
        self.env = TrueSkill(draw_probability=0.02)
        self.trueskills = {}
        self.events = {}
        self.nicknames = {}
        self.processed_matches = set()

        self.session = requests.Session()
        self.session.headers.update(self.HEADERS)

        self.get_previous_matches()
Example #17
 def __init__(self,
              mu=DEFAULT_MU,
              sigma=DEFAULT_SIGMA,
              beta=DEFAULT_BETA,
              tau=DEFAULT_TAU,
              draw_probability=DEFAULT_DRAW):
     """ mu - the initial mean of ratings
         sigma - the initial standard deviation of ratings
         beta - the distance that guarantees about an 80% chance of winning
         tau - the dynamic factor
         draw_probability - the draw probability of the game"""
     self._ts = TrueSkill(mu, sigma, beta, tau, draw_probability)
     self._ratings = self._create_ratings()
Example #18
def test_deprecated_methods():
    env = TrueSkill()
    r1, r2, r3 = Rating(), Rating(), Rating()
    deprecated_call(t.transform_ratings, [(r1, ), (r2, ), (r3, )])
    deprecated_call(t.match_quality, [(r1, ), (r2, ), (r3, )])
    deprecated_call(env.Rating)
    deprecated_call(env.transform_ratings, [(r1, ), (r2, ), (r3, )])
    deprecated_call(env.match_quality, [(r1, ), (r2, ), (r3, )])
    deprecated_call(env.rate_1vs1, r1, r2)
    deprecated_call(env.quality_1vs1, r1, r2)
    deprecated_call(lambda: Rating().exposure)
    dyn = TrueSkill(draw_probability=t.dynamic_draw_probability)
    deprecated_call(dyn.rate, [(r1, ), (r2, )])
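Here t is the trueskill module itself, imported under that alias in the library's test suite. Several of the deprecated spellings above have direct replacements; a minimal sketch:

from trueskill import TrueSkill

env = TrueSkill()
r1, r2 = env.create_rating(), env.create_rating()  # replaces env.Rating()
(r1,), (r2,) = env.rate([(r1,), (r2,)])            # replaces transform_ratings
quality = env.quality([(r1,), (r2,)])              # replaces match_quality
conservative = env.expose(r1)                      # replaces Rating().exposure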
Example #19
 def calculate(cls, game: Game):
     env = TrueSkill()
     team_won = {
         nick: cls.from_model_rating(game.rating_before(nick))
         for nick in game.nicks_won
     }
     team_lost = {
         nick: cls.from_model_rating(game.rating_before(nick))
         for nick in game.nicks_lost
     }
     rated_rating_groups = env.rate([team_won, team_lost], ranks=[0, 1])
     for team in rated_rating_groups:
         for nick in team:
             game.set_rating_after(nick, cls.to_model_rating(team[nick]))
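rate accepts dicts as rating groups and preserves their keys, which is what lets the loop above write results back per nick. A minimal sketch of that behavior:

from trueskill import TrueSkill

env = TrueSkill()
won, lost = {'alice': env.create_rating()}, {'bob': env.create_rating()}
won, lost = env.rate([won, lost], ranks=[0, 1])  # keys survive the update
assert set(won) == {'alice'} and set(lost) == {'bob'}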
Example #20
    def __init__(self, save_path: Path = DEFAULT_SAVE_PATH) -> None:
        """
        Construct statistics object from save file stored on disk.

        :param save_path: Path to file that contains saved games.
        """
        assert isinstance(save_path, Path)
        self.save_path = save_path
        self._load_data()

        # Use the same TrueSkill parameters in the entire class
        self.env = TrueSkill()

        # Load the scipy backend (choose_backend returns its cdf/pdf/ppf functions)
        backends.choose_backend("scipy")
Example #21
 def __init__(self):
     # All the Players involved in this ranking. Maps player names to
     # Ratings.
     self.players = dict()
     # The TrueSkill environment
     self.ts_env = TrueSkill()
     self.total_games = 0
Example #22
def calculate_scores():
    env = TrueSkill(draw_probability=0.14)
    # trueskill.setup(draw_probability=0.14)
    scores = load_previous_scores()
    vote_count = 0

    with open(FLAGS.votes_file, 'r') as votes_file:
        for line in votes_file:
            img_1, img_2, winner = line.strip().split(",")

            img_1_ts = scores[img_1]
            img_2_ts = scores[img_2]

            if winner == "0":
                #winner left
                img_1_ts_new, img_2_ts_new = rate_1vs1(img_1_ts, img_2_ts)
            else:
                #winner right
                img_2_ts_new, img_1_ts_new = rate_1vs1(img_2_ts, img_1_ts)

            scores[img_1] = img_1_ts_new
            scores[img_2] = img_2_ts_new

            vote_count += 1

            if vote_count % 1000 == 0:
                print("Processed {} votes".format(vote_count))

    get_highest_rated(scores)

    save_scores(scores)
Example #23
class RankPreviewer:
    env = TrueSkill(mu=MU, sigma=SIGMA, beta=BETA, tau=TAU, draw_probability=0.01)

    @classmethod
    def get_current_rank_for_players(cls, report) -> List[Rating]:
        return [DB.db.get_s1_player_stats(pl).get_rating() for pl in
                report.players]

    @classmethod
    def calc_new_ranks(cls, report: DB.RankedMatch, old_ranks: List[Rating]) -> List[Rating]:
        try:
            if report.scrapped:
                return [Rating(mu=i.mu-20, sigma=i.sigma) for i in old_ranks]
            new_ranks = cls.to_1d(
                cls.env.rate([(i,) for i in old_ranks], ranks=[pos for pos in report.players_pos.values()])
            )
        except ValueError as e:
            logger.error(f"{type(e).__name__}: {e}")
            return old_ranks
        return new_ranks

    @classmethod
    def get_ranks_preview(cls, report) -> Dict[int, float]:
        old_ranks = cls.get_current_rank_for_players(report)
        new_ranks = cls.calc_new_ranks(report, old_ranks)
        return dict(zip(report.players, [SKILL(new) - SKILL(old) for new, old in zip(new_ranks, old_ranks)]))

    @staticmethod
    def to_1d(ls: List[Iterable]) -> List:
        return sum((list(i) for i in ls), [])
Example #24
def win_probability(team1, team2):
    delta_mu = team1.mu - team2.mu
    sum_sigma = (team1.sigma**2) + (team2.sigma**2)
    size = 2
    # denom = math.sqrt(size * (0.05 * 0.05) + sum_sigma)
    denom = math.sqrt(sum_sigma)
    # ts = TrueSkill()
    return TrueSkill().cdf(delta_mu / denom)
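The denominator above omits the performance-noise term that the commented-out line hints at (and that Example #44's predict includes). A sketch of the more common beta-inclusive form, assuming a trueskill environment:

import math
from trueskill import TrueSkill

def win_probability_1vs1(r1, r2, env=None):
    env = env or TrueSkill()
    delta_mu = r1.mu - r2.mu
    sum_sigma = r1.sigma ** 2 + r2.sigma ** 2
    denom = math.sqrt(2 * env.beta ** 2 + sum_sigma)  # 2 = total players
    return env.cdf(delta_mu / denom)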
Example #25
class HexPlayer(ABC):
    ENV = TrueSkill(mu=25, sigma=8.333, draw_probability=0.00)

    def __init__(self):
        self.rating = HexPlayer.ENV.create_rating()

    @abstractmethod
    def get_move(self, board, colour, renders):
        pass
Example #26
    def calculate_rating_change(self):
        """
        Calculate Points using the following formula
        A win is worth 5 points, second place is worth 2 points
        """
        player_records = sorted(self.player_records,
                                key=lambda r: r.player_placement)
        teams = [{
            r.player.name: r.player.trueskill_rating
        } for r in player_records]

        ranks = [r.player_placement for r in player_records]

        env = TrueSkill(mu=DEFAULT_RANK_POINTS,
                        sigma=DEFAULT_RANK_ELASTICITY,
                        draw_probability=0.0)
        rated_rating_groups = env.rate(teams, ranks=ranks)
        self._update_ranked_data(player_records, rated_rating_groups)
Example #28
File: models.py Project: Cheetar/ludo-AI
 def game(self):
     TrueSkill(backend='mpmath').cdf  # touch the env so the mpmath backend is loaded/validated
     players = self.choose_players()
     if players is not None:
         winner = self.play(players)
         if self.ranking_game:
             self.update_score(players, winner)
             for p in self.participants.all():
                 print(p.rating)
Example #30
 def __init__(self, size, players):
     self.size = size
     self.players = players
     self.durations = [0 for _ in range(len(players))]
     self.ratings = [
         TrueSkill(mu=25, sigma=8 + 1 / 3,
                   draw_probability=0).create_rating()
         for _ in range(len(players))
     ]
     self.ratings_log = [[0] for _ in range(len(players))]
Example #31
def createTrueskillRating():
    ts = TrueSkill(draw_probability=0.01)  # 0.01 is an arbitrary small number
    beta = 25 / 6  # default value

    def win_probability(p1, p2):
        delta_mu = p1.mu - p2.mu
        sum_sigma = p1.sigma * p1.sigma + p2.sigma * p2.sigma
        denom = np.sqrt(2 * (beta * beta) + sum_sigma)
        return ts.cdf(delta_mu / denom)

    submit = sample_sub_pd
    submit[['Season', 'Team1', 'Team2']] = submit.apply(lambda r:pd.Series([int(t) for t in r.ID.split('_')]), axis=1)

    df_tour = reg_season_compact_pd
    teamIds = np.unique(np.concatenate([df_tour.WTeamID.values, df_tour.LTeamID.values]))
    ratings = { tid:ts.Rating() for tid in teamIds }

    def feed_season_results(season):
        print("season = {}".format(season))
        df1 = df_tour[df_tour.Season == season]
        for r in df1.itertuples():
            ratings[r.WTeamID], ratings[r.LTeamID] = rate_1vs1(ratings[r.WTeamID], ratings[r.LTeamID])

    def update_pred(season):
        # NOTE: this beta is local to update_pred; win_probability above still
        # uses beta = 25 / 6 from the enclosing scope.
        beta = np.std([r.mu for r in ratings.values()])
        print("beta = {}".format(beta))
        submit.loc[submit.Season==season, 'Pred'] = submit[submit.Season==season].apply(lambda r:win_probability(ratings[r.Team1], ratings[r.Team2]), axis=1)

    for season in sorted(df_tour.Season.unique()[:-1]): # exclude last 4 years [:-4]/ last 1 year [:-1]
        feed_season_results(season)

#    update_pred(2014)
#    feed_season_results(2014)
#    update_pred(2015)
#    feed_season_results(2015)
#    update_pred(2016)
#    feed_season_results(2016)
#    update_pred(2017)
    feed_season_results(2017)
    update_pred(2018)

    submit.drop(['Season', 'Team1', 'Team2'], axis=1, inplace=True)
    submit.to_csv('Data/Predictions/trueskill_results2018.csv', index=None)
Example #33
    def rateGameTrueSkill1v1(self, results):

        # Establish TrueSkill environment
        env = TrueSkill()

        # Rate the FFA game
        rating_groups = []
        for i in range(0, len(results)):
            mu = self.getPlayerMu(results[i][1])
            sigma = self.getPlayerSigma(results[i][1])
            rating = env.create_rating(mu, sigma)
            rating_groups.append((rating, ))
        rated_rating_groups = env.rate(rating_groups)

        # Record the results in the database
        for i in range(0, len(rated_rating_groups)):
            mu = rated_rating_groups[i][0].mu
            sigma = rated_rating_groups[i][0].sigma
            rating = mu - (3 * sigma)
            self.setPlayerStats(results[i][1], [rating, mu, sigma, 0])
Example #34
def extract(game):
    data = json.loads(game)
    i = 0
    positions = ['team1def', 'team1of', 'team2def', 'team2of']
    games = []
    for item in data:
        game_rep = []
        for position in positions:
            id_tmp = item.get(position).get('player').get('id')
            nickname_tmp = item.get(position).get('player').get('nickname')
            playerposition_tmp = item.get(position).get('playerposition')
            mu_tmp = item.get(position).get('player').get('mu')
            sigma_tmp = item.get(position).get('player').get('sigma')
            # create_rating() returns the Rating; the TrueSkill env itself is not one
            rating_tmp = TrueSkill(mu=mu_tmp, sigma=sigma_tmp).create_rating()
            p_tmp = Player(nickname_tmp, playerposition_tmp, 25, id_tmp)
            p_tmp.rating = rating_tmp
            p_tmp.winner_flag = item.get(position).get('winner')
            game_rep.append(p_tmp)
        games.append(game_rep)
    return games
Example #35
def compute_trueskill_ranking(matches):
    env = TrueSkill(0, 0.5)
    env.beta = 0.025 * (0.5 ** 2) * len(matches)
    env.tau = 0
    player_objects = defaultdict(lambda: env.create_rating())
    for match in matches:
        system_name0, system_name1, win0, win1 = match
        player0 = player_objects[system_name0]
        player1 = player_objects[system_name1]

        if win0 == 1 and win1 == 0:
            new_player0, new_player1 = rate_1vs1(player0, player1)
        elif win1 == 1 and win0 == 0:
            new_player1, new_player0 = rate_1vs1(player1, player0)
        else:
            new_player0, new_player1 = rate_1vs1(player0, player1, drawn=True)

        player_objects[system_name0] = new_player0
        player_objects[system_name1] = new_player1
    ranking = sorted(player_objects.items(), key=lambda x: x[1].mu, reverse=True)
    return ranking
Example #36
def make_match(teams, smash):
    # Ranks per team: 0 = winner, then 1, 2, 3... for the losing teams
    results = list(range(len(teams)))

    # Extract rank object of each player
    rating_teams = extract_ranks(teams)

    env = TrueSkill()
    results_teams = env.rate(rating_teams, ranks=results)

    # Map results back to teams list
    for rank_team, team in zip(results_teams, teams):
        for rank, player in zip(rank_team, team):
            player.last_mu = player.rank.mu
            player.rank = rank

    save_ranks(teams, smash)
    match_to_confirm = None
    return "Partida registrada"
Example #37
    def calculate_rating_change(self):
        """
        Calculate Points using the following formula
        A win is worth 3 points
        """
        team1 = [r for r in self.player_records if r.player_placement == 1]
        team2 = [r for r in self.player_records if r.player_placement == 2]
        player_records = sorted(
            self.player_records, key=lambda r: r.player_placement)
        teams = []
        for team_records in [team1, team2]:
            team = {}
            for record in team_records:
                team[record.player.name] = record.player.trueskill_rating
            teams.append(team)

        ranks = [0, 1]

        env = TrueSkill(
            mu=DEFAULT_RANK_POINTS, sigma=DEFAULT_RANK_ELASTICITY,
            draw_probability=0.0)
        rated_rating_groups = env.rate(teams, ranks=ranks)
        self._update_ranked_data(player_records, rated_rating_groups)
Example #38
File: utils.py Project: l1f7/foosleague
def regen_expose(match):
    env = TrueSkill(draw_probability=0)
    ratings = []
    today = datetime.today()

    season = Season.objects.filter(league=match.league, start__lte=today, end__gte=today)

    players = Player.objects.filter(
        id__in=LeagueMember.objects.filter(league=match.league).values_list('player__id', flat=True))
    player_lookup = {}
    for p in players:

        rating = env.create_rating(p.current_mu, p.current_sigma)
        sh, _ = ExposeHistory.objects.get_or_create(player=p, match=match, season=season[0])
        sh.ts_expose = env.expose(rating)
        sh.save()

        p.ts_expose = env.expose(rating)
        p.save()
        ratings.append(rating)
        player_lookup.update({rating: p})

    leaderboard = sorted(ratings, key=env.expose, reverse=True)
Example #40
def get_best_comp():
	env = TrueSkill(draw_probability = 0)
	comp_ratings = defaultdict(lambda: env.create_rating())
	comp_counts = defaultdict(int)
	comp_win_rate = defaultdict(lambda: [0,0])
	for i in range(100):
		games = get_games(i * NUM_GAMES, NUM_GAMES)
		for g in games:
			teamA_comp, teamB_comp, teamA_won = anaylze_match(g)

			if teamA_comp == teamB_comp:
				continue
			if tuple() in teamA_comp or tuple() in teamB_comp:
				continue

			teamA_rating = comp_ratings[teamA_comp]
			teamB_rating = comp_ratings[teamB_comp]

			comp_counts[teamA_comp] += 1
			comp_counts[teamB_comp] += 1

			comp_win_rate[teamA_comp][1] += 1
			comp_win_rate[teamB_comp][1] += 1
			if teamA_won:
				comp_win_rate[teamA_comp][0] += 1
				teamA_rating, teamB_rating = rate_1vs1(teamA_rating, teamB_rating)
			else:
				comp_win_rate[teamB_comp][0] += 1
				# rate_1vs1 returns (winner, loser); team B won here
				teamB_rating, teamA_rating = rate_1vs1(teamB_rating, teamA_rating)
			comp_ratings[teamA_comp] = teamA_rating
			comp_ratings[teamB_comp] = teamB_rating
		if i % 10 == 0:
			print(i)

	leaderboard = sorted([(comp_win_rate[k][0] / float(comp_win_rate[k][1]), v, k) for k, v in comp_ratings.items()], reverse=True)
	for l in leaderboard:
		print(l, comp_counts[l[2]])
Example #41
File: utils.py Project: l1f7/foosleague
def update_trueskill(match):
    winner = match.winner

    if match.team_1 == winner:
        loser = match.team_2
    else:
        loser = match.team_1

    winners = winner.players.all()
    losers = loser.players.all()

    winner_ratings = []
    loser_ratings = []
    env = TrueSkill(draw_probability=0)
    other_players = Player.objects.all().exclude(id__in=match.team_1.players.all()).exclude(id__in=match.team_2.players.all())
    # NOTE: create_rating() does not register anything with the environment,
    # so this loop has no effect on the rating update below.
    for o in other_players:
        env.create_rating(o.current_mu, o.current_sigma)

    for w in winners:
        winner_ratings.append(env.create_rating(w.current_mu, w.current_sigma))

    for l in losers:
        loser_ratings.append(env.create_rating(l.current_mu, l.current_sigma))

    winner_ratings, loser_ratings = env.rate([winner_ratings, loser_ratings])



    for counter, p in enumerate(winners):
        p.ts_mu = winner_ratings[counter].mu
        p.ts_sigma = winner_ratings[counter].sigma
        p.save()

        sh, _ = StatHistory.objects.get_or_create(player=p,
                                                  match=match,
                                                  )
        sh.season = match.season
        sh.ts_mu = winner_ratings[counter].mu
        sh.ts_sigma = winner_ratings[counter].sigma
        sh.save()
        # p.save()

    for counter, p in enumerate(losers):
        p.ts_mu = loser_ratings[counter].mu
        p.ts_sigma = loser_ratings[counter].sigma
        p.save()
        sh, _ = StatHistory.objects.get_or_create(player=p,
                                                  match=match,
                                                  )
        sh.season = match.season
        sh.ts_mu = loser_ratings[counter].mu
        sh.ts_sigma = loser_ratings[counter].sigma
        sh.save()
Example #42
class TotalRanking:
    __instance = None
    lock = threading.Lock()

    @classmethod
    def instance(cls):
        if not cls.__instance:
            with cls.lock:
                if not cls.__instance:
                    cls.__instance = cls()
        return cls.__instance

    def __init__(self):
        # All the Players involved in this ranking. Maps player names to
        # Ratings.
        self.players = dict()
        # The TrueSkill environment
        self.ts_env = TrueSkill()
        self.total_games = 0

    def add_player(self, player_name):
        """
        Add a player to this raking. Players must have unique, consistent
        names.

        :param player_name: The player name
        """
        if player_name not in self.players:
            self.players[player_name] = {"ts": self.ts_env.create_rating(), "win": 0, "loss": 0}

    def process_game_record(self, team1, team2):
        """
        Process a game, and update the rankings accordingly. Team 1 is the
        winning team.
        TODO: Update to include 3-team (KoTH)

        :param team1: A list of player names on team one. Can contain one
            player
        :param team2: A list of player names on team two. Can contain one
            player
        """
        if not isinstance(team1, list) or not isinstance(team2, list):
            raise ValueError("Arguments must be lists of players")

        for player in team1 + team2:
            self.add_player(player)

        for player in team1:
            self.players[player]["win"] += 1
        for player in team2:
            self.players[player]["loss"] += 1

        try:
            t1_ratings = {p: self.players[p]["ts"] for p in team1}
            t2_ratings = {p: self.players[p]["ts"] for p in team2}
        except KeyError as e:
            raise ValueError("Could not find a player: {}".format(e))

        new_rankings = self.ts_env.rate([t1_ratings, t2_ratings])
        for team in new_rankings:
            for name, newrating in team.items():
                self.players[name]["ts"] = newrating
        self.total_games += 1

    def player_rankings(self):
        """
        :return: Players, with their rankings, sorted by rank.
        """
        ranks = [(p, r) for p, r in self.players.items()]
        ranks = sorted(ranks, key=lambda pr: self.ts_env.expose(pr[1]["ts"]), reverse=True)
        return ranks
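A hypothetical usage of the singleton above; team 1 is always the winning team:

ranking = TotalRanking.instance()
ranking.process_game_record(['alice', 'bob'], ['carol', 'dave'])
ranking.process_game_record(['alice'], ['carol'])
for name, record in ranking.player_rankings():
    print(name, record['win'], record['loss'], ranking.ts_env.expose(record['ts']))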
Example #43
File: models.py Project: bchruszc/rook
    def rankings(self, season=None, rating_system=TRUESKILL):
        if season:
            games = Game.objects.season(season)
        else:
            games = Game.objects.all()

        games = games.prefetch_related('scores')  # QuerySet methods return new querysets
        all_players = self.all_as_dict()

        # We've got the games - run through them and calculate a ranking
        ratings = {}
        trueskill_ratings = {}

        # For the selected season, create a time/ratings pair for each player
        ratings_history = {}

        game_counts = {}
        win_counts = {}

        env = TrueSkill()

        for g in games:
            scores = g.scores.all()

            utils.update_elo(scores, ratings)
            utils.update_trueskill(scores, trueskill_ratings)

            for s in scores:
                if s.player_id not in game_counts.keys():
                    game_counts[s.player_id] = 1
                    ratings_history[all_players[s.player_id]] = []
                    win_counts[s.player_id] = 0
                else:
                    game_counts[s.player_id] += 1

                if s.rank == 1:
                    win_counts[s.player_id] += 1

                # For every player in this game, log their updated ratings at this point in time
                elo = round(env.expose(trueskill_ratings[s.player_id]), 1)
                ratings_history[all_players[s.player_id]].append({'x': g.played_date.timestamp(), 'y': elo})

            last_game_scores = scores

        ranked_player_ids = ratings.keys()
        ranked_players = []

        for player_id in ranked_player_ids:
            player = all_players[player_id]
            player.rating = round(ratings[player_id])
            player.trueskill = round(env.expose(trueskill_ratings[player_id]), 1)
            player.trueskill_hover = "mu={0:0.1f}, sigma={1:0.2f}".format(trueskill_ratings[player_id].mu,
                                                                          trueskill_ratings[player_id].sigma)
            ranked_players.append(player)

        ranked_players = utils.sortAndRankPlayers(ranked_players, rating_system)

        for p in ranked_players:
            p.rating_change = None
            p.game_count = game_counts[p.id]
            p.win_count = win_counts[p.id]
            for s in last_game_scores:
                if p.id == s.player_id:
                    p.rating_change = round(s.rating_change)
                    p.trueskill_change = round(s.trueskill_change, 2)
                    break

        rankings = Rankings()
        rankings.game_count = len(games)
        rankings.player_list = ranked_players

        return rankings, ratings_history
Example #44
class FrcTrueSkill:
    # Constants for sending requests to TBA.
    TBA_API_BASE = 'https://www.thebluealliance.com/api/v2'
    HEADERS = {"X-TBA-App-Id": "frc-4774:TrueSkill:1.0"}

    # Ranks for TrueSkill.rate. Lower is better.
    WON = 0
    LOST = 1

    TIE = (WON, WON)
    RED_WIN = (WON, LOST)
    BLUE_WIN = (LOST, WON)

    def __init__(self):
        self.env = TrueSkill(draw_probability=0.02)
        self.trueskills = {}
        self.events = {}
        self.nicknames = {}
        self.processed_matches = set()

        self.session = requests.Session()
        self.session.headers.update(self.HEADERS)

        self.get_previous_matches()

    def init_teams(self, red_alliance, blue_alliance):
        for team in red_alliance + blue_alliance:
            if team not in self.trueskills:
                self.trueskills[team] = self.env.Rating()

    def update(self, match_data):
        if match_data['key'] in self.processed_matches:
            return None

        alliances = match_data['alliances']
        red_teams = [int(x[3:]) for x in alliances['red']['teams']]
        blue_teams = [int(x[3:]) for x in alliances['blue']['teams']]

        self.init_teams(red_teams, blue_teams)
        # Update ratings based on result
        corrected_scores = self.correct_scores(match_data)

        if corrected_scores.red == corrected_scores.blue:  # Tied
            if corrected_scores.red == -1:
                return None  # No result yet
            ranks = self.TIE
        elif corrected_scores.red > corrected_scores.blue:  # Red beat blue
            ranks = self.RED_WIN
        else:
            ranks = self.BLUE_WIN

        new_red, new_blue = self.env.rate([
            [self.trueskills[t] for t in red_teams],
            [self.trueskills[t] for t in blue_teams]], ranks)

        # Store the new values
        for team, rating in zip(red_teams + blue_teams, new_red + new_blue):
            self.trueskills[team] = rating
        self.processed_matches.add(match_data['key'])
        return ranks

    def predict(self, red_alliance, blue_alliance):
        self.init_teams(red_alliance, blue_alliance)
        a = [self.trueskills[t] for t in red_alliance]
        b = [self.trueskills[t] for t in blue_alliance]
        delta_mu = sum([x.mu for x in a]) - sum([x.mu for x in b])
        sum_sigma = sum([x.sigma ** 2 for x in a + b])
        player_count = len(a) + len(b)
        denominator = (player_count * (self.env.beta**2) + sum_sigma) ** 0.5
        return self.env.cdf(delta_mu / denominator)

    def skill(self, team):
        if team not in self.trueskills:
            self.trueskills[team] = self.env.Rating()
        return self.env.expose(self.trueskills[team])

    def get_teams_at_event(self, event):
        if event not in self.events:
            # We haven't got this one yet
            teams = self.session.get("%s/event/%s/teams" % (self.TBA_API_BASE, event))
            teams = teams.json()
            self.events[event] = [team["team_number"] for team in teams]
            for team in teams:
                self.nicknames[team["team_number"]] = team["nickname"]
        return self.events[event]

    def get_previous_matches(self):
        all_matches = []
        events = self.session.get(self.TBA_API_BASE + "/events/2017")
        events = events.json()

        for event in events:
            if event['event_type'] > 5:
                continue
            if event['start_date'] <= str(datetime.date(datetime.today()+timedelta(days=1))):
                matches = self.session.get("%s/event/%s/matches" % (self.TBA_API_BASE, event['key']))
                matches = matches.json()
                all_matches += matches
        all_matches.sort(key=lambda m: m['time'])

        for match in all_matches:
            self.update(match)

    def correct_scores(self, match):
        alliances = match['alliances']
        red = alliances['red']
        blue = alliances['blue']

        score = match['score_breakdown']
        red_score = red['score']
        blue_score = blue['score']
        if score is None:
            return Scores(red_score, blue_score)

        red_stats = score['red']
        blue_stats = score['blue']

        if red_stats["rotorRankingPointAchieved"]:
            red_score += 100
        if red_stats["kPaRankingPointAchieved"]:
            red_score += 20

        if blue_stats["rotorRankingPointAchieved"]:
            blue_score += 100
        if blue_stats["kPaRankingPointAchieved"]:
            blue_score += 20

        return Scores(red_score, blue_score)
Example #45
 def assert_predictable_draw_probability(r1, r2, drawn=False):
     dyn = TrueSkill(draw_probability=t.dynamic_draw_probability)
     sta = TrueSkill(draw_probability=calc((r1,), (r2,), dyn))
     assert dyn.rate_1vs1(r1, r2, drawn) == sta.rate_1vs1(r1, r2, drawn)
Example #46
def test_invalid_rating_groups():
    env = TrueSkill()
    with raises(ValueError):
        env.validate_rating_groups([])
    with raises(ValueError):
        env.validate_rating_groups([()])
    # need multiple groups not just one
    with raises(ValueError):
        env.validate_rating_groups([(Rating(),)])
    # empty group is not allowed
    with raises(ValueError):
        env.validate_rating_groups([(Rating(),), ()])
    # all groups should be same structure
    with raises(TypeError):
        env.validate_rating_groups([(Rating(),), {0: Rating()}])
Example #47
def test():

    msg='Hello World!\n'

    payload = {'teams':[{'alice':{}},{'bob':{}}]}

    # environment
    t = TrueSkill(backend='mpmath')

    def new_repr(self):
        return '(mu={:.3f}, sigma={:.3f}, exposure={:.3f})'.format(self.mu, self.sigma, t.expose(self))
    Rating.__repr__ = new_repr

    # pre-match we build the teams and players
    teams = []
    for team in payload.get('teams'):
        players = {}
        for name, data in team.items():
            players[name] = Rating(mu=data.get('mu'), sigma=data.get('sigma'))
        teams.append(players)

    msg += '{}'.format(teams) + '\n'

    # and assess the quality of the matchup
    quality = t.quality(teams)
    fair = quality >= .5
    msg += '{:.1%} chance to tie. {}'.format(quality, 'game on!' if fair else 'this may be a stomp') + '\n'

    #
    # # # # MATCH IS PLAYED
    #

    msg += "the match was won by {}".format(teams[0].keys()) + '\n'

    # post-match we get new ratings from the results (winner=rank 0, loser=rank 1), so alice wins this time

    diffmu1 = teams[1]['bob'].mu
    diffexposure1 = teams[1]['bob'].exposure

    teams = t.rate(teams)

    diffmu1 = teams[1]['bob'].mu - diffmu1
    diffexposure1 = teams[1]['bob'].exposure - diffexposure1

    print('after first match the mu diff is {mu} and the exposure diff is {exp}'.format(mu=diffmu1, exp=diffexposure1))

    for _ in range(1000):
        teams = [teams[1], teams[0]]
        teams = t.rate(teams)
        print('quality={:.1%}'.format(t.quality(teams)))


    diffmu2 = teams[1]['bob'].mu
    diffexposure2 = teams[1]['bob'].exposure

    teams = t.rate(teams)

    diffmu2 = teams[1]['bob'].mu - diffmu2
    diffexposure2 = teams[1]['bob'].exposure - diffexposure2

    print('after ~1,000 matches the mu diff is {mu} and the exposure diff is {exp}'.format(mu=diffmu2, exp=diffexposure2))


    print('quality={}'.format(t.quality(teams)))

    for team in teams:
        for name, rating in team.items():
            rating.realexposure = t.expose(rating)

    msg += '{}'.format(teams) + '\n'

    """Return a friendly HTTP greeting."""
    return '<pre>\n'+msg+'\n</pre>'
Example #48
def test_exposure():
    env = TrueSkill()
    assert env.expose(env.create_rating()) == 0
    env = TrueSkill(1000, 200)
    assert env.expose(env.create_rating()) == 0
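Both assertions hold because expose subtracts k * sigma with k = env.mu / env.sigma, so a freshly created rating always exposes to exactly zero:

from trueskill import TrueSkill

env = TrueSkill(1000, 200)   # mu=1000, sigma=200, so k = 5
r = env.create_rating()
assert env.expose(r) == r.mu - 5 * r.sigma == 0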
Example #49
class HorseModel(object):
    def __init__(self, mu=DEFAULT_MU, sigma=DEFAULT_SIGMA, beta=DEFAULT_BETA,
                 tau=DEFAULT_TAU, draw_probability=DEFAULT_DRAW):
        """ mu - the initial mean of ratings
            sigma - the initial standard deviation of ratings
            beta - the distance that guarantees about an 80% chance of winning
            tau - the dynamic factor
            draw_probability - the draw probability of the game"""
        self._ts = TrueSkill(mu, sigma, beta, tau, draw_probability)
        self._ratings = self._create_ratings()

    def _create_ratings(self):
        return defaultdict(lambda: {'rating': (self._ts.create_rating(),),
                                    'n_races': 0,
                                    'n_wins': 0})

    def get_params(self):
        return {'mu': self._ts.mu,
                'sigma': self._ts.sigma,
                'beta': self._ts.beta,
                'tau': self._ts.tau,
                'draw_probability': self._ts.draw_probability}

    def fit_race(self, race):
        runners = race['selection']
        rating_groups = [self._ratings[r]['rating'] for r in runners]
        new_ratings = self._ts.rate(rating_groups, race['ranking'])

        for i, runner in enumerate(runners):
            horse = self._ratings[runner]
            horse['rating'] = new_ratings[i]
            horse['n_races'] += 1
            if runner in race['winners']:
                horse['n_wins'] += 1

    def fit(self, sorted_races, log_incremental=None):
        ratings = self._ratings
        stats = {'n_races': 0}

        for (i, race) in enumerate(sorted_races):
            runners = list(race['selection'])
            if len(runners) < 2:
                continue
            stats['n_races'] += 1
            rating_groups = [ratings[r]['rating'] for r in runners]
            new_ratings = self._ts.rate(rating_groups, race['ranking'])

            # assert len(new_ratings) == len(runners)
            diff = []
            # use j here: i is the outer race index, checked for logging below
            for j, runner in enumerate(runners):
                horse = ratings[runner]
                horse['rating'] = new_ratings[j]
                horse['n_races'] += 1
                if runner in race['winners']:
                    horse['n_wins'] += 1
                if log_incremental is not None:
                    diff.append((runner, horse))

            if log_incremental is not None:
                log_incremental(race, dict(diff))

            if i % 100 == 0:
                logging.info('HorseModel.fit: %d races done' % i)

        stats['n_runners'] = len(ratings)
        return stats

    def pwin_mc(self, runners, nwins=1, prior_for_unobs=True):
        assert prior_for_unobs
        N = 1000
        R = empty((len(runners), N))
        for i, r in enumerate(self.get_ratings(runners)):
            R[i, :] = randn(N) * r.sigma + r.mu

        for i in range(N):
            ar = argsort(-array(R[:, i])).tolist()
            R[:, i] = [ar.index(x) for x in range(len(ar))]
        return np.sum(R < nwins, 1) / float(N)

    def pwin_trapz(self, runners):
        ratings = self.get_ratings(runners)
        N = len(ratings)
        mus = array([x.mu for x in ratings])
        sigmas = array([x.sigma for x in ratings])

        pwin = empty(N)
        start, end, nsteps = min(mus) - 3 * max(sigmas), max(mus) + 3 * max(sigmas), 5000

        us, p = linspace(start, end, nsteps), empty(nsteps)
        cdfs = [norm.cdf(us, loc=mus[i], scale=sigmas[i]) for i in range(N)]
        for i in range(N):
            p.fill(1.0)
            for j in range(N):
                if i == j:
                    p *= norm.pdf(us, loc=mus[j], scale=sigmas[j])
                else:
                    p *= cdfs[j]
            pwin[i] = trapz(p, dx=(end - start) / float(nsteps))
        return pwin

    def get_ratings(self, runners):
        return [self._ratings[x]['rating'][0] for x in runners]

    def get_runs(self, runners):
        return array([self._ratings[x]['n_races'] for x in runners])

    def to_dict(self):
        ratings = [{'runner': runner,
                    'mu': data['rating'][0].mu,
                    'sigma': data['rating'][0].sigma,
                    'n_races': data['n_races'],
                    'n_wins': data['n_wins']}
                   for runner, data in self._ratings.items()]
        return {
            'ts': {
                'mu': self._ts.mu,
                'sigma': self._ts.sigma,
                'beta': self._ts.beta,
                'tau': self._ts.tau,
                'draw_probability': self._ts.draw_probability
            },
            'ratings': ratings
        }

    @staticmethod
    def from_dict(hm_dict):
        hm = HorseModel(**hm_dict['ts'])
        for r in hm_dict['ratings']:
            hm._ratings[r['runner']] = {'rating': (Rating(r['mu'], r['sigma']), ),
                                        'n_races': r['n_races'],
                                        'n_wins': r['n_wins']}
        return hm
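A hypothetical round trip through the class above, assuming the DEFAULT_* constants are defined (for instance, the trueskill defaults):

hm = HorseModel()
hm.fit_race({'selection': ['alpha', 'beta'], 'ranking': [0, 1], 'winners': ['alpha']})
hm2 = HorseModel.from_dict(hm.to_dict())  # serialize and restore
assert hm2.get_runs(['alpha', 'beta']).tolist() == [1, 1]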
Example #50
def test_custom_environment():
    env = TrueSkill(draw_probability=.50)
    t1, t2 = generate_teams([1, 1], env=env)
    rated = env.rate([t1, t2])
    assert almost(rated) == [(30.267, 7.077), (19.733, 7.077)]
Example #51
File: match.py Project: artnez/faceoff
def rebuild_rankings(db, league_id):
    # exclusive lock is needed to prevent race conditions when multiple people
    # are simultaneously reporting a match.
    if not db.is_building:
        db.execute('begin exclusive')

    # delete all existing rankings
    db.execute('DELETE FROM ranking WHERE league_id=?', [league_id])

    skill = TrueSkill()

    # generate a local player ranking profile based on user id. all matches
    # will be traversed and this object will be populated to build the
    # rankings.
    players = {}
    for match in db.search('match', league_id=league_id):
        w = match['winner_id']
        l = match['loser_id']

        # create ranking profile if hasn't been added yet
        for p in [w, l]:
            if p not in players:
                players[p] = {
                    'id': p, 'win': 0, 'loss': 0, 'win_streak': 0,
                    'loss_streak': 0, 'games': 0, 'rating': skill.Rating()}

        # define ranking profile properties, this will go into the db and will
        # be viewable on the standings page
        players[w]['games'] += 1
        players[w]['win'] += 1
        players[w]['win_streak'] += 1
        players[w]['loss_streak'] = 0
        players[l]['games'] += 1
        players[l]['loss'] += 1
        players[l]['win_streak'] = 0
        players[l]['loss_streak'] += 1

        # finally, record the match with trueskill and let it calculate ranks
        wr = players[w]['rating']
        lr = players[l]['rating']
        (wr, lr) = skill.transform_ratings([(wr,), (lr,)])
        players[w]['rating'] = wr[0]
        players[l]['rating'] = lr[0]

    # sort the players based on their ranking
    rankings = list(players.values())
    rankings.sort(key=lambda p: p['rating'].exposure, reverse=True)

    # create rankings
    for (i, r) in enumerate(rankings):
        fields = {
            'league_id': league_id, 'user_id': r['id'], 'rank': (i+1),
            'mu': r['rating'].mu, 'sigma': r['rating'].sigma, 'wins': r['win'],
            'losses': r['loss'], 'win_streak': r['win_streak'],
            'loss_streak': r['loss_streak'], 'games': r['games']
            }
        db.insert('ranking', pk=False, **fields)

    if not db.is_building:
        db.commit()
Example #52
File: utils.py Project: l1f7/foosleague
def catch_up(match):

    winner = match.winner

    if match.team_1 == winner:
        loser = match.team_2
    else:
        loser = match.team_1

    winners = winner.players.all()
    losers = loser.players.all()

    winner_ratings = []
    loser_ratings = []

    for w in winners:
        winner_ratings.append(Rating(w.current_mu, w.current_sigma))

    for l in losers:
        loser_ratings.append(Rating(l.current_mu, l.current_sigma))

    winner_ratings, loser_ratings = rate([winner_ratings, loser_ratings])
    for counter, p in enumerate(winners):
        # p.ts_mu = winner_ratings[counter].mu
        # p.ts_sigma = winner_ratings[counter].sigma

        sh, _ = StatHistory.objects.get_or_create(player=p,
                                                  match=match,
                                                  )
        sh.season = match.season
        sh.ts_mu = winner_ratings[counter].mu
        sh.ts_sigma = winner_ratings[counter].sigma
        sh.save()
        # p.save()

    for counter, p in enumerate(losers):
        # p.ts_mu = loser_ratings[counter].mu
        # p.ts_sigma = loser_ratings[counter].sigma
        sh, _ = StatHistory.objects.get_or_create(player=p,
                                                  match=match,
                                                  )
        sh.season = match.season
        sh.ts_mu = loser_ratings[counter].mu
        sh.ts_sigma = loser_ratings[counter].sigma
        sh.save()

    env = TrueSkill(draw_probability=0)
    ratings = []

    players = Player.objects.filter(
        id__in=LeagueMember.objects.filter(league=match.league).values_list('player__id', flat=True))
    player_lookup = {}

    today = datetime.today()

    season = Season.objects.filter(league=match.league, start__lte=today, end__gte=today)

    for p in players:

        rating = env.create_rating(p.current_mu, p.current_sigma)
        sh, _ = ExposeHistory.objects.get_or_create(player=p, match=match, season=season[0])
        sh.ts_expose = env.expose(rating)
        sh.save()

        p.ts_expose = env.expose(rating)
        p.save()
        ratings.append(rating)
        player_lookup.update({rating: p})

    leaderboard = sorted(ratings, key=env.expose, reverse=True)
Example #53
from flask import Flask
from flask_restful import Resource, Api, reqparse
from bson import json_util, CodecOptions, SON
from pymongo import MongoClient, DESCENDING
from trueskill import Rating, rate_1vs1, TrueSkill
from tabulate import tabulate
import datetime, os, requests
import math

app = Flask(__name__)
api = Api(app)
parser = reqparse.RequestParser()
parser.add_argument('token')
parser.add_argument('text')
parser.add_argument('response_url')

env = TrueSkill(draw_probability=0.0, backend='mpmath', tau=.41666666667)
env.make_as_global()

def floor(mu, sigma):
    floor_factor = 2.0
    return mu - floor_factor * sigma

def win_chance(player1, player2):
    deltaMu = player1['mu'] - player2['mu']
    sumSigma = player1['sigma'] ** 2 + player2['sigma'] ** 2
    denominator = math.sqrt( 2 * env.beta ** 2 + sumSigma )
    return env.cdf(deltaMu / denominator)

class Root(Resource):
    def post(self):
        args = parser.parse_args()