def evaluate_game(game: Game) -> float:
    """Returns the expected win probability of the blue team over the red team."""
    blue_team_ratings = [
        trueskill.Rating(mu=p.trueskill_mu, sigma=p.trueskill_sigma)
        for p in game.teams.BLUE
    ]
    red_team_ratings = [
        trueskill.Rating(mu=p.trueskill_mu, sigma=p.trueskill_sigma)
        for p in game.teams.RED
    ]
    delta_mu = sum(r.mu for r in blue_team_ratings) - sum(r.mu for r in red_team_ratings)
    sum_sigma = sum(
        r.sigma**2 for r in itertools.chain(blue_team_ratings, red_team_ratings))
    size = len(blue_team_ratings) + len(red_team_ratings)
    denominator = math.sqrt(size * (trueskill.BETA * trueskill.BETA) + sum_sigma)
    ts = trueskill.global_env()
    return ts.cdf(delta_mu / denominator)
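# A minimal usage sketch of the team formula above, assuming the default
# TrueSkill environment; the four ratings here are made-up values.
import math

import trueskill

alice, bob = trueskill.Rating(mu=28.0, sigma=3.0), trueskill.Rating(mu=27.0, sigma=3.0)
carol, dave = trueskill.Rating(mu=25.0, sigma=8.3), trueskill.Rating(mu=26.0, sigma=8.3)

delta_mu = (alice.mu + bob.mu) - (carol.mu + dave.mu)
sum_sigma = sum(r.sigma**2 for r in (alice, bob, carol, dave))
denom = math.sqrt(4 * trueskill.BETA**2 + sum_sigma)
print(trueskill.global_env().cdf(delta_mu / denom))  # expected P(first pair wins)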
def win_probability(team1, team2):
    delta_mu = sum(r.mu for r in team1) - sum(r.mu for r in team2)
    sum_sigma = sum(r.sigma**2 for r in itertools.chain(team1, team2))
    size = len(team1) + len(team2)
    denom = math.sqrt(size * (BETA * BETA) + sum_sigma)
    env = ts.global_env()  # avoid shadowing the `trueskill` module name locally
    return round(env.cdf(delta_mu / denom), 2)
def win_probability(team1, team2):
    delta_mu = sum(r.mu for r in team1) - sum(r.mu for r in team2)
    sum_sigma = sum(r.sigma ** 2 for r in chain(team1, team2))
    size = len(team1) + len(team2)
    denom = sqrt(size * (BETA * BETA) + sum_sigma)
    ts = global_env()
    return ts.cdf(delta_mu / denom)
def win_proba(user, goal):
    delta_mu = user.mu - goal.mu
    sum_sigma = user.sigma**2 + goal.sigma**2
    size = 2
    denom = math.sqrt(size * (ts.BETA * ts.BETA) + sum_sigma)
    tse = ts.global_env()
    return tse.cdf(delta_mu / denom)
def confidence_interval_z(confidence_level: float) -> float:
    if confidence_level >= 1.0 or confidence_level <= 0.0:
        raise ValueError(
            'confidence_interval {} is out of range (0, 1)'.format(confidence_level))
    alpha = 1 - confidence_level
    return -trueskill.global_env().ppf(alpha / 2.0)
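# A hedged example of confidence_interval_z above: the default backend's ppf
# is the standard normal inverse CDF, so the z for a 95% level is ~1.96.
import trueskill

rating = trueskill.Rating()      # defaults: mu=25, sigma=25/3
z = confidence_interval_z(0.95)  # ~1.959964
print(rating.mu - z * rating.sigma, rating.mu + z * rating.sigma)  # 95% interval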
def rate_team(self, matches):
    """Runs the TrueSkill rating system to determine the skill estimate of each team."""
    trueskill.setup(draw_probability=self.draw_prob(matches))
    ratings = dict(
        zip(self.all_teams,
            [trueskill.global_env().create_rating()] * len(self.all_teams)))
    for match in matches:
        wteam = str(match['Wteam'])
        lteam = str(match['Lteam'])
        wscore = match['Wscore']
        lscore = match['Lscore']
        if wteam in ratings and lteam in ratings:
            if wscore < lscore:
                wteam, lteam = lteam, wteam
                wscore, lscore = lscore, wscore
            ratings[wteam], ratings[lteam] = \
                trueskill.rate_1vs1(ratings[wteam], ratings[lteam],
                                    drawn=self.is_equal_score(wscore, lscore))
            wscore -= self.vic_margin  # only matters if the loop below is re-enabled
            # while wscore - lscore >= self.vic_margin:
            #     ratings[wteam], ratings[lteam] = \
            #         trueskill.rate_1vs1(ratings[wteam], ratings[lteam],
            #                             drawn=self.is_equal_score(wscore, lscore))
            #     wscore -= self.vic_margin
    return ratings
def ts_win_prob(team1, team2):
    delta_mu = sum(r.mu for r in team1) - sum(r.mu for r in team2)
    sum_sigma = sum(r.sigma ** 2 for r in itertools.chain(team1, team2))
    size = len(team1) + len(team2)
    denom = math.sqrt(size * (ts_env.beta * ts_env.beta) + sum_sigma)
    ts = trueskill.global_env()
    return ts.cdf(delta_mu / denom)
def from_division(normalised_value: int) -> 'Skill':
    from slapp_py.core_classes.division import DIVISION_UNKNOWN_VAL
    if normalised_value != DIVISION_UNKNOWN_VAL and normalised_value < 9:
        return Skill(rating=global_env().create_rating(
            mu=(9 - normalised_value) * trueskill.MU * 0.5))
    else:
        return Skill()
def win_probability(self, player, opponent):
    delta_mu = player.mu - opponent.mu
    sum_sigma = player.sigma**2 + opponent.sigma**2
    ts = trueskill.global_env()
    BETA = ts.beta
    denom = math.sqrt(2 * (BETA * BETA) + sum_sigma)
    return ts.cdf(delta_mu / denom)
def win_probability(self, team1, team2):
    delta_mu = sum(r['mu'] for r in team1) - sum(r['mu'] for r in team2)
    sum_sigma = sum(r['sigma'] ** 2 for r in itertools.chain(team1, team2))
    size = len(team1) + len(team2)
    denom = math.sqrt(size * (BETA * BETA) + sum_sigma)
    ts = global_env()
    return ts.cdf(delta_mu / denom)
def startEvaluation(self):
    import trueskill
    self.home_adv_adjust()
    trueskill.setup(draw_probability=self.draw_prob())
    team_rating = dict(
        zip(self.teams,
            [trueskill.global_env().create_rating()] * len(self.teams)))
    for match in self.regular_matches:
        wteam = match[self.regular_titles.index('Wteam')]
        lteam = match[self.regular_titles.index('Lteam')]
        wscore = match[self.regular_titles.index('Wscore')]
        lscore = match[self.regular_titles.index('Lscore')]
        if wteam in team_rating and lteam in team_rating:
            if wscore < lscore:
                wteam, lteam = lteam, wteam
            team_rating[wteam], team_rating[lteam] = trueskill.rate_1vs1(
                team_rating[wteam], team_rating[lteam],
                drawn=self.is_equal_score(wscore, lscore))
    probs = []
    teams = self.teams  # `teams` was an undefined bare name in the original
    for i in range(0, len(teams)):
        for j in range(i + 1, len(teams)):
            prob = self.win_probability(team_rating[teams[i]], team_rating[teams[j]])
            probs.append(prob)
    return probs
def win_probability(team1, team2, env=None):
    env = env if env else trueskill.global_env()
    delta_mu = sum(r.mu for r in team1) - sum(r.mu for r in team2)
    sum_sigma = sum(r.sigma**2 for r in itertools.chain(team1, team2))
    size = len(team1) + len(team2)
    denom = math.sqrt(size * (env.beta * env.beta) + sum_sigma)
    return env.cdf(delta_mu / denom)
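# Usage sketch for the env-aware variant above: a custom environment with a
# larger beta (here, a hypothetical 2x the default) flattens the probability.
import trueskill

loose = trueskill.TrueSkill(beta=2 * trueskill.BETA)
a, b = trueskill.Rating(mu=30), trueskill.Rating(mu=25)
print(win_probability([a], [b]))             # default environment
print(win_probability([a], [b], env=loose))  # closer to 0.5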
def win_probability(Acomp, Bcomp, env=None):
    # A default argument of `trueskill.global_env()` is captured once at import
    # time, so later setup() calls would be ignored; resolve it at call time.
    env = env or trueskill.global_env()
    delta_mu = sum(Acomp[r].mu for r in Acomp) - sum(Bcomp[r].mu for r in Bcomp)
    sum_sigma = (sum(Acomp[r].sigma ** 2 for r in Acomp)
                 + sum(Bcomp[r].sigma ** 2 for r in Bcomp))
    size = len(Acomp) + len(Bcomp)
    denom = math.sqrt(size * (env.beta * env.beta) + sum_sigma)
    return env.cdf(delta_mu / denom)
def get_leaderboard(cls):
    """
    Returns a sorted list of the players. The best player is first in the list.

    :rtype: list[Player]
    """
    players = cls.get_all_players()
    return sorted(players,
                  key=lambda player: trueskill.global_env().expose(player.rating),
                  reverse=True)
def win_probability(team1, team2):
    delta_mu = team1.mu - team2.mu
    sum_sigma = team1.sigma**2 + team2.sigma**2
    size = 2
    denom = math.sqrt(size * (0.05 * 0.05) + sum_sigma)  # hard-coded beta of 0.05
    ts = trueskill.global_env()
    return ts.cdf(delta_mu / denom)
def win_probability_teams(self, team1, team2):
    delta_mu = sum(r.mu for r in team1) - sum(r.mu for r in team2)
    sum_sigma = sum(r.sigma**2 for r in team1) + sum(r.sigma**2 for r in team2)
    size = len(team1) + len(team2)
    # annotate with the class, not the `trueskill` module itself
    ts: trueskill.TrueSkill = trueskill.global_env()
    denom = math.sqrt(size * (ts.beta**2) + sum_sigma)
    return ts.cdf(delta_mu / denom)
def win_probability_teams(team1, team2):
    delta_mu = team1.mu - team2.mu
    sum_sigma = (team1.sigma ** 2) + (team2.sigma ** 2)
    size = 2
    denom = math.sqrt(size * (trueskill.BETA * trueskill.BETA) + sum_sigma)
    ts = trueskill.global_env()
    return ts.cdf(delta_mu / denom)
def standard_normal_percentile_range(estimate: Gaussian) -> tuple[float, float]:
    cdf = trueskill.global_env().cdf
    z_star = confidence_interval_z(CONFIDENCE_LEVEL)
    lower_bound = cdf(estimate.mu - z_star * estimate.sigma)
    upper_bound = cdf(estimate.mu + z_star * estimate.sigma)
    return lower_bound, upper_bound
def win_probability(team1, team2, ts_env=None):
    beta = ts_env.beta if ts_env is not None else trueskill.BETA
    delta_mu = sum(r.mu for r in team1) - sum(r.mu for r in team2)
    sum_sigma = sum(r.sigma**2 for r in itertools.chain(team1, team2))
    size = len(team1) + len(team2)
    denom = math.sqrt(size * (beta * beta) + sum_sigma)
    ts = trueskill.global_env()
    return ts.cdf(delta_mu / denom)
def win_probability(team1, team2):
    delta_mu = sum(r.mu for r in team1) - sum(r.mu for r in team2)
    sum_sigma = sum(r.sigma**2 for r in itertools.chain(team1, team2))
    size = len(team1) + len(team2)
    # 4.166666666666667 is the default beta (25 / 6) written out as a literal
    denom = math.sqrt(size * (4.166666666666667 * 4.166666666666667) + sum_sigma)
    ts = trueskill.global_env()
    return ts.cdf(delta_mu / denom)
def ts_winp(a, b, env=None):
    """Win probability of player a over b given their trueskill ratings.

    Formula found at
    https://github.com/sublee/trueskill/issues/1#issuecomment-244699989"""
    if not env:
        env = trueskill.global_env()
    epsilon = trueskill.calc_draw_margin(env.draw_probability, 2)
    denom = math.sqrt(a.sigma**2 + b.sigma**2 + (2 * env.beta**2))
    # `phi` was undefined in the original; the environment's cdf is the
    # standard normal CDF it was standing in for.
    return env.cdf((a.mu - b.mu - epsilon) / denom)
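# Quick sanity check of the draw-margin variant above: with a nonzero draw
# probability, P(a wins) + P(b wins) should leave roughly the configured draw
# probability as remainder. The ratings here are made-up values.
import trueskill

env = trueskill.TrueSkill(draw_probability=0.10)
a, b = env.create_rating(mu=27), env.create_rating(mu=25)
p_a, p_b = ts_winp(a, b, env), ts_winp(b, a, env)
print(p_a, p_b, 1 - p_a - p_b)  # remainder approximates the draw probability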
def win_probability_single(team1, team2, team1sigma, team2sigma):
    delta_mu = team1 - team2
    sum_sigma = (team1sigma ** 2) + (team2sigma ** 2)
    size = 2
    denom = math.sqrt(size * (trueskill.BETA * trueskill.BETA) + sum_sigma)
    ts = trueskill.global_env()
    return ts.cdf(delta_mu / denom)
def win_probability(team1, team2):
    team1 = list(map(get_rating, team1))
    team2 = list(map(get_rating, team2))
    delta_mu = sum(r.mu for r in team1) - sum(r.mu for r in team2)
    sum_sigma = sum(r.sigma**2 for r in itertools.chain(team1, team2))
    size = len(team1) + len(team2)
    denom = math.sqrt(size * (trueskill.BETA * trueskill.BETA) + sum_sigma)
    ts = trueskill.global_env()
    return ts.cdf(delta_mu / denom)
def get_winrate(rating_1: Rating, rating_2: Rating):
    delta_mu = rating_1.mu - rating_2.mu
    if delta_mu >= 0:
        beta = trueskill.global_env().beta
        denom = sqrt(2 * (2 * beta * beta
                          + rating_1.sigma * rating_1.sigma
                          + rating_2.sigma * rating_2.sigma))
        return (erf(delta_mu / denom) + 1.0) / 2.0
    else:
        return 1.0 - get_winrate(rating_2, rating_1)
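# The erf form above is algebraically identical to the Gaussian-CDF form used
# elsewhere in this collection, since cdf(x) = (erf(x / sqrt(2)) + 1) / 2;
# a quick check with made-up ratings:
from math import sqrt

import trueskill

a, b = trueskill.Rating(mu=28), trueskill.Rating(mu=24)
beta = trueskill.global_env().beta
d = sqrt(2 * beta**2 + a.sigma**2 + b.sigma**2)
print(get_winrate(a, b) - trueskill.global_env().cdf((a.mu - b.mu) / d))  # ~0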
def win_probability(team1, team2, home_advantage=0):
    delta_mu = sum(r.mu for r in team1) - (sum(r.mu for r in team2) + home_advantage)
    sum_sigma = sum(r.sigma**2 for r in itertools.chain(team1, team2))
    size = len(team1) + len(team2)
    denom = math.sqrt(size * (trueskill.BETA * trueskill.BETA) + sum_sigma)
    ts = trueskill.global_env()
    return ts.cdf(delta_mu / denom)
def win_probability(team1, team2): """Ripped from trueskill.org""" team1r = [p.rating for p in team1] team2r = [p.rating for p in team2] delta_mu = sum(r.mu for r in team1r) - sum(r.mu for r in team2r) sum_sigma = sum(r.sigma ** 2 for r in chain(team1r, team2r)) size = len(team1r) + len(team2r) denom = math.sqrt(size * (BETA * BETA) + sum_sigma) ts = trueskill.global_env() return ts.cdf(delta_mu / denom)
def win_probability(team1, team2):
    env = ts.global_env()
    delta_mu = sum(team1[r].mu for r in team1) - sum(team2[r].mu for r in team2)
    sum_sigma = sum(team1[r].sigma**2 for r in team1)
    sum_sigma += sum(team2[r].sigma**2 for r in team2)
    # sum_sigma = sum(r.sigma ** 2 for r in itertools.chain(team1, team2))
    size = len(team1) + len(team2)
    denom = math.sqrt(size * (env.beta * env.beta) + sum_sigma)
    return env.cdf(delta_mu / denom)
def compute_division_boundaries():
    divisions = [
        f'{tier} {division}'
        for tier in [
            'Boosted Animal', 'Bonobo', 'House Cat', 'Pug',
            'Wolf', 'Tiger', 'Tiger Shark', 'Blue Whale',
        ]
        for division in ['IV', 'III', 'II', 'I']
    ]
    division_boundaries = [(float('-inf'), divisions[0])]
    min_rating = trueskill.global_env().mu - 3 * trueskill.global_env().sigma
    max_rating = trueskill.global_env().mu + 3 * trueskill.global_env().sigma
    division_boundaries.extend((
        min_rating + i * (max_rating - min_rating) / (len(divisions) - 1),
        divisions[i + 1],
    ) for i in range(len(divisions) - 1))
    return division_boundaries
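# A hypothetical lookup built on the boundaries above: bisect on the numeric
# thresholds to map a rating value to a division name. `division_for` is not
# part of the original code.
import bisect

boundaries = compute_division_boundaries()
thresholds = [threshold for threshold, _ in boundaries]

def division_for(rating_value: float) -> str:
    return boundaries[bisect.bisect_right(thresholds, rating_value) - 1][1]

print(division_for(25.0))  # a mid-range rating lands in a mid-tier division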
def _win_probability(self, team1, team2):
    '''
    Adapted from code from Juho Snellman
    https://github.com/sublee/trueskill/issues/1#issuecomment-149762508
    '''
    delta_mu = sum(r.mu for r in team1) - sum(r.mu for r in team2)
    sum_sigma = sum(r.sigma**2 for r in itertools.chain(team1, team2))
    size = len(team1) + len(team2)
    ts = global_env()
    denom = math.sqrt(size * (ts.beta * ts.beta) + sum_sigma)
    return ts.cdf(delta_mu / denom)
def win_probability(team1, team2): """ :param team1: list of Rating objects :param team2: list of Rating objects :return: expected win probability of team1 over team2 """ delta_mu = sum(r.mu for r in team1) - sum(r.mu for r in team2) sum_sigma = sum(r.sigma**2 for r in itertools.chain(team1, team2)) size = len(team1) + len(team2) denominator = math.sqrt(size * (trueskill.BETA * trueskill.BETA) + sum_sigma) ts = trueskill.global_env() return ts.cdf(delta_mu / denominator)
# The decorator was missing in the original; the `yield` only makes sense
# under contextlib.contextmanager.
@contextlib.contextmanager
def substituted_trueskill(*args, **kwargs):
    env = trueskill.global_env()
    params = [['mu', env.mu],
              ['sigma', env.sigma],
              ['beta', env.beta],
              ['tau', env.tau],
              ['draw_probability', env.draw_probability],
              ['backend', env.backend]]
    # merge settings with previous TrueSkill object
    for x, arg in enumerate(args):
        params[x][1] = arg
    params = dict(params)
    for kw, arg in kwargs.items():
        params[kw] = arg
    try:
        # setup the environment
        yield trueskill.setup(**params)
    finally:
        # revert the environment
        trueskill.setup(env=env)
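# Usage sketch, assuming the @contextlib.contextmanager decorator added above:
# ratings inside the block are computed with the substituted settings, and the
# previous global environment is restored afterwards.
import trueskill

with substituted_trueskill(draw_probability=0.0):
    winner, loser = trueskill.rate_1vs1(trueskill.Rating(), trueskill.Rating())
print(trueskill.global_env().draw_probability)  # back to the previous value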
def get_score(self):
    """
    :rtype: float
    """
    return (trueskill.expose(self.rating) + trueskill.global_env().mu) * 3
def win_probability(rating1, rating2, env=None):
    if env is None:
        env = trueskill.global_env()
    exp = (rating1.mu - rating2.mu) / env.beta
    n = 4. ** exp
    return n / (n + 1)
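# Note: the function above is an Elo-like logistic approximation
# (4 ** (delta_mu / beta)); unlike the CDF-based formulas in this collection
# it ignores the ratings' sigmas. A quick side-by-side with made-up ratings:
import trueskill

a, b = trueskill.Rating(mu=30), trueskill.Rating(mu=25)
env = trueskill.global_env()
print(win_probability(a, b))  # exponential approximation
print(env.cdf((a.mu - b.mu) / (2 * env.beta**2 + a.sigma**2 + b.sigma**2) ** 0.5))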
def trueskillExpectedWinRate(r1, r2, blueSide=0):
    deltaMu = r1.mu - r2.mu + blueSide  # blueSide is an additive mu bonus
    sumSigma = r1.sigma ** 2 + r2.sigma ** 2
    denominator = math.sqrt(4 * (200 * 200) + sumSigma)  # hard-coded beta of 200
    return ts.global_env().cdf(deltaMu / denominator)