Example #1
 def __init__(self, allow_draws=False):
     if not ENABLED:
         self._env = _DummyEnv()
         LOGGER.info("Skill module disabled")
     elif allow_draws:
         self._env = trueskill.TrueSkill(
             draw_probability=DEFAULT_DRAW_CHANCE,
             backend='mpmath')
         LOGGER.info("Created 'draws' model with %s backend, dps %s",
                     self._env.backend, mpmath.mp.dps)
     else:
         self._env = trueskill.TrueSkill(draw_probability=0.0,
                                         backend='mpmath')
         LOGGER.info("Created 'normal' model with %s backend, dps %s",
                     self._env.backend, mpmath.mp.dps)
Example #2
def eval_tsorg(env, pid1, pid2, scores):
    nplayers = 1 + max(np.max(pid1), np.max(pid2))
    ratings = np.zeros((nplayers, 2), dtype=float)
    ratings[:, 0] = env[0]
    ratings[:, 1] = env[1]

    # env arrives as a (mu, sigma) pair; build a TrueSkill environment from it
    env = ts.TrueSkill(*list(env))
    r1_progression = np.zeros((len(pid1), 2), dtype=float)
    r2_progression = np.zeros((len(pid2), 2), dtype=float)

    for n in range(len(pid1)):
        r1 = env.create_rating(*list(ratings[pid1[n], :]))
        r2 = env.create_rating(*list(ratings[pid2[n], :]))

        r1_progression[n, :] = ratings[pid1[n], :]
        r2_progression[n, :] = ratings[pid2[n], :]

        # rate_1vs1 is a module-level helper; pass env so its settings apply
        if scores[n] > 0:
            r1, r2 = ts.rate_1vs1(r1, r2, drawn=False, env=env)

        elif scores[n] == 0:
            r1, r2 = ts.rate_1vs1(r1, r2, drawn=True, env=env)

        else:
            r2, r1 = ts.rate_1vs1(r2, r1, drawn=False, env=env)

        ratings[pid1[n], 0] = r1.mu
        ratings[pid1[n], 1] = r1.sigma
        ratings[pid2[n], 0] = r2.mu
        ratings[pid2[n], 1] = r2.sigma

    return r1_progression, r2_progression, ratings
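A quick usage sketch for eval_tsorg above, with hypothetical data: env is passed as a (mu, sigma) pair, pid1/pid2 hold the player index on each side of every game, and scores encodes win/draw/loss for the first player.

import numpy as np

# hypothetical games: player 0 beats 1, players 1 and 2 draw, player 2 beats 0
pid1 = np.array([0, 1, 0])
pid2 = np.array([1, 2, 2])
scores = np.array([1, 0, -1])

r1_hist, r2_hist, final_ratings = eval_tsorg((25.0, 25.0 / 3.0), pid1, pid2, scores)
print(final_ratings)  # per-player [mu, sigma] after all games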
Example #3
def multiplayer_trueskill_applier(df, race_id_col, player1_col, player2_col, starting_mu=25.0, starting_std=25.0/3.0):
    env = trueskill.TrueSkill(draw_probability=0.015)
    # Get a list of unique players
    unique_players = df[player1_col].unique().tolist() + df[player2_col].unique().tolist()

    # Initialise a dictionary with default ratings for each player
    ratings_dict = {player: trueskill.Rating(mu=starting_mu, sigma=starting_std) for player in unique_players}
    
    # Create dict where we will update based on the race_id and horse_id
    before_game_ratings = []
    after_game_ratings = []
    # Loop over races in each group
    for idx, row in df.iterrows():
       
        before_game_ratings.append([ratings_dict[row[player1_col]], ratings_dict[row[player2_col]]])
        
        if row.winner_1 == 1:
            new_r1, new_r2 = trueskill.rate_1vs1(ratings_dict[row[player1_col]], ratings_dict[row[player2_col]], env=env)
            ratings_dict[row[player1_col]] = new_r1
            ratings_dict[row[player2_col]] = new_r2
        elif row.winner_2 == 1:
            new_r2, new_r1 = trueskill.rate_1vs1(ratings_dict[row[player2_col]], ratings_dict[row[player1_col]], env=env)
            ratings_dict[row[player1_col]] = new_r1
            ratings_dict[row[player2_col]] = new_r2
        elif row.draw == 1:
            new_r1, new_r2 = trueskill.rate_1vs1(ratings_dict[row[player1_col]], ratings_dict[row[player2_col]], drawn=True, env=env)
            ratings_dict[row[player1_col]] = new_r1
            ratings_dict[row[player2_col]] = new_r2
        else:
            print('error: row has no winner or draw flag set')
        after_game_ratings.append([ratings_dict[row[player1_col]], ratings_dict[row[player2_col]]])
    return before_game_ratings, after_game_ratings, ratings_dict
Example #4
def win_probability(team1, team2):
    delta_mu = sum(r.mu for r in team1) - sum(r.mu for r in team2)
    sum_sigma = sum(r.sigma**2 for r in itertools.chain(team1, team2))
    size = len(team1) + len(team2)
    TS = ts.TrueSkill()
    denom = math.sqrt(size * (ts.BETA * ts.BETA) + sum_sigma)
    return TS.cdf(delta_mu / denom)
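A minimal usage sketch for win_probability above, assuming trueskill is imported as ts (as in the snippet) and two hypothetical two-player teams:

import trueskill as ts

team1 = [ts.Rating(mu=27.0, sigma=4.0), ts.Rating(mu=24.0, sigma=5.0)]
team2 = [ts.Rating(), ts.Rating()]  # fresh players at the defaults (mu=25, sigma=25/3)
print(win_probability(team1, team2))  # slightly above 0.5: team1's combined mu is higher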
Example #5
    def Pwin_singles_lowsigma(self, name1, name2):
        """
        Useful for when both players have a low uncertainty (sigma < 2)
        and in professional matches, where absolute differences in skill
        are smaller.

        Uses an original empirical formula calibrated on all professional
        games from the BWF website before and during 2017.
        """

        rA = self.ratings[name1]
        rB = self.ratings[name2]
        deltaMu = rA.mu - rB.mu
        rsss = math.sqrt(rA.sigma**2 + rB.sigma**2)
        trueskill_winprob = trueskill.TrueSkill().cdf(deltaMu / rsss)
        # Scale with a best-fit probit function (created using historical
        # BWF data), then clamp the result to [0, 1].
        prob = ndtri(trueskill_winprob) / (np.exp(-(rsss / 2.0) + 3.8) + 1) + 0.5
        return min(max(prob, 0.0), 1.0)
Example #6
def get_skill_ranking(rounds):
    """
    Returns a list of (player, rating) pairs, sorted by exposed skill (best first).

    rounds is an iterable of pairs, where the first element is a list of winning team members,
    and the second is a list of losing team members.
    """
    skiller = trueskill.TrueSkill(draw_probability=0)

    players = defaultdict(skiller.create_rating)

    for (winners, losers) in rounds:
        win_team = [players[p] for p in winners]
        lose_team = [players[p] for p in losers]

        if win_team and lose_team:
            (win_team_rated,
             lose_team_rated) = skiller.rate([win_team, lose_team], ranks=[0, 1])

            for (player_id, rating) in zip(winners, win_team_rated):
                players[player_id] = rating

            for (player_id, rating) in zip(losers, lose_team_rated):
                players[player_id] = rating

    leaderboard = list(players.items())
    leaderboard.sort(key=lambda x: skiller.expose(x[1]), reverse=True)

    return leaderboard
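A quick usage sketch for get_skill_ranking above, with hypothetical player names:

rounds = [
    (["alice", "bob"], ["carol", "dave"]),  # alice and bob won this round
    (["alice", "carol"], ["bob", "dave"]),
    (["alice", "bob"], ["carol", "dave"]),
]
for player, rating in get_skill_ranking(rounds):
    print(player, rating.mu, rating.sigma)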
Example #7
 def Pwin_singles(self, name1, name2):
     """
     Win probability in singles
     """
     rA = self.ratings[name1]
     rB = self.ratings[name2]
     deltaMu = rA.mu - rB.mu
     rsss = math.sqrt(rA.sigma**2 + rB.sigma**2)
     return trueskill.TrueSkill().cdf(deltaMu / rsss)
Example #8
 def __init__(self, name):
     self.name = name
     self.rankingParam = dict()
     self.importedFiles = dict()
     self.namesDict = dict()
     self.ratingHist = dict()
     self.scoreHist = dict()
     self.env = trueskill.TrueSkill()
     self.namesDictInv = dict()
Example #9
 def env(self):
     if self._env is None:
         self._env = trueskill.TrueSkill(
             mu=self.mu,
             sigma=self.sigma,
             beta=self.beta,
             tau=self.tau,
             draw_probability=self.draw_probability)
     return self._env
Example #10
 def trueskill_environ(self):
     return trueskill.TrueSkill(
         mu=self.mu,
         sigma=self.sigma,
         beta=self.beta,
         tau=self.tau,
         draw_probability=self.draw_probability,
         backend='scipy'
     )
Example #11
def team_score_trueskill(ranks, team):
    team = team[team != 0]
    # ignore players not in the ranks
    scores = [ranks.get(member) for member in team]
    scores = [score for score in scores if score is not None]

    if len(scores) == 0:
        return trueskill.TrueSkill(mu=MU, sigma=SIGMA).create_rating()

    mean_mu = np.mean([score.mu for score in scores])

    # variance of the mean of independent ratings: sum of variances over n squared
    mean_var = np.sum([score.sigma**2 for score in scores]) / (len(scores)**2)
    mean_sigma = np.sqrt(mean_var)

    return trueskill.TrueSkill().create_rating(mean_mu, mean_sigma)
Example #12
def ts_update_error(x, rA1, rA2, rB1, rB2, scoreA):
    (beta, tau, dp) = x
    env = trueskill.TrueSkill(beta=beta, tau=tau,
                              draw_probability=dp,
                              backend='mpmath')
    tss = TrueSkillSystem('trueskill', env)
    (rAp, rBp) = tss.rate2p(rA1, rB1, scoreA)
    err = math.sqrt(distance(rAp, rA2)**2 + distance(rBp, rB2)**2)
    return err
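ts_update_error above relies on a TrueSkillSystem class and a distance helper that are not shown. A plausible sketch of the latter, treating ratings as points in (mu, sigma) space (an assumption, not the original definition):

import math

def distance(r1, r2):
    # Euclidean distance between two ratings in (mu, sigma) space (assumed)
    return math.hypot(r1.mu - r2.mu, r1.sigma - r2.sigma)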
Example #13
 def Pwin_doubles(self, name1, name2, name3, name4):
     """
     Win probability for a doubles pair
     """
     rAlist = [self.ratings[name1], self.ratings[name2]]
     rBlist = [self.ratings[name3], self.ratings[name4]]
     deltaMu = sum([x.mu for x in rAlist]) - sum([x.mu for x in rBlist])
     rsss = math.sqrt(
         sum([x.sigma**2
              for x in rAlist]) + sum([x.sigma**2 for x in rBlist]))
     return trueskill.TrueSkill().cdf(deltaMu / rsss)
Example #14
    def get_trueskill_environment(self, rating_type):
        trueskill_environment = self.trueskill_environment_lookup.get(rating_type.name)
        if trueskill_environment:
            return trueskill_environment

        trueskill_environment = trueskill.TrueSkill(
            beta=trueskill.SIGMA,
            draw_probability=Logs2DB.rating_type_to_draw_probability[rating_type.name],
        )

        self.trueskill_environment_lookup[rating_type.name] = trueskill_environment
        return trueskill_environment
Example #15
def doRating(dfResults):

    ts.setup(tau=0.2)  # configure the global environment used by ts.rate below
    env = ts.global_env()

    columns = ['Name', 'mu_minus_3sigma', 'numRaces', 'Rating']

    dfRatings = pd.DataFrame(columns=columns, index=dfResults.index)
    dfRatings = pd.merge(dfRatings,
                         dfResults[['numRegattas', 'lastRegatta']],
                         left_index=True,
                         right_index=True,
                         how='outer')

    dfRatings['numRaces'] = dfResults.count(axis=1)
    dfRatings['Rating'] = [env.create_rating() for _ in range(len(dfRatings))]

    for raceCol in dfResults:
        if (raceCol != 'numRegattas') and (raceCol != 'lastRegatta'):
            competed = dfRatings.index.isin(
                dfResults.index[dfResults[raceCol].notnull()])
            # wrap each rating in a 1-tuple: ts.rate expects a tuple per group
            rating_group = list(
                zip(dfRatings['Rating'][competed].T.values.tolist()))
            ranking_for_rating_group = dfResults[raceCol][
                competed].T.values.tolist()
            dfRatings.loc[competed, 'Rating'] = [
                rated[0] for rated in ts.rate(rating_group,
                                              ranks=ranking_for_rating_group)
            ]

    # conservative skill estimate: mu - 3 x sigma (the Microsoft convention)
    dfRatings['mu_minus_3sigma'] = 0.0

    for index, row in dfRatings.iterrows():
        dfRatings.loc[dfRatings.index == index, 'mu_minus_3sigma'] = float(
            row['Rating'].mu) - 3 * float(row['Rating'].sigma)

    #competed in at least 5 races and more than 1 regatta, within the last 12 months
    dfRatings = dfRatings[dfRatings['numRaces'] > 4]
    dfRatings = dfRatings[dfRatings['numRegattas'] > 1]
    dfRatings = dfRatings[
        (datetime.datetime.now() - dfRatings['lastRegatta']) /
        np.timedelta64(1, 'D') < 365]

    dfRatings['Name'] = dfRatings.index
    dfRatings.index = dfRatings['mu_minus_3sigma'].rank(
        ascending=False).astype(int)  #set index to ranking
    dfRatings.index.names = ['Rank']

    return dfRatings.sort_values('mu_minus_3sigma', ascending=False)
Example #16
 def __init__(self, player_manager,
              mu=25, sigma=25/3, beta=25/6, tau=25/300):
     self.player_manager = player_manager
     self.rank_to_player = OrderedDict()
     self.player_to_rank = {}
     self.wins = {}
     self.ts_env = trueskill.TrueSkill(
         draw_probability=0.0,
         mu=mu,
         sigma=sigma,
         beta=beta,
         tau=tau)
     self.ts_env.make_as_global()
Example #17
 def __init__(self,
              name,
              daily_sigma_decay=0,
              k=3,
              established_sigma=None,
              **kwargs):
     super(TrueSkillSystem, self).__init__(name)
     self.env = trueskill.TrueSkill(**kwargs)
     self.daily_sigma_decay = daily_sigma_decay
     self.established_sigma = established_sigma
     self.k = k
     self.two_p_draw_margin = trueskill.calc_draw_margin(
         self.env.draw_probability, 2, env=self.env)
Example #18
 def __init__(self):
     with open('auth.json', 'r') as auth_file:
         cred = json.load(auth_file)
         self.cred = service_account.Credentials.from_service_account_info(
             cred)
         self.proj = cred['project_id']
     self.tsdb = sqlite3.connect("Data/TrueSkill.sqlite")
     self.tsrate = trueskill.TrueSkill(backend='mpmath',
                                       draw_probability=0.2)
     self.oriday = date(2008, 7, 31)
     self.fresh = trueskill.Rating(8.3333, 2.7778)
     self.tags = self._query(TAGS.format(self.proj))
     tqdm.write("-" * 50 + "\nTrueSkill Runner")
Example #19
    def __init__(self, run_folder, self_play):
        self.run_folder = run_folder

        # Server from SEPIA
        self.server = EnvironmentServiceImpl(self.env_callback,
                                             self.winner_callback)

        # Which two agents are currently being played against each other
        self.active_agents = [0, 0]

        # List of agents in tournament. Will be ~50MB total
        self.agents = []

        # List of agent associated data, such as names, used for saving later
        self.agents_data = []

        # Trueskill ratings for each agent
        self.ratings = []

        # How many times each agent has been played against the others
        self.trials = 0

        # How many total trials to do before we are done
        self.num_trials = 200

        # True if player 1 is a sepia agent, not another one of our agents
        self.playing_against_sepia = not self_play

        # How many game steps have occurred
        self.iterations = 0

        # Start time of run
        self.start_time = time.time()

        # Load agents from disk
        self.load_agents()

        # Wins, losses, ties
        self.win_stats = np.zeros((len(self.agents), 3))

        # Set up the trueskill environment and make it a global env
        env = trueskill.TrueSkill(mu=25.0,
                                  sigma=8.33,
                                  beta=4.17,
                                  tau=0,
                                  draw_probability=0.05)
        env.make_as_global()

        # Initialize ratings to be all the same
        self.ratings = [trueskill.Rating() for _ in range(len(self.agents))]
        self.sepia_rating = trueskill.Rating(30, 0.00000000001)
Example #20
    def __init__(self, chat_id, conn=None, db_dir=None):
        # TODO Keep a rating snap-shot in database, to allow rollbacks.
        # TODO Script to recalculate the rankings from matches.

        self.chat_id = chat_id
        self.db_dir = db_dir

        if db_dir is None:
            # Default
            self.db_dir = Ranking.DB_DEFAULT
        elif not os.path.isdir(self.db_dir):
            logging.log(
                logging.WARNING,
                "db_dir was not a directory, defaulting to {}".format(
                    Ranking.DB_DEFAULT))
            self.db_dir = Ranking.DB_DEFAULT

        # TODO No need to keep all of the games in memory, keep current ranking in memory
        self.singles = []
        self.duals = []

        self.known_users = {}
        self.user_to_id = {}

        self.ratings = {}
        self.trueskill_env = trueskill.TrueSkill(mu=25,
                                                 sigma=8.333333333333334,
                                                 beta=4.166666666666667,
                                                 tau=0.08333333333333334,
                                                 draw_probability=0.0)
        # TODO Include leaderboard to be kept in memory

        self.rate_limits = {}
        self.rate_limit = 5

        if conn is None:

            cid = int(self.chat_id)
            str_chat = str(cid)

            db_name = str_chat if str_chat[0] != "-" else "a" + str_chat[1:]

            db_fp = os.path.join(self.db_dir, db_name + ".db")
            self.__conn = sqlite3.connect(db_fp)
        else:
            self.__conn = conn

        self.__c = self.__conn.cursor()

        self._load_from_db()
Example #21
def evalBattle(path, matchCount=18):
    dataSet = open(path).read().split('\n')
    random.shuffle(dataSet)
    cnt = len(dataSet)
    manager = Manager()
    evalData = manager.list()

    que = []

    for i in range(cnt):
        for j in range(matchCount // 2):
            if i + j == cnt - 1:
                break
            que.append(
                [evalData, i, i + j + 1, dataSet[i], dataSet[i + j + 1]])

        if i < matchCount // 2:
            for j in range(matchCount // 2 - i):
                que.append([
                    evalData, i, cnt - j - 1, dataSet[i], dataSet[cnt - j - 1]
                ])

    # s = time()
    core = cpu_count()

    p = Pool(core)
    s = time()
    print('start time:', datetime.datetime.now())
    print('length:', len(que))
    p.map(wrapper, que)
    print('end time:', datetime.datetime.now())
    print('took time:', time() - s)

    env = trueskill.TrueSkill(beta=5, draw_probability=0)
    players = [env.create_rating() for _ in range(cnt)]
    for i in evalData:
        # pass env so the custom beta and draw-probability settings are used
        players[i[0]], players[i[1]] = trueskill.rate_1vs1(
            players[i[0]], players[i[1]], env=env)

    mulist = [rating.mu for rating in players]
    dataSet = [str(mu) + poke for mu, poke in zip(mulist, dataSet)]
    dataSet = [i.split('|') for i in dataSet]

    dataSet = sorted(dataSet, key=lambda x: (x[1], float(x[0])), reverse=True)
    # priority: pokemon's Name > rating

    dataSet = ['|'.join(i) for i in dataSet]

    return dataSet
Example #22
def goko_variant(name,
                 beta_factor=1,
                 tau_factor=1,
                 draw_prob=0.05,
                 decay=0.01):
    return BoundedTrueSkillSystem('Goko %s' % name,
                                  trueskill.TrueSkill(
                                      mu=5500,
                                      sigma=2250,
                                      beta=beta_factor * 1375,
                                      tau=tau_factor * 27.5,
                                      draw_probability=draw_prob,
                                      backend='scipy'),
                                  mu_lower_bound=0,
                                  daily_sigma_decay=decay)
Example #23
 def __init__(self, *, margin, sigma, tau):
     self.env = ts.TrueSkill(
         # Initial mean of rating.
         mu=0.0,
         # Initial std. dev. of rating.
         sigma=sigma,
         # Scales the sigmoid function (denominator is sqrt(2) * beta).
         beta=1 / sqrt(2.0),
         # Std. dev. of brownian dynamics.
         tau=tau,
         # Draw probability if skill difference is 0.
         draw_probability=(2 * ndtr(margin) - 1.0),
     )
     self.margin = margin
     self.rating = collections.defaultdict(self.env.create_rating)
Example #24
def isotropish_variant(name,
                       beta_multiplier=1,
                       sigma_multiplier=1,
                       tau_multiplier=1,
                       noise_factor=0,
                       draw_prob=0.05):
    return NoisyTrueSkillSystem(
        'Isotropish %s' % name,
        trueskill.TrueSkill(mu=25,
                            sigma=sigma_multiplier * 25,
                            beta=25 * beta_multiplier,
                            tau=tau_multiplier * sigma_multiplier * 25 / 100,
                            draw_probability=draw_prob,
                            backend='scipy'),
        noise_factor=noise_factor)
Example #25
def get_leaderboard(guildid):
    '''
    Gets list of userids and TrueSkill ratings, sorted by current rating
    :return: list of (userid, TrueSkill.Rating) tuples, sorted by rating
    '''
    with shelve.open(str(guildid), writeback=True) as db:
        if 'ratings' in db:
            ratings = {
                id: ts.Rating(db['ratings'][id][0], db['ratings'][id][1])
                for id in db['ratings']
            }
            return sorted(ratings.items(),
                          key=lambda x: (x[1].mu, -x[1].sigma),
                          reverse=True)
        return None
Example #26
def win_probability(team1, team2):
    # here team1 and team2 are single Rating objects, not lists of players
    delta_mu = team1.mu - team2.mu
    sum_sigma = team1.sigma**2 + team2.sigma**2
    denom = math.sqrt(2 * trueskill.BETA**2 + sum_sigma)
    return trueskill.TrueSkill().cdf(delta_mu / denom)
Example #27
def score_horses():
	db = MongoClient().racing
	cache = {}
	query = {
	           "status": "downloaded",
	           "results_extracted": { "$exists": True },
	           "ratings_calculated": { "$exists": False },
	           "$where": "this.results.length > 4"
	        }
	warm_cache(db, cache)
	env = trueskill.TrueSkill()
	cursor = db.races.find(query,no_cursor_timeout=True).sort("race_date", 1).batch_size(500)
	for race in cursor:
		rating_groups = []
		runner_groups = []
		non_finisher_runner_group = []
		non_finisher_ratings = []
		for runner in race["results"]:
			rating = get_rating(db, cache, runner["hid"])
			if is_finisher(runner):
				runner_groups.append((runner,))
				if rating is None:
					rating_groups.append((env.create_rating(),))
				else:
					rating_groups.append((env.create_rating(rating["mu"], rating["sigma"]),))
			else:
				non_finisher_runner_group.append(runner)
				if rating is None:
					non_finisher_ratings.append(env.create_rating())
				else:
					non_finisher_ratings.append(env.create_rating(rating["mu"], rating["sigma"]))
		if len(non_finisher_runner_group) > 0:
			runner_groups.append(tuple(non_finisher_runner_group))
			rating_groups.append(tuple(non_finisher_ratings))

		if len(runner_groups) >= 2:
			rated_rating_groups = env.rate(rating_groups)
			for runner_group, rating_group in zip(runner_groups, rated_rating_groups):
				for runner, rating in zip(runner_group, rating_group):
					save_rating(db, cache, {"hid": runner["hid"],
					                        "date": race["race_date"],
					                        "mu": rating.mu,
					                        "sigma": rating.sigma})
		race["ratings_calculated"] = True
		db.races.save(race)
	cursor.close()
Example #28
def create_trueskill():

    mu = MU
    sigma = SIGMA

    beta = sigma / 2.
    tau = sigma / 100.
    # draw_probability = 0.1
    draw_probability = 0  # draws are virtually nonexistent in warzone, so this differs from the default
    backend = None

    env = trueskill.TrueSkill(mu=mu,
                              sigma=sigma,
                              beta=beta,
                              tau=tau,
                              draw_probability=draw_probability,
                              backend=backend)

    return env
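A short usage sketch for create_trueskill above, assuming the module-level MU and SIGMA constants hold the usual defaults (25.0 and 25.0 / 3.0):

env = create_trueskill()
r1 = env.create_rating()
r2 = env.create_rating()
r1, r2 = trueskill.rate_1vs1(r1, r2, env=env)  # player 1 beat player 2
print(r1.mu > r2.mu)  # True: the winner's mean estimate rises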
Example #29
def trueskill_rank(match_history, players, mu=5.00, sigma=.5):
    '''
    https://www.kaggle.com/devinanzelmo/dota-2-skill-rating-with-trueskill
    '''

    ts = trueskill.TrueSkill(draw_probability=0, mu=mu, sigma=sigma)

    # need to create a dictionary for all players containing the ratings
    rating_dict = {account_id: ts.create_rating() for account_id in players}

    rating_dict[0] = ts.create_rating(mu=0, sigma=10)

    for n, row in match_history.iterrows():
        radiance = row[['player0', 'player1', 'player2', 'player3', 'player4']]
        dire = row[['player5', 'player6', 'player7', 'player8', 'player9']]

        # ignore the anonymous players
        radiance = radiance[radiance > 0]
        dire = dire[dire > 0]

        if radiance.empty or dire.empty:
            continue

        rad_dict = {
            account_id: rating_dict[account_id]
            for account_id in radiance
        }
        dire_dict = {
            account_id: rating_dict[account_id]
            for account_id in dire
        }

        # lower rank wins, so ranks=[1, 0] records the dire side as the winner
        new_rad, new_dire = ts.rate([rad_dict, dire_dict], ranks=[1, 0])

        for account_id in new_rad.keys():
            rating_dict[account_id] = new_rad[account_id]
        for account_id in new_dire.keys():
            rating_dict[account_id] = new_dire[account_id]

    return [rating.mu for rating in rating_dict.values()
            ], [rating.sigma for rating in rating_dict.values()]
Example #30
def run_evaluation(connection, beta, tau, sample):
    teams = get_all_teams(connection)
    rounds = get_all_rounds(connection, None)

    offset = int(len(rounds) * sample)
    training_sample = rounds[:offset]
    testing_sample = rounds[offset:]
    environment = trueskill.TrueSkill(SKILL_MEAN, SKILL_STDEV, beta, tau, 0.0)

    ratings = compute_player_skills(training_sample, teams)[0]

    total = 1.0

    for rnd in testing_sample:
        winning_team = [ratings[player_id]
                        for player_id in teams[rnd['winner']]]
        losing_team = [ratings[player_id]
                       for player_id in teams[rnd['loser']]]
        total *= win_probability(environment, winning_team, losing_team)

    # geometric mean of the predicted win probabilities over the test rounds
    return total ** (1 / float(len(testing_sample)))
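run_evaluation above assumes a win_probability(environment, team1, team2) helper that is not shown. A plausible sketch, mirroring Example #4 but drawing beta and cdf from the passed environment (an assumption about the original):

import itertools
import math

def win_probability(env, team1, team2):
    delta_mu = sum(r.mu for r in team1) - sum(r.mu for r in team2)
    sum_sigma = sum(r.sigma ** 2 for r in itertools.chain(team1, team2))
    size = len(team1) + len(team2)
    denom = math.sqrt(size * env.beta ** 2 + sum_sigma)
    return env.cdf(delta_mu / denom)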