Example #1
 def rate_match(self):
     """Use TrueSkill to modify players skill"""
     # TrueSkill setup
     setup(mu=TRUESKILL_MU,
           sigma=TRUESKILL_SIGMA,
           beta=TRUESKILL_BETA,
           tau=TRUESKILL_TAU)
     if 'scipy' in backends.available_backends():
         # scipy can be used in the current environment
         backends.choose_backend(backend='scipy')
     self.player_1_trueskill_mu_before_match = self.player_1.trueskill_mu
     self.player_1_trueskill_sigma_before_match = self.player_1.trueskill_sigma
     self.player_2_trueskill_mu_before_match = self.player_2.trueskill_mu
     self.player_2_trueskill_sigma_before_match = self.player_2.trueskill_sigma
     player_1_trueskill = Rating(self.player_1_trueskill_mu_before_match,
                                 self.player_1_trueskill_sigma_before_match)
     player_2_trueskill = Rating(self.player_2_trueskill_mu_before_match,
                                 self.player_2_trueskill_sigma_before_match)
     if self.winner == self.player_1:
         new_player_1_trueskill, new_player_2_trueskill = rate_1vs1(
             player_1_trueskill, player_2_trueskill)
     else:
         new_player_2_trueskill, new_player_1_trueskill = rate_1vs1(
             player_2_trueskill, player_1_trueskill)
     self.player_1_trueskill_mu_after_match = new_player_1_trueskill.mu
     self.player_1_trueskill_sigma_after_match = new_player_1_trueskill.sigma
     self.player_2_trueskill_mu_after_match = new_player_2_trueskill.mu
     self.player_2_trueskill_sigma_after_match = new_player_2_trueskill.sigma
     self.player_1.trueskill_mu = self.player_1_trueskill_mu_after_match
     self.player_1.trueskill_sigma = self.player_1_trueskill_sigma_after_match
     self.player_2.trueskill_mu = self.player_2_trueskill_mu_after_match
     self.player_2.trueskill_sigma = self.player_2_trueskill_sigma_after_match
     self.player_1.save()
     self.player_2.save()
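
The head-to-head update in Example #1 boils down to the following minimal, self-contained sketch; the setup values and the two starting ratings are illustrative stand-ins for the Django model fields used above.

from trueskill import Rating, rate_1vs1, setup

# Illustrative constants standing in for TRUESKILL_MU and friends above.
setup(mu=25.0, sigma=25.0 / 3, beta=25.0 / 6, tau=25.0 / 300)

player_1 = Rating(mu=27.5, sigma=6.0)   # stored skill of player 1
player_2 = Rating()                     # player 2 starts at the environment defaults

# rate_1vs1 takes the winner first and returns the updated (winner, loser) pair.
player_1, player_2 = rate_1vs1(player_1, player_2)
print(player_1.mu, player_1.sigma, player_2.mu, player_2.sigma)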
Example #2
    def rate_team(self, matches):
        """
        This function runs the TrueSkill rating system to
        determine the skill estimates of each team. 
        """
        trueskill.setup(draw_probability=self.draw_prob(matches))
        ratings = dict(
            zip(self.all_teams, [trueskill.global_env().create_rating()] *
                len(self.all_teams)))
        for match in matches:
            wteam = str(match['Wteam'])
            lteam = str(match['Lteam'])
            wscore = match['Wscore']
            lscore = match['Lscore']

            if wteam in ratings and lteam in ratings:
                if wscore < lscore:
                    wteam, lteam = lteam, wteam
                    wscore, lscore = lscore, wscore

                ratings[wteam], ratings[lteam] = \
                    trueskill.rate_1vs1(ratings[wteam], ratings[lteam], drawn=self.is_equal_score(wscore, lscore))
                wscore -= self.vic_margin

                # while wscore - lscore >= self.vic_margin:
                #     ratings[wteam], ratings[lteam] = \
                #         trueskill.rate_1vs1(ratings[wteam], ratings[lteam], drawn=self.is_equal_score(wscore, lscore))
                #     wscore -= self.vic_margin

        return ratings
Example #3
    def __init__(self, match_data, score_weighting=False):
        if match_data is None:
            raise ValueError("Data cannot be null")

        self._trained = False

        _, _, team_num_map = process.match_vectors(match_data)

        self.team_num_map = team_num_map

        homes = pd.unique(match_data.home_team)
        aways = pd.unique(match_data.away_team)
        teams = np.union1d(homes, aways)

        self.team_ratings = {}
        for team in teams:
            self.team_ratings[team] = Rating()

        self.avg_margin = 0
        self.num_draws = 0
        trueskill.setup(mu=25.0,
                        sigma=8.333333333333334,
                        beta=4.1666666666666,
                        tau=0.08333333333333334,
                        draw_probability=0.26)
        self.train(match_data, score_weighting)

        self.team_skills = np.zeros((len(teams), ))
        for team in teams:
            self.team_skills[self.team_num_map[team]] = \
                             self.team_ratings[team].mu
        return
Example #4
def add_match(conn, bots, results):
    winner = None
    for id_str, stats in results['stats'].items():
        if stats['rank'] == 1:
            winner = int(id_str)

    if winner is None:
        raise ValueError('Could not detect winner of game')

    query = 'insert into games (datetime, winner, participants, results) values (?, ?, ?, ?)'
    del results['final_snapshot']
    current_time = datetime.datetime.now().isoformat()
    conn.execute(query, (current_time, bots[winner]['id'], json.dumps(bots),
                         json.dumps(results)))

    for bot in bots:
        history_query = 'insert into rank_history (bot_id, datetime, rank, mu, sigma) values (?, ?, ?, ?, ?)'
        conn.execute(
            history_query,
            (bot['id'], current_time, bot['rank'], bot['mu'], bot['sigma']))
        games_played_query = 'update bots set games_played=games_played + 1 where id = ?'
        conn.execute(games_played_query, (bot['id'], ))

    trueskill.setup(tau=0.008, draw_probability=0.001)
    teams = [[trueskill.Rating(mu=bot["mu"], sigma=bot["sigma"])]
             for bot in bots]
    ranks = [results["stats"][str(b)]["rank"] - 1 for b in range(len(bots))]
    new_ratings = trueskill.rate(teams, ranks)

    update_query = 'update bots set mu=?, sigma=? where id=?'
    for bot, rating in zip(bots, new_ratings):
        conn.execute(update_query, (rating[0].mu, rating[0].sigma, bot['id']))

    rerank_bots(conn)
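
Example #4 wraps each bot in a one-element team and passes zero-based ranks to trueskill.rate. A stand-alone sketch of that pattern, with invented bot names and finishing order:

import trueskill

trueskill.setup(tau=0.008, draw_probability=0.001)

names = ["botA", "botB", "botC"]
ratings = {name: trueskill.Rating() for name in names}

teams = [[ratings[name]] for name in names]
ranks = [1, 0, 2]  # botB finished first (rank 0), botA second, botC last

for name, team in zip(names, trueskill.rate(teams, ranks=ranks)):
    ratings[name] = team[0]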
Example #5
    def get_leaderboard(self):
        self._load_matches()

        with self._lock:
            trueskill.setup(backend='scipy', draw_probability=0)
            for player in self._players.values():
                player.rating = trueskill.Rating()

            for _, match in self._matches.iterrows():
                win_update, loss_update = trueskill.rate((
                    {
                        name: self._players[name].rating
                        for name in match['winners']
                    },
                    {
                        name: self._players[name].rating
                        for name in match['losers']
                    },
                ))
                self._update_player_ratings(win_update)
                self._update_player_ratings(loss_update)

            leaderboard = pd.DataFrame([
                p.to_series() for p in self._players.values()
            ]).sort_values('rating', ascending=False)
        return leaderboard
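
The loop in Example #5 rates two dict-shaped rating groups per match, winners first. A reduced sketch of that call; the player names are made up and the scipy backend from the original is omitted:

import trueskill

trueskill.setup(draw_probability=0)
ratings = {name: trueskill.Rating() for name in ("ann", "ben", "cat", "dan")}

winners = {"ann": ratings["ann"], "ben": ratings["ben"]}
losers = {"cat": ratings["cat"], "dan": ratings["dan"]}

# Without explicit ranks, the first group is treated as the winner.
win_update, loss_update = trueskill.rate((winners, losers))
ratings.update(win_update)
ratings.update(loss_update)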
Example #6
 def __init__(self):
     trueskill.setup(trueskill.MU,
                     trueskill.SIGMA,
                     trueskill.BETA,
                     trueskill.TAU,
                     draw_probability=0)
     self.snapshots = {
     }  # self.snapshots[tournament] = {'playername' = , 'trueskill' = }
     self.matches: list = []
     self.playerscenes = {}
     self.playerteams = {}
     self.playerratings = {}
     self.playertournaments = {
     }  # playertournaments[playername] = ["BB4","KQ30",...]
     self.playergames = {}
     self.playerwins = {}
     self.playerlosses = {}
     self.incomplete_players = []  # list of playernames w/0 scenes
     self.tournaments = []
     self.tournamentdates = {
     }  # source data only ties matches directly to a date.
     self.teams = {}  # [tournament][team name] = {p1, p2, p3...}
     self.output_file_name: str = '../PlayerSkill.csv'
     self.ratings_change_by_opponent = RatingsChangeByOpponent(self.teams)
     self.ratings_change_by_teammate = RatingsChangeByTeammate(self.teams)
     self.observers = [
         self.ratings_change_by_opponent, self.ratings_change_by_teammate
     ]
     self.process_approved_datasets()
Example #7
async def matchmaking(context):
    """Finds a player to play against"""
    discord_id = context.message.author.id
    mention = context.message.author.mention
    try:
        player = Player.objects.get(discord_id=discord_id)
    except Player.DoesNotExist:
        await context.send(
            mention + ' you are not registered in the league!'
            'type ' + BOT.command_prefix +
            'register <jstris nickname> to register in the league')
        return
    # TrueSkill setup
    setup(mu=TRUESKILL_MU,
          sigma=TRUESKILL_SIGMA,
          beta=TRUESKILL_BETA,
          tau=TRUESKILL_TAU)
    if 'scipy' in backends.available_backends():
        # scipy can be used in the current environment
        backends.choose_backend(backend='scipy')
    # Setup the Rating for the asking player
    rating_asking = Rating(mu=player.trueskill_mu,
                           sigma=player.trueskill_sigma)
    players = Player.objects.exclude(discord_id=discord_id)
    best_match = None
    best_quality = 0.0
    for player_match in players:
        player_rating = Rating(mu=player_match.trueskill_mu,
                               sigma=player_match.trueskill_sigma)
        quality = quality_1vs1(rating_asking, player_rating)
        if quality > best_quality:
            best_quality = quality
            best_match = player_match
    await context.send(player.discord_nickname + ' should play against ' +
                       best_match.discord_nickname)
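
The matchmaking loop in Example #7 simply keeps the opponent with the highest quality_1vs1 value, i.e. the fairest expected match. A compact equivalent with made-up ratings:

from trueskill import Rating, quality_1vs1

asking = Rating(mu=27.0, sigma=4.0)
candidates = {
    "opponent_a": Rating(mu=26.0, sigma=5.0),
    "opponent_b": Rating(mu=35.0, sigma=2.0),
}

# quality_1vs1 is the estimated draw probability; higher means a more even match.
best_name = max(candidates, key=lambda name: quality_1vs1(asking, candidates[name]))
print(best_name)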
Example #8
    def startEvaluation(self):
        import trueskill
        self.home_adv_adjust()
        trueskill.setup(draw_probability=self.draw_prob())
        team_rating = dict(
            zip(self.teams,
                [trueskill.global_env().create_rating()] * len(self.teams)))

        for match in self.regular_matches:
            wteam = match[self.regular_titles.index('Wteam')]
            lteam = match[self.regular_titles.index('Lteam')]
            wscore = match[self.regular_titles.index('Wscore')]
            lscore = match[self.regular_titles.index('Lscore')]

            if wteam in team_rating and lteam in team_rating:
                if wscore < lscore:
                    wteam, lteam = lteam, wteam
                team_rating[wteam], team_rating[lteam] = trueskill.rate_1vs1(team_rating[wteam], \
                    team_rating[lteam], drawn = self.is_equal_score(wscore, lscore))

        probs = []
        for i in range(0, len(self.teams)):
            for j in range(i + 1, len(self.teams)):
                prob = self.win_probability(team_rating[self.teams[i]],
                                            team_rating[self.teams[j]])
                probs.append(prob)

        return probs
Example #9
def init():
    global conn, c, last_match
    dbexists = isfile("database.sqlite3")
    conn = sqlite3.connect("database.sqlite3")
    conn.row_factory = sqlite3.Row
    c = conn.cursor()
    #set up trueskill here:
    ts.setup(draw_probability=0.01)
    if dbexists:
        try:
            check_db()
        except Exception as e:
            console.display(e)
            console.terminate()
            return
    else:
        console.display("DATATBASE| Creating new database...")
        create_tables()

    c.execute("SELECT pickup_id from pickups ORDER BY pickup_id DESC LIMIT 1")
    result = c.fetchone()
    if result:
        last_match = result[0]
    else:
        last_match = -1
Example #10
    def calculate_score(self, comparison_pairs):
        """
        Calculate scores for a set of comparison_pairs
        :param comparison_pairs: array of comparison_pairs
        :return: dictionary key -> ScoredObject
        """
        self.storage = {}
        self.opponents = {}
        self.ratings = {}
        trueskill.setup()

        keys = self.get_keys_from_comparison_pairs(comparison_pairs)
        # create default ratings for every available key
        for key in keys:
            rating = trueskill.Rating()
            self.ratings[key] = rating
            self.opponents[key] = set()

            self.storage[key] = ScoredObject(key=key,
                                             score=trueskill.expose(rating),
                                             variable1=rating.mu,
                                             variable2=rating.sigma,
                                             rounds=0,
                                             opponents=0,
                                             wins=0,
                                             loses=0)

        # calculate rating by for every match
        for comparison_pair in comparison_pairs:
            key1 = comparison_pair.key1
            key2 = comparison_pair.key2
            winner = comparison_pair.winner

            # skip incomplete comparisons
            if winner is None:
                self._update_rounds_only(key1)
                self._update_rounds_only(key2)
                continue

            r1 = self.ratings[key1]
            r2 = self.ratings[key2]

            key1_winner = winner == ComparisonWinner.key1
            key2_winner = winner == ComparisonWinner.key2

            if key1_winner:
                r1, r2 = trueskill.rate_1vs1(r1, r2)
            elif key2_winner:
                r2, r1 = trueskill.rate_1vs1(r2, r1)
            elif winner == ComparisonWinner.draw:
                r1, r2 = trueskill.rate_1vs1(r1, r2, drawn=True)
            else:
                raise InvalidWinnerException

            self._update_rating(key1, r1, key2, key1_winner, key2_winner)
            self._update_rating(key2, r2, key1, key2_winner, key1_winner)

        # return comparison results
        return self.storage
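
The score stored in the ScoredObject above comes from trueskill.expose, which collapses a rating into one conservative number (mu - 3 * sigma under the default environment). A tiny illustration with an invented rating:

import trueskill

trueskill.setup()
rating = trueskill.Rating(mu=28.0, sigma=6.0)
print(trueskill.expose(rating))  # 28 - 3 * 6 = 10.0 with the default environment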
Example #11
 def setup():
     trueskill.setup(
         mu=50.,
         sigma=50. / 3.,
         beta=50. / 6.,
         tau=50. / 300.,
         draw_probability=.03,
     )
Example #12
def test_setup_global_environment():
    try:
        setup(draw_probability=.50)
        t1, t2 = generate_teams([1, 1])
        rated = rate([t1, t2])
        assert almost(rated) == [(30.267, 7.077), (19.733, 7.077)]
    finally:
        # rollback
        setup()
Example #14
    def calculate_score(self, comparison_pairs):
        """
        Calculate scores for a set of comparisons
        :param comparisons: array of
        :return: dictionary key -> ScoredObject
        """
        self.storage = {}
        self.opponents = {}
        self.ratings = {}
        trueskill.setup()

        keys = self.get_keys_from_comparison_pairs(comparison_pairs)
        # create default ratings for every available key
        for key in keys:
            rating = trueskill.Rating()
            self.ratings[key] = rating
            self.opponents[key] = set()

            self.storage[key] = ScoredObject(
                key=key,
                score=trueskill.expose(rating),
                variable1=rating.mu,
                variable2=rating.sigma,
                rounds=0,
                opponents=0,
                wins=0,
                loses=0,
            )

        # calculate rating by for every match
        for comparison_pair in comparison_pairs:
            key1 = comparison_pair.key1
            key2 = comparison_pair.key2
            winning_key = comparison_pair.winning_key

            # skip incomplete comparisons
            if winning_key is None:
                self._update_rounds_only(key1)
                self._update_rounds_only(key2)
                continue

            r1 = self.ratings[key1]
            r2 = self.ratings[key2]

            if winning_key == comparison_pair.key1:
                r1, r2 = trueskill.rate_1vs1(r1, r2)
            elif winning_key == comparison_pair.key2:
                r2, r1 = trueskill.rate_1vs1(r2, r1)
            else:
                raise InvalidWinningKeyException

            self._update_rating(key1, r1, key2, winning_key)
            self._update_rating(key2, r2, key1, winning_key)

        # return comparison results
        return self.storage
Example #15
def init():
    trueskill.setup(
        mu=1600,  # mu of the prior
        sigma=400,  # sigma of the prior
        beta=200,  # distance to guarantee 76% of winning (rec = sigma/2)
        tau=4,  # dynamic ratings factor (increases sigma over time) (rec = sigma/100)
        draw_probability=0.0,  # probability of a draw
        backend=None,  # allows for mpmath/scipy normal distribution implementations
    )
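
After the Elo-like setup in Example #15, a freshly created Rating picks up those environment defaults; a quick sanity check:

import trueskill

trueskill.setup(mu=1600, sigma=400, beta=200, tau=4, draw_probability=0.0)
r = trueskill.Rating()
assert r.mu == 1600 and r.sigma == 400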
Example #16
def mm_setup_environment(mu=ELO_AVG_RATING,
                         sigma=ELO_RANK_INCREMENT,
                         beta=ELO_INCREMENT_RANGE,
                         tau=5,
                         draw_prob=0.10):
    """ Sets up the global environment for Trueskill """
    trueskill.setup(mu=mu,
                    sigma=sigma,
                    beta=beta,
                    tau=tau,
                    draw_probability=draw_prob)
Example #17
def calculate_trueskill(results):
    trueskill.setup(mu=TRUESKILL_MU,
                    sigma=TRUESKILL_SIGMA,
                    beta=TRUESKILL_BETA,
                    tau=TRUESKILL_TAU,
                    draw_probability=0.0)
    players = defaultdict(lambda: trueskill.Rating())
    players_by_event = defaultdict(dict)
    outcome_by_event = defaultdict(lambda: defaultdict(list))
    accuracy_by_event = defaultdict(list)
    probability_by_event = defaultdict(list)
    match_rating_pairs = []

    for (event_name, round_no, player_a, player_b, winner) in results:
        if player_a is None or player_b is None:
            continue  # This is a BYE match

        if winner is None:
            outcome_by_event[player_a][event_name].append(0)
            outcome_by_event[player_b][event_name].append(0)
            continue  # This is a double loss

        loser = player_a if winner == player_b else player_b
        rank_winner = trueskill_rank(players[winner])
        rank_loser = trueskill_rank(players[loser])
        match_rating_pairs.append((rank_winner, rank_loser))

        probability = trueskill_win_prob(players[winner], players[loser])
        probability_by_event[event_name].append(probability)
        accuracy = 1.0 if probability > 0.5 else 0.0
        accuracy_by_event[event_name].append(accuracy)

        players[winner], players[loser] = trueskill.rate_1vs1(
            players[winner], players[loser])
        players_by_event[winner][event_name] = trueskill_rank(players[winner])
        players_by_event[loser][event_name] = trueskill_rank(players[loser])

        outcome_by_event[winner][event_name].append(1)
        outcome_by_event[loser][event_name].append(0)

    playerranks = {}
    for (player_name, rating) in players.items():
        playerranks[player_name] = round(trueskill_rank(rating), 2)

    statistics = {
        'players_by_event': players_by_event,
        'accuracy_by_event': accuracy_by_event,
        'probability_by_event': probability_by_event,
        'outcome_by_event': outcome_by_event,
        'match_rating_pairs': match_rating_pairs
    }

    return (playerranks, statistics)
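
Example #17 leans on two helpers that are not shown, trueskill_rank and trueskill_win_prob. Plausible implementations, assuming the usual conservative-rank and 1v1 win-probability formulas (an assumption about the intent, not the original code):

import math
import trueskill

def trueskill_rank(rating):
    # Conservative estimate; equals trueskill.expose(rating) under the default scale.
    return rating.mu - 3 * rating.sigma

def trueskill_win_prob(r1, r2):
    env = trueskill.global_env()
    delta_mu = r1.mu - r2.mu
    denom = math.sqrt(2 * env.beta ** 2 + r1.sigma ** 2 + r2.sigma ** 2)
    return env.cdf(delta_mu / denom)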
Example #18
def generate_true_skill(matches):
    matches_no = {}
    forfeits_no = {}
    for match in matches:
        for i, player in enumerate(match.players):
            matches_no[player] = matches_no.get(player, 0) + 1
            forfeits_no[player] = forfeits_no.get(
                player, 0) + (1 if match.times[i] >= 1e9 else 0)

    ts.setup(tau=args.tau, draw_probability=args.drawprob)
    players = extract_all_players(matches)
    ratings = {player: ts.Rating() for player in players}
    for match in matches:
        m_ratings = [{
            match.players[i]: ratings[match.players[i]]
        } for i in range(len(match.players))]
        ranks = [0] * len(match.players)
        for i in range(1, len(match.players)):
            if match.times[i] == match.times[i - 1]:
                ranks[i] = ranks[i - 1]
            elif match.times[i] > match.times[i - 1]:
                ranks[i] = ranks[i - 1] + 1
        new_ratings = ts.rate(m_ratings, ranks=ranks)
        new_ratings = {k: v for d in new_ratings for k, v in d.items()}
        for player, new_rating in new_ratings.items():
            ratings[player] = new_rating

    players = sorted(
        players,
        key=lambda p: ratings[p].mu - args.sigmaweight * ratings[p].sigma,
        reverse=True)

    if args.addheader:
        columns = ['Player', 'Rating', 'Mu', 'Sigma', '#Matches', '#Forfeits']
        if args.addplace:
            columns.insert(0, 'Place')
        print(*columns, sep=args.delim)

    place = 1
    for player in players:
        if matches_no[player] < args.minmatches:
            continue
        rating = ratings[player]
        row = [
            player,
            round(rating.mu - args.sigmaweight * rating.sigma, 3),
            round(rating.mu, 3),
            round(rating.sigma, 3), matches_no[player], forfeits_no[player]
        ]
        if args.addplace:
            row.insert(0, place)
            place += 1
        print(*row, sep=args.delim)
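
Example #18 encodes ties by giving two players the same rank before calling ts.rate. A minimal version of that ranking step; the tau and draw probability are placeholder values for args.tau and args.drawprob:

import trueskill as ts

ts.setup(tau=0.02, draw_probability=0.05)  # placeholder values
ratings = {name: ts.Rating() for name in ("p1", "p2", "p3")}

groups = [{name: ratings[name]} for name in ("p1", "p2", "p3")]
ranks = [0, 0, 1]  # p1 and p2 tie for first place, p3 finishes behind them

for group in ts.rate(groups, ranks=ranks):
    ratings.update(group)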
Example #19
def doRating(dfResults):

    env = ts.TrueSkill()
    ts.setup(tau=0.2)

    columns = ['Name', 'mu_minus_3sigma', 'numRaces', 'Rating']

    dfRatings = pd.DataFrame(columns=columns, index=dfResults.index)
    dfRatings = pd.merge(dfRatings,
                         dfResults[['numRegattas', 'lastRegatta']],
                         left_index=True,
                         right_index=True,
                         how='outer')

    dfRatings['numRaces'] = dfResults.count(axis=1)
    dfRatings['Rating'] = pd.Series(np.repeat(
        env.Rating(), len(dfRatings))).T.values.tolist()

    for raceCol in dfResults:
        if (raceCol != 'numRegattas') and (raceCol != 'lastRegatta'):
            competed = dfRatings.index.isin(
                dfResults.index[dfResults[raceCol].notnull()])
            rating_group = list(
                zip(dfRatings['Rating'][competed].T.values.tolist()))
            ranking_for_rating_group = dfResults[raceCol][
                competed].T.values.tolist()
            dfRatings.loc[competed,
                          'Rating'] = ts.rate(rating_group,
                                              ranks=ranking_for_rating_group)

    dfRatings = pd.DataFrame(dfRatings)  #convert to dataframe

    dfRatings['mu_minus_3sigma'] = pd.Series(np.repeat(
        0.0, len(dfRatings)))  #calculate mu - 3 x sigma: MSFT convention

    for index, row in dfRatings.iterrows():
        dfRatings.loc[dfRatings.index == index, 'mu_minus_3sigma'] = float(
            row['Rating'].mu) - 3 * float(row['Rating'].sigma)

    # competed in at least 5 races, more than 1 regatta, and within the last 12 months
    dfRatings = dfRatings[dfRatings['numRaces'] > 4]
    dfRatings = dfRatings[dfRatings['numRegattas'] > 1]
    dfRatings = dfRatings[
        (datetime.datetime.now() - dfRatings['lastRegatta']) /
        np.timedelta64(1, 'D') < 365]

    dfRatings['Name'] = dfRatings.index
    dfRatings.index = dfRatings['mu_minus_3sigma'].rank(
        ascending=False).astype(int)  #set index to ranking
    dfRatings.index.names = ['Rank']

    return dfRatings.sort_values('mu_minus_3sigma', ascending=False)
Example #20
def load_env() -> None:
    env = None
    try:
        with open(ENV_FILE, "r") as f:
            env = json.load(f)
    except FileNotFoundError:
        pass
    except json.JSONDecodeError:
        print(f"Failed to read '{ENV_FILE}'")
    finally:
        if env:
            trueskill.setup(mu=env["mu"], sigma=env["sigma"])
        else:
            trueskill.setup(mu=DEFAULT_MU, sigma=DEFAULT_SIGMA)
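
load_env in Example #20 expects ENV_FILE to hold a flat JSON object with mu and sigma keys. A matching writer might look like the sketch below; save_env and the file name are assumptions, not part of the original module:

import json
import trueskill

ENV_FILE = "trueskill_env.json"  # assumed file name

def save_env() -> None:
    env = trueskill.global_env()
    with open(ENV_FILE, "w") as f:
        json.dump({"mu": env.mu, "sigma": env.sigma}, f)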
Example #21
def main():
    parser = argparse.ArgumentParser(description='MadCars Bot Competition')
    parser.add_argument('-n',
                        '--num-games',
                        type=int,
                        help='Total number of games',
                        required=True)
    parser.add_argument('-d',
                        '--model-dir',
                        type=str,
                        help='NN model root directory',
                        required=True)
    parser.add_argument('-l',
                        '--log-interval',
                        type=int,
                        help='Rating print interval',
                        default=50)
    parser.add_argument('-c',
                        '--cache-path',
                        type=str,
                        default='competition_cache.json',
                        help='Ragings JSON cache')
    args = parser.parse_args()

    model_dir: str = args.model_dir
    num_games: int = args.num_games
    cache_path: str = args.cache_path
    log_interval: int = args.log_interval

    ts.setup(draw_probability=0.0001)
    loop = events.new_event_loop()
    events.set_event_loop(loop)

    clients = get_simple_bots() + get_nn_bots(model_dir)
    clients = load_ratings(cache_path, clients)

    games_played = 0

    while games_played < num_games:
        games_played += log_interval
        ratings = run_competition(clients, log_interval)
        ratings = sorted(ratings, key=lambda t: -ts.expose(t[1]))
        save_ratings(cache_path, clients)
        print(f'-- RATINGS {games_played} --')
        for name, rating in ratings:
            print(
                f'{ts.expose(rating):4.1f} ({rating.mu:4.1f} +- {rating.sigma * 3:4.1f}): {name:<32}'
            )
Example #22
def run_cv(mu, sigma):
    cv_data = []
    fix_variance = True
    if fix_variance:
        opponents_for_games, game_outcomes, true_elliot_skill = sim_games(
            None, True)

    for sigma in [mu / 50, mu / 25, mu / 10, mu / 3]:
        for beta in [sigma / 1.5, sigma / 2, sigma / 5]:
            for tau in [sigma / 200, sigma / 100, sigma / 50]:
                env = setup(mu=mu,
                            sigma=sigma,
                            beta=beta,
                            tau=tau,
                            draw_probability=0)
                if not fix_variance:
                    opponents_for_games, game_outcomes, true_elliot_skill = sim_games(
                        env, False)
                skill_mean, skill_var = update_skill(env, opponents_for_games,
                                                     game_outcomes)
                cv_data.append([
                    mu, sigma, beta, tau,
                    relative_mse(np.array(true_elliot_skill), skill_mean),
                    skill_mean, skill_var
                ])

    cv_res = pd.DataFrame(columns=[
        'mu', 'sigma', 'beta', 'tau', 'RMSE', 'skill_mean', 'skill_var'
    ],
                          data=cv_data)
    best_vals = cv_res.sort_values('RMSE').reset_index()
    best_vals[['mu', 'sigma', 'beta', 'tau',
               'RMSE']].to_csv('cv_as_ts_Mar_19.csv', index=False)
    return best_vals, true_elliot_skill
Example #23
def substituted_trueskill(*args, **kwargs):
    env = trueskill.global_env()
    params = [['mu', env.mu], ['sigma', env.sigma], ['beta', env.beta],
              ['tau', env.tau], ['draw_probability', env.draw_probability],
              ['backend', env.backend]]
    # merge settings with previous TrueSkill object
    for x, arg in enumerate(args):
        params[x][1] = arg
    params = dict(params)
    for kw, arg in kwargs.items():
        params[kw] = arg
    try:
        # setup the environment
        yield trueskill.setup(**params)
    finally:
        # revert the environment
        trueskill.setup(env=env)
Example #24
def win_probability_heroes(team1, team2, mmr_dict, env=trueskill.setup()):
    ratings1 = []
    for hero in team1:
        r = env.Rating(mmr_dict[hero]["mu"], mmr_dict[hero]["sigma"])
        ratings1.append(r)
    ratings2 = []
    for hero in team2:
        r = env.Rating(mmr_dict[hero]["mu"], mmr_dict[hero]["sigma"])
        ratings2.append(r)
    return win_probability(ratings1, ratings2, env)
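
Example #24 ends by calling a win_probability helper that is not reproduced here. A common formulation based on the environment's beta and cdf is sketched below; this is a guess at the intent, not the original implementation:

import itertools
import math
import trueskill

def win_probability(team1, team2, env=None):
    env = env or trueskill.global_env()
    delta_mu = sum(r.mu for r in team1) - sum(r.mu for r in team2)
    sum_sigma = sum(r.sigma ** 2 for r in itertools.chain(team1, team2))
    size = len(team1) + len(team2)
    denom = math.sqrt(size * env.beta ** 2 + sum_sigma)
    return env.cdf(delta_mu / denom)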
Example #25
def calculate_mmr(replays, env=trueskill.setup()):
    mmr_dict = {}
    print("Calculating mmr...")
    for index, replay in replays.iterrows():
        game_time = replay["game_date"]
        patch = replay["game_version"]
        heroes = replay[["hero" + str(i) for i in range(1, 11)]]
        winner = replay["winner"]
        mmr_dict = rate_game(game_time, patch, heroes, winner, mmr_dict, env)
    return mmr_dict
Example #26
def compute_true_skills(user):
    """ recalculate TrueSkills from the entire history of fights
    """
    # True Skill parameters for Movie ratings
    # Details and reasoning can be found at: 
    # https://www.evernote.com/Home.action#st=p&n=ea2365e1-fe1c-4f4c-97b6-18cf78431fa4
    ts.setup(mu=3.0, sigma=1.0, beta=0.3, tau=0.005, draw_probability=0.05)

    # initiate TrueSkill dict
    seededTS = {}

    # initialize TrueSkill for each rated movie
    for score in Score.objects.filter(user=user).exclude(starRating=0):
        imdb_id = score.movie_imdb_id()
        starRating = score.starRating
        seededTS[imdb_id] = ts.Rating(mu=1.*starRating, sigma=0.5)
    # iterate over fightes (over time) and incrementally update
    # TrueSkill dict
    count = 0
    for fight in sorted(Fight.objects.filter(user=user),
                       key = lambda m: m.timestamp):
        update_true_skill(fight, seededTS)
        if count and count % 200 == 0: print >> sys.stderr, '%i fights processed.' % count
        count += 1


    # record new ratings in the database
    count = 0
    for score in Score.objects.filter(user=user):
        movie_id = score.movie_imdb_id()
        try:
            new_TS = seededTS[movie_id]
        except KeyError:
            # this movie was just saved in between starting the TrueSkill
            # calculations and recording the results. we'll get it in the
            # next round.
            continue
        update_score(user, movie_id, new_TS)
        count += 1
    #log('--- a total of %i ratings recorded. ---' % (count))
    return count
Example #27
def rate_skill(names, results, metrics):
    from trueskill import Rating, rate, setup
    MU = 1000
    setup(MU, MU / 3, MU * 5, MU / 5000)
    
    started = time.clock()
    ratings = {x : Rating() for x in names}
    for entry in results:
        adjusted = rate([(ratings[x[0]],) for x in entry],
                        [(int(not x[1]),) for x in entry])
        for index, rating in enumerate(adjusted):
            ratings[entry[index][0]] = rating[0]

    print("Completed rankings in", str(int(time.clock() - started)), "seconds")
    print("")

    descending = sorted(ratings.items(), key=lambda x:x[1], reverse=True)
    for index, (name, rating) in enumerate(descending):
        metrics[name]["Rank"] = index
        metrics[name]["Mu"] = "%.0f" % rating.mu
        metrics[name]["Sigma"] = "%.0f" % rating.sigma
Example #28
def substituted_trueskill(*args, **kwargs):
    """Setup the global environment only within the context::

       assert Rating().mu == 25
       with substituted_trueskill(mu=0):
           assert Rating().mu == 0
    """
    env = trueskill.global_env()
    params = [['mu', env.mu], ['sigma', env.sigma], ['beta', env.beta],
              ['tau', env.tau], ['draw_probability', env.draw_probability],
              ['backend', env.backend]]
    # merge settings with previous TrueSkill object
    for x, arg in enumerate(args):
        params[x][1] = arg
    params = dict(params)
    for kw, arg in kwargs.items():
        params[kw] = arg
    try:
        # setup the environment
        yield trueskill.setup(**params)
    finally:
        # revert the environment
        trueskill.setup(env=env)
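
The generator in Examples #23 and #28 is meant to be wrapped with contextlib.contextmanager so it can temporarily swap the global environment; a short usage sketch under that assumption, with the default mu of 25:

import trueskill

# assumes: substituted_trueskill = contextlib.contextmanager(substituted_trueskill)
assert trueskill.Rating().mu == 25
with substituted_trueskill(mu=0):
    assert trueskill.Rating().mu == 0
assert trueskill.Rating().mu == 25  # reverted by the finally block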
Example #29
    def rate_team(self):
        trueskill.setup(draw_probability = self.draw_prob())
        team_rating = dict(zip(self.teams, [trueskill.global_env().create_rating()] * len(self.teams)))
        for match in self.regular_matches:
            wteam = match[self.regular_titles.index('Wteam')]
            lteam = match[self.regular_titles.index('Lteam')]
            wscore = match[self.regular_titles.index('Wscore')]
            lscore = match[self.regular_titles.index('Lscore')]

            if wteam in team_rating and lteam in team_rating:
                if wscore < lscore:
                    wteam, lteam = lteam, wteam
                    wscore, lscore = lscore, wscore
                    
                team_rating[wteam], team_rating[lteam] = trueskill.rate_1vs1(team_rating[wteam], \
                    team_rating[lteam], drawn = self.is_equal_score(wscore, lscore))
                wscore -= self.victory_margin
                
                while wscore - lscore >= self.victory_margin:
                    team_rating[wteam], team_rating[lteam] = trueskill.rate_1vs1(team_rating[wteam], \
                        team_rating[lteam], drawn = self.is_equal_score(wscore, lscore))
                    wscore -= self.victory_margin
        return team_rating
Example #30
def calc_new_ranking(ranking, bots, result, tau):
    ts.setup(tau=tau, draw_probability=0.0001)
    ratings = [[ts.Rating(mu=ranking[bot]['mu'], sigma=ranking[bot]['sigma'])]
               for bot in bots]
    ratings_ffa = [[
        ts.Rating(mu=ranking[bot]['mu_ffa'], sigma=ranking[bot]['sigma_ffa'])
    ] for bot in bots]
    ratings_1v1 = [[
        ts.Rating(mu=ranking[bot]['mu_1v1'], sigma=ranking[bot]['sigma_1v1'])
    ] for bot in bots]
    ratings = ts.rate(ratings, ranks=result)
    if len(bots) == 2:
        ratings_1v1 = ts.rate(ratings_1v1, ranks=result)
    else:
        ratings_ffa = ts.rate(ratings_ffa, ranks=result)
    for i, bot in enumerate(bots):
        ranking[bot]['mu'] = ratings[i][0].mu
        ranking[bot]['sigma'] = ratings[i][0].sigma
        ranking[bot]['mu_ffa'] = ratings_ffa[i][0].mu
        ranking[bot]['sigma_ffa'] = ratings_ffa[i][0].sigma
        ranking[bot]['mu_1v1'] = ratings_1v1[i][0].mu
        ranking[bot]['sigma_1v1'] = ratings_1v1[i][0].sigma
        ranking[bot]['games'] += 1
Example #31
def accuracy(replays, mmr_dict, env=trueskill.setup()):
    correct = 0
    total = 0

    for i, row in replays.iterrows():
        team1 = row[TEAM0]
        team2 = row[TEAM1]
        wp = win_probability_heroes(team1, team2, mmr_dict, env)
        pred_win = 0 if wp > 0.5 else 1

        if pred_win == row["winner"]:
            correct += 1
        total += 1

    return correct / total
Example #32
def rating_env_setup():
  """Sets up the Rating Environment, an object from the trueskill module.

  Args:
    None, but setup uses the global variables defined above.

  Returns:
    The global TrueSkill environment created by setup().
  """
  global_env = setup(mu=MU,
                     sigma=SIGMA,
                     beta=BETA,
                     tau=TAU,
                     draw_probability=DRAW_PROBABILITY,
                     backend=None)
  return global_env
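
The environment returned by rating_env_setup in Example #32 is also installed globally by setup(), so module-level helpers keep working with it; a brief usage sketch (MU, SIGMA, BETA, TAU and DRAW_PROBABILITY are assumed to be defined as in the original module):

import trueskill

env = rating_env_setup()
r1, r2 = env.create_rating(), env.create_rating()
# rate_1vs1 picks up the same environment because setup() made it global.
r1, r2 = trueskill.rate_1vs1(r1, r2)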
Example #33
def rate_game(game_time,
              patch,
              heroes,
              winner,
              mmr_dict,
              env=trueskill.setup()):
    ratings = []
    for hero in heroes:
        if hero in mmr_dict:
            mu = mmr_dict[hero]["mu"]
            if patch not in mmr_dict[hero]["history"]["patches"]:
                sigma = env.sigma
            else:
                sigma = mmr_dict[hero]["sigma"]
            r = env.Rating(mu=mu, sigma=sigma)
        else:
            r = env.Rating()

        ratings.append(r)
    team1 = dict(zip(heroes[:5], ratings[:5]))
    team2 = dict(zip(heroes[5:], ratings[5:]))

    rankings = [1, 1]
    rankings[winner] = 0

    rated_groups = env.rate([team1, team2], rankings)

    for hero in team1:
        new_r = rated_groups[0][hero]
        mmr_dict = update_mmr_dict(hero, game_time, patch, new_r.mu,
                                   new_r.sigma, rankings[0], mmr_dict)

    for hero in team2:
        new_r = rated_groups[1][hero]
        mmr_dict = update_mmr_dict(hero, game_time, patch, new_r.mu,
                                   new_r.sigma, rankings[0], mmr_dict)

    return mmr_dict
Example #34
import logging

import trueskill

from .decorators import with_logger

# Logging setup
TRACE = 5
logging.addLevelName(TRACE, "TRACE")
logging.getLogger("aiomeasures").setLevel(logging.INFO)
logging.getLogger("aio_pika").setLevel(logging.INFO)

# Constants
FFA_TEAM = 1

# Credit to Axle for parameter changes,
# see: http://forums.faforever.com/viewtopic.php?f=45&t=11698#p119599
# Optimum values for ladder here, using them for global as well.
trueskill.setup(mu=1500, sigma=500, beta=240, tau=10, draw_probability=0.10)


@with_logger
class ConfigurationStore:
    def __init__(self):
        """
        Change default values here.
        """
        self.CONFIGURATION_REFRESH_TIME = 300
        self.LOG_LEVEL = "DEBUG"
        self.PROFILING_COUNT = 300
        self.PROFILING_DURATION = 2
        self.PROFILING_INTERVAL = -1

        self.CONTROL_SERVER_PORT = 4000
Example #35
totalNumberGames = len(df)
stripesWin = 0.0
breakWins = 0.0

df['gameID'] = range(1, len(df) + 1)
df.set_index('gameID', inplace=True)

df = fix_initials(df, 'RAL', 'RL')
df = fix_initials(df, 'REL', 'RL')
df = fix_initials(df, 'NS', 'NAS')

p1 = df['Player1'].unique()
p2 = df['Player2'].unique()
playerList = np.unique(np.concatenate((p1, p2)))

ts.setup(mu=ELOMEAN, sigma=(ELOMEAN / 3)**2, draw_probability=0.00)
rankings = {p: ts.Rating() for p in playerList}
trends = {p: [] for p in playerList}

for g in range(len(df)):
    won = df.Winner[g + 1]
    if df.Player1[g + 1] == won:
        lost = df.Player2[g + 1]
        winner = rankings[won]
        loser = rankings[lost]
        newP1, newP2 = ts.rate_1vs1(winner, loser)
        rankings[won] = newP1
        rankings[lost] = newP2
        if df.Stripes[g + 1] == won:
            stripesWin = stripesWin + 1.0
        breakWins = breakWins + 1.0
Example #36
def fight_result(request, movie_1_id, movie_2_id, lock):

    if not request.user.is_authenticated():
        args = (movie_1_id, movie_2_id, lock)
        return redirect_to_login('fight_result', args=args)

    # get the movies                                                                                                                                                                 
    movie1 = get_object_or_404(Movie, pk=movie_1_id)
    movie2 = get_object_or_404(Movie, pk=movie_2_id)
    # get the user
    user = request.user
    # get the posted result                                                                                                                                                          
    result = request.POST['result']
    # a result of -1 means "skip", no recording
    # in that case. Only record a valid result
    if int(result) in (0,1,2):
        #                                                                                                                                                                                
        # ~~~~Design choice~~~~~~~~~~~~~                                                                                                                                                 
        # When two movies that were previously compared are compared again,                                                                                                              
        # we will not treat is as another match. Instead of multiple matches,                                                                                                            
        # we will have a single result for any given pair. This is to allow                                                                                                              
        # changes in opinion to have a more immediate effect, to remove                                                                                                                  
        # any confusion as to preferences between any two movies, and to keep                                                                                                            
        # the database compact, simple and straightforward.                                                                                                                              
        #                                                                                                                                                                                
        # Temporarily, score will be changed by a single update (as if                                                                                                               
        # there have been multiple matches), but this will be corrected when                                                                                                             
        # The main way of calculating TrueSkill is going over the entire chain                                                                                                           
        # of Fights from scratch.                                                                                                                                                 
        # ~~~~~~~~~~~~~~~~                                                                                                                                                               
        #                                                                                                                                                                                
        # find the fight if these two movies were compared before                                                                                                                        
        # or create a new fight
        try:
            fight = Fight.objects.filter(
                                       user=user
                                        ).filter(
                                       contestants=movie1
                                       ).filter(
                                       contestants=movie2
                                       ).get()
            # make sure the movies are in the right order                                                                                                                                
            # so that the result integer is accurate                                                                                                                                     
            fight.movie1, fight.movie2 = movie1, movie2
        except Fight.DoesNotExist:
            # match not in database, create a new match                                                                                                                                  
            fight = Fight(user=user,
                          movie1=movie1,
                          movie2=movie2)
        # record the result                                                                                                                                                              
        fight.result = result
        # record the date and time (this is not necessary if a new match                                                                                                                 
        # is created, since this is done by default, but it is necessary                                                                                                                 
        # if an old match is retrieved.                                                                                                                                                  
        fight.timestamp = now()
        # put the match in the database                                                                                                                                                  
        fight.save()
        # --end of recording (end of if)

    # update scores (until periodic cleaner recalculation)
    # set parameters
    ts.setup(mu=3.0, sigma=1.0, beta=0.3, tau=0.005, draw_probability=0.05)
    # retrieve scores to update
    score1 = Score.objects.get(user=user, movie=movie1)
    score2 = Score.objects.get(user=user, movie=movie2)
    # create VS score objects
    old_VS_1 = ts.Rating(mu=score1.mu, sigma=score1.sigma)
    old_VS_2 = ts.Rating(mu=score2.mu, sigma=score2.sigma)
    # update: get the new VS score objects
    if int(result) == 0:
        new_VS_1, new_VS_2 = ts.rate_1vs1(old_VS_1, old_VS_2, drawn=True)
    elif int(result) == 1:
        new_VS_1, new_VS_2 = ts.rate_1vs1(old_VS_1, old_VS_2, drawn=False)
    elif int(result) == 2:
        new_VS_2, new_VS_1 = ts.rate_1vs1(old_VS_2, old_VS_1, drawn=False)
    else:
        new_VS_1, new_VS_2 = None, None
    # save new scores to the db
    if new_VS_1:
        update_score(user, movie1, new_VS_1)
    if new_VS_2:
        update_score(user, movie2, new_VS_2)
    # scores are updated
    
    # pick new movies to fight and redirect to the fight                                                                                                                                                         
    new_fighters = {}
    if lock == '1':
        new_fighters['movie_1_id'] = movie_1_id
        name = 'fight_a'
    elif lock == '2':
        new_fighters['movie_2_id'] = movie_2_id
        name = 'fight_b'
    else:
        name = 'fight'
    return HttpResponseRedirect(reverse(name, kwargs=new_fighters))
Example #37
import trueskill
from trueskill import TrueSkill, Rating
from flask import Flask, request, jsonify
from flask_swagger import swagger

# global FLASK setup
app = Flask(__name__, static_url_path='')
app.config['DEBUG'] = True

# global TRUESKILL setup
trueskill.setup(backend='mpmath')

# Note: We don't need to call run() since our application is embedded within
# the App Engine WSGI application server.

class InvalidAPIUsage(Exception):
    status_code = 400

    def __init__(self, message, status_code=None, payload=None):
        Exception.__init__(self)
        self.message = message
        if status_code is not None:
            self.status_code = status_code
        self.payload = payload

    def to_dict(self):
        rv = dict(self.payload or ())
        rv['message'] = self.message
        return rv

Example #38
def save_result(request):
    # Enter the new result
    # Still needs input validation (only positive numbers and no draws allowed)
    g = Game.objects.get(id=request.POST['game'])
    g.goals_1 = request.POST['goals_1']
    g.goals_2 = request.POST['goals_2']
    g.save()
    if g.goals_1 > g.goals_2:
        foo = 1
    else:
        foo = 0
    result = [1 - foo, foo]
    # Update the global TrueSkill
    setup(beta=666, draw_probability=0)
    off1 = Rating(mu=g.off_player1.mu, sigma=g.off_player1.sigma)
    def1 = Rating(mu=g.def_player1.mu, sigma=g.def_player1.sigma)
    off2 = Rating(mu=g.off_player2.mu, sigma=g.off_player2.sigma)
    def2 = Rating(mu=g.def_player2.mu, sigma=g.def_player2.sigma)
    team1 = [off1, def1]
    team2 = [off2, def2]
    (new_off1, new_def1), (new_off2, new_def2) = rate([team1, team2], result)
    g.off_player1.mu = new_off1.mu
    g.off_player1.sigma = new_off1.sigma
    g.off_player1.save()

    g.off_player2.mu = new_off2.mu
    g.off_player2.sigma = new_off2.sigma
    g.off_player2.save()

    g.def_player1.mu = new_def1.mu
    g.def_player1.sigma = new_def1.sigma
    g.def_player1.save()

    g.def_player2.mu = new_def2.mu
    g.def_player2.sigma = new_def2.sigma
    g.def_player2.save()

    # Update the positional skill
    off1 = Rating(mu=g.off_player1.mu_off, sigma=g.off_player1.sigma_off)
    def1 = Rating(mu=g.def_player1.mu_def, sigma=g.def_player1.sigma_def)
    off2 = Rating(mu=g.off_player2.mu_off, sigma=g.off_player2.sigma_off)
    def2 = Rating(mu=g.def_player2.mu_def, sigma=g.def_player2.sigma_def)
    team1 = [off1, def1]
    team2 = [off2, def2]
    (new_off1, new_def1), (new_off2, new_def2) = rate([team1, team2], result)
    g.off_player1.mu_off = new_off1.mu
    g.off_player1.sigma_off = new_off1.sigma
    g.off_player1.save()

    g.off_player2.mu_off = new_off2.mu
    g.off_player2.sigma_off = new_off2.sigma
    g.off_player2.save()

    g.def_player1.mu_def = new_def1.mu
    g.def_player1.sigma_def = new_def1.sigma
    g.def_player1.save()

    g.def_player2.mu_def = new_def2.mu
    g.def_player2.sigma_def = new_def2.sigma
    g.def_player2.save()

    archiv = Archiv(game=g, player=g.off_player1, mu=g.off_player1.mu, sigma=g.off_player1.sigma, mu_off=g.off_player1.mu_off, sigma_off=g.off_player1.sigma_off, mu_def=g.off_player1.mu_def, sigma_def=g.off_player1.sigma_def)
    archiv.save()
    archiv = Archiv(game=g, player=g.def_player1, mu=g.def_player1.mu, sigma=g.def_player1.sigma, mu_off=g.def_player1.mu_off, sigma_off=g.def_player1.sigma_off, mu_def=g.def_player1.mu_def, sigma_def=g.def_player1.sigma_def)
    archiv.save()
    archiv = Archiv(game=g, player=g.off_player2, mu=g.off_player2.mu, sigma=g.off_player2.sigma, mu_off=g.off_player2.mu_off, sigma_off=g.off_player2.sigma_off, mu_def=g.off_player2.mu_def, sigma_def=g.off_player2.sigma_def)
    archiv.save()
    archiv = Archiv(game=g, player=g.def_player2, mu=g.def_player2.mu, sigma=g.def_player2.sigma, mu_off=g.def_player2.mu_off, sigma_off=g.def_player2.sigma_off, mu_def=g.def_player2.mu_def, sigma_def=g.def_player2.sigma_def)
    archiv.save()

    return HttpResponseRedirect(reverse('kicker_app:open_games'))
Example #39
    board_size = int(config["GLOBAL"]["board_size"])
    log_dir = config["GLOBAL"]["log_dir"]
    out_dir = config["expertEval"]["dir"]
    out_dir = out_dir.format(board_size=board_size)
    base_dir = "/".join([log_dir, out_dir])
    os.makedirs(base_dir, exist_ok=True)

    exit_dir = config["ExpertIteration"]["dir"]
    exit_dir = exit_dir.format(board_size=board_size)
    step_location = config["ExpertIteration"]["step_location"]
    agent_template = "/".join([log_dir, exit_dir, step_location, "model.h5"])

    trueskill.setup(mu=float(config["TrueSkill"]["initial_mu"]),
                    sigma=float(config["TrueSkill"]["initial_sigma"]),
                    beta=float(config["TrueSkill"]["beta"]),
                    tau=float(config["TrueSkill"]["tau"]),
                    draw_probability=float(
                        config["TrueSkill"]["draw_probability"]),
                    backend="scipy")

    num_threads = int(config["GLOBAL"]["num_threads"])
    iterations = int(config["ExpertIteration"]["iterations"])
    board_size = int(config["GLOBAL"]["board_size"])

    model_file = config["expertEval"]["model_file"]
    nn_agent = NNAgent(model_file)
    depths = [
        0, 50, 100, 500, 1000, 1500, 2000, 2500, 3000, 3500, 4000, 4500, 5000
    ]
    num_matches = int(config["GLOBAL"]["num_matches"])
Example #40
            'games': 0
        }
    for (team, nGames) in games.groupby('t1').size().iteritems():
        teams[team]['games'] += nGames
    for (team, nGames) in games.groupby('t2').size().iteritems():
        teams[team]['games'] += nGames
    for ((team, result), nGames) in games.groupby(['t1', 'result']).size().iteritems():
        teams[team]['wins'] += nGames if result == 1 else 0
        teams[team]['losses'] += nGames if result == 0 else 0
    for ((team, result), nGames) in games.groupby(['t2', 'result']).size().iteritems():
        teams[team]['wins'] += nGames if result == 0 else 0
        teams[team]['losses'] += nGames if result == 1 else 0
    return teams


ts.setup(mu=1500, sigma=300, beta=200, draw_probability=0)


def trueskillExpectedWinRate(r1, r2, blueSide=0):
    deltaMu = r1.mu - r2.mu + blueSide
    sumSigma = r1.sigma ** 2 + r2.sigma ** 2
    denominator = math.sqrt(4 * (200 * 200) + sumSigma)
    return ts.global_env().cdf(deltaMu / denominator)


def g(variance):
    return 1 / math.sqrt(1 + 3 * math.pow(math.log(10) / 400 / math.pi, 2) * variance)


def glickoExpectedWinRate(r1, r2, blueSide=0):
    return 1 / (1 + math.pow(10, g(r1.getRd() * r1.getRd() + r2.getRd() * r2.getRd()) * (r2.getRating() - r1.getRating() - blueSide) / 400))
Example #41
from . import version
import os
import logging
import trueskill
from PyQt4 import QtCore
from logging.handlers import RotatingFileHandler, MemoryHandler

trueskill.setup(mu=1500, sigma=500, beta=250, tau=5, draw_probability=0.10)

_settings = QtCore.QSettings(QtCore.QSettings.IniFormat, QtCore.QSettings.UserScope, "ForgedAllianceForever", "FA Lobby")
_unpersisted_settings = {}


class Settings:
    """
    This wraps QSettings, fetching default values from the
    selected configuration module if the key isn't found.
    """

    @staticmethod
    def get(key, default=None, type=str):
        # Get from a local dict cache before hitting QSettings
        # this is for properties such as client.login which we
        # don't necessarily want to persist
        if key in _unpersisted_settings:
            return _unpersisted_settings[key]
        # Hit QSettings to see if the user has defined a value for the key
        if _settings.contains(key):
            return _settings.value(key, type=type)
        # Try out our defaults for the current environment
        return defaults.get(key, default)
Example #42
import argparse
import logging

from mako.template import Template
import mechanize
import os
import re
import trueskill

CACHE = 'cache'
DATE_STR = '%Y-%m-%d'

parser = argparse.ArgumentParser(description='Create Elo rankings from Challonge brackets')
parser.add_argument('--cache', action='store_true', help="Don't fetch from web, just use results in cache")
parser.add_argument('--html', action='store_true', help='Output to html page')
parser.add_argument('-v', '--verbose', action='store_true', help='Verbose output')
args = parser.parse_args()

trueskill.setup(draw_probability=0)

if args.verbose:
    logging.basicConfig(level=logging.INFO)

class Player:
    def title(self, name):
        if len(name) == 1:
            return name[0].upper()
        else:
            return name[0].upper() + name[1:]

    def clean_up(self, name):
        name = name.lower()

        # remove the ones that include the classes or #number
Example #43
STATIC_ROOT = os.path.join(ABSOLUTE_BASE, 'static2')

# General settings
SUPPORTED_YEARS = list(range(2010, 2018))
LEADERBOARD_COUNT = 200

# For Elo/Trueskill system
import trueskill

DEFAULT_MU = elo.MU.value
DEFAULT_SIGMA = elo.SIGMA.value
DEFAULT_BETA = elo.BETA.value
DEFAULT_TAU = elo.TAU.value
DEFAULT_DRAW_PROBABILITY = elo.DRAW_PROBABILITY.value
ELO_DECAY_ALPHA = elo.DECAY_ALPHA.value
trueskill.setup(mu=DEFAULT_MU, sigma=DEFAULT_SIGMA, beta=DEFAULT_BETA, tau=DEFAULT_TAU,
                draw_probability=DEFAULT_DRAW_PROBABILITY, backend="mpmath")

# For model visualization at /plate/
SPAGHETTI_SAUCE = {
    'apps': ['TBAW'],
    'show_fields': False,
    'exclude': {'auth': ['user']}
}

# NPlusOne Logging
LOGGING_DIR = base.LOGGING_DIR.value
LOGGING = {
    'version': 1,
    'handlers': {
        'console': {
            'level': 'DEBUG',
Example #44
def main():
	mpmath.mp.dps = 1000
	start_time = time.time()
	years = range(1985,2015)
	
	#these parameters values were not varied during optimization
	#initial values for (AP) ranked and unranked teams
	ratedFactor = 30 
	unratedMu = 1 
	unratedSigma = 10 
	ratedSigma = 10 
	beta = 4 
	tau = 0.6 
	unratedRating = 5000 #only used for validation against other metrics
	
	#these parameters were optimized using a truncated Newton algorithm to search
	#8 years were optimized, and each year's values were cross-validated with all other years
	newseasonSigma = 32.038469 
	continuingSigma = 0.0001 
	hometau = 0.0982 
	awaytau = 0.54607 
	diffclose = 9.262603 
	diffblowout = 10.538232 
	
	cols = ['season','Week','Team','RatingMu','RatingSigma','Rating']
	ratingsTable = pd.DataFrame(columns = cols)
	
	ts.setup(mu=unratedMu,sigma=unratedSigma,beta=beta,tau=tau)
	
	#connect to SQL
	login = pd.read_pickle('credentials.pkl')
	username = login.usernames.ix[0]
	password = login.passwords.ix[0]
	con = mdb.connect(host='localhost',user=username,passwd=password,db='NCAA',unix_socket="/tmp/mysql.sock")
	
	#create lists that can hold Rating values
	ratings = []
	ratings.append([])
	ratings.append([])
	
	#season DataFrames
	seasonTeams = pd.read_sql_query('select team from games where season = ' + str(years[0]) + ' group by team',con)
	initialSeasonRankings = pd.read_sql_query('select Team, APrank from rankings where Year = ' + str(years[0]) + ' and Week = 0',con)
	
	#initialize preseason values
	for team in seasonTeams:
		ratings[0].append(team)
		try:
			initialRank = initialSeasonRankings[initialSeasonRankings.Team == team].APrank.values
			teamRating = ts.Rating(mu=(ratedFactor/int(initialRank)), sigma = ratedSigma)
		except (ValueError, TypeError):
			teamRating = ts.Rating()
		ratings[1].append(teamRating)
	del initialSeasonRankings
	del seasonTeams
	
	
	def run_season(year,ratings, ratingsTable):
		#initialize parameters
		year = [year]
	
		for year in year:
			year = str(year)
			year_start_time = time.time()
			
			#season DataFrames
			seasonGames = pd.read_sql_query('select Team, Wk from games where season = ' + year,con)
			
			#initializing values
			for team in seasonGames.Team.unique():
				try:
					teamindex = ratings[0].index(team)
					ratings[1][teamindex] = ts.Rating(mu=ratings[1][teamindex].mu,sigma=newseasonSigma)
				except ValueError:
					ratings[0].append(team)
					teamRating = ts.Rating()
					ratings[1].append(teamRating)			
				
			for week in seasonGames.Wk.unique():
				#initialize week
				week_start_time = time.time()
				print 'week ' + str(week)
				week = str(week)
				weekGames = pd.read_sql_query('select winner, loser, winnerPts, loserPts from games where Wk = ' + week + ' and season = ' + year,con)
				weekGames = weekGames.dropna()
				
				for game in weekGames.winner.unique():
					#getting values
					winnerIndex = ratings[0].index(game)
					loserteam = weekGames[weekGames.winner == game].iloc[0].loser
					loserIndex = ratings[0].index(weekGames[weekGames.winner == game].iloc[0].loser)
					winnerRating = ratings[1][winnerIndex]
					loserRating = ratings[1][loserIndex]
					
					#get location
					location = pd.read_sql_query('select location from games where season = ' + str(year) + ' and Wk = ' + str(week) + ' and winner = \'' + str(game) + '\'',con).values[0]
						
					#get points differential
					winnerPts = weekGames[(weekGames.winner == game) & (weekGames.winnerPts != None)].winnerPts.unique()[0]
					loserPts = weekGames[(weekGames.winner == game) & (weekGames.loserPts != None)].loserPts.unique()[0]
					differential = int(winnerPts) - int(loserPts)
					if differential < 3:
						differential = 1
					elif differential < 14:
						differential = diffclose
					else:
						differential = diffblowout
						
					#playing the game
					winnerRating = ts.Rating(mu=winnerRating.mu,sigma=continuingSigma*winnerRating.sigma)
					loserRating = ts.Rating(mu=loserRating.mu,sigma=continuingSigma*loserRating.sigma)
					
					if location == game:
						env = ts.TrueSkill(tau=hometau*differential, backend='mpmath')
					elif location == loserteam:
						env = ts.TrueSkill(tau=awaytau*differential, backend='mpmath')
					else:
						env = ts.TrueSkill(backend='mpmath')
					winnerRating, loserRating = ts.rate_1vs1(winnerRating, loserRating, env=env)
					del location
					del env
					ratings[1][winnerIndex] = winnerRating
					ratings[1][loserIndex] = loserRating
					
				#saving ranking information
				ratingsmu = []
				ratingssigma = []
				TSrating = []
				for i in range(len(ratings[0])):
					ratingsmu.append(ratings[1][i].mu)
					ratingssigma.append(ratings[1][i].sigma)
					TSrating.append(ratings[1][i].mu - 3*ratings[1][i].sigma)
				data = {'season': year, 'Week': week, 'Team': ratings[0], 'RatingMu': ratingsmu, 'RatingSigma': ratingssigma, 'Rating': TSrating}
				teamRatings = pd.DataFrame(data) #,index=[1])
				teamRatings['Rank'] = len(teamRatings.index)-np.argsort(teamRatings.sort(['Rating'],ascending=False),axis=0).Rating
				ratingsTable = ratingsTable.append(teamRatings,ignore_index=True)
				del teamRatings
						
				print time.time() - week_start_time
			print time.time() - year_start_time
		
		return ratings, ratingsTable
		
	for year in years:
		print 'running year ' + str(year)
		ratings, ratingsTable = run_season(year,ratings,ratingsTable)
		
	#saving results
	ratingsTable.to_pickle('ratingsTable_final.pkl')
	
	print (time.time() - start_time)
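
The per-game update in run_season above builds a fresh TrueSkill environment whose tau is scaled by the margin-of-victory bucket and by whether the winner was at home or away. The sketch below isolates that idea; it is an illustration only, with hypothetical team ratings and point totals, the optimized constants copied from the script above, and the neutral-site branch dropped in favor of a simple home/away flag.

import trueskill as ts

hometau, awaytau = 0.0982, 0.54607
diffclose, diffblowout = 9.262603, 10.538232

def rate_game(winner_rating, loser_rating, winner_pts, loser_pts, winner_at_home):
	# bucket the score differential the same way the script does
	diff = winner_pts - loser_pts
	if diff < 3:
		differential = 1
	elif diff < 14:
		differential = diffclose
	else:
		differential = diffblowout
	# per-game environment: tau grows with the margin, weighted by venue
	tau = (hometau if winner_at_home else awaytau) * differential
	env = ts.TrueSkill(tau=tau, backend='mpmath')
	return ts.rate_1vs1(winner_rating, loser_rating, env=env)

# hypothetical prior ratings for the two teams
home, away = ts.Rating(mu=5, sigma=3), ts.Rating(mu=4, sigma=3)
home, away = rate_game(home, away, winner_pts=31, loser_pts=10, winner_at_home=True)
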
Exemplo n.º 45
0
import time
from datetime import datetime

import mpmath
import MySQLdb as mdb
import numpy as np
import pandas as pd
import trueskill as ts


def main():
	mpmath.mp.dps = 1000
	start_time = time.time()
	seasonYear = datetime.now().year
	weeks = [1,2,3,4,5,6]
	week = 6
	seasonYear = 2013
	iterations = 10
	years = []
	for i in range(iterations):
		years.append(seasonYear)
	
	#these parameter values were not varied during optimization
	#initial values for (AP) ranked and unranked teams
	ratedFactor = 30 
	unratedMu = 1 
	unratedSigma = 10 
	ratedSigma = 10 
	beta = 4 
	tau = 0.6 
	unratedRating = 5000 #only used for validation against other metrics
	
	#these parameters were optimized using a truncated Newton search algorithm
	#8 years were optimized, and each year's values were cross-validated against all other years
	newseasonSigma = 32.038469 
	continuingSigma = 0.0001 
	hometau = 0.0982 
	awaytau = 0.54607 
	diffclose = 9.262603 
	diffblowout = 10.538232 
	
	ts.setup(mu=unratedMu,sigma=unratedSigma,beta=beta,tau=tau)
	
	#connect to SQL
	login = pd.read_pickle('credentials.pkl')
	username = login.usernames.ix[0]
	password = login.passwords.ix[0]
	con = mdb.connect(host='localhost',user=username,passwd=password,db='NCAA',unix_socket="/tmp/mysql.sock")
	
	#create lists that can hold Rating values
	ratings = []
	ratings.append([])
	ratings.append([])
	
	#season DataFrames
	seasonTeams = pd.read_sql_query('select team from currentgames where season = ' + str(seasonYear) + ' group by team',con)
	initialSeasonRankings = pd.read_sql_query('select Team, APrank from rankings where Year = ' + str(seasonYear) + ' and Week = 0',con)
	
	#initialize preseason values
	for team in seasonTeams.team.unique():
		ratings[0].append(team)
		try:
			initialRatings = pd.read_sql_query('select RatingMu, RatingSigma from finalratings where Team = \'' + team + '\' and season = \'' + str(seasonYear) + '\' and Week = \'' + str(int(week)-1) + '\'',con).ix[0]
			ratings[1].append(ts.Rating(mu=initialRatings[0],sigma=initialRatings[1]))
		except (ValueError, TypeError):
			teamRating = ts.Rating()
			ratings[1].append(teamRating)
	del initialSeasonRankings
	del seasonTeams
	
	def run_season(year,ratings):
		#initialize parameters
		year = [year]
	
		for year in year:
			year = str(year)
			year_start_time = time.time()
			
			#season DataFrames
			seasonGames = pd.read_sql_query('select Team, Wk from currentgames where season = ' + year,con)
			
			#initializing values
			for team in seasonGames.Team.unique():
				try:
					teamindex = ratings[0].index(team)
					ratings[1][teamindex] = ts.Rating(mu=ratings[1][teamindex].mu,sigma=newseasonSigma)
				except ValueError:
					ratings[0].append(team)
					teamRating = ts.Rating()
					ratings[1].append(teamRating)			
				
			for week in range(1,6):
				#initialize week
				week_start_time = time.time()
				print 'week ' + str(week)
				week = str(week)
				weekGames = pd.read_sql_query('select winner, loser, winnerPts, loserPts from currentgames where Wk = ' + week + ' and season = ' + year,con)
				weekGames = weekGames.dropna()
				weekGames = weekGames[weekGames.winnerPts != 'None']
				
				for game in weekGames.winner.unique():
					#getting values
					winnerIndex = ratings[0].index(game)
					loserteam = weekGames[weekGames.winner == game].iloc[0].loser
					loserIndex = ratings[0].index(weekGames[weekGames.winner == game].iloc[0].loser)
					winnerRating = ratings[1][winnerIndex]
					loserRating = ratings[1][loserIndex]
						
					#get location
					location = pd.read_sql_query('select location from currentgames where season = ' + str(year) + ' and Wk = ' + str(week) + ' and winner = \'' + str(game) + '\'',con).values[0]
						
					#get points differential
					winnerPts = weekGames[(weekGames.winner == game) & (weekGames.winnerPts != None)].winnerPts.unique()[0]
					loserPts = weekGames[(weekGames.winner == game) & (weekGames.loserPts != None)].loserPts.unique()[0]
					differential = int(winnerPts) - int(loserPts)
					if differential < 3:
						differential = 1
					elif differential < 14:
						differential = diffclose
					else:
						differential = diffblowout
						
					#playing the game
					winnerRating = ts.Rating(mu=winnerRating.mu,sigma=continuingSigma*winnerRating.sigma)
					loserRating = ts.Rating(mu=loserRating.mu,sigma=continuingSigma*loserRating.sigma)
					
					if location == game:
						env = ts.TrueSkill(tau=hometau*differential, backend='mpmath')
					elif location == loserteam:
						env = ts.TrueSkill(tau=awaytau*differential, backend='mpmath')
					else:
						env = ts.TrueSkill(backend='mpmath')
					winnerRating, loserRating = ts.rate_1vs1(winnerRating, loserRating, env=env)
					del location
					del env
					ratings[1][winnerIndex] = winnerRating
					ratings[1][loserIndex] = loserRating
						
				print time.time() - week_start_time
			print time.time() - year_start_time
					
		return ratings
		
	for year in years:
		print 'running year ' + str(year)
		ratings = run_season(year,ratings)
		
		ratingsmu = []
		ratingssigma = []
		TSrating = []
		for i in range(len(ratings[0])):
			ratingsmu.append(ratings[1][i].mu)
			ratingssigma.append(ratings[1][i].sigma)
			TSrating.append(ratings[1][i].mu - 3*ratings[1][i].sigma)
		
	data = {'season': str(year), 'Week': str(week), 'Team': ratings[0], 'RatingMu': ratingsmu, 'RatingSigma': ratingssigma, 'Rating': TSrating}
	teamRatings = pd.DataFrame(data) #,index=[1])
	D1A = pd.read_pickle('D1A.pkl')
	teamRatings = teamRatings[teamRatings.Team.isin(list(D1A.team))]
	teamRatings['Rank'] = len(teamRatings.index)-np.argsort(teamRatings.sort(['Rating'],ascending=False),axis=0).Rating
	ratingsTable = teamRatings.copy()	
		
	oldRatingsTable = pd.read_pickle('ratingsTable_final3.pkl')
	newRatingsTable = oldRatingsTable.merge(ratingsTable,how='outer')
	print newRatingsTable[(newRatingsTable.Team == 'Michigan') & (newRatingsTable.Week == str(week)) & (newRatingsTable.season == str(year))]
	
	#saving results
	newRatingsTable.to_pickle('ratingsTable_final6.pkl')
	newRatingsTable.to_sql(con=con, name='finalratings6',if_exists='replace',flavor='mysql')
	
	print (time.time() - start_time)
Exemplo n.º 46
0
parser.add_argument("--end-date", action="store", dest="end_date", default=datetime.now().isoformat().split("T")[0],
    help="Scrape end date in yyyy-mm-dd")

args = parser.parse_args()

conn = sqlite3.connect(args.db, timeout=10)
c = conn.cursor()

end_date = datetime.strptime(args.end_date, '%Y-%m-%d')

if args.begin_date is not None:
  begin_date = datetime.strptime(args.begin_date, '%Y-%m-%d')
elif args.scrape_lookback is not None:
  begin_date = datetime.now() - timedelta(days=int(args.scrape_lookback))

trueskill.setup(draw_probability=0.0018469)

FENCER_STATE = dict()

def updateRank(bout):
  def getLatestRating(fencerid, weapon):
    if (fencerid, weapon) in FENCER_STATE:
      return FENCER_STATE[(fencerid, weapon)]
    query = c.execute("""
      SELECT r.ts_mu, r.ts_sigma FROM ratings r , bouts b, events e, tournaments t
      WHERE r.fencerid = %(fencerid)s
        AND r.boutid = b.boutid
        AND e.weapon = '%(weapon)s'
        AND b.eventid = e.eventid
        AND e.tournamentid = t.tournamentid
        AND t.start_date < '%(begin_date)s'
Exemplo n.º 47
0
    def calculate_score_1vs1(self, key1_scored_object, key2_scored_object, winning_key, other_comparison_pairs):
        """
        Calculates the scores for a new 1vs1 comparison without re-calculating all previous scores
        :param key1_scored_object: Contains score parameters for key1
        :param key2_scored_object: Contains score parameters for key2
        :param winning_key: indicates which key is the winning key
        :param other_comparison_pairs: Contains all previous comparison_pairs that the 2 keys took part in.
            This is a subset of all comparison pairs and is used to calculate round, win, loss, and opponent counts
        :return: tuple of ScoredObject (key1, key2)
        """
        self.storage = {}
        self.opponents = {}
        self.ratings = {}
        trueskill.setup()

        key1 = key1_scored_object.key
        key2 = key2_scored_object.key

        # Note: if the values are None, trueskill.Rating will use the defaults mu=25 and sigma=8.333
        r1 = trueskill.Rating(mu=key1_scored_object.variable1, sigma=key1_scored_object.variable2)

        r2 = trueskill.Rating(mu=key2_scored_object.variable1, sigma=key2_scored_object.variable2)

        if winning_key == key1:
            r1, r2 = trueskill.rate_1vs1(r1, r2)
        elif winning_key == key2:
            r2, r1 = trueskill.rate_1vs1(r2, r1)
        else:
            raise InvalidWinningKeyException

        self.ratings[key1] = r1
        self.ratings[key2] = r2

        for key in [key1, key2]:
            self.opponents[key] = set()
            self.storage[key] = ScoredObject(
                key=key,
                score=trueskill.expose(self.ratings[key]),
                variable1=self.ratings[key].mu,
                variable2=self.ratings[key].sigma,
                rounds=0,
                opponents=0,
                wins=0,
                loses=0,
            )

        # calculate opponents, wins, losses, and rounds for every match involving key1 and key2
        for comparison_pair in other_comparison_pairs + [ComparisonPair(key1, key2, winning_key)]:
            cp_key1 = comparison_pair.key1
            cp_key2 = comparison_pair.key2
            cp_winning_key = comparison_pair.winning_key

            if cp_key1 == key1 or cp_key1 == key2:
                if cp_winning_key is None:
                    self._update_rounds_only(cp_key1)
                else:
                    self._update_result_stats(cp_key1, cp_key2, cp_winning_key)

            if cp_key2 == key1 or cp_key2 == key2:
                if cp_winning_key is None:
                    self._update_rounds_only(cp_key2)
                else:
                    self._update_result_stats(cp_key2, cp_key1, cp_winning_key)

        return (self.storage[key1], self.storage[key2])
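
The method above keeps its results in self.ratings/self.storage and depends on the project's ScoredObject and ComparisonPair types, so it is not runnable on its own. The stripped-down sketch below shows only the rating arithmetic it performs for a single new comparison; the prior mu/sigma values are hypothetical.

import trueskill

trueskill.setup()

# variable1/variable2 in the method above carry the stored mu/sigma for each key
r1 = trueskill.Rating(mu=26.1, sigma=7.9)   # hypothetical prior for key1
r2 = trueskill.Rating(mu=24.3, sigma=8.1)   # hypothetical prior for key2

# key1 wins the new comparison; for a key2 win the arguments would be swapped
r1, r2 = trueskill.rate_1vs1(r1, r2)

# expose() yields the conservative score that the method stores in ScoredObject.score
score1, score2 = trueskill.expose(r1), trueskill.expose(r2)
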
from database.SessionFactory import SessionFactory

from database.User import User
from database.Match import Match
from database.Game import Game
from database.Score import Score
from database.Participation import Participation
from database.TrueSkillCache import TrueSkillCache
        
import trueskill
import time

from sqlalchemy import func

from resultdict import resultdict

trueskill.setup()

def get_most_recent_ratings(user_id):
    from sqlalchemy.orm.exc import NoResultFound
    session = SessionFactory()
    try:
        participation_matches = session.query( Match.id.label("match_id"), 
                                               Match.date_recorded.label("date") ).\
                                filter( Participation.user_id == user_id ).\
                                filter( Participation.match_id == Match.id ).subquery()
                                
        most_recent_match_date = session.query( participation_matches.c.match_id.label("match_id"),
                                                func.max(participation_matches.c.date).label("date") ).\
                                subquery()
                                
        most_recent_match = session.query( participation_matches.c.match_id.label("match_id") ).\