Example #1
def update_players(p1, p2, p1_score, p2_score):

    p1.played_game()
    p2.played_game()

    p1_elo = p1.elo
    p2_elo = p2.elo

    elo_obj = Elo(p1_elo, p2_elo, p1_score, p2_score, elo_settings['K'],
                  elo_settings['beta'])

    # elo predictions
    p1_elo_pred = elo_obj.p1_expected
    p2_elo_pred = elo_obj.p2_expected

    # win-loss predictions
    p1_wl_pred = (p1.wins + 0.5 * p1.losses) / p1.games_played
    p2_wl_pred = (p2.wins + 0.5 * p2.losses) / p2.games_played

    # adjust player ratings
    p1.elo = elo_obj.p1_adjust()
    p2.elo = elo_obj.p2_adjust()

    if p1_score > p2_score:
        # player 1 wins
        p1.add_win()
        p2.add_loss()
        p1_outcome = 1
    elif p2_score > p1_score:
        # player 2 wins
        p2.add_win()
        p1.add_loss()
        p1_outcome = 0
    else:
        # tie
        p1.add_tie()
        p2.add_tie()
        p1_outcome = 0.5

    # compare outcome to predictions
    p2_outcome = 1 - p1_outcome
    p1_elo_error, p1_wl_error = error_calc(p1_elo_pred, p1_wl_pred, p1_outcome)
    p2_elo_error, p2_wl_error = error_calc(p2_elo_pred, p2_wl_pred, p2_outcome)

    # record errors
    p1.elo_error += p1_elo_error
    p1.wl_error += p1_wl_error
    p2.elo_error += p2_elo_error
    p2.wl_error += p2_wl_error

    # nudge true ratings
    p1.nudge_rating()
    p2.nudge_rating()

    return p1, p2
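The Elo class used above is not shown in this example; below is a minimal sketch consistent with its call sites, assuming the conventional logistic expected score (with beta playing the role of the usual 400-point scale) and the standard K-factor update.

class Elo:
    def __init__(self, p1_elo, p2_elo, p1_score, p2_score, K, beta):
        self.p1_elo, self.p2_elo = p1_elo, p2_elo
        self.K = K
        # conventional expected score: 1 / (1 + 10 ** (rating_diff / beta))
        self.p1_expected = 1.0 / (1.0 + 10 ** ((p2_elo - p1_elo) / beta))
        self.p2_expected = 1.0 - self.p1_expected
        # actual result from the raw scores: 1 = win, 0 = loss, 0.5 = tie
        if p1_score > p2_score:
            self.p1_result = 1.0
        elif p2_score > p1_score:
            self.p1_result = 0.0
        else:
            self.p1_result = 0.5

    def p1_adjust(self):
        return self.p1_elo + self.K * (self.p1_result - self.p1_expected)

    def p2_adjust(self):
        return self.p2_elo + self.K * ((1.0 - self.p1_result) - self.p2_expected)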
Example #2
    def add_to_sqlite(self):
        conn = sqlite3.connect(results_database)
        self.create_sqlite()
        c = conn.cursor()
        if self.winner != "" and self.loser != "":
            c.execute(
                """INSERT INTO {} ('winner', 'loser', 'winner_score', 'loser_score')
                            VALUES (?, ?, ?, ?);""".format(self.game),
                (self.winner, self.loser, self.winner_score, self.loser_score))

            conn.commit()
            conn.close()
            elo = Elo(self.winner, self.loser, self.game)
            elo.get_new_elo()

    def add_to_sqlite(self):
        conn = sqlite3.connect(config.DB_CONN)
        self.create_sqlite()
        # result: 1 = win, 0 = loss, None = tie
        c = conn.cursor()
        if self.usr_score > self.opp_score:
            result = 1
        elif self.opp_score > self.usr_score:
            result = 0
        else:
            result = None

        c.execute(
            """INSERT INTO {} ('player', 'opponent', 'usr_score', 'opp_score', 'result')
                VALUES (?, ?, ?, ?, ?);""".format(self.game),
            (self.usr, self.opp, self.usr_score, self.opp_score, result))

        conn.commit()
        conn.close()
        elo = Elo(self.usr, self.opp, result, self.game)
        elo.get_new_elo()
Example #4
def get_expected(pairs):

    p1_name = pairs[0]
    p2_name = pairs[1]

    if p2_name == 'Charlie Hoffman':
        p2_name = 'Charley Hoffman'

    p1_row = fpr.loc[fpr['name'] == p1_name]
    print(p1_row)

    p2_row = fpr.loc[fpr['name'] == p2_name]
    print(p2_row)

    p1_elo = p1_row['ielo'].values[0]
    p2_elo = p2_row['ielo'].values[0]

    elo_expected = Elo.x(p1_elo, p2_elo)

    impact = Glicko.reduce_impact(p2_row['gvar'].values[0])
    mu = (p1_row['glicko'].values[0] - MU) / ratio
    opp_mu = (p2_row['glicko'].values[0] - MU) / ratio
    glicko_expected = Glicko.get_expected(mu, opp_mu, impact)

    p1_asg = p1_row['asg'].values[0]
    p1_var = p1_row['pvar'].values[0]
    p2_asg = p2_row['asg'].values[0]
    p2_var = p2_row['pvar'].values[0]
    sg_x = asg_pred(p1_asg, p1_var, p2_asg, p2_var)

    p1_ldate = p1_row['last_date'].values[0]
    p1_ldate = datetime.datetime.strptime(str(p1_ldate), '%b %d %Y').date()
    current_date = datetime.datetime.strptime('Jul 4 2019', '%b %d %Y').date()
    p1_dsl = current_date - p1_ldate
    p1_days = p1_dsl.days

    p2_ldate = p2_row['last_date'].values[0]
    p2_ldate = datetime.datetime.strptime(str(p2_ldate), '%b %d %Y').date()
    p2_dsl = current_date - p2_ldate
    p2_days = p2_dsl.days

    p1_rnds_played = p1_row['rnds_played'].values[0]
    p2_rnds_played = p2_row['rnds_played'].values[0]

    # row = [elo_expected, sg_x, glicko_expected, 2, p1_days,p1_rnds_played,p2_days,p2_rnds_played]
    # return row
    return elo_expected
Example #5
    def gameStatus(self, agents):
        agents[self.player2].EloWhileTrain.append(agents[self.player2].rating)
        winStatus = self.whoWonThisGame()
        if winStatus[self.player1] == 1:
            self.totalScore[self.player1] += 1
        elif winStatus[self.player2] == 1:
            self.totalScore[self.player2] += 1
        else:
            self.totalScore['Tie'] += 1
        Elo(agents[self.player2],
            agents[self.player1],
            winStatus[self.player2],
            K=(100000000 / (self.nGamePlay + 1))**(1 / 4))

        self.wins.append(winStatus[self.player2])

        self.Runningwinrate = sum(self.wins[-100:]) / len(self.wins[-100:])
        #self.Runningwinrate = (self.totalScore[self.player2] + self.totalScore['Tie']/2) / (self.totalScore[self.player2] + self.totalScore['Tie'] + self.totalScore[self.player1])
        return f'Game {self.nGamePlay:03}, Length: {self.dicesThatHaveBeenRolled:03},      CurrentScore: {self.getCurrentScore()},      TotalScore: {self.totalScore},  Winrate: {round(self.Runningwinrate,2)}'
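For intuition, the decaying K schedule above starts large and settles near 10, so early training games move the agents' ratings far more than later ones:

for n_game_play in (0, 99, 9999):
    print(n_game_play, round((100000000 / (n_game_play + 1)) ** (1 / 4), 1))
# 0 -> 100.0, 99 -> 31.6, 9999 -> 10.0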
Example #6
def generic_league(df,
                   score_column,
                   file_path='./pickled-elo.p',
                   lsw=False,
                   save=True):
    '''
    df: pd.DataFrame: where the scores are recorded. This has 3 required columns.
        Two of those columns must be named 'Date' and 'Player'
        (capitalization does not matter).
        The other needs to contain the score.

    score_column: string: name of the column in df that holds the score

    file_path: string: path where you would like to save your league

    lsw: bool: Low Score Wins. If you play a game where the low score wins, pass in True

    save: bool: whether to save your league to the designated spot.
                If you only want a temporary score, or to see temporary results,
                I recommend passing False.

    ::Steps::
        1) Create the Elo League object
        2) It will attempt to read from your pickled file and
            update the rating dictionary as well as the completed games
        3) It calls the .run() function that will calculate Elo;
            this will calculate for every date found that is not included
            in the ".games_completed" variable (list) of the league object
        4) Once complete, it will save back to the designated file location
        5) Returns the league object
    '''
    #1
    league = Elo(lsw=lsw)
    #2
    try:
        saved_league = pickle_read(file_path)
        league.ratingDict = saved_league.ratingDict
        league.games_completed = saved_league.games_completed
        print('LEAGUE FROM PICKLE')
    except Exception:
        print('NEW LEAGUE')

    #3 run the algo
    league.run(df, score_column)

    #4 save for later
    if save:
        print("SAVING")
        pickle_write(file_path, league)
    #5
    return league
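A minimal usage sketch for generic_league, assuming a DataFrame shaped as the docstring describes; the 'Strokes' column name and the sample rows are illustrative only.

import pandas as pd

scores = pd.DataFrame({
    'Date':    ['2021-06-01', '2021-06-01', '2021-06-08', '2021-06-08'],
    'Player':  ['Alice', 'Bob', 'Alice', 'Bob'],
    'Strokes': [72, 75, 70, 74],
})

# low score wins (lsw=True); save=False keeps this run temporary
league = generic_league(scores, score_column='Strokes', lsw=True, save=False)
print(league.ratingDict)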
Example #7
from elo import Elo

e = Elo(1000, 1000, 64, 1)
rating = e.calculate_new_rating()
print(rating)
Example #8
from collections import defaultdict

import psycopg2

from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.decorators import login_required


# producer = KafkaProducer(value_serializer = lambda v: json.dumps(v).encode('utf-8'),
# 			bootstrap_servers='54.183.195.208:9092')

keys = ["loc","cc","n","volume","prolength","difficulty","effort","bug",
    "time_est","lOCode","lOComment","lOBlank","lOCodeAndComment",
    "uniq_Op","uniq_Opnd","total_Op","total_Opnd"]


global env
global conn
env = Elo(k_factor=10,initial=1500)

try:
    conn = psycopg2.connect("dbname='ratemycode' user='******' host='localhost' password='******'")
    print("Connected to database successfully")
except psycopg2.Error:
    print("Error while connecting to the database")


def CalculateRating():

    try:
        cur = conn.cursor()
        cur.execute("select * from rating_rating")
        rows = cur.fetchall()
        d = defaultdict(list)
Example #9
import os

import numpy as np
import pandas as pd
from elasticsearch import Elasticsearch, helpers

from elo import Elo

elo = Elo(5000)

es = Elasticsearch(
    hosts=[{
        'host': os.environ["ES_HOST"],
        'port': os.environ["ES_PORT"]
    }],
    http_auth=(
        os.environ["ES_USER"],
        os.environ["ES_PASS"]
    )
)

data = pd.DataFrame([thing["_source"] for thing in list(
    helpers.scan(
        es,
        index="assessment",
        query={"query": {"match_all": {}}}
    ))
])

for candidate in np.unique(data[['candidate_a', 'candidate_b']]):
    elo.addPlayer(candidate)
Example #10
# sort by oldest first
sdf['end_date'] = pd.to_datetime(sdf['end_date'], format='%b %d %Y')
sdf = sdf.sort_values(by='end_date', ascending=True)

del cdf
gc.collect()

# iterate tournaments
# START = 0
# END = 2000
# sdf = sdf[START:END]
# print(sdf.head())

# initialize ratings objects
Glicko = Glicko()
Elo = Elo()

# initialize Map
Map = Map()

# divide sg between people who have sample size and not
# 0 means less than 100 rounds
all_sg0_loss = []
all_sg1_loss = []

all_elo_loss = []
all_glicko_loss = []

# elo & glicko by rounds played

# track error by season
Example #11
def play_game(t1, t2):
    # calculating game score for each team with in-game variance added in
    # in-game variance allows lower-skilled teams to upset better teams on a given day
    t1_score = int(np.round(random.gauss(t1.rting, season_settings['game_var']), 0))
    t2_score = int(np.round(random.gauss(t2.rting, season_settings['game_var']), 0))

    # play game
    if t1_score > t2_score:
        t1.add_win()
        t2.add_loss()
        t1_outcome = 1

    elif t2_score > t1_score:
        t2.add_win()
        t1.add_loss()
        t1_outcome = 0

    else:
        # recursive call to play game again until winner is found
        # acts as overtime
        # is able to take regulation performance into account due to updated Elo scores (lower skilled team gets boost,
        # higher skilled team takes a hit)
        t1_outcome = play_game(t1, t2)

    # NOTE: t1_outcome is always bound before the next line runs; in the tie
    # branch it takes the value returned by the recursive overtime call above
    t2_outcome = 1 - t1_outcome


    # assigning Elo scores to new pointers -- don't think this is necessary
    t1_elo = t1.elo
    t2_elo = t2.elo

    # creating Elo class object with Elo scores, game scores, and Elo settings
    elo_obj = Elo(t1_elo, t2_elo, t1_outcome, t2_outcome, elo_settings['K'], elo_settings['beta'])

    # calculating the expected scores from team 1, team 2 given their Elo scores
    t1_elo_exp, t2_elo_exp = elo_obj.calc_expected_scores()

    # update Elo scores based on expected scores, actual scores
    t1.elo, t2.elo = elo_obj.update_elo(t1_elo_exp, t2_elo_exp)

    # calculating error in Elo predictions
    t1_elo_error = (t1_elo_exp - t1_outcome) ** 2
    t2_elo_error = (t2_elo_exp - t2_outcome) ** 2

    # updating total Elo error for each team object
    t1.elo_error += t1_elo_error
    t2.elo_error += t2_elo_error

    # utilize nudge factor to boost true rating of lower-ranked team if upset occurs
    # first scenario: t1 scores an underdog win
    if t1.rting < t2.rting and t1_score > t2_score:
        t1.nudge_rating_up()
        t2.nudge_rating_down()

    # scenario 2: t2 scores an underdog win
    elif t2.rting < t1.rting and t2_score > t1_score:
        t2.nudge_rating_up()
        t1.nudge_rating_down()

    return t1_outcome
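A quick illustration of the in-game variance idea from the comments above: adding gaussian noise to each team's true rating lets the weaker team win a meaningful share of games. The numbers below are illustrative, not the project's season_settings.

import random

def upset_rate(strong=1600, weak=1500, game_var=100, n=20000):
    upsets = 0
    for _ in range(n):
        if random.gauss(weak, game_var) > random.gauss(strong, game_var):
            upsets += 1
    return upsets / n

# a 100-point underdog with sigma = 100 per team wins roughly 24% of games
print(round(upset_rate(), 2))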
Example #12
    def play_game(self):

        elo = Elo()

        elo_diff = self.team1.elo - self.team2.elo
        relo_diff = self.team1.relo - self.team2.relo

        # baseline: 1.215, 1.187, 1.189, 1.1646
        if self.loc == "H":
            elo_diff += h_adv
            relo_diff += relo_h_adv
        elif self.loc == "A":
            elo_diff -= h_adv
            relo_diff -= relo_h_adv

        elox = elo.get_expected(elo_diff)
        relox = elo.get_expected(relo_diff)

        self.team1.played_game()
        self.team2.played_game()

        self.team1.add_win()
        self.team2.add_loss()

        if self.reb_margin > 0:
            reb_result = 1
        elif self.reb_margin < 0:
            reb_result = 0
        else:
            reb_result = 0.5

        self.team1.add_errors(1, reb_result, elox, relox)
        self.team2.add_errors(0, reb_result, (1 - elox), 1 - relox)

        # update ratings

        # else:
        #     dimv_K = 170.59 * (self.games_played ** -0.673)

        # K decays with games played; identical schedule for elo and relo,
        # settling at 18 after 20 games
        if self.games_played <= 4:
            elo_K = 60
        elif self.games_played <= 8:
            elo_K = 45
        elif self.games_played <= 12:
            elo_K = 31
        elif self.games_played <= 16:
            elo_K = 28.5
        elif self.games_played <= 20:
            elo_K = 22
        else:
            elo_K = 18
        relo_K = elo_K

        elo_delta = elo.get_mov_delta(1, elox, self.margin,
                                      (self.team1.elo - self.team2.elo), elo_K)
        relo_delta = elo.get_mov_delta(1, relox, self.reb_margin,
                                       (self.team1.relo - self.team2.relo),
                                       relo_K)

        self.team1.elo += elo_delta
        self.team1.relo += relo_delta
        self.team2.elo -= elo_delta
        self.team2.relo -= relo_delta

        return self.team1, self.team2
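get_mov_delta is defined elsewhere in the project; purely as a hedged sketch of the general technique (not this codebase's implementation), a FiveThirtyEight-style margin-of-victory delta scales the usual K * (result - expected) step by the log of the margin and damps it when the winner was already the big rating favorite.

import math

def mov_delta(result, expected, margin, winner_rating_diff, K):
    # margin-of-victory multiplier; illustrative constants from the public
    # FiveThirtyEight NFL Elo write-ups
    mov_mult = math.log(abs(margin) + 1) * (2.2 / (0.001 * winner_rating_diff + 2.2))
    return K * mov_mult * (result - expected)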
Example #13
def test_systems():
    # load season data
    sdf = pd.read_csv('./data/SeasonResults.csv')

    # separate data into individual seasons
    seasons = list(sdf.Season.unique())

    # set how long before glicko updates
    g_resolve = glicko_set['resolve_time']

    # track error per season
    sea_error = []
    wkbywk_err = None
    first_season = seasons[0]
    for season in tqdm(seasons):
        sea_df = sdf.loc[sdf.Season == season]

        # sort in order
        sea_df = sea_df.sort_values(by='DayNum')
        sea_df = sea_df[[
            'Season', 'DayNum', 'WTeam', 'WScore', 'LTeam', 'LScore'
        ]]

        # get list of teams in season
        wteams = list(sea_df.WTeam.unique())
        lteams = list(sea_df.LTeam.unique())
        teams = list(set((wteams + lteams)))

        load_preseason = False
        # use for preseason rankings
        if season > first_season:
            load_preseason = True
        else:
            prev_team_dir = None

        # create team directory to track everything
        team_dir = init_td(teams, load_preseason, prev_team_dir)

        # init classes
        elo = Elo()
        glicko = Glicko()

        # track error per week
        week_err = []
        wk_l5err = wk_eloerr = wk_ieloerr = wk_gerr = wk_tserr = 0
        wk_gp = 0
        wk_thres = 7
        wk_cnt = 0

        # iterate games
        for index, row in sea_df.iterrows():
            t1 = row['WTeam']
            t2 = row['LTeam']
            team1 = team_dir[t1]
            team2 = team_dir[t2]

            # set max number of games for testing
            # if (team1.gp > 11):
            #     continue
            # if (team2.gp > 11):
            #     continue

            # tracking error by week, so check if it's a new week
            day_num = row['DayNum']
            if day_num > wk_thres:
                # it's a new week
                # add end date of next week
                wk_thres += 7
                # ignore weeks that don't have games
                if wk_gp > 0:
                    wk_l5err /= wk_gp
                    wk_eloerr /= wk_gp
                    wk_ieloerr /= wk_gp
                    wk_gerr /= wk_gp
                    wk_tserr /= wk_gp
                    week_err.append([
                        season, wk_cnt, wk_l5err, wk_eloerr, wk_ieloerr,
                        wk_gerr, wk_tserr
                    ])
                wk_cnt += 1
                wk_l5err = wk_eloerr = wk_ieloerr = wk_gerr = wk_tserr = 0
                wk_gp = 0

            # track games played this week
            wk_gp += 1

            margin = row['WScore'] - row['LScore']

            # get expected outcome for each system
            log5_expect = l5_x(team1.wl, team2.wl)
            elo_expect = elo.x(team1.elo, team2.elo)
            ielo_expect = elo.x(team1.ielo, team2.ielo)
            ts_expect = ts_win_prob([team1.tskill], [team2.tskill])

            # special steps for glicko expectation
            mu, phi = glicko.scale_down(team1.glicko, team1.g_phi)
            mu2, phi2 = glicko.scale_down(team2.glicko, team2.g_phi)
            impact = glicko.reduce_impact(phi2)
            glicko_expect = glicko.get_expected(mu, mu2, impact)

            # update error
            if log5_expect == 0:
                log5_expect += .001
            expects = [
                log5_expect, elo_expect, ielo_expect, glicko_expect, ts_expect
            ]
            t1_errors = calc_error(expects, 1)
            t2_errors = t1_errors
            team1.update_errors(t1_errors)
            team2.update_errors(t2_errors)

            # update week error
            wk_l5err += t1_errors[0]
            wk_eloerr += t1_errors[1]
            wk_ieloerr += t1_errors[2]
            wk_gerr += t1_errors[3]
            wk_tserr += t1_errors[4]

            ## update ratings ##

            # elo
            elo_delta = elo.get_delta(elo_expect)
            t1_ielo_delta, t2_ielo_delta = elo.get_ielo_delta(
                ielo_expect, margin, team1, team2)

            team1.update_rating("elo", elo_delta)
            team1.update_rating("ielo", t1_ielo_delta)

            team2.update_rating("elo", -elo_delta)
            team2.update_rating("ielo", t2_ielo_delta)

            team1.update_ts(team2.tskill, "won")
            team2.update_ts(team1.tskill, "lost")

            # log5
            team1.add_win()
            team2.add_loss()

            # glicko (second arg is win or loss)
            team1.add_glicko_opp(team2, 1)
            team2.add_glicko_opp(team1, 0)

            # check if time to resolve
            if team1.gp % g_resolve == 0:
                team1 = glicko.update(team1)
            if team2.gp % g_resolve == 0:
                team2 = glicko.update(team2)

            team_dir[t1] = team1
            team_dir[t2] = team2

        # add week_err df to season trackers
        week_err = pd.DataFrame(
            week_err,
            columns=['Season', 'Week', 'Log5', 'Elo', 'IElo', 'Glicko', 'TS'])
        if wkbywk_err is None:
            wkbywk_err = week_err
        else:
            wkbywk_err = pd.concat([wkbywk_err, week_err])

        # find total error in season
        sea_gp = 0
        sea_l5err = 0
        sea_eloerr = 0
        sea_ieloerr = 0
        sea_gerr = 0
        sea_tserr = 0
        for team in team_dir.values():
            sea_gp += team.gp
            sea_l5err += team.l5err
            sea_eloerr += team.eloerr
            sea_ieloerr += team.ieloerr
            sea_gerr += team.glickoerr
            sea_tserr += team.tserr
        sea_l5err /= sea_gp
        sea_eloerr /= sea_gp
        sea_ieloerr /= sea_gp
        sea_gerr /= sea_gp
        sea_tserr /= sea_gp

        sea_error.append(
            [season, sea_l5err, sea_eloerr, sea_ieloerr, sea_gerr, sea_tserr])

        # store rankings for preseason rankings next season
        prev_team_dir = team_dir

    final_table = pd.DataFrame(
        sea_error, columns=['Season', 'Log5', 'Elo', 'IElo', 'Glicko', 'TS'])
    print(final_table)
    print(final_table.mean())

    wkbywk = pd.DataFrame(
        wkbywk_err,
        columns=['Season', 'Week', 'Log5', 'Elo', 'IElo', 'Glicko', 'TS'])
    wkbywk = wkbywk.drop(columns=['TS'])
    wk_avg = wkbywk.groupby('Week').mean()

    def plot_weeks(wk_avg):
        import matplotlib.pyplot as plt
        fig, ax = plt.subplots(figsize=(15, 7))
        plt.plot(wk_avg.index.values, wk_avg.Log5, '-k', label='Log5 Baseline')
        plt.plot(wk_avg.index.values, wk_avg.Elo, '-c', label='Elo')
        plt.plot(wk_avg.index.values, wk_avg.IElo, '-b', label='Improved Elo')
        plt.plot(wk_avg.index.values, wk_avg.Glicko, '-r', label='Glicko')

        plt.xlabel("Week of Season")
        plt.ylabel("Cross Entropy Error")
        xint = range(0, 18)
        plt.xticks(xint)

        plt.legend(loc='upper left')

        plt.show()
        return

    plot_weeks(wk_avg)
    return
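calc_error is defined elsewhere; given the plot's "Cross Entropy Error" axis label and the guard that nudges a zero log5 expectation away from log(0), it plausibly computes a per-game log loss for each system's win probability. A sketch under that assumption:

import math

def calc_error_sketch(expects, outcome=1):
    # log loss of each system's predicted probability for the team that actually won
    return [-math.log(p) if outcome == 1 else -math.log(1.0 - p) for p in expects]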
Example #14
    # ['Bryson DeChambeau', 'Joaquin Niemann'],
    # ['Adam Hadwin', 'Nate Lashley'],
    # ['Brooks Koepka', 'Jason Day'],
    # ['Rory Sabbatini','Sungjae Im'],
    # ['Jimmy Walker','Brian Harman'],
    # ['Cameron Champ','Sung Kang'],
    # ['Tony Finau','Viktor Hovland'],
    # ['Mackenzie Hughes','Martin Laird'],
    # ['Charley Hoffman','Lucas Glover'],
    # ['Danny Lee', 'Pat Perez'],
    # ['Peter Malnati','Nick Watney'],
    # ['Daniel Berger','Collin Morikawa'],
    # ['Ryan Moore','Phil Mickelson']
]

Elo = Elo()
Glicko = Glicko()
model = pickle.load(open('model.sav', 'rb'))

all_expected = []
for pair in pairs:
    # expected = np.array(get_expected(pair))
    # prediction = model.predict_proba(expected.reshape(1,-1))[0][0]
    # print(prediction)
    prediction = get_expected(pair)
    p1_name = pair[0]
    p2_name = pair[1]
    all_expected.append([p1_name, p2_name, prediction])

mdf = pd.DataFrame(all_expected, columns=['Player 1', 'Player 2', 'Model'])
Example #15
class EloTest(unittest.TestCase):
    def setUp(self):
        self._teams = pd.Series(data=["TeamA", "TeamB", "TeamC", "TeamD"])
        self._MEAN_ELO_RATING = 1500
        self._elo = Elo(self._teams, mean_rating=self._MEAN_ELO_RATING)

    def test_initialise_ratings(self):
        ratings = self._elo.ratings

        expected = np.empty(self._teams.shape)
        expected.fill(self._MEAN_ELO_RATING)

        npt.assert_equal(ratings.values, expected, err_msg="All ratings should be equal on initialisation.")

    def test_calculate_likelihood(self):
        score = self._elo.calculate_likelihood(self._MEAN_ELO_RATING, self._MEAN_ELO_RATING, 0)
        assert_equal(score, 0.5, "Score should be 0.5.")

        score = self._elo.calculate_likelihood(self._MEAN_ELO_RATING, 0, bias=100)
        assert_almost_equal(score, 1, places=3, msg="Score should almost be 1")

        score = self._elo.calculate_likelihood(0, self._MEAN_ELO_RATING, bias=100)
        assert_almost_equal(score, 0, places=3, msg="Score should almost be 0")

        score = self._elo.calculate_likelihood(0, self._MEAN_ELO_RATING, bias=100)
        assert_almost_equal(score, 0, places=3, msg="Score should almost be 0")

    def test_calculate_likelihood_with_home_advantage(self):
        score = self._elo.calculate_likelihood(self._MEAN_ELO_RATING, 0, bias=100)
        assert_almost_equal(score, 1, places=3, msg="Score should almost be 1")

        score = self._elo.calculate_likelihood(0, self._MEAN_ELO_RATING, bias=-100)
        assert_almost_equal(score, 0, places=3, msg="Score should almost be 0")

        score = self._elo.calculate_likelihood(self._MEAN_ELO_RATING, 1, bias=100)
        score_unbiased = self._elo.calculate_likelihood(self._MEAN_ELO_RATING, 1, 100, bias=-100)
        assert_not_equal(score, score_unbiased, msg="Scores should not be exactly equal, but should be close.")

        score = self._elo.calculate_likelihood(self._MEAN_ELO_RATING, self._MEAN_ELO_RATING, bias=100)
        print(score)
        assert_almost_equal(score, 0.640, places=3, msg="Score should almost be biased towards team A.")

    def test_calculate_likelihood_invalid(self):
        assert_raises(ValueError, self._elo.calculate_likelihood, self._MEAN_ELO_RATING, -1, bias=100)
        assert_raises(ValueError, self._elo.calculate_likelihood, -1, self._MEAN_ELO_RATING, bias=100)
        assert_raises(ValueError, self._elo.calculate_likelihood, -1, -1, bias=100)

    def test_update_rating(self):
        rating = self._elo.update_rating(self._MEAN_ELO_RATING, 1, 0.5)
        assert_equal(rating, 1510, "Rating should be increased by 10.")

        rating = self._elo.update_rating(self._MEAN_ELO_RATING, 0, 0.5)
        assert_equal(rating, 1490, "Rating should be decreased by 10.")

        rating = self._elo.update_rating(self._MEAN_ELO_RATING, 0.5, 0.25)
        assert_equal(rating, 1505, "Rating should increase by 5.")

        rating = self._elo.update_rating(self._MEAN_ELO_RATING, 0.5, 0.75)
        assert_equal(rating, 1495, "Rating should decrease by 5.")

        rating = self._elo.update_rating(self._MEAN_ELO_RATING, 1, 1.0)
        assert_equal(rating, 1500, "Rating should not change, we expected a win.")

        rating = self._elo.update_rating(self._MEAN_ELO_RATING, 0, 0.0)
        assert_equal(rating, 1500, "Rating should not change, we expected a loss.")

        rating = self._elo.update_rating(self._MEAN_ELO_RATING, 0.5, 0.5)
        assert_equal(rating, 1500, "Rating should not change, we expected a tie.")

    def test_update_rating_invalid(self):
        assert_raises(ValueError, self._elo.update_rating, -1, 1, 0.5)
        assert_raises(ValueError, self._elo.update_rating, 1, -1, 0.5)
        assert_raises(ValueError, self._elo.update_rating, 1, 1, -0.5)

    def test_predict_equal_ratings(self):
        game = ("TeamA", "TeamB")
        prediction = self._elo.predict(game)
        team_a_likelihood = prediction.ix[:, 2][0]
        team_b_likelihood = prediction.ix[:, 5][0]
        assert_equal(team_a_likelihood, team_b_likelihood, "Teams with equal ratings should be equally likely")

    def test_predict_unequal_ratings(self):
        self._elo.ratings["TeamA"] = 1800
        self._elo.ratings["TeamB"] = 1200

        game = ("TeamA", "TeamB")
        prediction = self._elo.predict(game)
        team_a_likelihood = prediction.ix[:, 2][0]
        team_b_likelihood = prediction.ix[:, 5][0]

        assert_almost_equal(team_a_likelihood, 0.969346569968, msg="Team A should be more likely to win")
        assert_almost_equal(team_b_likelihood, 0.0306534300317, msg="Team B should be less likely to win")
        assert_almost_equal(
            team_a_likelihood + team_b_likelihood, 1, places=2, msg="Total probability should sum to 1."
        )

    def test_predict_key_error(self):
        assert_raises(KeyError, self._elo.predict, ("NotATeam", "TeamB"))
        assert_raises(KeyError, self._elo.predict, ("TeamA", "NotATeam"))
        assert_raises(KeyError, self._elo.predict, ("NotATeam", "NotATeam"))

    def test_predict_vectorised(self):
        test_data = [("TeamA", "TeamB"), ("TeamC", "TeamD")]
        predictions = self._elo.predict(test_data)

        assert_true(isinstance(predictions, pd.DataFrame))
        assert_equal(predictions.shape, (2, 7))

        team_a_likelihood = predictions.ix[:, 2][0]
        team_b_likelihood = predictions.ix[:, 5][0]
        assert_almost_equal(
            team_a_likelihood + team_b_likelihood, 1, places=2, msg="Total probability should sum to 1."
        )

        team_c_likelihood = predictions.ix[:, 2][1]
        team_d_likelihood = predictions.ix[:, 5][1]
        assert_almost_equal(
            team_c_likelihood + team_d_likelihood, 1, places=2, msg="Total probability should sum to 1."
        )

    def test_train_unequal(self):
        game = ("TeamA", 23, "TeamB", 14)
        self._elo.train(game)

        assert_equal(self._elo.ratings["TeamA"], 1523.0, "TeamA should of increased their rating.")
        assert_equal(self._elo.ratings["TeamB"], 1476.0, "TeamB should of decreased their rating.")

    def test_train_equal(self):
        game = ("TeamA", 10, "TeamB", 10)
        self._elo.train(game)

        assert_equal(
            self._elo.ratings["TeamA"], self._elo.ratings["TeamB"], "Game was a draw. Ratings should remain the same."
        )

    def test_train_with_home_advantage(self):
        self._elo._home_advantage = 100

        game = ("TeamA", 10, "TeamB", 10)
        self._elo.train(game)

        assert_equal(
            self._elo.ratings["TeamA"],
            self._elo.ratings["TeamB"],
            "Home team advantage should cause a draw not to result in equal rankings.",
        )

        game = ("TeamA", 11, "TeamB", 10)
        self._elo.train(game)

        assert_not_equal(
            self._elo.ratings["TeamA"],
            self._elo.ratings["TeamB"],
            "Home team advantage should cause a draw not to result in equal rankings.",
        )
        assert_almost_equal(self._elo.ratings["TeamA"], 1504.0, msg="Team A increases slightly.")
        assert_almost_equal(self._elo.ratings["TeamB"], 1495.0, msg="Team B decreases slightly.")

    def test_train_key_error(self):
        assert_raises(KeyError, self._elo.train, ("NotATeam", 1, "TeamB", 1))
        assert_raises(KeyError, self._elo.train, ("TeamA", 1, "NotATeam", 1))
        assert_raises(KeyError, self._elo.train, ("NotATeam", 1, "NotATeam", 1))

    def test_revert_ratings_to_mean(self):
        self._elo.ratings["TeamA"] = 1800
        self._elo.ratings["TeamB"] = 1200

        self._elo.revert_ratings_to_mean(0.25)
        team_a = self._elo.ratings.ix[0]
        team_b = self._elo.ratings.ix[1]

        assert_equal(team_a, 1575.0, "Rating should be reverted to within 25% of the mean.")
        assert_equal(team_b, 1425.0, "Rating should be reverted to within 25% of the mean.")

    def test_revert_ratings_to_mean_invalid(self):
        assert_raises(ValueError, self._elo.revert_ratings_to_mean, -0.75)
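The update_rating expectations above are consistent with the textbook update R' = R + K * (S - E) with K = 20 (an inference from the asserted numbers, not a documented constant):

K = 20
assert 1500 + K * (1.0 - 0.5) == 1510
assert 1500 + K * (0.0 - 0.5) == 1490
assert 1500 + K * (0.5 - 0.25) == 1505
assert 1500 + K * (0.5 - 0.75) == 1495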
Example #16
    def play_game(self):

        elo = Elo()

        # get expected results for all systems
        wl1 = self.team1.wl
        wl2 = self.team2.wl

        wlmx = (wl1 + (1 - wl2)) / 2

        elo_diff = self.team1.elo - self.team2.elo
        dim_diff = self.team1.dim - self.team2.dim
        mov_diff = self.team1.mov - self.team2.mov
        dimv_diff = self.team1.dimv - self.team2.dimv

        # baseline: 1.215, 1.187, 1.189, 1.1646
        if self.loc == "H":
            elo_diff += h_adv
            dim_diff += h_adv
            mov_diff += h_adv
            dimv_diff += h_adv
        elif self.loc == "A":
            elo_diff -= h_adv
            dim_diff -= h_adv
            mov_diff -= h_adv
            dimv_diff -= h_adv

        # basic elo systems
        elox = elo.get_expected(elo_diff)
        dimx = elo.get_expected(dim_diff)

        # mov systems
        movx = elo.get_expected(mov_diff)
        dimvx = elo.get_expected(dimv_diff)

        # glicko
        # glickox = glicko.get_expected(self.team1.glicko, self.team2.glicko)

        # step
        # stephx = steph.get_expected(self.team1.steph, self.team2.steph)

        self.team1.played_game()
        self.team2.played_game()

        self.team1.add_win()
        self.team2.add_loss()

        self.team1.calc_win_loss()
        self.team2.calc_win_loss()

        p1_x = {
            "wlm": wlmx,
            "elo": elox,
            "dim": dimx,
            "mov": movx,
            "dimv": dimvx,
            # "glicko":glickox,
            # "steph":stephx
        }
        p2_x = {
            "wlm": 1 - wlmx,
            "elo": 1 - elox,
            "dim": 1 - dimx,
            "mov": 1 - movx,
            "dimv": 1 - dimvx,
            # "glicko":glickox,
            # "steph":stephx
        }

        self.team1.add_errors(1, p1_x)
        self.team2.add_errors(0, p2_x)

        # update ratings
        elo_K = elo_set['K']
        elo_delta = elo.get_delta(1, elox, elo_K)

        if self.games_played <= 2:
            dim_K = 170
        elif self.games_played <= 4:
            dim_K = 97.5
        elif self.games_played <= 8:
            dim_K = 67
        elif self.games_played <= 10:
            dim_K = 30
        else:
            dim_K = dim_set['K']
        dim_delta = elo.get_delta(1, dimx, dim_K)

        mov_K = mov_set['K']
        mov_delta = elo.get_mov_delta(1, movx, self.margin,
                                      (self.team1.mov - self.team2.mov), mov_K)

        if self.games_played <= 4:
            dimv_K = 60
        # else:
        #     dimv_K = 170.59 * (self.games_played ** -0.673)
        elif self.games_played <= 8:
            dimv_K = 45
        elif self.games_played <= 12:
            dimv_K = 31
        elif self.games_played <= 16:
            dimv_K = 28.5
        elif self.games_played <= 20:
            dimv_K = 22
        elif self.games_played <= 32:
            dimv_K = 18
        else:
            dimv_K = dimv_set['K']
        dimv_delta = elo.get_mov_delta(1, dimvx, self.margin,
                                       (self.team1.dimv - self.team2.dimv),
                                       dimv_K)

        self.team1.elo += elo_delta
        self.team1.dim += dim_delta
        self.team1.mov += mov_delta
        self.team1.dimv += dimv_delta

        self.team2.elo -= elo_delta
        self.team2.dim -= dim_delta
        self.team2.mov -= mov_delta
        self.team2.dimv -= dimv_delta

        return self.team1, self.team2
Example #18

def make_fide_k_factor(scarce_games, too_low_rating, stabled):
    def fide_k_factor(rating):
        if rating.times < 30:
            return scarce_games
        elif rating.stable:
            assert rating.times >= 30
            return stabled
        assert rating < 2400
        assert rating.times >= 30
        return too_low_rating
    return fide_k_factor


#: The new FIDE rating regulations which using 30, 15 & 10 K-factor. FIDE is
#: using this `since July 1, 2011 <http://www.fide.com/component/content/
#: article/1-fide-news/5421-changes-to-rating-regulations.html>`_.
fide30 = Elo(make_fide_k_factor(30, 15, 10), FIDERating)

#: The old FIDE rating regulations which using 25, 15 & 10 K-factor.
fide25 = Elo(make_fide_k_factor(25, 15, 10), FIDERating)

#: The shortcut to :data:`fide30`.
fide = fide30

#: The USCF rating regulations. The initial rating is 1300 but USCF defined
#: more complex rule. See `the paper <http://www.glicko.net/ratings/
#: rating.system.pdf>`_ of Prof. Mark E. Glickman.
uscf = Elo(lambda r: 32 if r < 2100 else 24 if r < 2400 else 16, initial=1300)
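A quick check of how the k-factor selector above behaves. FIDERating is not shown here; since fide_k_factor compares the rating to 2400 and reads .times and .stable, the stand-in below subclasses float and carries those two attributes.

class SimpleRating(float):
    def __new__(cls, value, times, stable):
        obj = super().__new__(cls, value)
        obj.times = times
        obj.stable = stable
        return obj

k = make_fide_k_factor(30, 15, 10)                    # the fide30 schedule
print(k(SimpleRating(2000, times=10, stable=False)))  # 30: fewer than 30 rated games
print(k(SimpleRating(2450, times=50, stable=True)))   # 10: established rating
print(k(SimpleRating(2000, times=50, stable=False)))  # 15: 30+ games, still under 2400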
Example #19
cdf = pd.read_csv(collected_path)
collected_ids = list(cdf.TID.unique())

# only need tournaments that have been scraped
sdf = sdf.loc[sdf['tid'].isin(collected_ids)]

# sort by oldest first
sdf['end_date'] = pd.to_datetime(sdf['end_date'], format='%b %d %Y')
sdf = sdf.sort_values(by='end_date', ascending=True)

del cdf
gc.collect()

# initialize ratings objects
Glicko = Glicko()
Elo = Elo()

# divide sg between golfers who have sample size and not
# 0 means less than 100 rounds

sdf = sdf.reset_index()

# one season at a time
seasons = list(sdf.season.unique())
seasons.sort()

all_sg_loss = []
all_esg_loss = []
all_gsg_loss = []
all_l5_loss = []
# all_relo_loss = []