Example #1
0
def rate_all():
    games = Game.query.all() 
    users = sanitized_users(games)
    g_vec = sanitized_games(games)

    ratings = {u.id: u.last_rating() for u in users}
    rating_prior = {u: v.rating if (v and v.rating) else 0 for u,v in ratings.items()} 

    neighbors = rm.neighbors(games)
    neighbor_avgs = rm.compute_avgs(games, rating_prior) 

    # Take the date bounds from the sanitized result tuples; picking them off the raw
    # Game rows can select an entry whose date_played is None.
    t_min = min(g[3] for g in g_vec)
    t_max = max(g[3] for g in g_vec)

    iters = 100
    lam = .37  # Weight controlling the 'pull' of the neighborhood weighted average. Higher = ratings stay closer to their neighborhood average.
    lrn = lambda i: ((1. + .1*iters)/(i + .1 * iters))**.6  # Decaying learning rate (see the schedule sketch after Example #2).

    for i in range(iters):
        loss = 0
        # Accumulate the neighborhood regularization loss before this iteration's rating updates
        for id, neighbor_wgt in neighbor_avgs.items():
            loss += lam * ((rating_prior[id] - neighbor_wgt) ** 2)

        # Shuffle the vector of result-tuples
        random.shuffle(g_vec)
        for g in g_vec:
            w, b, actual, t, handi, komi = g
            odds = rm.expect(rating_prior[b], rating_prior[w], handi, komi)
            weight = rm.time_weight(t, t_min, t_max)
            rating_prior[w] -= lrn(i) * (weight*(odds - actual)*odds*(1-odds) + (lam/len(neighbors[w]) * (rating_prior[w] - neighbor_avgs[w])))
            rating_prior[b] -= lrn(i) * (-1.0 * weight*(odds - actual)*odds*(1-odds) + (lam/len(neighbors[b]) * (rating_prior[b] - neighbor_avgs[b])))
            loss += weight * ((odds - actual) ** 2)

        # Rescale the ratings onto a fixed 0-40 range (min-max normalization)
        r_min = min(rating_prior.values())
        r_max = max(rating_prior.values()) 
        if r_max != r_min:
            for k,v in rating_prior.items():
                rating_prior[k] = (rating_prior[k] - r_min) / (r_max - r_min) * 40.0

        neighbor_avgs = rm.compute_avgs(games, rating_prior) 
        print('%d : %.4f' % (i, loss))

    # Update the ratings and show how we did.
    wins, losses = {}, {}
    for g in g_vec:
        wins[g[0]] = wins.get(g[0], 0) + g[2]
        losses[g[0]] = losses.get(g[0], 0) + 1-g[2]
        wins[g[1]] = wins.get(g[1], 0) + 1-g[2]
        losses[g[1]] = losses.get(g[1], 0) + g[2]

    
    for k in sorted(rating_prior, key=lambda k: rating_prior[k]): 
        db.session.add(Rating(user_id=k, rating=rating_prior[k]))
        print("%d: %f (%d - %d)" % (k,rating_prior[k], wins.get(k,0), losses.get(k,0)) )
    db.session.commit()
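The inner loop above treats rm.expect as the predicted probability that white wins and takes a squared-error gradient step on it, plus a pull toward the neighborhood average. rm.expect itself never appears in these examples, so the sketch below is only an assumption about its shape: a logistic curve over the rating gap, with handicap and komi folded into an offset (the helper names, offset formula, and scale constant are hypothetical). If the expectation really is logistic in the gap, the factor (odds - actual) * odds * (1 - odds) in the update is exactly the derivative of (odds - actual)**2 / 2 with respect to that gap, which is why it appears in both rating updates with opposite signs.

import math

def expect_sketch(r_black, r_white, handicap, komi, scale=4.0):
    """Hypothetical stand-in for rm.expect: P(white wins) as a logistic
    function of the rating gap. The handicap/komi offset and `scale` are
    illustrative guesses, not the project's actual values."""
    offset = handicap + (komi - 7.0) / 14.0  # assumed adjustment, for illustration only
    x = (r_white - r_black - offset) / scale
    return 1.0 / (1.0 + math.exp(-x))

def sgd_step_sketch(r_white, r_black, actual, odds, weight, lr):
    """One time-weighted squared-error step on both ratings, mirroring the
    update inside rate_all (the neighborhood-pull term is omitted here)."""
    grad = weight * (odds - actual) * odds * (1 - odds)
    return r_white - lr * grad, r_black + lr * grad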
Example #2
0
def rate_all():
    games = Game.query.all()
    users = sanitized_users(games)
    g_vec = sanitized_games(games)

    ratings = {u.id: u.last_rating() for u in users}
    rating_prior = {
        u: v.rating if (v and v.rating) else 0
        for u, v in ratings.items()
    }

    neighbors = rm.neighbors(games)
    neighbor_avgs = rm.compute_avgs(games, rating_prior)

    # Take the date bounds from the sanitized result tuples; picking them off the raw
    # Game rows can select an entry whose date_played is None.
    t_min = min(g[3] for g in g_vec)
    t_max = max(g[3] for g in g_vec)

    iters = 100
    lam = .37  # Weight controlling the 'pull' of the neighborhood weighted average. Higher = ratings stay closer to their neighborhood average.
    lrn = lambda i: (
        (1. + .1 * iters) /
        (i + .1 * iters))**.6  # Decaying learning rate (see the sketch after this example).

    for i in range(iters):
        loss = 0
        # Accumulate the neighborhood regularization loss before this iteration's rating updates
        for id, neighbor_wgt in neighbor_avgs.items():
            loss += lam * ((rating_prior[id] - neighbor_wgt)**2)

        # Shuffle the vector of result-tuples
        random.shuffle(g_vec)
        for g in g_vec:
            w, b, actual, t, handi, komi = g
            odds = rm.expect(rating_prior[b], rating_prior[w], handi, komi)
            weight = rm.time_weight(t, t_min, t_max)
            rating_prior[w] -= lrn(i) * (
                weight * (odds - actual) * odds * (1 - odds) +
                (lam / len(neighbors[w]) *
                 (rating_prior[w] - neighbor_avgs[w])))
            rating_prior[b] -= lrn(i) * (
                -1.0 * weight * (odds - actual) * odds * (1 - odds) +
                (lam / len(neighbors[b]) *
                 (rating_prior[b] - neighbor_avgs[b])))
            loss += weight * ((odds - actual)**2)

        # Rescale the ratings onto a fixed 0-40 range (min-max normalization)
        r_min = min(rating_prior.values())
        r_max = max(rating_prior.values())
        if r_max != r_min:
            for k, v in rating_prior.items():
                rating_prior[k] = (rating_prior[k] - r_min) / (r_max -
                                                               r_min) * 40.0

        neighbor_avgs = rm.compute_avgs(games, rating_prior)
        print('%d : %.4f' % (i, loss))

    # Update the ratings and show how we did.
    wins, losses = {}, {}
    for g in g_vec:
        wins[g[0]] = wins.get(g[0], 0) + g[2]
        losses[g[0]] = losses.get(g[0], 0) + 1 - g[2]
        wins[g[1]] = wins.get(g[1], 0) + 1 - g[2]
        losses[g[1]] = losses.get(g[1], 0) + g[2]

    for k in sorted(rating_prior, key=lambda k: rating_prior[k]):
        db.session.add(Rating(user_id=k, rating=rating_prior[k]))
        print("%d: %f (%d - %d)" %
              (k, rating_prior[k], wins.get(k, 0), losses.get(k, 0)))
    db.session.commit()
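The lrn lambda in both copies above is a smooth polynomial decay keyed to iters. The small restatement below (the helper name is mine; the formula is copied verbatim) just makes the schedule visible for the default iters=100.

def lrn_schedule(i, iters=100, power=.6):
    # Same expression as the lrn lambda above, pulled out so the decay is easy to inspect.
    return ((1. + .1 * iters) / (i + .1 * iters)) ** power

for i in (0, 10, 50, 99):
    print(i, round(lrn_schedule(i), 3))  # roughly 1.059, 0.699, 0.361, 0.253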
Example #3
0
def rate_all(t_from=datetime.datetime.utcfromtimestamp(1.0),
             t_to=None,
             iters=200, lam=.22):
    """
    t_from -- datetime obj, rate all games after this
    t_to -- datetime obj, rate all games up to this (defaults to the time of the call)
    iters -- number of iterations
    lam -- 'neighborhood pull' parameter; higher values penalize moving a rating away from its neighborhood average more heavily
    """
    # A datetime.now() default would be evaluated once at import time, so resolve it here.
    if t_to is None:
        t_to = datetime.datetime.now()
    games = Game.query.filter(Game.date_played < t_to, Game.date_played > t_from)
    g_vec = sanitized_games(games)
    users = sanitized_users(g_vec)

    print("found %d users with %d valid games" % (len(users), len(g_vec)) )

    aga_ids_to_uids = {int(u.aga_id): u.id for u in users}

    ratings = {int(u.aga_id): u.last_rating() for u in users}
    rating_prior = {id: v.rating if (v and v.rating) else 20 for id,v in ratings.items()} 
    print("%d users with no priors" % len(list(filter(lambda v: v == 20, rating_prior.values()))))

    neighbors = rm.neighbors(g_vec)
    neighbor_avgs = rm.compute_avgs(g_vec, rating_prior) 

    t_min = min([g[3] for g in g_vec])
    t_max = max([g[3] for g in g_vec])

    lrn = lambda i: ((1. + .1*iters)/(i + .1 * iters))**.3  # Decaying learning rate (same schedule as above, with a gentler exponent).

    for i in range(iters):
        loss = 0
        # Accumulate the neighborhood regularization loss before this iteration's rating updates
        for id, neighbor_wgt in neighbor_avgs.items():
            loss += lam * ((rating_prior[id] - neighbor_wgt) ** 2)

        # Shuffle the vector of result-tuples and step through them, accumulating error.
        random.shuffle(g_vec)
        for g in g_vec:
            w, b, actual, t, handi, komi = g
            odds = rm.expect(rating_prior[b], rating_prior[w], handi, komi)
            weight = rm.time_weight(t, t_min, t_max)
            rating_prior[w] -= lrn(i) * (weight*(odds - actual)*odds*(1-odds) + (lam/len(neighbors[w]) * (rating_prior[w] - neighbor_avgs[w])))
            rating_prior[b] -= lrn(i) * (-1.0 * weight*(odds - actual)*odds*(1-odds) + (lam/len(neighbors[b]) * (rating_prior[b] - neighbor_avgs[b])))
            loss += weight * ((odds - actual) ** 2)

        # Rescale the ratings onto a fixed 0-40 range (min-max normalization)
        r_min = min(rating_prior.values())
        r_max = max(rating_prior.values()) 
        if r_max != r_min:
            for k,v in rating_prior.items():
                rating_prior[k] = (rating_prior[k] - r_min) / (r_max - r_min) * 40.0

        # Recompute neighborhood averages with the updated ratings
        neighbor_avgs = rm.compute_avgs(g_vec, rating_prior) 
        if (i % 50 == 0):
            print('%d : %.4f' % (i, loss))

    # Update the ratings and show how we did.
    wins, losses = {}, {}
    for g in g_vec:
        wins[g[0]] = wins.get(g[0], 0) + g[2]
        losses[g[0]] = losses.get(g[0], 0) + 1-g[2]
        wins[g[1]] = wins.get(g[1], 0) + 1-g[2]
        losses[g[1]] = losses.get(g[1], 0) + g[2]

    for k in sorted(rating_prior, key=lambda k: rating_prior[k])[-10:]: 
        print("%d (uid: %d): %f (%d - %d)" % (k, aga_ids_to_uids[k], rating_prior[k], wins.get(k,0), losses.get(k,0)) )
    
    for k in sorted(rating_prior, key=lambda k: rating_prior[k]): 
        db.session.add(Rating(user_id=aga_ids_to_uids[k], rating=rating_prior[k], created=t_to))
    db.session.commit()
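rate_all leans on rm.neighbors and rm.compute_avgs for the neighborhood-pull term, but their bodies never appear here. From the way they are used (len(neighbors[w]) and a per-player average that ratings get pulled toward), one consistent reading is: neighbors maps each player to the set of opponents they have faced, and compute_avgs maps each player to the mean current rating of those opponents. The sketch below is that assumption only, mirroring the g_vec-based call signature of this example; the real compute_avgs may well weight the opponents (the comments call it a weighted average).

from collections import defaultdict

def neighbors_sketch(g_vec):
    """Assumed semantics: the opponents each player has faced at least once."""
    nbrs = defaultdict(set)
    for w, b, actual, t, handi, komi in g_vec:
        nbrs[w].add(b)
        nbrs[b].add(w)
    return nbrs

def compute_avgs_sketch(g_vec, ratings):
    """Assumed semantics: unweighted mean current rating of each player's opponents."""
    nbrs = neighbors_sketch(g_vec)
    return {p: sum(ratings[o] for o in opps) / len(opps)
            for p, opps in nbrs.items()}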
Example #4
0
 def test_time_weight(self):
     t1 = date(1999,1,1) 
     t2 = date(2000,1,1)
     self.assertEqual(rm.time_weight(t2, t1, t2), 1)
     self.assertAlmostEqual(rm.time_weight(t1, t1, t2), 0, places=3) 
Example #5
0
 def test_time_weight(self):
     t1 = date(1999, 1, 1)
     t2 = date(2000, 1, 1)
     self.assertEqual(rm.time_weight(t2, t1, t2), 1)
     self.assertAlmostEqual(rm.time_weight(t1, t1, t2), 0, places=3)
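The test only pins down the endpoints: full weight at t_max and approximately zero weight at t_min. rm.time_weight itself is not shown, so the following is merely one implementation that satisfies both assertions, assuming a simple linear ramp over the date range; the real function could just as well be an exponential decay.

def time_weight_sketch(t, t_min, t_max):
    """Linear ramp from 0 at t_min to 1 at t_max (an assumption consistent
    with test_time_weight, not necessarily the project's rm.time_weight)."""
    span = (t_max - t_min).total_seconds()
    if span == 0:
        return 1.0
    return (t - t_min).total_seconds() / span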
Example #6
0
def rate_all(t_from=datetime.datetime.utcfromtimestamp(1.0),
             t_to=None,
             iters=200,
             lam=.22):
    """
    t_from -- datetime obj, rate all games after this
    t_to -- datetime obj, rate all games up to this (defaults to the time of the call)
    iters -- number of iterations
    lam -- 'neighborhood pull' parameter; higher values penalize moving a rating away from its neighborhood average more heavily
    """
    # A datetime.now() default would be evaluated once at import time, so resolve it here.
    if t_to is None:
        t_to = datetime.datetime.now()
    games = Game.query.filter(Game.date_played < t_to,
                              Game.date_played > t_from)
    g_vec = sanitized_games(games)
    users = sanitized_users(g_vec)

    print("found %d users with %d valid games" % (len(users), len(g_vec)))

    aga_ids_to_uids = {int(u.aga_id): u.id for u in users}

    ratings = {int(u.aga_id): u.last_rating() for u in users}
    rating_prior = {
        id: v.rating if (v and v.rating) else 20
        for id, v in ratings.items()
    }
    print("%d users with no priors" %
          len(list(filter(lambda v: v == 20, rating_prior.values()))))

    neighbors = rm.neighbors(g_vec)
    neighbor_avgs = rm.compute_avgs(g_vec, rating_prior)

    t_min = min([g[3] for g in g_vec])
    t_max = max([g[3] for g in g_vec])

    lrn = lambda i: (
        (1. + .1 * iters) /
        (i + .1 * iters))**.3  # Decaying learning rate (same schedule as above, with a gentler exponent).

    for i in range(iters):
        loss = 0
        # Accumulate the neighborhood regularization loss before this iteration's rating updates
        for id, neighbor_wgt in neighbor_avgs.items():
            loss += lam * ((rating_prior[id] - neighbor_wgt)**2)

        # Shuffle the vector of result-tuples and step through them, accumulating error.
        random.shuffle(g_vec)
        for g in g_vec:
            w, b, actual, t, handi, komi = g
            odds = rm.expect(rating_prior[b], rating_prior[w], handi, komi)
            weight = rm.time_weight(t, t_min, t_max)
            rating_prior[w] -= lrn(i) * (
                weight * (odds - actual) * odds * (1 - odds) +
                (lam / len(neighbors[w]) *
                 (rating_prior[w] - neighbor_avgs[w])))
            rating_prior[b] -= lrn(i) * (
                -1.0 * weight * (odds - actual) * odds * (1 - odds) +
                (lam / len(neighbors[b]) *
                 (rating_prior[b] - neighbor_avgs[b])))
            loss += weight * ((odds - actual)**2)

        # Rescale the ratings onto a fixed 0-40 range (min-max normalization)
        r_min = min(rating_prior.values())
        r_max = max(rating_prior.values())
        if r_max != r_min:
            for k, v in rating_prior.items():
                rating_prior[k] = (rating_prior[k] - r_min) / (r_max -
                                                               r_min) * 40.0

        # Recompute neighborhood averages with the updated ratings
        neighbor_avgs = rm.compute_avgs(g_vec, rating_prior)
        if (i % 50 == 0):
            print('%d : %.4f' % (i, loss))

    # Update the ratings and show how we did.
    wins, losses = {}, {}
    for g in g_vec:
        wins[g[0]] = wins.get(g[0], 0) + g[2]
        losses[g[0]] = losses.get(g[0], 0) + 1 - g[2]
        wins[g[1]] = wins.get(g[1], 0) + 1 - g[2]
        losses[g[1]] = losses.get(g[1], 0) + g[2]

    for k in sorted(rating_prior, key=lambda k: rating_prior[k])[-10:]:
        print("%d (uid: %d): %f (%d - %d)" %
              (k, aga_ids_to_uids[k], rating_prior[k], wins.get(
                  k, 0), losses.get(k, 0)))

    for k in sorted(rating_prior, key=lambda k: rating_prior[k]):
        db.session.add(
            Rating(user_id=aga_ids_to_uids[k],
                   rating=rating_prior[k],
                   created=t_to))
    db.session.commit()
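A hypothetical invocation of rate_all, assuming the function and its models are importable in the app context: rate one calendar year of games with more iterations and a weaker neighborhood pull. The date window and hyperparameters are illustrative only, not recommendations.

import datetime

rate_all(t_from=datetime.datetime(2018, 1, 1),
         t_to=datetime.datetime(2019, 1, 1),
         iters=300,
         lam=.1)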