Code example #1
File: survivor.py  Project: jietang/nflsurvivor
# imports needed by this snippet; get_elo_probabilities lives in util, as in the other examples below
import operator

import numpy as np

from util import get_elo_probabilities

def survivor_dp():
    teams_playing_in_week = get_elo_probabilities()
    current_states = set([()])
    best_log_prob = {(): 0.0}
    best_path = {(): []}

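    # DP over weeks: a state is the sorted tuple of teams already used;
    # best_log_prob maps each state to its best cumulative log win probability,
    # and best_path records the corresponding week-by-week pick order.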
    for week in range(6):
        next_states = set()
        next_log_prob_dict = {}
        for current_state in current_states:
            for team, log_prob in teams_playing_in_week[week]:
                if team in current_state:
                    continue
                next_state = tuple(sorted(list(current_state) + [team]))
                next_log_prob = best_log_prob[current_state] + log_prob
                if next_log_prob > next_log_prob_dict.get(next_state, -1e100):
                    next_log_prob_dict[next_state] = next_log_prob
                    best_path[next_state] = best_path[current_state] + [team]
                    next_states.add(next_state)

        # compute best log prob, best path
        best_state, best_prob = max(next_log_prob_dict.iteritems(), key=operator.itemgetter(1))
        path = best_path[best_state]
        print "Best after week ", week, np.exp(best_prob), path

        best_log_prob.update(next_log_prob_dict)
        current_states = next_states

    return current_states, best_log_prob, best_path
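
A minimal usage sketch (not part of the original file): assuming get_elo_probabilities returns, for each week, a list of (team, log_win_probability) tuples as the code above implies, the best six-week plan can be read back out of the returned structures like this.

states, log_probs, paths = survivor_dp()
# the final state with the highest cumulative log probability
best_state = max(states, key=log_probs.get)
print "best 6-week plan:", paths[best_state], "win probability:", np.exp(log_probs[best_state])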
Code example #2
File: monte_carlo.py  Project: jietang/nflsurvivor
import random
from collections import defaultdict
from pprint import pprint as pp

import numpy as np
from scipy import stats

from util import do_cprofile, get_elo_probabilities, get_opponents_by_week

NWEEKS = 17


teams_playing_in_week = get_elo_probabilities()
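# probability_by_week[w]: dict mapping each team playing in week w to its (non-log) Elo win probability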
probability_by_week = []
for team_prob_tuples in teams_playing_in_week:
    probability_by_week.append({t: np.exp(prob) for t, prob in team_prob_tuples})

opponents_by_week = get_opponents_by_week(teams_playing_in_week)

# per week: (teams, sampling probabilities), each win probability raised to the 1.5
# power and renormalized, which biases sampling toward the stronger favorites
team_sample_probs_by_week = []
for team_prob_tuples in teams_playing_in_week:
    t_p_tuples = [(t, np.exp(p) ** 1.5) for t, p in team_prob_tuples]
    teams, probs = zip(*t_p_tuples)
    normalized_probs = np.array(probs) / sum(probs)
    team_sample_probs_by_week.append((teams, normalized_probs))


def repeat_until(pred, fn):
    x = fn()
    while not pred(x):
        x = fn()
    return x
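
A sketch of one way the tempered per-week sampling tables above could drive a Monte Carlo draw. sample_season is a hypothetical helper, not shown in the original file, and it assumes team_sample_probs_by_week covers all NWEEKS weeks.

def sample_season(nweeks=NWEEKS):
    def draw():
        # draw one pick per week, weighted by the tempered win probabilities
        picks = []
        for week in range(nweeks):
            teams, probs = team_sample_probs_by_week[week]
            picks.append(np.random.choice(teams, p=probs))
        return picks
    # resample until no team is picked twice
    return repeat_until(lambda picks: len(set(picks)) == len(picks), draw)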
Code example #3
File: genetic.py  Project: PlumpMath/nflsurvivor
from util import get_elo_probabilities, TEAMS

# monte carlo simulation suggests 12.5 for 10 ppl at 0.75, 10.5 for 10 ppl at .7
# lets go for week 12
GAMES = 12
POPULATION = 1000
PRUNE = 0.1
GENERATIONS = 100

#STARTING_PICKS = ['MIA', 'PIT']
STARTING_PICKS = ['DAL', 'NO', 'SEA']
NUM_STARTING_PICKS = len(STARTING_PICKS)
# TODO write out and read in data from other sources (lines?)

teams_playing_in_week = get_elo_probabilities()
week_dicts = []
for team_prob_tuples in teams_playing_in_week:
    week_dicts.append({t: prob for t, prob in team_prob_tuples})


def is_valid(genome):
    return all(t in week_dicts[i] for i, t in enumerate(genome))


def repeat_until(pred, fn):
    x = fn()
    while not pred(x):
        x = fn()
    return x
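
A sketch of how repeat_until and is_valid might combine to seed the genetic search with random valid genomes. random_genome and seed_genome are hypothetical names, not shown in the original file.

import random

def random_genome():
    # keep the fixed starting picks, then choose a random team playing in each later week
    genome = list(STARTING_PICKS)
    for week in range(NUM_STARTING_PICKS, GAMES):
        genome.append(random.choice(list(week_dicts[week])))
    return genome

# resample until every pick is playing that week and no team is used twice
seed_genome = repeat_until(
    lambda g: is_valid(g) and len(set(g)) == len(g),
    random_genome)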