def gp_puzzle_nub(diff_degree=2., amp=1., scale=1.5, steps=100):
    """ Generate a puzzle nub connecting point a to point b"""
    M, C = uninformative_prior_gp(0., diff_degree, amp, scale)
    gp.observe(M, C, data.puzzle_t, data.puzzle_x, data.puzzle_V)
    GPx = gp.GPSubmodel('GP', M, C, pl.arange(1))
    X = GPx.value.f(pl.arange(0., 1.0001, 1. / steps))

    M, C = uninformative_prior_gp(0., diff_degree, amp, scale)
    gp.observe(M, C, data.puzzle_t, data.puzzle_y, data.puzzle_V)
    GPy = gp.GPSubmodel('GP', M, C, pl.arange(1))
    Y = GPy.value.f(pl.arange(0., 1.0001, 1. / steps))

    return X, Y
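# Usage sketch (not from the original source): draw one random nub and plot it.
# Assumes the snippet's context: pylab imported as pl, pymc.gp as gp, and the
# project-specific `data` module and `uninformative_prior_gp` helper available.
if __name__ == '__main__':
    X, Y = gp_puzzle_nub(diff_degree=2., amp=1., scale=1.5, steps=100)
    pl.plot(X, Y, 'k-')           # one realization of the (x(t), y(t)) nub curve
    pl.axis('equal')
    pl.savefig('puzzle_nub.png')  # hypothetical output file name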
def make_model(n_fmesh=11, fmesh_is_obsmesh=False):
    x = np.arange(-1., 1., .1)

    # Prior parameters of C
    nu = pm.Uniform('nu', 1., 3, value=1.5)
    phi = pm.Lognormal('phi', mu=.4, tau=1, value=1)
    theta = pm.Lognormal('theta', mu=.5, tau=1, value=1)

    # The covariance dtrm C is valued as a Covariance object.
    @pm.deterministic
    def C(eval_fun=gp.matern.euclidean, diff_degree=nu, amp=phi, scale=theta):
        return gp.NearlyFullRankCovariance(eval_fun, diff_degree=diff_degree,
                                           amp=amp, scale=scale)

    # Prior parameters of M
    a = pm.Normal('a', mu=1., tau=1., value=1)
    b = pm.Normal('b', mu=.5, tau=1., value=0)
    c = pm.Normal('c', mu=2., tau=1., value=0)

    # The mean M is valued as a Mean object.
    def linfun(x, a, b, c):
        return a * x**2 + b * x + c

    @pm.deterministic
    def M(eval_fun=linfun, a=a, b=b, c=c):
        return gp.Mean(eval_fun, a=a, b=b, c=c)

    # The actual observation locations
    actual_obs_locs = np.linspace(-.8, .8, 4)

    if fmesh_is_obsmesh:
        o = actual_obs_locs
        fmesh = o
    else:
        # The unknown observation locations
        o = pm.Normal('o', actual_obs_locs, 1000., value=actual_obs_locs)
        fmesh = np.linspace(-1, 1, n_fmesh)

    # The GP submodel
    sm = gp.GPSubmodel('sm', M, C, fmesh)

    # Observation variance
    V = pm.Lognormal('V', mu=-1, tau=1, value=.0001)
    observed_values = pm.rnormal(actual_obs_locs**2, 10000)

    # The data d is just array-valued. It's normally distributed about GP.f(obs_x).
    d = pm.Normal('d', mu=sm.f(o), tau=1. / V, value=observed_values, observed=True)

    return locals()
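# Usage sketch (not from the original source): build the model dict and run a
# short MCMC chain over it. Assumes numpy as np, pymc as pm and pymc.gp as gp
# are imported as in the module above; the iteration counts are arbitrary.
if __name__ == '__main__':
    vars = make_model(n_fmesh=11, fmesh_is_obsmesh=False)
    m = pm.MCMC(vars)                  # pymc picks the variables out of locals()
    m.sample(iter=5000, burn=1000, thin=10)
    nu_trace = m.trace('nu')[:]        # posterior draws of the Matern smoothness
    phi_trace = m.trace('phi')[:]      # posterior draws of the covariance amplitude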
# The covariance dtrm C is valued as a Covariance object.
# (The diff_degree, amp and scale priors used as defaults below are assumed to
# be defined earlier in the original module; that part of the snippet is not shown.)
@pm.deterministic
def C(eval_fun=gp.matern.euclidean, diff_degree=diff_degree, amp=amp, scale=scale):
    return gp.NearlyFullRankCovariance(eval_fun, diff_degree=diff_degree,
                                       amp=amp, scale=scale)

# Prior parameters of M
a = pm.Normal('a', mu=1., tau=1.)
b = pm.Normal('b', mu=.5, tau=1.)
c = pm.Normal('c', mu=2., tau=1.)

# The mean M is valued as a Mean object.
def linfun(x, a, b, c):
    # return a * x ** 2 + b * x + c
    return 0. * x + c

@pm.deterministic
def M(eval_fun=linfun, a=a, b=b, c=c):
    return gp.Mean(eval_fun, a=a, b=b, c=c)

# The GP submodel
fmesh = np.linspace(-np.pi / 3.3, np.pi / 3.3, 4)
sm = gp.GPSubmodel('sm', M, C, fmesh)

# Observation variance
V = .0001

# The data d is just array-valued. It's normally distributed about GP.f(obs_x).
init_val = np.random.normal(size=len(fmesh))
d = pm.Normal('d', mu=sm.f_eval, tau=1. / V, value=init_val, observed=True)
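# Usage sketch (not from the original source): sample the module-level model
# above. GPEvaluationGibbs is pymc.gp's Gibbs step for the GP evaluation under
# normally distributed observations; the variable list and iteration counts
# here are illustrative.
m = pm.MCMC([a, b, c, M, C, sm, d])
m.use_step_method(gp.GPEvaluationGibbs, sm, V, d)  # conjugate update of sm.f_eval
m.sample(iter=2000, burn=500)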
def __init__(self, fname, playedto=None):
    super(LeagueFullModel, self).__init__()
    league = League(fname, playedto)
    N = len(league.teams)

    def outcome_eval(home=None, away=None):
        if home > away:
            return 1
        if home < away:
            return -1
        if home == away:
            return 0

    def clip_rate(val):
        if val > 0.2:
            return val
        else:
            return 0.2

    def linfun(x, c):
        return 0. * x + c

    # The covariance dtrm C is valued as a Covariance object.
    #@pm.deterministic
    #def C(eval_fun=gp.matern.euclidean, diff_degree=diff_degree, amp=amp, scale=scale):
    #    return gp.NearlyFullRankCovariance(eval_fun, diff_degree=diff_degree, amp=amp, scale=scale)

    self.goal_rate = np.empty(N, dtype=object)
    self.def_rate = np.empty(N, dtype=object)
    self.goal_var = np.empty(N, dtype=object)
    self.def_var = np.empty(N, dtype=object)
    self.match_rate = np.empty(len(league.games) * 2, dtype=object)
    self.outcome_future = np.empty(len(league.games), dtype=object)
    self.match_goals_future = np.empty(len(league.future_games) * 2, dtype=object)
    self.home_adv = Uniform(name='home_adv', lower=0., upper=2.0)
    self.league = league

    fmesh = np.arange(0., league.n_days + 2.)

    for t in league.teams.values():
        # Prior parameters of C
        diff_degree_g = pm.Uniform('diff_degree_g_%i' % t.team_id, 1., 3)
        amp_g = pm.Uniform('amp_g_%i' % t.team_id, .01, 2.)
        scale_g = pm.Uniform('scale_g_%i' % t.team_id, 1., 10.)
        diff_degree_d = pm.Uniform('diff_degree_d_%i' % t.team_id, 1., 3)
        amp_d = pm.Uniform('amp_d_%i' % t.team_id, .01, 2.)
        scale_d = pm.Uniform('scale_d_%i' % t.team_id, 1., 10.)

        @pm.deterministic(name='C_d%i' % t.team_id)
        def C_d(eval_fun=gp.matern.euclidean, diff_degree=diff_degree_d,
                amp=amp_d, scale=scale_d):
            return gp.NearlyFullRankCovariance(eval_fun, diff_degree=diff_degree,
                                               amp=amp, scale=scale)

        @pm.deterministic(name='C_g%i' % t.team_id)
        def C_g(eval_fun=gp.matern.euclidean, diff_degree=diff_degree_g,
                amp=amp_g, scale=scale_g):
            return gp.NearlyFullRankCovariance(eval_fun, diff_degree=diff_degree,
                                               amp=amp, scale=scale)

        self.goal_rate[t.team_id] = Exponential('goal_rate_%i' % t.team_id, beta=1)
        self.def_rate[t.team_id] = Exponential('def_rate_%i' % t.team_id, beta=1)

        @pm.deterministic(name='M_d%i' % t.team_id)
        def M_d(eval_fun=linfun, c=self.def_rate[t.team_id]):
            return gp.Mean(eval_fun, c=c)

        @pm.deterministic(name='M_g%i' % t.team_id)
        def M_g(eval_fun=linfun, c=self.goal_rate[t.team_id]):
            return gp.Mean(eval_fun, c=c)

        self.def_var[t.team_id] = gp.GPSubmodel('smd_%i' % t.team_id, M_d, C_d, fmesh)
        self.goal_var[t.team_id] = gp.GPSubmodel('smg_%i' % t.team_id, M_g, C_g, fmesh)

    for game in range(len(league.games)):
        gd = int(game / (league.n_teams / 2))
        assert (gd < league.n_days)
        self.match_rate[2 * game] = Poisson(
            'match_rate_%i' % (2 * game),
            mu=Deterministic(
                eval=clip_rate,
                parents={
                    'val': self.goal_var[league.games[game].hometeam.team_id].f_eval[gd]
                    - self.def_var[league.games[game].awayteam.team_id].f_eval[gd]
                    + self.home_adv
                },
                doc='clipped goal rate',
                name='clipped_h_%i' % game),
            value=league.games[game].homescore,
            observed=True)
        self.match_rate[2 * game + 1] = Poisson(
            'match_rate_%i' % (2 * game + 1),
            mu=Deterministic(
                eval=clip_rate,
                parents={
                    'val': self.goal_var[league.games[game].awayteam.team_id].f_eval[gd]
                    - self.def_var[league.games[game].hometeam.team_id].f_eval[gd]
                },
                doc='clipped goal rate',
                name='clipped_a_%i' % game),
            value=league.games[game].awayscore,
            observed=True)

    for game in range(len(league.future_games)):
        gd = league.n_days
        self.match_goals_future[2 * game] = Poisson(
            'match_goals_future_%i_home' % game,
            mu=Deterministic(
                eval=clip_rate,
                parents={
                    'val': self.goal_var[league.future_games[game][0].team_id].f_eval[gd]
                    - self.def_var[league.future_games[game][1].team_id].f_eval[gd]
                    + self.home_adv
                },
                doc='clipped goal rate',
                name='clipped_fut_h_%i' % game))
        self.match_goals_future[2 * game + 1] = Poisson(
            'match_goals_future_%i_away' % game,
            mu=Deterministic(
                eval=clip_rate,
                parents={
                    'val': self.goal_var[league.future_games[game][1].team_id].f_eval[gd]
                    - self.def_var[league.future_games[game][0].team_id].f_eval[gd]
                },
                doc='clipped goal rate',
                name='clipped_fut_a_%i' % game))
        self.outcome_future[game] = Deterministic(
            eval=outcome_eval,
            parents={
                'home': self.match_goals_future[2 * game],
                'away': self.match_goals_future[2 * game + 1]
            },
            name='match_outcome_future_%i' % game,
            dtype=int,
            doc='The outcome of the match')
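# Usage sketch (not from the original source): the base class of LeagueFullModel
# and the League file format are not shown in this excerpt, so this only
# illustrates one plausible way to fit the model; 'games.csv' and the MCMC
# settings are made up.
model = LeagueFullModel('games.csv', playedto=None)
mc = pm.MCMC(model)                   # pymc gathers the variables stored on the instance
mc.sample(iter=10000, burn=2000, thin=5)
home_adv = mc.trace('home_adv')[:]    # posterior draws of the home advantage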