def get_conservative_rating(self):
    # Conservative ("displayed") rating: the mean penalized by the rating's
    # uncertainty, so high-variance players are not overranked.
    rating = GaussianRating(self.rating, self.rating_variance)
    game_info = TrueSkillGameInfo()
    conservative = rating.conservative_rating(game_info)
    # Clamp values at or below the noise floor to zero.
    if conservative <= 0.001:
        return 0.0
    return conservative
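# Hedged numeric sketch of the conservative rating above. It assumes
# conservative_rating(game_info) computes the usual TrueSkill "displayed"
# skill, mean - k * stdev with k = 3 by default; the figures are illustrative.
#
#   new player:          mean = 25.0, stdev = 25/3  ->  25.0 - 3 * 25/3 = 0.0
#                        (at the 0.001 floor, so the method returns 0.0)
#   established player:  mean = 30.0, stdev = 1.5   ->  30.0 - 3 * 1.5  = 25.5
#                        (returned unchanged)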
def new_rating(self, self_rating, opponent_rating, comparison, game_info=None):
    game_info = TrueSkillGameInfo.ensure_game_info(game_info)

    # Mean difference from the perspective of the player being updated;
    # a loss simply flips the sign.
    if comparison == LOSE:
        mean_delta = opponent_rating.mean - self_rating.mean
    else:
        mean_delta = self_rating.mean - opponent_rating.mean

    # c^2 = sigma_self^2 + sigma_opponent^2 + 2 * beta^2: the total
    # performance variance of the match-up.
    c = sqrt(self_rating.stdev**2.0
             + opponent_rating.stdev**2.0
             + 2.0 * game_info.beta**2.0)

    if comparison != DRAW:
        v = v_exceeds_margin_scaled(mean_delta, game_info.draw_margin, c)
        w = w_exceeds_margin_scaled(mean_delta, game_info.draw_margin, c)
        rank_multiplier = TwoPlayerTrueSkillCalculator.score[comparison]
    else:
        v = v_within_margin_scaled(mean_delta, game_info.draw_margin, c)
        w = w_within_margin_scaled(mean_delta, game_info.draw_margin, c)
        rank_multiplier = 1.0

    # The dynamics factor (tau) inflates the prior variance so ratings of
    # long-standing players can still move.
    variance_with_dynamics = self_rating.stdev**2.0 + game_info.dynamics_factor**2.0
    mean_multiplier = variance_with_dynamics / c
    std_dev_multiplier = variance_with_dynamics / (c**2.0)

    new_mean = self_rating.mean + rank_multiplier * mean_multiplier * v
    new_std_dev = sqrt(variance_with_dynamics * (1.0 - w * std_dev_multiplier))
    return GaussianRating(new_mean, new_std_dev)
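# Minimal usage sketch for the two-player update above; a hypothetical
# illustration, not part of the library. It assumes a WIN comparison constant
# exists alongside LOSE/DRAW and that TrueSkillGameInfo() defaults to the
# usual TrueSkill prior (mu = 25, sigma = 25/3); variable names are made up.
calculator = TwoPlayerTrueSkillCalculator()
game_info = TrueSkillGameInfo()
alice = GaussianRating(25.0, 25.0 / 3.0)
bob = GaussianRating(25.0, 25.0 / 3.0)

# Alice beats Bob: her mean rises and her stdev shrinks; Bob's update is the
# mirror image, computed with the LOSE comparison.
alice_after = calculator.new_rating(alice, bob, WIN, game_info)
bob_after = calculator.new_rating(bob, alice, LOSE, game_info)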
def updated_ratings(self):
    # Collect the posterior player ratings from the output layer of the
    # factor graph, preserving the team structure of the original match.
    results = Match()
    for current_team in self.prior_layer.output_variables_groups:
        team_results = Team()
        for current_player_variable in current_team:
            current_player = current_player_variable.key
            current_player_rating = current_player_variable.value
            team_results[current_player] = GaussianRating(
                current_player_rating.mean, current_player_rating.stdev)
        results.append(team_results)
    return results
def new_team_ratings(self, self_team, other_team,
                     self_to_other_team_comparison, game_info=None):
    game_info = TrueSkillGameInfo.ensure_game_info(game_info)

    # Compare the teams by the sum of their members' means.
    self_mean_sum = sum(rating.mean for rating in self_team.ratings())
    other_team_mean_sum = sum(rating.mean for rating in other_team.ratings())
    if self_to_other_team_comparison == LOSE:
        mean_delta = other_team_mean_sum - self_mean_sum
    else:
        mean_delta = self_mean_sum - other_team_mean_sum

    # c^2 = sum of all player variances + (n1 + n2) * beta^2: the total
    # performance variance across both teams.
    c = sqrt(sum(rating.stdev**2.0 for rating in self_team.ratings())
             + sum(rating.stdev**2.0 for rating in other_team.ratings())
             + (len(self_team) + len(other_team)) * game_info.beta**2.0)
    tau_squared = game_info.dynamics_factor**2.0

    if self_to_other_team_comparison != DRAW:
        v = v_exceeds_margin_scaled(mean_delta, game_info.draw_margin, c)
        w = w_exceeds_margin_scaled(mean_delta, game_info.draw_margin, c)
        rank_multiplier = TwoTeamTrueSkillCalculator.score[self_to_other_team_comparison]
    else:
        v = v_within_margin_scaled(mean_delta, game_info.draw_margin, c)
        w = w_within_margin_scaled(mean_delta, game_info.draw_margin, c)
        rank_multiplier = 1.0

    # Each player's update is weighted by their share of the team's
    # (dynamics-inflated) variance.
    new_team_ratings = Team()
    for self_team_current_player, previous_player_rating in self_team.player_rating():
        variance_with_dynamics = previous_player_rating.stdev**2.0 + tau_squared
        mean_multiplier = variance_with_dynamics / c
        std_dev_multiplier = variance_with_dynamics / (c**2.0)

        new_mean = previous_player_rating.mean + rank_multiplier * mean_multiplier * v
        new_std_dev = sqrt(variance_with_dynamics * (1.0 - w * std_dev_multiplier))
        new_team_ratings[self_team_current_player] = GaussianRating(new_mean, new_std_dev)

    return new_team_ratings
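# Hedged sketch of a 2-vs-2 update using new_team_ratings above. It assumes a
# Team can be filled as a mapping from player keys to GaussianRating values
# (as it is used above) and that a WIN comparison constant exists; the player
# keys and construction style are illustrative only.
calculator = TwoTeamTrueSkillCalculator()
game_info = TrueSkillGameInfo()

red_team = Team()
red_team["red1"] = GaussianRating(25.0, 25.0 / 3.0)
red_team["red2"] = GaussianRating(27.0, 4.0)

blue_team = Team()
blue_team["blue1"] = GaussianRating(24.0, 5.0)
blue_team["blue2"] = GaussianRating(26.0, 25.0 / 3.0)

# Red beats blue: each red player's mean rises in proportion to that player's
# own (dynamics-inflated) variance; call again with LOSE for the blue side.
new_red = calculator.new_team_ratings(red_team, blue_team, WIN, game_info)
new_blue = calculator.new_team_ratings(blue_team, red_team, LOSE, game_info)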
def __init__(self, mean, stdev, last_rating_period=None):
    GaussianRating.__init__(self, mean, stdev)
    # Remember the rating period in which this rating was last updated.
    self.last_rating_period = last_rating_period
def default_rating(self):
    # Fresh players start at the configured initial mean and stdev.
    return GaussianRating(self.initial_mean, self.initial_stdev)