def decide_on_players(bot_ids: Iterable[BotID], rank_sys: RankingSystem,
                      ticket_sys: TicketSystem) -> Tuple[List[BotID], List[BotID]]:
    """
    Find two balanced teams. Uses the TicketSystem and the RankingSystem to find a fair match-up
    between some bots that haven't played for a while.
    """
    limit = 200
    tries_left = limit
    while tries_left > 0:
        tries_left -= 1

        # Pick some bots that haven't played for a while
        picked = ticket_sys.pick_bots(bot_ids)
        shuffle(picked)
        ratings = [rank_sys.get(bot) for bot in picked]

        blue = tuple(ratings[0:3])
        orange = tuple(ratings[3:6])

        # Is this a fair match? The required fairness relaxes as we run out of tries.
        required_fairness = min(tries_left / limit, MIN_REQ_FAIRNESS)
        quality = trueskill.quality([blue, orange])
        if quality >= required_fairness:
            print(f"Match: {picked[0:3]} vs {picked[3:6]}\nMatch quality: {quality}")
            ticket_sys.choose(picked, bot_ids)
            return picked[0:3], picked[3:6]

    raise Exception("Failed to find a fair match")
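
# Hedged sketch (not part of the league logic): a quick way to see what trueskill.quality
# rewards in the fairness check above. quality() returns a value near 1.0 for evenly matched
# teams and near 0.0 for lopsided ones. The ratings below are made-up example values.
def _demo_match_quality():
    import trueskill

    even_team = [trueskill.Rating(mu=25.0, sigma=2.0) for _ in range(3)]
    strong_team = [trueskill.Rating(mu=40.0, sigma=2.0) for _ in range(3)]

    # Evenly matched teams score high; lopsided teams score low, which is why
    # the loop above can afford to relax required_fairness as tries run out.
    print(trueskill.quality([even_team, even_team]))    # high quality, a fair match
    print(trueskill.quality([even_team, strong_team]))  # low quality, a likely stomp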

def decide_on_players_2(bot_ids: Iterable[BotID], rank_sys: RankingSystem,
                        ticket_sys: TicketSystem) -> Tuple[List[BotID], List[BotID]]:
    """
    Find two balanced teams. Uses the TicketSystem and the RankingSystem to find a fair match-up
    between some bots that haven't played for a while.
    """

    # Composing a team of the best player + the two worst players will likely yield a balanced match (0, 4, 5).
    # These represent a few arrangements like that which seem reasonable to try; they will be checked against
    # the trueskill system.
    likely_balances = [(0, 4, 5), (0, 3, 5), (0, 2, 5), (0, 3, 4)]

    # Experimental average quality based on limit:
    # 1000: 0.4615
    #  400: 0.460
    #  100: 0.457
    #   10: 0.448
    num_bot_groups_to_test = 400

    # How much we value the tightness of the rating distribution in a given match.
    # A higher number will yield matches with similarly skilled bots, but potentially a lower probability of a draw.
    tightness_weight = 1.0

    tries_left = num_bot_groups_to_test
    best_quality_found = 0
    best_score_found = 0
    best_match = None
    chosen_balance = None

    while tries_left > 0:
        tries_left -= 1

        # Pick some bots that haven't played for a while
        picked = ticket_sys.pick_bots(bot_ids)
        candidates = [Candidate(bot, rank_sys.get(bot)) for bot in picked]
        candidates.sort(key=lambda c: float(c.rating), reverse=True)
        tightness = 1 / (numpy.std([float(c.rating) for c in candidates]) + 1)

        for balance in likely_balances:
            blue_candidates = candidates[balance[0]], candidates[balance[1]], candidates[balance[2]]
            orange_candidates = [c for c in candidates if c not in blue_candidates]
            quality = trueskill.quality([[c.rating for c in blue_candidates],
                                         [c.rating for c in orange_candidates]])
            score = quality + tightness * tightness_weight
            if score > best_score_found:
                best_score_found = score
                best_quality_found = quality
                best_match = (blue_candidates, orange_candidates)
                chosen_balance = balance

    blue_ids = [c.bot_id for c in best_match[0]]
    orange_ids = [c.bot_id for c in best_match[1]]
    tickets_consumed = sum(ticket_sys.get_ensured(b) for b in blue_ids + orange_ids)
    print(f"Match: {blue_ids} vs {orange_ids}\nMatch quality: {best_quality_found} score: {best_score_found} "
          f"Rank pattern: {chosen_balance} Tickets consumed: {tickets_consumed}")
    ticket_sys.choose(blue_ids + orange_ids, bot_ids)
    return blue_ids, orange_ids
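
# Hedged sketch (illustrative only): how a "rank pattern" from likely_balances splits six
# candidates, sorted best to worst, into two complementary teams, and what the tightness
# term measures. The ratings are made-up numbers standing in for float(Candidate.rating).
def _demo_rank_pattern():
    import numpy

    ratings = [45.0, 38.0, 31.0, 27.0, 22.0, 18.0]  # sorted best to worst
    balance = (0, 4, 5)                             # best player + the two worst players

    blue = [ratings[i] for i in balance]
    orange = [r for i, r in enumerate(ratings) if i not in balance]
    print(blue, orange)  # [45.0, 22.0, 18.0] vs [38.0, 31.0, 27.0]

    # Tightness approaches 1.0 when all six ratings are similar and falls toward 0.0 as
    # they spread out, so the combined score prefers groups of similarly skilled bots.
    tightness = 1 / (numpy.std(ratings) + 1)
    print(tightness)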

def get_max_mmr_diff(players: Tuple[List[BotID], List[BotID]], rank_sys: RankingSystem) -> float:
    mus = [rank_sys.get(bot).mu for bot in players[0] + players[1]]
    return max(mus) - min(mus)

def get_trueskill_quality(players: Tuple[List[BotID], List[BotID]], rank_sys: RankingSystem) -> float:
    blue_ratings = [rank_sys.get(bot) for bot in players[0]]
    orange_ratings = [rank_sys.get(bot) for bot in players[1]]
    return trueskill.quality([blue_ratings, orange_ratings])
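
# Hedged usage sketch: the two helpers above can score a proposed match after the fact.
# _StubRankSys is hypothetical and only mimics the RankingSystem.get() call they rely on;
# the mu values are made up.
def _demo_match_metrics():
    import trueskill

    class _StubRankSys:
        def __init__(self, mus):
            self._mus = mus

        def get(self, bot_id):
            return trueskill.Rating(mu=self._mus[bot_id], sigma=3.0)

    rank_sys = _StubRankSys({"a": 30.0, "b": 28.0, "c": 26.0, "d": 31.0, "e": 27.0, "f": 25.0})
    match = (["a", "b", "c"], ["d", "e", "f"])
    print(get_max_mmr_diff(match, rank_sys))      # 31.0 - 25.0 = 6.0
    print(get_trueskill_quality(match, rank_sys))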

def decide_on_players_3(bot_ids: Iterable[BotID], rank_sys: RankingSystem,
                        ticket_sys: TicketSystem) -> Tuple[List[BotID], List[BotID]]:
    """
    Find two balanced teams. Uses the TicketSystem and the RankingSystem to find a fair match-up
    between some bots that haven't played for a while.
    """

    # Higher ticket strength produces a more uniform distribution of matches played; adjust in increments of 0.1
    TICKET_STRENGTH = 1

    # Higher MMR tolerance allows accurately rated bots to play in more "distant" MMR matches; adjust in increments of 1
    MMR_TOLERANCE = 4

    # Max attempts to build a match of quality >= MIN_QUALITY
    MAX_ITERATIONS = 20
    MIN_QUALITY = 0.4

    rank_sys.ensure_all(bot_ids)
    ticket_sys.ensure(bot_ids)

    best_quality = 0
    best_match = None
    max_tickets = max(ticket_sys.get(bot_id) for bot_id in bot_ids)

    for _ in range(MAX_ITERATIONS):
        # Get the leader bot (choose randomly between the bots with the highest tickets)
        possible_leaders = [bot_id for bot_id, tickets in ticket_sys.tickets.items()
                            if tickets == max_tickets and bot_id in bot_ids]
        leader = numpy.random.choice(possible_leaders)

        # Get mu for the leader bot; that will be the match MMR
        match_mmr = rank_sys.get(leader).mu

        # Score all bots based on their probability to perform at the target MMR, scaled by their amount of tickets
        candidates = [Candidate(bot_id, rank_sys.get(bot_id)) for bot_id in bot_ids if bot_id != leader]
        scores = []
        for c in candidates:
            # Calculate the probability to perform at the desired MMR
            performance_prob = pdf(match_mmr, mu=c.rating.mu,
                                   sigma=math.sqrt(c.rating.sigma ** 2 + MMR_TOLERANCE ** 2))

            # Calculate the weighting factor based on tickets
            tickets = ticket_sys.get(c.bot_id)
            tickets_weight = tickets ** TICKET_STRENGTH

            # Calculate the candidate score
            scores.append(performance_prob * tickets_weight)

        # Pick 5 bots randomly based on their score
        probs = numpy.asarray(scores) / sum(scores)
        players = list(numpy.random.choice(candidates, size=5, p=probs, replace=False))
        players.append(Candidate(leader, rank_sys.get(leader)))

        # Get the highest quality match with the 6 chosen bots.
        # itertools.combinations yields each 3-bot team in lexicographic order, so the complement
        # of the i-th team is the (last - i)-th team; pairing the first half with the reversed
        # second half enumerates every blue-vs-orange split exactly once.
        combinations = list(itertools.combinations(players, 3))
        possible_matches = len(combinations) // 2
        blue_combs = combinations[:possible_matches]
        orange_combs = combinations[:possible_matches - 1:-1]
        for i in range(possible_matches):
            blue_team = blue_combs[i]
            orange_team = orange_combs[i]
            quality = trueskill.quality([[c.rating for c in blue_team], [c.rating for c in orange_team]])
            if quality > best_quality:
                best_quality = quality
                best_match = (blue_team, orange_team)

        if best_quality >= MIN_QUALITY:
            break

    # We sort by get_mmr() because it considers sigma
    blue_ids = sorted([c.bot_id for c in best_match[0]], key=lambda bot_id: rank_sys.get_mmr(bot_id), reverse=True)
    orange_ids = sorted([c.bot_id for c in best_match[1]], key=lambda bot_id: rank_sys.get_mmr(bot_id), reverse=True)

    tickets_consumed = sum(ticket_sys.get_ensured(b) for b in blue_ids + orange_ids)
    print(f"Match: {blue_ids} vs {orange_ids}\nMatch quality: {best_quality} Tickets consumed: {tickets_consumed}")
    ticket_sys.choose(blue_ids + orange_ids, bot_ids)
    return blue_ids, orange_ids
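
# Hedged sketch: decide_on_players_3 relies on a pdf() helper defined elsewhere in the
# project. The sketch below assumes it is the normal probability density and shows how the
# MMR_TOLERANCE term widens each candidate's curve. _gaussian_pdf is a hypothetical
# stand-in, not the project's implementation.
def _demo_performance_prob():
    import math

    def _gaussian_pdf(x, mu=0.0, sigma=1.0):
        # Density of the normal distribution N(mu, sigma^2) evaluated at x
        return math.exp(-((x - mu) ** 2) / (2 * sigma ** 2)) / (sigma * math.sqrt(2 * math.pi))

    match_mmr = 30.0
    mmr_tolerance = 4.0
    # For a candidate rated mu=25, sigma=3: folding the tolerance into sigma flattens the
    # density, so bots a few MMR away from the leader still get a meaningful selection weight.
    print(_gaussian_pdf(match_mmr, mu=25.0, sigma=3.0))
    print(_gaussian_pdf(match_mmr, mu=25.0, sigma=math.sqrt(3.0 ** 2 + mmr_tolerance ** 2)))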