Code Example #1
def run_competition(clients: List[Tuple[str, Client, ts.Rating]],
                    num_games: int) -> List[Tuple[str, ts.Rating]]:
    game_id = 0
    while True:
        idx = get_client_pair_indexes(clients)

        (n1, c1, r1), (n2, c2, r2) = np.take(clients, idx, axis=0)

        if ts.quality_1vs1(r1, r2) < 0.2:
            continue

        first_won = run_game(c1, c2)
        c_win, c_los, n_win, n_los, r_win, r_los, i_win, i_los = \
            (c1, c2, n1, n2, r1, r2, idx[0], idx[1]) if first_won \
                else (c2, c1, n2, n1, r2, r1, idx[1], idx[0])
        r_win, r_los = ts.rate_1vs1(r_win, r_los)
        clients[i_win] = (n_win, c_win, r_win)
        clients[i_los] = (n_los, c_los, r_los)

        # print(f'{n_win:<32} {ts.expose(r_win):6.1f}   -- WON --   {n_los:<32} {ts.expose(r_los):6.1f}')

        game_id += 1
        if game_id == num_games:
            break
    return [(n, r) for (n, c, r) in clients]
Code Example #2
File: stats.py Project: Hywan/rank-me
def get_fairness(player, competition):
    """
    Compute the probability of draw against all opponents (ie. how fair is the
    game). Return an OrderedDict of players by score.
    """
    qualities = {}

    own_score = competition.get_score(player)
    opponents = competition.players.exclude(pk=player.pk)
    scores = (competition.scores.filter(
        player__in=opponents).select_related('player'))
    score_by_opponent = {score.player: score for score in scores}

    for opponent in opponents:
        if opponent not in score_by_opponent:
            continue

        score = score_by_opponent[opponent]

        quality = quality_1vs1(Rating(own_score.score, own_score.stdev),
                               Rating(score.score, score.stdev))
        qualities[opponent] = {'score': score, 'quality': quality * 100}

    return OrderedDict(
        sorted(qualities.items(),
               key=lambda t: t[1]['score'].score,
               reverse=True))
Code Example #3
File: task.py Project: bigcrowd/crowdsource-platform
def make_matchups(workers_to_match, project_group_id, review_project, inter_task_review, match_group_id, batch_id):
    matched_workers = []
    for index in xrange(0, len(workers_to_match)):
        if workers_to_match[index] not in matched_workers:
            if len(workers_to_match) - len(matched_workers) == 1:
                is_last_worker = True
                start = 0
            else:
                is_last_worker = False
                start = index + 1
            first_worker = workers_to_match[index]
            first_score = trueskill.Rating(mu=first_worker['score'].mu,
                                           sigma=first_worker['score'].sigma)
            best_quality = 0
            second_worker = None
            is_intertask_match = None
            for j in xrange(start, len(workers_to_match)):
                if is_last_worker or workers_to_match[j] not in matched_workers:
                    second_score = trueskill.Rating(mu=workers_to_match[j]['score'].mu,
                                                    sigma=workers_to_match[j]['score'].sigma)
                    quality = trueskill.quality_1vs1(first_score, second_score)
                    if quality > best_quality:
                        is_intertask_match = False
                        best_quality = quality
                        second_worker = workers_to_match[j]

            if second_worker is not None:
                matched_workers.append(first_worker)
                if not is_intertask_match:
                    matched_workers.append(second_worker)
                create_review_task(first_worker, second_worker, review_project, match_group_id, batch_id)
Code Example #4
async def matchmaking(context):
    """Finds a player to play against"""
    discord_id = context.message.author.id
    mention = context.message.author.mention
    try:
        player = Player.objects.get(discord_id=discord_id)
    except Player.DoesNotExist:
        await context.send(
            mention + ' you are not registered in the league! '
            'type ' + BOT.command_prefix +
            'register <jstris nickname> to register in the league')
        return
    # TrueSkill setup
    setup(mu=TRUESKILL_MU,
          sigma=TRUESKILL_SIGMA,
          beta=TRUESKILL_BETA,
          tau=TRUESKILL_TAU)
    if 'scipy' in backends.available_backends():
        # scipy can be used in the current environment
        backends.choose_backend(backend='scipy')
    # Setup the Rating for the asking player
    rating_asking = Rating(mu=player.trueskill_mu,
                           sigma=player.trueskill_sigma)
    players = Player.objects.exclude(discord_id=discord_id)
    best_match = None
    best_quality = 0.0
    for player_match in players:
        player_rating = Rating(mu=player_match.trueskill_mu,
                               sigma=player_match.trueskill_sigma)
        quality = quality_1vs1(rating_asking, player_rating)
        if quality > best_quality:
            best_quality = quality
            best_match = player_match
    await context.send(player.discord_nickname + ' should play against ' +
                       best_match.discord_nickname)
Code Example #5
    def get_policy_file_key_weighted_by_skill_similarity(
            self, policy_to_match_with, exclude_keys=None):
        if exclude_keys is None:
            exclude_keys = []

        exclude_keys.append(policy_to_match_with)
        policy_skill_rating, data, games_ranked_on, policy_mixture = self.catalog[
            policy_to_match_with]

        tmp_catalog = self._get_catalog_subset(exclude_keys=exclude_keys)

        all_policy_keys, policy_vals = zip(*tmp_catalog.items())
        all_policy_skills = [v[0] for v in policy_vals]
        weights = [
            quality_1vs1(rating1=policy_skill_rating, rating2=other)
            for other in all_policy_skills
        ]
        weights = np.asarray(weights)
        weights /= sum(weights)

        selected_policy_key = np.random.choice(all_policy_keys, p=weights)

        assert selected_policy_key != policy_to_match_with

        selected_policy_key = self.sample_policy_key_if_key_is_for_mixture(
            selected_policy_key)

        return selected_policy_key
Code Example #6
    def win_probability(self, players: List[str]) -> Dict[str, float]:
        """
        Return the win and draw probabilities of two players.

        Take care that the draw probability plus the two win probabilities do
        not sum to 1. The two win probabilities *do* sum to 1, though.

        :players: List of two player names.
        :returns: Dictionary with 'player_1', 'player_2', and 'draw' as keys
          and the respective probabilities as values.
        """
        if len(players) != 2:
            raise NotImplementedError(
                "Win probability is only implemented for two players.")
        # Retrieve ratings based on earlier games.
        # Default rating will be used if players have not played earlier.
        true_skills = self.true_skill()
        player_1 = true_skills.get(players[0], Rating())
        player_2 = true_skills.get(players[1], Rating())

        # Calculate the probability of a draw
        draw_probability = quality_1vs1(player_1, player_2)

        # Calculate the win probability of each player using the normal
        # distributions of the two players.
        delta_mu = player_1.mu - player_2.mu
        denominator = np.sqrt(player_1.sigma**2 + player_2.sigma**2)
        player_1_win_chance = self.env.cdf(delta_mu / denominator)
        player_2_win_chance = 1 - player_1_win_chance

        return {
            "draw": draw_probability,
            "player_1": player_1_win_chance,
            "player_2": player_2_win_chance,
        }
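A minimal standalone sketch (not part of any project above) of the relationship the docstring describes: quality_1vs1 supplies the draw probability, while the two win chances come from the normal CDF of the rating difference and sum to 1. It uses math.erf instead of the class's self.env.cdf and, like the method above, leaves beta out of the denominator.

import math
from trueskill import Rating, quality_1vs1

def head_to_head(a: Rating, b: Rating) -> dict:
    # Draw probability ("match quality") straight from TrueSkill.
    draw = quality_1vs1(a, b)
    # Win chance for `a` via the normal CDF of the mu difference,
    # using only the two sigmas (beta omitted, as in the method above).
    delta_mu = a.mu - b.mu
    denominator = math.sqrt(a.sigma ** 2 + b.sigma ** 2)
    a_wins = 0.5 * (1.0 + math.erf(delta_mu / (denominator * math.sqrt(2))))
    return {"draw": draw, "a": a_wins, "b": 1.0 - a_wins}

print(head_to_head(Rating(27, 6), Rating(24, 7)))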
Code Example #7
File: search.py Project: yorick-ne/server
 def quality_with(self, opponent):
     if not isinstance(opponent, Player):
         raise TypeError("{} is not a valid player to match with".format(opponent))
     if not getattr(opponent, self.rating_prop):
         return 0
     return quality_1vs1(Rating(*getattr(self.player, self.rating_prop)),
                         Rating(*getattr(opponent, self.rating_prop)))
Code Example #8
 def quality_with(self, opponent):
     if not isinstance(opponent, Player):
         raise TypeError("{} is not a valid player to match with".format(opponent))
     if not getattr(opponent, self.rating_prop):
         return 0
     return quality_1vs1(Rating(*self.rating),
                         Rating(*getattr(opponent, self.rating_prop)))
Code Example #9
def FindMatch(labratID, battleDog):
    cats = Cats.query.all()
    battleDogTrueSkill = battleDog
    x = (LabRats.query.get_or_404(labratID)).battle_order
    x = x.encode('ascii', 'ignore')
    battleOrder = x.split()
    battleOrder = list(map(int, battleOrder))
    print("battleOrder", battleOrder)
    # lowerLimit = battleDogMean - 2*battleDogDeviation
    # upperLimit = battleDogMean + 2*battleDogDeviation
    bestRating = 99999
    for cat in cats:  # Going through all our cats because do not have many cats, but originally only loop from (mu-2*sigma) to (mu+2*sigma)
        catMean = cat.mean
        catDeviation = cat.deviation
        tempCatTrueSkill = Rating(mu=catMean, sigma=catDeviation)
        tempQuality = quality_1vs1(battleDogTrueSkill, tempCatTrueSkill)
        if cat.id in battleOrder:
            continue
        if (
                abs(tempQuality - 50) < bestRating
        ):  # Finding the closest matchup, i.e. the highest draw prob. (quality_1vs1 is on a 0-1 scale); already-selected cats were skipped above
            bestRating = abs(tempQuality - 50)
            bestCat = cat  # Best Match yet (this is dataclip)
    # Pepega
    print('Battle Dog Mean =',
          battleDogTrueSkill.mu,
          'Battle Dog Sigma =',
          battleDogTrueSkill.sigma,
          file=sys.stderr)
    print('Best Cat =', bestCat.id, ',', bestCat.mean, file=sys.stderr)
    return bestCat
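The inline comment above notes that, with a larger pool, the search would only cover cats inside the mu - 2*sigma to mu + 2*sigma window instead of every cat. A hedged sketch of that variant (hypothetical helper name, not from the project) might look like this:

from trueskill import Rating, quality_1vs1

def find_match_windowed(battle_dog, cats, already_fought_ids):
    # Keep only cats whose mean lies inside the dog's conservative skill window.
    lower = battle_dog.mu - 2 * battle_dog.sigma
    upper = battle_dog.mu + 2 * battle_dog.sigma
    candidates = [c for c in cats
                  if c.id not in already_fought_ids and lower <= c.mean <= upper]
    if not candidates:  # fall back to the full pool if the window is empty
        candidates = [c for c in cats if c.id not in already_fought_ids]
    # Pick the most even matchup, i.e. the highest draw probability.
    return max(candidates,
               key=lambda c: quality_1vs1(battle_dog,
                                          Rating(mu=c.mean, sigma=c.deviation)))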
Code Example #10
File: task.py Project: AniruddhIyer/daemo
def make_matchups(workers_to_match, project_group_id, review_project,
                  inter_task_review, match_group_id, batch_id):
    matched_workers = []
    for index in xrange(0, len(workers_to_match)):
        if workers_to_match[index] not in matched_workers:
            if len(workers_to_match) - len(matched_workers) == 1:
                is_last_worker = True
                start = 0
            else:
                is_last_worker = False
                start = index + 1
            first_worker = workers_to_match[index]
            first_score = trueskill.Rating(mu=first_worker['score'].mu,
                                           sigma=first_worker['score'].sigma)
            best_quality = 0
            second_worker = None
            is_intertask_match = None
            for j in xrange(start, len(workers_to_match)):
                if is_last_worker or workers_to_match[j] not in matched_workers:
                    second_score = trueskill.Rating(
                        mu=workers_to_match[j]['score'].mu,
                        sigma=workers_to_match[j]['score'].sigma)
                    quality = trueskill.quality_1vs1(first_score, second_score)
                    if quality > best_quality:
                        is_intertask_match = False
                        best_quality = quality
                        second_worker = workers_to_match[j]

            if second_worker is not None:
                matched_workers.append(first_worker)
                if not is_intertask_match:
                    matched_workers.append(second_worker)
                create_review_task(first_worker, second_worker, review_project,
                                   match_group_id, batch_id)
Code Example #11
def _get_match(game_id):
    game = get_object_or_404(Game, pk=game_id)
    rankdiff = int(5 / pow(random(), 0.65))
    bot_list = Bot.objects.filter(game=game).order_by("last_played")
    print(bot_list)
    match = Match()
    match.game = game
    match.bot1 = bot_list[0]
    bot1_id = bot_list[0].id
    bot_list = bot_list.order_by("-score")  # order_by returns a new queryset, so reassign it
    print(match.bot1)
    print(bot_list)
    opp_list = []
    for i in range(len(bot_list)):
        if bot_list[i].id == bot1_id:
            print(i, len(bot_list))
            opp_list += (bot_list[max(i - rankdiff, 0):i])
            if (i + 1 < len(bot_list)):
                opp_list += (bot_list[i + 1:min(i + rankdiff, len(bot_list))])
            break

    bot1_rate = Rating(mu=match.bot1.mu, sigma=match.bot1.sigma)
    print(opp_list)
    opp_list = sorted(opp_list,
                      key=lambda x: (x.games_played, -quality_1vs1(
                          Rating(mu=x.mu, sigma=x.sigma), bot1_rate)))
    print(opp_list)
    match.bot2 = opp_list[0]
    match.state = 1
    match.save()
    return match
Code Example #12
File: skill.py Project: kjhf/SlappPy
 def calculate_quality_of_game_players(player1: 'Skill',
                                       player2: 'Skill') -> int:
     """
     Calculate the quality of the game, which is the likelihood of the game being a draw (evenly balanced).
     Returns a percentage.
     """
     return int(
         trueskill.quality_1vs1(player1.rating, player2.rating) * 100)
Code Example #13
 def quality_with(self, opponent_search: 'Search'):
     if not isinstance(opponent_search.player, Player):
         raise TypeError("{} is not a valid player to match with".format(
             opponent_search.player))
     if opponent_search.raw_rating is None:
         return 0
     return quality_1vs1(Rating(*self.rating),
                         Rating(*opponent_search.rating))
Code Example #14
    def choose_opponent(self):
        # Return the opponent with the best drawing probability
        # The drawing prob is given by quality_1vs1(r1,r2)
        games_played = self.player.games_played
        if not self.baseline:
            if self.player.rank_updates == 0:
                self.update_opponent_ranking()

        draw_probabilities = [
            quality_1vs1(self.player.rating, opp.rating)
            for opp in self.parameter_sets
        ]
        sigmas = [opp.rating.sigma for opp in self.parameter_sets]
        if self.verbose:
            rospy.loginfo('Draw probabilities: {}'.format(draw_probabilities))
            rospy.loginfo('Sigmas: {}'.format(sigmas))

        new_opponent_index = np.argmax(draw_probabilities)

        if self.baseline or self.player.games_played < 4:
            # Take greedy decision
            self.opponent_index = new_opponent_index
        else:
            try:
                if new_opponent_index != self.opponent_index:
                    delta_sigma = abs(sigmas[new_opponent_index] -
                                      sigmas[self.opponent_index])
                    delta_treshold = 0.5
                    # Verify how much better the new one is
                    delta_p = draw_probabilities[
                        new_opponent_index] - draw_probabilities[
                            self.opponent_index]
                    p_treshold = 0.1
                    if delta_p > p_treshold:
                        delta_sigma = abs(sigmas[new_opponent_index] -
                                          sigmas[self.opponent_index])
                        if delta_sigma > delta_treshold:
                            self.opponent_index = new_opponent_index

                    # delta_sigma = sigmas[new_opponent_index] - sigmas[self.opponent_index]
                    # delta_treshold = 0.2 * self.set_changes
                    # delta_treshold = max(delta_treshold, 0.7)
                    if self.verbose:
                        rospy.loginfo(
                            'Setting a threshold of {}'.format(delta_treshold))
                        rospy.loginfo('Delta sigma: {}'.format(delta_sigma))
                    # if delta_sigma > delta_treshold:
                    #     if self.verbose:
                    #         rospy.loginfo('Changing set..')
                    #     self.set_changes += 1
                    #     self.opponent_index = new_opponent_index
            except Exception as e:
                # First time, we have to initialize the variable
                self.opponent_index = new_opponent_index

        best_level = self.parameter_sets[self.opponent_index].level_id
        return best_level
Code Example #15
def test_compatibility_with_another_rating_systems():
    """All rating system modules should implement ``rate_1vs1`` and
    ``quality_1vs1`` to provide shortcuts for 1 vs 1 simple competition games.
    """
    r1, r2 = Rating(30, 3), Rating(20, 2)
    assert quality_1vs1(r1, r2) == quality([(r1,), (r2,)])
    rated = rate([(r1,), (r2,)])
    assert rate_1vs1(r1, r2) == (rated[0][0], rated[1][0])
    rated = rate([(r1,), (r2,)], [0, 0])
    assert rate_1vs1(r1, r2, drawn=True) == (rated[0][0], rated[1][0])
Code Example #16
File: trueskilltest.py Project: romovpa/trueskill
def test_compatibility_with_another_rating_systems():
    """All rating system modules should implement ``rate_1vs1`` and
    ``quality_1vs1`` to provide shortcuts for 1 vs 1 simple competition games.
    """
    r1, r2 = Rating(30, 3), Rating(20, 2)
    assert quality_1vs1(r1, r2) == quality([(r1, ), (r2, )])
    rated = rate([(r1, ), (r2, )])
    assert rate_1vs1(r1, r2) == (rated[0][0], rated[1][0])
    rated = rate([(r1, ), (r2, )], [0, 0])
    assert rate_1vs1(r1, r2, drawn=True) == (rated[0][0], rated[1][0])
Code Example #17
def find_match(player1, agent_pool):
    match_quality = list()
    eligible_opponent = list()
    for agent in agent_pool:
        if agent == player1:
            continue
        else:
            match_quality.append(trueskill.quality_1vs1(player1.rating,
                                                        agent.rating))
            eligible_opponent.append(agent)
    match_quality = np.exp(match_quality)/sum(np.exp(match_quality))
    return np.random.choice(eligible_opponent, p=match_quality)
Code Example #18
File: main.py Project: danlangford/trueskill360
def quality_and_probability(players, names):
    p0, p1 = players[names[0]], players[names[1]]
    quality = trueskill.quality_1vs1(p0, p1)
    probability = arduino_map((1-quality)*100, 0, 100, 50, 100)/100
    ex0, ex1 = trueskill.expose(p0), trueskill.expose(p1)
    if ex0 == ex1:
        favor = None
    elif ex0>ex1:
        favor = names[0]
    else:
        favor = names[1]

    return quality, probability, favor
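arduino_map is not defined in this snippet; presumably it mirrors Arduino's map() range-scaling helper. Under that assumption, the probability line rescales (1 - quality) from [0, 100] to [50, 100], so a perfectly even pairing (quality 1.0) yields 0.5 and a hopeless mismatch (quality 0.0) yields 1.0 in favour of the higher-exposed player:

def arduino_map(x, in_min, in_max, out_min, out_max):
    # Linear rescale of x from [in_min, in_max] to [out_min, out_max],
    # matching Arduino's map(); this is an assumption about the missing helper.
    return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min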
Code Example #19
        def updateDone(value):
            """Callback method for update."""
            p1 = self.getPlayer(user1)
            p2 = self.getPlayer(user2)

            if p1 is None or p2 is None or p1 == p2:
                newDeferred.callback(None)
            else:
                newDeferred.callback((p1.name, p2.name,
                                      trueskill.quality_1vs1(p1.skill,
                                                             p2.skill),
                                      p1.rating, p2.rating))

            self.session.close()
Code Example #20
 def get_match_quality(self, match):
   team_a, team_b = match[0], match[1]
   # Two teams in one match, number of rounds ignored
   if len(match[0]) == len(match[1]):
     team_a_skills = [self.get_player_skill(p) for p in team_a]
     team_a_skills = self.__modify_team_sigmas_by_mu_ratio(team_a_skills)
     team_b_skills = [self.get_player_skill(p) for p in team_b]
     team_b_skills = self.__modify_team_sigmas_by_mu_ratio(team_b_skills)
     return quality([team_a_skills, team_b_skills])
   else:
     # Teams of unequal size: aggregate each team's skill and rate as 1vs1
     skill_team_a = self.get_team_skill(team_a)
     skill_team_b = self.get_team_skill(team_b)
     return quality_1vs1(skill_team_a, skill_team_b)
Code Example #21
def add_trueskill(df, users_rating, questions_rating, update=True):
    # -----------------------------------------------------------------------
    users_mean = np.zeros(len(df), dtype=np.float32)
    users_var = np.zeros(len(df), dtype=np.float32)
    questions_mean = np.zeros(len(df), dtype=np.float32)
    questions_var = np.zeros(len(df), dtype=np.float32)
    win_prob = np.zeros(len(df), dtype=np.float32)
    quality = np.zeros(len(df), dtype=np.float32)

    for num, row in enumerate(
            tqdm(df[['user_id', 'content_id', 'answered_correctly']].values)):

        users_mean[num] = users_rating[row[0]].mu
        users_var[num] = users_rating[row[0]].sigma

        questions_mean[num] = questions_rating[row[1]].mu
        questions_var[num] = questions_rating[row[1]].sigma

        win_prob[num] = win_probability(users_rating[row[0]],
                                        questions_rating[row[1]])
        quality[num] = quality_1vs1(users_rating[row[0]],
                                    questions_rating[row[1]])

        if update is True:

            old_user_rating = users_rating[row[0]]
            old_question_rating = questions_rating[row[1]]
            if row[2] == 1:
                new_user_rating, new_question_rating = rate_1vs1(
                    old_user_rating, old_question_rating)
            if row[2] == 0:
                new_question_rating, new_user_rating = rate_1vs1(
                    old_question_rating, old_user_rating)

            users_rating[row[0]] = new_user_rating
            questions_rating[row[1]] = new_question_rating

    user_df = pd.DataFrame({
        'u_mu': users_mean,
        'u_sigma': users_var,
        'q_mu': questions_mean,
        'q_sigma': questions_var,
        'win_prob': win_prob,
        'quality': quality
    })

    df = pd.concat([df, user_df], axis=1)
    return df
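win_probability is also not defined in this snippet. A common 1 vs 1 helper, adapted from the widely shared trueskill FAQ snippet, would look like the sketch below; treat it as an assumption about what the original code used.

import math
from trueskill import global_env

def win_probability(rating_a, rating_b):
    # P(rating_a beats rating_b) from the normal CDF of the mu difference,
    # with beta and both sigmas in the denominator.
    env = global_env()
    delta_mu = rating_a.mu - rating_b.mu
    denom = math.sqrt(2 * env.beta ** 2 + rating_a.sigma ** 2 + rating_b.sigma ** 2)
    return env.cdf(delta_mu / denom)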
Code Example #22
    def choose_opponent(self):
        # Return the opponent with the best drawing probability
        # The drawing prob is given by quality_1vs1(r1,r2)
        games_played = self.player.games_played
        if not self.baseline:
            if self.player.rank_updates == 0:
                self.update_opponent_ranking()
        # player_rating = Rating(self.player_row[3], self.player_row[4])
        # opponent_ratings = [Rating(opp[3], opp[4]) for opp in self.opponent_rows]

        draw_probabilities = [
            quality_1vs1(self.player.rating, opp.rating)
            for opp in self.parameter_sets
        ]
        sigmas = [opp.rating.sigma for opp in self.parameter_sets]
        if self.verbose:
            print('Draw probabilities: {}'.format(draw_probabilities))
            print('Sigmas: {}'.format(sigmas))

        new_opponent_index = np.argmax(draw_probabilities)

        if self.baseline:
            # Take greedy decision
            self.opponent_index = new_opponent_index
        else:
            try:
                if new_opponent_index != self.opponent_index:
                    delta_sigma = abs(sigmas[new_opponent_index] -
                                      sigmas[self.opponent_index])
                    delta_treshold = 0.5
                    # delta_sigma = sigmas[new_opponent_index] - sigmas[self.opponent_index]
                    # delta_treshold = 0.2 * self.set_changes
                    # delta_treshold = max(delta_treshold, 0.7)
                    if self.verbose:
                        print(
                            'Setting a threshold of {}'.format(delta_treshold))
                        print('Delta sigma: {}'.format(delta_sigma))
                    if delta_sigma > delta_treshold:
                        if self.verbose:
                            print('Changing set..')
                        self.set_changes += 1
                        self.opponent_index = new_opponent_index
            except Exception as e:
                # First time, we have to initialize the variable
                self.opponent_index = new_opponent_index

        best_level = self.parameter_sets[self.opponent_index].level_id
        return best_level
Code Example #23
    def start_session(self):
        self.get_current_session().start()

        session_players = self.get_current_session().get_players()

        # Set the start time of the current session in remote
        self.get_db().child('current_session').update({
            'session_started':
            self.get_current_session().start_time.isoformat(),
            'trueskill_quality':
            quality_1vs1(session_players[0].get_trueskill_rating(),
                         session_players[1].get_trueskill_rating())
        })

        # Send a notification to listeners
        for game_listener in self.game_listeners:
            game_listener.on_start_session(
                self.get_current_session().get_players())
Code Example #24
        def updateDone(value):
            """Callback method for update."""
            player = self.getPlayer(user)
            if player is None:
                newDeferred.callback(None)
            else:
                players = (self.session.query(Player)
                           .filter(Player.pid != player.pid)
                           .all())

                best = [(other.name, trueskill.quality_1vs1(player.skill,
                                                            other.skill))
                        for other in players]

                best = sorted(best, key=lambda p: p[1], reverse=True)
                best_names = [p[0] for p in best[0:n]]
                newDeferred.callback((player.name, best_names))

            self.session.close()
Code Example #25
    def choose_opponent(self):
        robotic_players_rows = self.db.get_all_robotic_players(
            self.current_player_record[0])
        robotic_players = []
        for robotic_players_row in robotic_players_rows:
            robotic_players.append(
                trueskill.Rating(robotic_players_row[3],
                                 robotic_players_row[4]))

        draw_probabilities = []

        for robotic_player in robotic_players:
            draw_probabilities.append(
                trueskill.quality_1vs1(self.current_player_rating,
                                       robotic_player))

        if np.random.uniform() < (0.2 / (self.changes + 1)):
            best_index = np.random.choice(range(3))
            self.changes += 1
        else:
            choice_made = False
            for i, rp in enumerate(robotic_players):
                sum_sigma = 0
                for j, rp2 in enumerate(robotic_players):
                    if j != i:
                        sum_sigma += rp2.sigma

                if robotic_players[i].sigma >= sum_sigma:
                    choice_made = True
                    best_index = i
                    break
            # If the sigma-based loop above made no choice, fall back to the best draw probability
            if not choice_made:
                best_index = np.argmax(draw_probabilities)

        best_robotic_player_id = robotic_players_rows[best_index][
            2]  # Get level id
        self.set_current_robotic_player(robotic_players_rows[best_index][0])
        return best_robotic_player_id
Code Example #26
 async def crladder_quality(self, ctx, name, member1: discord.Member, member2: discord.Member = None):
     """1v1 Drawing chance."""
     author = ctx.message.author
     server = ctx.message.server
     if member2 is None:
         pm1 = author
         pm2 = member1
     else:
         pm1 = member1
         pm2 = member2
     try:
         series = self.settings.get_series_by_name(server, name)
         if series is None:
             raise NoSuchSeries
         p1 = None
         p2 = None
         for player in series['players']:
             if str(player['discord_id']) == pm1.id:
                 p1 = player
             if str(player['discord_id']) == pm2.id:
                 p2 = player
         if p1 is None:
             raise NoSuchPlayer
         if p2 is None:
             raise NoSuchPlayer
     except NoSuchSeries:
         await self.bot.say("No series with that name on this server.")
     except NoSuchPlayer:
         await self.bot.say("Player not found.")
     else:
         p1_rating = env.create_rating(mu=p1['rating']['mu'], sigma=p1['rating']['sigma'])
         p2_rating = env.create_rating(mu=p2['rating']['mu'], sigma=p2['rating']['sigma'])
         await self.bot.say(
             "If {} plays against {}, "
             "there is a {:.1%} chance to draw.".format(
                 pm1, pm2, quality_1vs1(p1_rating, p2_rating)
             )
         )
Code Example #27
    def get_voting(self, user_id):
        """
        Get two entries to vote on, using a sophisticated algorithm.
        :return: Tuple of two entries if conditions are met. Otherwise None.
        """
        entries = [e for e in self.get_entries() if e.user_id != user_id]

        user = User.get_or_404(user_id=user_id)
        if len(entries) >= 2:
            entry1 = random.choice(entries)
            entry2 = random.choice(entries)
            rating1 = entry1.rating
            rating2 = entry2.rating
            impatience = 0
            while entry1.id == entry2.id or quality_1vs1(rating1, rating2) < 0.3 - impatience or \
                    (user.is_voted(entry1.battle_id, entry1.id, entry2.id) and impatience < 1):  # FIXME
                entry1 = random.choice(entries)
                entry2 = random.choice(entries)
                impatience += 0.01
            if not user.is_voted(entry1.battle_id, entry1.id,
                                 entry2.id) and not entry1.id == entry2.id:
                return entry1, entry2
        return None
Code Example #28
def get_matchup(participant_team_ids, mmr):
    # Choose the first player randomly.
    player1_id = np.random.choice(participant_team_ids, size=1, replace=False)

    # Compute the probability the chosen player should face the other remaining teams. Compute the probabilities
    # such that players closer to each other in MMR are more likely to play each other.
    other_players = list(participant_team_ids)
    other_players.remove(player1_id)

    # The faceoff probability is computed based on the 'quality_1vs1' method of the trueskill module. Perfectly
    # matched MMRs return a match quality of ~0.44, and sharply falls off to 0 as the MMRs diverge. To avoid having
    # the same teams play against each other continuously, a bias of ~0.1 is added to the weighting of each opponent.
    faceoff_probabilities = np.array([
        0.10 + quality_1vs1(mmr[int(player1_id)], mmr[int(x)])
        for x in other_players
    ])
    faceoff_probabilities /= faceoff_probabilities.sum()
    player2_id = np.random.choice(other_players,
                                  size=1,
                                  replace=False,
                                  p=faceoff_probabilities)
    player1_id, player2_id = sorted([int(player1_id), int(player2_id)])

    return player1_id, player2_id
Code Example #29
    def calculate_matchup_probabilities(self):
        # Calculate [P(matchup), ...] for every entrant.
        matchup_probs_by_entrant = {
        }  # name: ([opp_name...], [matchup_prob...])
        for name, entrant in self.entrants.items():
            opp_names = []
            p_draws = []

            # Get P(draw | opp) for every opp
            for opp_name, opp_entrant in self.entrants.items():
                if name == opp_name:
                    continue
                opp_names.append(opp_name)
                p_draw = quality_1vs1(entrant.skill_rating,
                                      opp_entrant.skill_rating)
                p_draws.append(p_draw)

            # Normalize by sum(p_draw)
            p_draw_sum = sum(p_draws)
            matchup_probs = [x / p_draw_sum for x in p_draws]

            # Stash
            matchup_probs_by_entrant[name] = (opp_names, matchup_probs)
        return matchup_probs_by_entrant
Code Example #30
File: task.py Project: bigcrowd/crowdsource-platform
def generate_matches(task_worker_ids, review_project, is_inter_task, match_group):
    cursor = connection.cursor()
    # noinspection SqlResolve
    query = '''
        SELECT
          tw.id,
          tw.worker_id,
          coalesce(match_workers.mu, 25.0) mu,
          coalesce(match_workers.sigma, 8.333) sigma,
          u.username,
          tw.task_id
        FROM crowdsourcing_taskworker tw
            INNER JOIN auth_user u ON u.id = tw.worker_id
          LEFT OUTER JOIN (
                            SELECT *
                            FROM (
                                   SELECT
                                     max_mw.project_group_id,
                                     max_mw.worker_id,
                                     mw.sigma,
                                     mw.mu,
                                     tw.id task_worker_id
                                   FROM crowdsourcing_matchworker mw
                                    INNER JOIN crowdsourcing_match m ON m.id = mw.match_id
                                     INNER JOIN crowdsourcing_taskworker tw ON tw.id = mw.task_worker_id
                                     INNER JOIN crowdsourcing_task t ON t.id = tw.task_id
                                     INNER JOIN crowdsourcing_project p ON p.id = t.project_id
                                     INNER JOIN
                                     (SELECT
                                        p.group_id           project_group_id,
                                        tw.worker_id,
                                        max(m.submitted_at) submitted_at
                                      FROM crowdsourcing_matchworker mw
                                        INNER JOIN crowdsourcing_match m ON m.id = mw.match_id
                                        INNER JOIN crowdsourcing_taskworker tw ON tw.id = mw.task_worker_id
                                        INNER JOIN crowdsourcing_task t ON t.id = tw.task_id
                                        INNER JOIN crowdsourcing_project p ON p.id = t.project_id
                                      GROUP BY p.group_id, tw.worker_id) max_mw
                                       ON max_mw.project_group_id = p.group_id AND max_mw.worker_id = tw.worker_id AND
                                          max_mw.submitted_at = m.submitted_at
                                 ) mw

                          ) match_workers ON match_workers.task_worker_id = tw.id
        WHERE tw.id = ANY(%(ids)s);
    '''
    cursor.execute(query, {'ids': task_worker_ids})
    worker_scores = cursor.fetchall()
    match_workers = []
    newly_matched = []
    if not is_inter_task:  # TODO add inter task support later
        to_match = {}
        for worker_score in worker_scores:
            task_id = str(worker_score[5])
            if task_id not in to_match:
                to_match[task_id] = []
            to_match[task_id].append(worker_score)

        for task_id in to_match:
            length = len(to_match[task_id])
            for i, worker_score in enumerate(to_match[task_id]):
                if worker_score in newly_matched:
                    continue
                score_one = worker_score

                rating_one = trueskill.Rating(mu=score_one[2], sigma=score_one[3])
                best_quality = 0
                score_two = None
                for inner_ws in to_match[task_id]:
                    if inner_ws != score_one and \
                        (inner_ws not in newly_matched or (length - 1 == i and i % 2 == 0)) and \
                            score_one[1] != inner_ws[1] and score_one[5] == inner_ws[5]:
                        rating_two = trueskill.Rating(mu=inner_ws[2], sigma=inner_ws[3])
                        match_quality = trueskill.quality_1vs1(rating_one, rating_two)
                        if match_quality > best_quality:
                            best_quality = match_quality
                            score_two = inner_ws
                if score_two is not None:
                    newly_matched.append(score_one)
                    newly_matched.append(score_two)
                    task = Task.objects.create(
                        data={"task_workers": [{'username': score_one[4], 'task_worker': score_one[0]},
                                               {'username': score_two[4], 'task_worker': score_two[0]}]},
                        batch_id=match_group.batch_id, project_id=review_project.id, min_rating=1.99)
                    task.group_id = task.id
                    task.save()
                    match = Match.objects.create(group=match_group, task=task)
                    match_workers.append(
                        MatchWorker(match=match, task_worker_id=score_one[0], old_mu=score_one[2],
                                    old_sigma=score_one[3])
                    )
                    match_workers.append(
                        MatchWorker(match=match, task_worker_id=score_two[0], old_mu=score_two[2],
                                    old_sigma=score_two[3])
                    )
    MatchWorker.objects.bulk_create(match_workers)
    # Task.objects.bulk_create(review_tasks)
    return [s[0] for s in newly_matched]
Code Example #31
File: task.py Project: AniruddhIyer/daemo
def generate_matches(task_worker_ids, review_project, is_inter_task,
                     match_group):
    cursor = connection.cursor()
    # noinspection SqlResolve
    query = '''
        SELECT
          tw.id,
          tw.worker_id,
          coalesce(match_workers.mu, 25.0) mu,
          coalesce(match_workers.sigma, 8.333) sigma,
          u.username,
          tw.task_id
        FROM crowdsourcing_taskworker tw
            INNER JOIN auth_user u ON u.id = tw.worker_id
          LEFT OUTER JOIN (
                            SELECT *
                            FROM (
                                   SELECT
                                     max_mw.project_group_id,
                                     max_mw.worker_id,
                                     mw.sigma,
                                     mw.mu,
                                     tw.id task_worker_id
                                   FROM crowdsourcing_matchworker mw
                                    INNER JOIN crowdsourcing_match m ON m.id = mw.match_id
                                     INNER JOIN crowdsourcing_taskworker tw ON tw.id = mw.task_worker_id
                                     INNER JOIN crowdsourcing_task t ON t.id = tw.task_id
                                     INNER JOIN crowdsourcing_project p ON p.id = t.project_id
                                     INNER JOIN
                                     (SELECT
                                        p.group_id           project_group_id,
                                        tw.worker_id,
                                        max(m.submitted_at) submitted_at
                                      FROM crowdsourcing_matchworker mw
                                        INNER JOIN crowdsourcing_match m ON m.id = mw.match_id
                                        INNER JOIN crowdsourcing_taskworker tw ON tw.id = mw.task_worker_id
                                        INNER JOIN crowdsourcing_task t ON t.id = tw.task_id
                                        INNER JOIN crowdsourcing_project p ON p.id = t.project_id
                                      GROUP BY p.group_id, tw.worker_id) max_mw
                                       ON max_mw.project_group_id = p.group_id AND max_mw.worker_id = tw.worker_id AND
                                          max_mw.submitted_at = m.submitted_at
                                 ) mw

                          ) match_workers ON match_workers.task_worker_id = tw.id
        WHERE tw.id = ANY(%(ids)s);
    '''
    cursor.execute(query, {'ids': task_worker_ids})
    worker_scores = cursor.fetchall()
    match_workers = []
    newly_matched = []
    if not is_inter_task:  # TODO add inter task support later
        to_match = {}
        for worker_score in worker_scores:
            task_id = str(worker_score[5])
            if task_id not in to_match:
                to_match[task_id] = []
            to_match[task_id].append(worker_score)

        for task_id in to_match:
            length = len(to_match[task_id])
            for i, worker_score in enumerate(to_match[task_id]):
                if worker_score in newly_matched:
                    continue
                score_one = worker_score

                rating_one = trueskill.Rating(mu=score_one[2],
                                              sigma=score_one[3])
                best_quality = 0
                score_two = None
                for inner_ws in to_match[task_id]:
                    if inner_ws != score_one and \
                        (inner_ws not in newly_matched or (length - 1 == i and i % 2 == 0)) and \
                            score_one[1] != inner_ws[1] and score_one[5] == inner_ws[5]:
                        rating_two = trueskill.Rating(mu=inner_ws[2],
                                                      sigma=inner_ws[3])
                        match_quality = trueskill.quality_1vs1(
                            rating_one, rating_two)
                        if match_quality > best_quality:
                            best_quality = match_quality
                            score_two = inner_ws
                if score_two is not None:
                    newly_matched.append(score_one)
                    newly_matched.append(score_two)
                    task = Task.objects.create(data={
                        "task_workers": [{
                            'username': score_one[4],
                            'task_worker': score_one[0]
                        }, {
                            'username': score_two[4],
                            'task_worker': score_two[0]
                        }]
                    },
                                               batch_id=match_group.batch_id,
                                               project_id=review_project.id,
                                               min_rating=1.99)
                    task.group_id = task.id
                    task.save()
                    match = Match.objects.create(group=match_group, task=task)
                    match_workers.append(
                        MatchWorker(match=match,
                                    task_worker_id=score_one[0],
                                    old_mu=score_one[2],
                                    old_sigma=score_one[3]))
                    match_workers.append(
                        MatchWorker(match=match,
                                    task_worker_id=score_two[0],
                                    old_mu=score_two[2],
                                    old_sigma=score_two[3]))
    MatchWorker.objects.bulk_create(match_workers)
    # Task.objects.bulk_create(review_tasks)
    return [s[0] for s in newly_matched]
Code Example #32
File: quality.py Project: daggaz/pool
def quality(player_1, player_2):
    return int(quality_1vs1(player_1.rating, player_2.rating)*100)
Code Example #33
## Estimate suggested random pairings

from random import sample
from random import random

print('\n'.join(
    list(map(lambda i: str(i) + " -> " + teams[i], list(range(len(teams)))))))
toPlay = list(range(len(teams))) * 2
toPlay = [1, 2, 3, 4, 5, 6, 8, 9, 10, 11] * 2
matches = []

while len(toPlay) > 2:
    # pick two teams
    pair = sample(toPlay, 2)
    if  pair[0] != pair[1] and \
        random() <= quality_1vs1(ratings[pair[0]], ratings[pair[1]]) :
        matches.append([
            teams[pair[0]], teams[pair[1]],
            '{:.1%}'.format(quality_1vs1(ratings[pair[0]], ratings[pair[1]]))
        ])
        toPlay.remove(pair[0])
        toPlay.remove(pair[1])

# save matches

import csv

with open('next_matches.csv', 'w', newline='') as csvfile:
    writer = csv.writer(csvfile, delimiter=',')
    for pairing in matches:
        writer.writerow(pairing)
Code Example #34
File: __main__.py Project: Tjorriemorrie/ufc
def main(hyper_params, train=0):
    logger.info('Starting main training')

    hyper_params = list(hyper_params)
    # bet_multi_param = int(round(hyper_params.pop(0)))
    bet_multi_param = -14

    all_data = DATA_2018_07 + DATA_2018_08 + DATA_2018_09 + DATA_2018_10 + \
               DATA_2019_01 + DATA_2019_02 + DATA_2019_03 + DATA_2019_04 + DATA_2019_05 + DATA_2019_06 + \
               DATA_2019_07 + DATA_2019_08 + DATA_2019_09 + DATA_2019_10 + DATA_2019_11 + \
               DATA_2020_01 + DATA

    # bet_sfcr_a, bet_sfcr_b,
    bet_sfcr_a = 10.007218390537238
    bet_sfcr_b = -5.501752363680982

    # bet_age_a, bet_age_b, bet_spd_a, bet_spd_b,
    bet_age_a = -1.8635234765777555
    bet_age_b = 24.92516053266948
    bet_spd_a = -22.450915143817532
    bet_spd_b = 50.81331907162685

    # bet_setr_a, bet_setr_b, bet_drsw_a, bet_drsw_b, bet_tiel_a, bet_tiel_b = hyper_params
    bet_setr_a = -18.420335662272393
    bet_setr_b = -3.0849265357219013
    bet_drsw_a = -13.931397245705076
    bet_drsw_b = 8.617939186326849
    bet_tiel_a = -14.597410948570172
    bet_tiel_b = 18.506415802460246

    # bet_wnlr_a, bet_wnlr_b, bet_upsw_a, bet_upsw_b, bet_drs_a, bet_drs_b = hyper_params
    bet_wnlr_a = 0.5759959382580377
    bet_wnlr_b = -3.149020744259276
    bet_upsw_a = -6.916534460758778
    bet_upsw_b = 4.136262175260056
    bet_drs_a = -5.068401842647043
    bet_drs_b = -11.036583332479655

    # bet_gms_a, bet_gms_b, bet_drsl_a, bet_drsl_b, bet_tmi_a, bet_tmi_b = hyper_params
    bet_gms_a = -1.1199718460907597
    bet_gms_b = -4.975264460039275
    bet_drsl_a = -3.5193793439419188
    bet_drsl_b = 8.157513638414523
    bet_tmi_a = -1.6290341203972258
    bet_tmi_b = 11.182173806372235

    # bet_wnlw_a, bet_wnlw_b, bet_setw_a, bet_setw_b, bet_setl_a, bet_setl_b = hyper_params
    bet_wnlw_a = -15.125372111294066
    bet_wnlw_b = -22.583131894175065
    bet_setw_a = 24.489629815162466
    bet_setw_b = -32.34158251098889
    bet_setl_a = -12.946297107133
    bet_setl_b = 6.99071560946205

    # bet_tier_a, bet_tier_b, bet_upsl_a, bet_upsl_b, bet_ts_a, bet_ts_b = hyper_params
    bet_tier_a = 76.69376718420321
    bet_tier_b = 0.22499021217210005
    bet_upsl_a = 50.652313466232535
    bet_upsl_b = -54.1700508996346
    bet_ts_a = -41.55281708232367
    bet_ts_b = 15.054725376991598

    # bet_upsr_a, bet_upsr_b, bet_sfcw_a, bet_sfcw_b, bet_wnll_a, bet_wnll_b = hyper_params
    bet_upsr_a = 1.7388898924369287
    bet_upsr_b = -3.651644175622176
    bet_sfcw_a = 4.708276194896277
    bet_sfcw_b = 2.837036795184313
    bet_wnll_a = 6.390115360073748
    bet_wnll_b = -5.559582537604789

    # bet_tma_a, bet_tma_b, bet_lati_a, bet_lati_b, bet_tiew_a, bet_tiew_b = hyper_params
    bet_tma_a = 8.508661586868248
    bet_tma_b = -0.9753566485775689
    bet_lati_a = 14.255555245990033
    bet_lati_b = 0.31998644244105373
    bet_tiew_a = -1.1450370999467043
    bet_tiew_b = -3.4693811126912357

    # init
    start_date = None
    ratings = defaultdict(lambda: Rating())
    wins_losses = defaultdict(lambda: [])
    doors = defaultdict(lambda: [])
    surfaces = defaultdict(lambda: [])
    speeds = defaultdict(lambda: [100, 0])
    sets = defaultdict(lambda: [])
    games = defaultdict(lambda: [0])
    ties = defaultdict(lambda: [])
    last_ties = defaultdict(lambda: [])
    upsets = defaultdict(lambda: [])
    payouts = []
    bet_amts = []
    accuracy = []
    trueskill = []
    matches = 0
    tab = []
    tab_amts = []
    actual = (0, 0)
    actual_debug = []
    bet_multis = []
    bet_multis_cat = []

    # loop through scenes
    for i, event in enumerate(all_data):
        bet_size = 1
        if not start_date:
            start_date = datetime.strptime(event['date'], '%Y-%m-%d')
        logger.info('')
        logger.info(f'{event["date"]} {event["location"]["name"]}')

        for match in event['matches']:
            # skip if no odds:
            if 'odds' not in match:
                continue

            p1, p2 = match['players']

            # odds data
            p1_odds = match['odds'][p1]
            p2_odds = match['odds'][p2]
            if not -40 < p1_odds < 40 or not -40 < p2_odds < 40:
                raise ValueError(
                    f'surely these odds are wrong? {p1_odds} {p2_odds}')

            # trueskill data
            p1_ts = ratings[p1].mu
            p2_ts = ratings[p2].mu
            p1_sigma = ratings[p1].sigma
            p2_sigma = ratings[p2].sigma

            # trueskill min data
            p1_ts_min = p1_ts - p1_sigma * 2
            p2_ts_min = p2_ts - p2_sigma * 2

            # trueskill max data
            p1_ts_max = p1_ts + p1_sigma * 2
            p2_ts_max = p2_ts + p2_sigma * 2

            # trueskill match quality
            ts_quality = quality_1vs1(ratings[p1], ratings[p2])

            # wins losses
            p1_wins_losses = Counter(wins_losses[p1])
            p1_wins = p1_wins_losses[1]
            p1_losses = p1_wins_losses[-1]
            p1_wnl_winrate = p1_wins / max(1, len(wins_losses[p1]))
            p2_wins_losses = Counter(wins_losses[p2])
            p2_wins = p2_wins_losses[1]
            p2_losses = p2_wins_losses[-1]
            p2_wnl_winrate = p2_wins / max(1, len(wins_losses[p2]))

            # outdoors
            match_door = event['location']['outdoor']
            p1_doors = Counter(doors[p1])
            p1_doors_wins = p1_doors[match_door]
            p1_doors_losses = p1_doors[-match_door]
            p1_doors_winrate = p1_doors_wins / max(1, len(doors[p1]))
            p2_doors = Counter(doors[p2])
            p2_doors_wins = p2_doors[match_door]
            p2_doors_losses = p2_doors[-match_door]
            p2_doors_winrate = p2_doors_wins / max(1, len(doors[p2]))

            # surface
            match_surface = event['location']['surface']
            p1_surface = Counter(surfaces[p1])
            p1_surface_wins = p1_surface[match_surface]
            p1_surface_losses = p1_surface[-match_surface]
            p1_surface_winrate = p1_surface_wins / max(
                1, p1_surface_wins + p1_surface_losses)
            p2_surface = Counter(surfaces[p2])
            p2_surface_wins = p2_surface[match_surface]
            p2_surface_losses = p2_surface[-match_surface]
            p2_surface_winrate = p2_surface_wins / max(
                1, p2_surface_wins + p2_surface_losses)

            # speed
            match_speed = event['location']['speed']
            p1_speed_prs = [(abs(v), 1 if v > 0 else -1) for v in speeds[p1]]
            p1_speed_lin = linregress([v[0] for v in p1_speed_prs],
                                      [v[1] for v in p1_speed_prs])
            p2_speed_prs = [(abs(v), 1 if v > 0 else -1) for v in speeds[p2]]
            p2_speed_lin = linregress([v[0] for v in p2_speed_prs],
                                      [v[1] for v in p2_speed_prs])

            # sets
            p1_sets = Counter(sets[p1])
            p1_sets_wins = p1_sets[1]
            p1_sets_losses = p1_sets[-1]
            p1_sets_winrate = p1_sets[1] / max(1, len(sets[p1]))
            p2_sets = Counter(sets[p2])
            p2_sets_wins = p2_sets[1]
            p2_sets_losses = p2_sets[-1]
            p2_sets_winrate = p2_sets[1] / max(1, len(sets[p2]))

            # games
            p1_gms_avg = np.average(games[p1])
            p2_gms_avg = np.average(games[p2])

            # ties
            p1_ties = Counter(ties[p1])
            p1_ties_wins = p1_ties[1]
            p1_ties_losses = p1_ties[-1]
            p1_ties_winrate = p1_ties[1] / max(1, len(ties[p1]))
            p2_ties = Counter(ties[p2])
            p2_ties_wins = p2_ties[1]
            p2_ties_losses = p2_ties[-1]
            p2_ties_winrate = p2_ties[1] / max(1, len(ties[p2]))

            # last ties
            p1_lati = Counter(last_ties[p1])
            p1_lati_winrate = p1_lati[1] / max(1, len(last_ties[p1]))
            p2_lati = Counter(last_ties[p2])
            p2_lati_winrate = p2_lati[1] / max(1, len(last_ties[p2]))

            # upsets
            p1_upsets = Counter(upsets[p1])
            p1_upsets_wins = p1_upsets[1]
            p1_upsets_losses = p1_upsets[-1]
            p1_upsets_win_avg = p1_upsets[1] / max(1, len(upsets[p1]))
            p2_upsets = Counter(upsets[p2])
            p2_upsets_wins = p2_upsets[1]
            p2_upsets_losses = p2_upsets[-1]
            p2_upsets_win_avg = p2_upsets[1] / max(1, len(upsets[p2]))

            # age
            p1_age = get_age_months(p1)
            p2_age = get_age_months(p2)

            #########################################
            # update here as next sections can skip ahead
            if 'score' in match:

                # update ratings
                ratings[p1], ratings[p2] = rate_1vs1(ratings[p1], ratings[p2])

                # update wins losses
                wins_losses[p1] += [1]
                wins_losses[p2] += [-1]

                # update sets
                sets[p1] += [1 if v[0] > v[1] else -1 for v in match['score']]
                sets[p2] += [1 if v[1] > v[0] else -1 for v in match['score']]

                # update games
                games[p1] += [sum(v[0] - v[1] for v in match['score'])]
                games[p2] += [sum(v[1] - v[0] for v in match['score'])]

                # update doors
                doors[p1] += [match_door]
                doors[p2] += [-match_door]

                # update surface
                surfaces[p1] += [match_surface]
                surfaces[p2] += [-match_surface]

                # update speeds
                speeds[p1] += [match_speed]
                speeds[p2] += [-match_speed]

                # update ties
                ties[p1] += [
                    1 if v[0] == 7 else -1 for v in match['score']
                    if 7 in v and 6 in v
                ]
                ties[p2] += [
                    1 if v[1] == 7 else -1 for v in match['score']
                    if 7 in v and 6 in v
                ]

                # update last ties
                if match['score'] and 7 in match['score'][-1] and 6 in match[
                        'score'][-1]:
                    last_ties[p1].append(1 if match['score'][-1][0] ==
                                         7 else -1)
                    last_ties[p2].append(1 if match['score'][-1][1] ==
                                         7 else -1)

                # update upsets
                upset = p2_odds < p1_odds
                upsets[p1] += [1 if upset else 0]
                upsets[p2] += [-1 if upset else 0]

            if train and random() > 0.80:
                continue
            matches += 1

            log_odds = f'[{p1_odds:.2f} vs {p2_odds:.2f}]'
            log_trueskill = f'[{p1_ts:.0f}.{p1_sigma:.0f} vs {p2_ts:.0f}.{p2_sigma:.0f}]'

            ###############################
            # bet scaling
            bet_multi = bet_multi_param

            # trueskill mu
            if p1_odds < p2_odds:
                f_ts = p1_ts - p2_ts
            else:
                f_ts = p2_ts - p1_ts
            bet_multi = up_multi_bet(bet_multi, bet_multis_cat,
                                     [bet_ts_a, bet_ts_b], f_ts, 'ts')

            # trueskill min
            if p1_odds < p2_odds:
                f_ts_min = p1_ts_min - p2_ts_min
            else:
                f_ts_min = p2_ts_min - p1_ts_min
            bet_multi = up_multi_bet(bet_multi, bet_multis_cat,
                                     [bet_tmi_a, bet_tmi_b], f_ts_min, 'tmi')

            # trueskill max
            if p1_odds < p2_odds:
                f_ts_max = p1_ts_max - p2_ts_max
            else:
                f_ts_max = p2_ts_max - p1_ts_max
            bet_multi = up_multi_bet(bet_multi, bet_multis_cat,
                                     [bet_tma_a, bet_tma_b], f_ts_max, 'tma')

            # wins and losses wins
            if p1_odds < p2_odds:
                p_wnlw = p1_wins - p2_wins
            else:
                p_wnlw = p2_wins - p1_wins
            bet_multi = up_multi_bet(bet_multi, bet_multis_cat,
                                     [bet_wnlw_a, bet_wnlw_b], p_wnlw, 'wnlw')

            # wins and losses lost
            if p1_odds < p2_odds:
                p_wnll = p2_losses - p1_losses
            else:
                p_wnll = p1_losses - p2_losses
            bet_multi = up_multi_bet(bet_multi, bet_multis_cat,
                                     [bet_wnll_a, bet_wnll_b], p_wnll, 'wnll')

            # wins and losses winrate
            if p1_odds < p2_odds:
                p_wnlr = p1_wnl_winrate - p2_wnl_winrate
            else:
                p_wnlr = p2_wnl_winrate - p1_wnl_winrate
            bet_multi = up_multi_bet(bet_multi, bet_multis_cat,
                                     [bet_wnlr_a, bet_wnlr_b], p_wnlr, 'wnlr')

            # doors wins
            if p1_odds < p2_odds:
                p_drsw = p1_doors_wins - p2_doors_wins
            else:
                p_drsw = p2_doors_wins - p1_doors_wins
            bet_multi = up_multi_bet(bet_multi, bet_multis_cat,
                                     [bet_drsw_a, bet_drsw_b], p_drsw, 'drsw')

            # doors losses
            if p1_odds < p2_odds:
                p_drsl = p2_doors_losses - p1_doors_losses
            else:
                p_drsl = p1_doors_losses - p2_doors_losses
            bet_multi = up_multi_bet(bet_multi, bet_multis_cat,
                                     [bet_drsl_a, bet_drsl_b], p_drsl, 'drsl')

            # doors winrate
            if p1_odds < p2_odds:
                p_drs = p1_doors_winrate - p2_doors_winrate
            else:
                p_drs = p2_doors_winrate - p1_doors_winrate
            bet_multi = up_multi_bet(bet_multi, bet_multis_cat,
                                     [bet_drs_a, bet_drs_b], p_drs, 'drs')

            # surface wins
            if p1_odds < p2_odds:
                p_sfcw = p1_surface_wins - p2_surface_wins
            else:
                p_sfcw = p2_surface_wins - p1_surface_wins
            bet_multi = up_multi_bet(bet_multi, bet_multis_cat,
                                     [bet_sfcw_a, bet_sfcw_b], p_sfcw, 'sfcw')

            # surface winrate
            if p1_odds < p2_odds:
                p_sfcr = p1_surface_winrate - p2_surface_winrate
            else:
                p_sfcr = p2_surface_winrate - p1_surface_winrate
            bet_multi = up_multi_bet(bet_multi, bet_multis_cat,
                                     [bet_sfcr_a, bet_sfcr_b], p_sfcr, 'sfcr')

            # speed
            p1_speed = p1_speed_lin.intercept + p1_speed_lin.slope * match_speed
            p2_speed = p2_speed_lin.intercept + p2_speed_lin.slope * match_speed
            if p1_odds < p2_odds:
                p_spd = p1_speed - p2_speed
            else:
                p_spd = p2_speed - p1_speed
            bet_multi = up_multi_bet(bet_multi, bet_multis_cat,
                                     [bet_spd_a, bet_spd_b], p_spd, 'spd')

            # sets wins
            if p1_odds < p2_odds:
                p_setw = p1_sets_wins - p2_sets_wins
            else:
                p_setw = p2_sets_wins - p1_sets_wins
            bet_multi = up_multi_bet(bet_multi, bet_multis_cat,
                                     [bet_setw_a, bet_setw_b], p_setw, 'setw')

            # sets losses
            if p1_odds < p2_odds:
                p_setl = p2_sets_losses - p1_sets_losses
            else:
                p_setl = p1_sets_losses - p2_sets_losses
            bet_multi = up_multi_bet(bet_multi, bet_multis_cat,
                                     [bet_setl_a, bet_setl_b], p_setl, 'setl')

            # sets winrate
            if p1_odds < p2_odds:
                p_setr = p1_sets_winrate - p2_sets_winrate
            else:
                p_setr = p2_sets_winrate - p1_sets_winrate
            bet_multi = up_multi_bet(bet_multi, bet_multis_cat,
                                     [bet_setr_a, bet_setr_b], p_setr, 'setr')

            # games
            if p1_odds < p2_odds:
                p_gms = p1_gms_avg - p2_gms_avg
            else:
                p_gms = p2_gms_avg - p1_gms_avg
            bet_multi = up_multi_bet(bet_multi, bet_multis_cat,
                                     [bet_gms_a, bet_gms_b], p_gms, 'gms')

            # ties wins
            if p1_odds < p2_odds:
                p_tiew = p1_ties_wins - p2_ties_wins
            else:
                p_tiew = p2_ties_wins - p1_ties_wins
            bet_multi = up_multi_bet(bet_multi, bet_multis_cat,
                                     [bet_tiew_a, bet_tiew_b], p_tiew, 'tiew')

            # ties losses
            if p1_odds < p2_odds:
                p_tiel = p2_ties_losses - p1_ties_losses
            else:
                p_tiel = p1_ties_losses - p2_ties_losses
            bet_multi = up_multi_bet(bet_multi, bet_multis_cat,
                                     [bet_tiel_a, bet_tiel_b], p_tiel, 'tiel')

            # ties winrate
            if p1_odds < p2_odds:
                p_tier = p1_ties_winrate - p2_ties_winrate
            else:
                p_tier = p2_ties_winrate - p1_ties_winrate
            bet_multi = up_multi_bet(bet_multi, bet_multis_cat,
                                     [bet_tier_a, bet_tier_b], p_tier, 'tier')

            # last ties winrate
            if p1_odds < p2_odds:
                p_lati = p1_lati_winrate - p2_lati_winrate
            else:
                p_lati = p2_lati_winrate - p1_lati_winrate
            bet_multi = up_multi_bet(bet_multi, bet_multis_cat,
                                     [bet_lati_a, bet_lati_b], p_lati, 'lati')

            # upsets wins
            if p1_odds < p2_odds:
                p_upsw = p1_upsets_wins - p2_upsets_wins
            else:
                p_upsw = p2_upsets_wins - p1_upsets_wins
            bet_multi = up_multi_bet(bet_multi, bet_multis_cat,
                                     [bet_upsw_a, bet_upsw_b], p_upsw, 'upsw')

            # upsets losses
            if p1_odds < p2_odds:
                p_upsl = p2_upsets_losses - p1_upsets_losses
            else:
                p_upsl = p1_upsets_losses - p2_upsets_losses
            bet_multi = up_multi_bet(bet_multi, bet_multis_cat,
                                     [bet_upsl_a, bet_upsl_b], p_upsl, 'upsl')

            # upsets winrate
            if p1_odds < p2_odds:
                p_upsr = p1_upsets_win_avg - p2_upsets_win_avg
            else:
                p_upsr = p2_upsets_win_avg - p1_upsets_win_avg
            bet_multi = up_multi_bet(bet_multi, bet_multis_cat,
                                     [bet_upsr_a, bet_upsr_b], p_upsr, 'upsr')

            # age
            if p1_odds < p2_odds:
                p_age = p1_age - p2_age
            else:
                p_age = p2_age - p1_age
            bet_multi = up_multi_bet(bet_multi, bet_multis_cat,
                                     [bet_age_a, bet_age_b], p_age, 'age')

            log_players = f'x{bet_multi:.0f} {p1} {match.get("score")} {p2}'
            bet_amt = round(bet_size * bet_multi)
            ###############################

            # make prediction
            if 'prediction' in match and match['prediction'] is None:
                # no positive bet or no favourite
                if bet_amt < 1 or p1_odds == p2_odds:
                    logger.warning(
                        f'No bet!  {p1} vs {p2} {log_odds} {log_trueskill}')
                    continue
                if p1_odds < p2_odds:
                    w_odds = p1_odds
                    w = p1
                    l_odds = p2_odds
                    l = p2
                else:
                    w_odds = p2_odds
                    w = p2
                    l_odds = p1_odds
                    l = p1
                logger.warning(
                    f'[{w_odds:.2f} vs {l_odds:.2f}] Bet x{round(bet_multi):.0f} on {w} to beat {l} [{ratings[w].mu:.0f} vs {ratings[l].mu:.0f}]'
                )
                continue

            # prediction bet on
            if 'score' not in match:
                logger.warning(f'Pending {p1} vs {p2}')
                continue

            if bet_amt < 1:
                logger.info(f'no bet {log_players} {log_odds} {log_trueskill}')
                continue

            # testing outcome
            payout = -bet_amt
            if p1_odds < p2_odds:
                payout += p1_odds * bet_amt

            accuracy.append(1 if p1_odds < p2_odds else -1)
            trueskill.append(1 if p1_ts > p2_ts else -1)
            bet_amts.append(bet_amt)
            bet_multis.append(bet_multi)
            payouts.append(round(payout, 2))
            log_balance = f'[{sum(payouts):.0f}|{payout:.0f}]'

            # actual outcome
            if 'bet' in match and 'score' in match:
                is_actual_correct = match['prediction'] == p1
                actual = (actual[0] + is_actual_correct, actual[1] + 1)
                cash = -match['bet']
                pred_odds = p2_odds
                if is_actual_correct:
                    pred_odds = p1_odds
                    cash += p1_odds * match['bet']
                tab.append(round(cash, 2))
                tab_amts.append(match['bet'])
                actual_debug.append(
                    f'${match["bet"]} {pred_odds:.2f}: {cash:.2f} {match["prediction"]} {event["location"]["name"]}:{match["round"]}'
                )

            logger.info(
                f'{log_balance} {log_players} {log_odds} {log_trueskill}')

    if train:
        total_payouts = sum(payouts)
        roi = total_payouts / max(1, sum(bet_amts))
        participation = len(accuracy) / max(1, matches)
        # res = roi + participation / 4 + total_payouts / 100000
        res = roi * participation
        print(
            f'Score: {res * 100:.2f}  ROI: {roi * 100:.1f}%  Part: {participation * 100:.1f}%  Profit: ${total_payouts:.0f} {[round(p, 1) for p in [bet_multi_param] + hyper_params]}'
        )
        return -res
    else:
        summary(accuracy, payouts, bet_amts, start_date, actual, tab, tab_amts,
                bet_multis, bet_multis_cat, actual_debug, matches, trueskill)
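
The bet-scaling section above applies the same transformation to every feature: take the difference from the bookmaker favourite's point of view and feed it to up_multi_bet. A minimal sketch of a helper that would express that pattern once (favourite_diff is a hypothetical name; up_multi_bet and the bet_* thresholds are the ones used in the snippet):

def favourite_diff(p1_odds, p2_odds, p1_val, p2_val):
    """Value of the bookmaker favourite minus the underdog's value.

    The favourite is the player with the lower decimal odds. The
    'losses'-style features above would pass (p2_val, p1_val) so that a
    positive result still means the favourite looks stronger.
    """
    return p1_val - p2_val if p1_odds < p2_odds else p2_val - p1_val

# e.g. the trueskill-mu block would reduce to:
# f_ts = favourite_diff(p1_odds, p2_odds, p1_ts, p2_ts)
# bet_multi = up_multi_bet(bet_multi, bet_multis_cat, [bet_ts_a, bet_ts_b], f_ts, 'ts')
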
Code example #35
0
File: app.py Project: mehdidc/annot
def get_fairness(rating, url1, url2):
    return quality_1vs1(rating[url1], rating[url2])
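
A hypothetical usage sketch for the helper above, assuming `rating` maps URLs to trueskill.Rating objects; quality_1vs1 returns the draw probability, so values near 1 indicate an evenly matched pair:

from trueskill import Rating

rating = {
    'img/a.png': Rating(mu=27.0, sigma=4.0),  # hypothetical entries
    'img/b.png': Rating(mu=24.5, sigma=6.5),
}

# a pair with similar ratings is close to a coin flip
if get_fairness(rating, 'img/a.png', 'img/b.png') > 0.5:
    print('fair pair')
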
Code example #36
0
File: pingpong.py Project: dmillard/pingpong
def index():
    db = get_db()
    players = db.execute('''
            SELECT *
            FROM player
            WHERE active = 1 
            ORDER BY exposure DESC;''')
    players = Ranking(players.fetchall(), start=1,
            key=lambda x: x['exposure'])
    aliases = db.execute('SELECT alias FROM player ORDER BY alias;')
    recents = db.execute('''
            SELECT w.alias, l.alias, winscore, losescore, scheduled
            FROM match
            JOIN player w ON winner = w.id
            JOIN player l ON loser = l.id
            ORDER BY date DESC LIMIT 10;''')

    games = db.execute('SELECT COUNT(id) FROM match;').fetchone()

    # get or regenerate the schedule
    weekrow = db.execute('SELECT * FROM week;').fetchone()
    week = datetime.now().isocalendar()[1]
    if weekrow is None or weekrow['week'] != week:
        db.execute('DELETE FROM schedule;')
        db.execute('DELETE FROM week;')
        db.execute('INSERT INTO week VALUES (?);', (week,))

        players2 = db.execute('''
                SELECT *
                FROM player
                WHERE active = 1
                ORDER BY exposure DESC;''').fetchall()

        match_offset = min((week % 5) + 1, len(players2)//2)

        # shuffle bottom of ladder so players get new opponents
        # (assumes the in-place random.shuffle; the original import is not shown)
        tail = players2[-match_offset:]
        shuffle(tail)
        players2 = players2[:-match_offset] + tail

        matches = []
        while len(players2) > match_offset:
            p1 = players2.pop(0)
            p2 = players2.pop(match_offset)
            matches.append((p1['id'], p2['id']))

        while len(players2) > 1:
            p1 = players2.pop(0)
            p2 = players2.pop(1)
            matches.append((p1['id'], p2['id']))

        db.executemany('''
            INSERT INTO schedule (p1, p2)
            VALUES (?, ?);''',
            matches)

        db.commit()

    schedule = db.execute('''
        SELECT p1.alias, p2.alias, p1.mu, p1.sigma, p2.mu, p2.sigma
        FROM schedule
        JOIN player p1 ON schedule.p1 = p1.id
        JOIN player p2 ON schedule.p2 = p2.id;''').fetchall()

    qualities = []
    for match in schedule:
        r1 = ts.Rating(match[2], match[3])
        r2 = ts.Rating(match[4], match[5])
        qualities.append(ts.quality_1vs1(r1, r2) * 100)

    return render_template('index.html',
            players=players, aliases=aliases, recents=recents, games = games[0],
            schedule=zip(schedule, qualities), rankedweek=(week%2==1))
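
The ladder above is ordered by a stored `exposure` column. A sketch of how that column could be kept in sync after each game (the table and column names mirror the queries in index(); the update itself is an assumption about this app):

import trueskill as ts

def record_result(db, winner_id, loser_id, w_rating, l_rating):
    # re-rate the pair and refresh the conservative exposure
    # (mu - 3*sigma with the default environment) used for ranking
    new_w, new_l = ts.rate_1vs1(w_rating, l_rating)
    for pid, r in ((winner_id, new_w), (loser_id, new_l)):
        db.execute(
            'UPDATE player SET mu = ?, sigma = ?, exposure = ? WHERE id = ?;',
            (r.mu, r.sigma, ts.expose(r), pid))
    db.commit()
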
Code example #37
0
File: task.py Project: ptrhlm/ChaTa-Compare
def generate_new_tasks(db_session, *, survey: Survey) -> None:
    if survey.type == SurveyType.COMPARISON:
        active_task_count = get_active_task_count(db_session, survey=survey)
        existing_tasks = get_task_count(db_session, survey_id=survey.id)
        chart_count = db_session\
            .query(chart_survey_association)\
            .filter(chart_survey_association.c.survey_id == survey.id)\
            .count()
        all_tasks = chart_count * survey.tasks_per_chart
        tasks_to_add = min(500, all_tasks - existing_tasks) - active_task_count

        if tasks_to_add < 100:
            return

        sub1 = db_session\
            .query(
                Task.chart1_id.label("chart_id"), func.count('*').label("task1_count")
            )\
            .filter(Task.survey_id == survey.id)\
            .group_by(Task.chart1_id)\
            .subquery()
        sub2 = db_session\
            .query(
                Task.chart2_id.label("chart_id"), func.count('*').label("task2_count")
            )\
            .filter(Task.survey_id == survey.id)\
            .group_by(Task.chart2_id)\
            .subquery()
        chart_task_count = db_session\
            .query(
                chart_survey_association.c.chart_id.label("chart_id"),
                (func.coalesce(sub1.c.task1_count, 0) + func.coalesce(sub2.c.task2_count, 0)).label("task_count")
            )\
            .filter(chart_survey_association.c.survey_id == survey.id)\
            .outerjoin(sub1, sub1.c.chart_id == chart_survey_association.c.chart_id)\
            .outerjoin(sub2, sub2.c.chart_id == chart_survey_association.c.chart_id)\
            .subquery()
        charts = db_session\
            .query(chart_task_count.c.chart_id.label("chart_id"))\
            .filter(chart_task_count.c.task_count < survey.tasks_per_chart)\
            .subquery()

        current_tasks = db_session\
            .query(
                Task.chart1_id,
                Task.chart2_id
            )\
            .filter(Task.survey_id == survey.id)\
            .subquery()

        chart1 = aliased(chart_survey_association)
        chart2 = aliased(chart_survey_association)
        task_candidates = db_session\
            .query(
                chart1.c.chart_id.label("chart1_id"),
                chart1.c.sigma.label("sigma1"),
                chart1.c.mu.label("mu1"),
                chart2.c.chart_id.label("chart2_id"),
                chart2.c.sigma.label("sigma2"),
                chart2.c.mu.label("mu2"),
            )\
            .filter(chart1.c.chart_id < chart2.c.chart_id)\
            .filter(tuple_(chart1.c.chart_id, chart2.c.chart_id).notin_(current_tasks))\
            .filter(chart1.c.chart_id.in_(charts))\
            .filter(chart2.c.chart_id.in_(charts))\
            .order_by(text("RANDOM()"))\
            .limit(1000)\
            .all()

        task_candidates = [t._asdict() for t in task_candidates]
        task_candidates = [
            (t["chart1_id"], t["chart2_id"],
             # note: trueskill.Rating takes (mu, sigma), in that order
             trueskill.quality_1vs1(trueskill.Rating(mu=t["mu1"], sigma=t["sigma1"]),
                                    trueskill.Rating(mu=t["mu2"], sigma=t["sigma2"])))
            for t in task_candidates
        ]

        selected_tasks = list(
            sorted(task_candidates, key=lambda x: x[2],
                   reverse=True))[:tasks_to_add]

        db_session.execute(Task.__table__.insert(), [{
            "survey_id": survey.id,
            "chart1_id": chart1_id,
            "chart2_id": chart2_id
        } for chart1_id, chart2_id, score in selected_tasks])

        db_session.commit()
    elif survey.type == SurveyType.SINGLE:
        charts = db_session\
            .query(chart_survey_association.c.chart_id.label("chart_id"))\
            .filter(chart_survey_association.c.survey_id == survey.id)\
            .subquery()
        tasked_charts = db_session\
            .query(Task.chart1_id)\
            .filter(Task.survey_id == survey.id)\
            .subquery()
        untasked_charts = db_session\
            .query(charts.c.chart_id.label("chart1_id"), literal(survey.id).label("survey_id"))\
            .filter(charts.c.chart_id.notin_(tasked_charts))\
            .subquery()

        db_session.execute(Task.__table__.insert().from_select(
            ["chart1_id", "survey_id"], untasked_charts))
        db_session.commit()
    else:
        raise RuntimeError("Type of survey unknown")
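
generate_new_tasks only creates comparison tasks; the rating update that would consume an annotator's answer is not shown. A sketch of what that complementary step might look like for this schema (apply_comparison_result is a hypothetical helper; mu and sigma are the columns read by the candidate query above):

import trueskill

def apply_comparison_result(db_session, *, survey_id, winner_chart_id, loser_chart_id):
    """Re-rate two charts after an annotator picked a winner."""
    ratings = {}
    for chart_id in (winner_chart_id, loser_chart_id):
        row = db_session\
            .query(chart_survey_association.c.mu, chart_survey_association.c.sigma)\
            .filter(chart_survey_association.c.survey_id == survey_id)\
            .filter(chart_survey_association.c.chart_id == chart_id)\
            .one()
        ratings[chart_id] = trueskill.Rating(mu=row.mu, sigma=row.sigma)

    new_winner, new_loser = trueskill.rate_1vs1(ratings[winner_chart_id],
                                                ratings[loser_chart_id])
    for chart_id, rating in ((winner_chart_id, new_winner),
                             (loser_chart_id, new_loser)):
        db_session.execute(
            chart_survey_association.update()
            .where(chart_survey_association.c.survey_id == survey_id)
            .where(chart_survey_association.c.chart_id == chart_id)
            .values(mu=rating.mu, sigma=rating.sigma))
    db_session.commit()
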
Code example #38
0
from trueskill import Rating, quality_1vs1, rate_1vs1, quality, rate

alice, bob = Rating(25), Rating(30)  # assign Alice and Bob's ratings
if quality_1vs1(alice, bob) < 0.50:
    print('This match seems to be not so fair')
alice, bob = rate_1vs1(alice, bob)  # update the ratings after the match

print('\n\nHey there')

r1 = Rating()
r2 = Rating()

print('{:.1%} chance to draw'.format(quality_1vs1(r1, r2)))

new_r1, new_r2 = rate_1vs1(r1, r2, drawn=True)
print(new_r1)
print(new_r2)

r1 = Rating()  # 1P's skill
r2 = Rating()  # 2P's skill
r3 = Rating()  # 3P's skill
t1 = [r1]  # Team A contains just 1P
t2 = [r2, r3]  # Team B contains 2P and 3P

print('{:.1%} chance to draw'.format(quality([t1, t2])))
#13.5% chance to draw
(new_r1, ), (new_r2, new_r3) = rate([t1, t2], ranks=[0, 1])
print(new_r1)
#trueskill.Rating(mu=33.731, sigma=7.317)
print(new_r2)
#trueskill.Rating(mu=16.269, sigma=7.317)
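
quality_1vs1 and quality give the draw probability, not the chance of winning. Not part of the trueskill API itself, but a commonly used helper that turns the two teams' ratings into a win probability, reusing beta and the normal CDF from the global environment (a sketch; it reuses t1 and t2 from the example above):

import itertools
import math
import trueskill

def win_probability(team1, team2, env=None):
    # P(team1 wins) = Phi(delta_mu / sqrt(n * beta^2 + sum of sigma^2))
    env = env or trueskill.global_env()
    delta_mu = sum(r.mu for r in team1) - sum(r.mu for r in team2)
    sum_sigma = sum(r.sigma ** 2 for r in itertools.chain(team1, team2))
    size = len(team1) + len(team2)
    denom = math.sqrt(size * (env.beta ** 2) + sum_sigma)
    return env.cdf(delta_mu / denom)

print('{:.1%} chance that Team A beats Team B'.format(win_probability(t1, t2)))
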