    def get(self, event_key):
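        """Compute matchstats for an event and cache them as matchstats_json.

        For 2016 events, match and ranking predictions are included as well.
        """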
        event = Event.get_by_id(event_key)
        matchstats_dict = MatchstatsHelper.calculate_matchstats(event.matches, event.year)

        if event.year == 2016:
            organized_matches = MatchHelper.organizeMatches(event.matches)
            match_predictions, match_prediction_stats = PredictionHelper.get_match_predictions(organized_matches['qm'])
            ranking_predictions, ranking_prediction_stats = PredictionHelper.get_ranking_predictions(organized_matches['qm'], match_predictions)

            matchstats_dict['match_predictions'] = match_predictions
            matchstats_dict['match_prediction_stats'] = match_prediction_stats
            matchstats_dict['ranking_predictions'] = ranking_predictions
            matchstats_dict['ranking_prediction_stats'] = ranking_prediction_stats

        if any(v != {} for v in matchstats_dict.values()):
            event.matchstats_json = json.dumps(matchstats_dict)
            EventManipulator.createOrUpdate(event)
        else:
            logging.warn("Matchstat calculation for {} failed!".format(event_key))

        template_values = {
            'matchstats_dict': matchstats_dict,
        }

        path = os.path.join(os.path.dirname(__file__), '../templates/math/event_matchstats_do.html')
        self.response.out.write(template.render(path, template_values))
    def get(self, event_key):
        event = Event.get_by_id(event_key)
        matchstats_dict = MatchstatsHelper.calculate_matchstats(event.matches)
        if matchstats_dict != {}:
            event.matchstats_json = json.dumps(matchstats_dict)
            event.dirty = True  # TODO: hacky
            EventManipulator.createOrUpdate(event)
        else:
            logging.warn("Matchstat calculation for {} failed!".format(event_key))

        template_values = {"matchstats_dict": matchstats_dict}

        path = os.path.join(os.path.dirname(__file__), "../templates/math/event_matchstats_do.html")
        self.response.out.write(template.render(path, template_values))
    def get(self, event_key):
        event = Event.get_by_id(event_key)
        matchstats_dict = MatchstatsHelper.calculate_matchstats(event.matches)
        if matchstats_dict != {}:
            event.matchstats_json = json.dumps(matchstats_dict)
            event.put()
        else:
            logging.warn("Matchstat calculation for {} failed!".format(event_key))

        template_values = {
            'matchstats_dict': matchstats_dict,
        }

        path = os.path.join(os.path.dirname(__file__), '../templates/math/event_matchstats_do.html')
        self.response.out.write(template.render(path, template_values))
    def get(self, event_key):
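        """Recompute matchstats, predictions, and event insights, then persist them on an EventDetails model."""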
        event = Event.get_by_id(event_key)
        matchstats_dict = MatchstatsHelper.calculate_matchstats(
            event.matches, event.year)
        if not any(v != {} for v in matchstats_dict.values()):
            logging.warn(
                "Matchstat calculation for {} failed!".format(event_key))
            matchstats_dict = None

        predictions_dict = None
        if ((event.year in {2016, 2017, 2018, 2019, 2020} and
                event.event_type_enum in EventType.SEASON_EVENT_TYPES) or
                event.enable_predictions):
            sorted_matches = MatchHelper.play_order_sort_matches(event.matches)
            match_predictions, match_prediction_stats, stat_mean_vars = PredictionHelper.get_match_predictions(
                sorted_matches)
            ranking_predictions, ranking_prediction_stats = PredictionHelper.get_ranking_predictions(
                sorted_matches, match_predictions)

            predictions_dict = {
                'match_predictions': match_predictions,
                'match_prediction_stats': match_prediction_stats,
                'stat_mean_vars': stat_mean_vars,
                'ranking_predictions': ranking_predictions,
                'ranking_prediction_stats': ranking_prediction_stats
            }

        event_insights = EventInsightsHelper.calculate_event_insights(
            event.matches, event.year)

        event_details = EventDetails(
            id=event_key,
            matchstats=matchstats_dict,
            predictions=predictions_dict,
            insights=event_insights,
        )
        EventDetailsManipulator.createOrUpdate(event_details)

        template_values = {
            'matchstats_dict': matchstats_dict,
        }

        if 'X-Appengine-Taskname' not in self.request.headers:  # Only write out if not in taskqueue
            path = os.path.join(os.path.dirname(__file__),
                                '../templates/math/event_matchstats_do.html')
            self.response.out.write(template.render(path, template_values))
    def get(self, event_key):
        event = Event.get_by_id(event_key)
        matchstats_dict = MatchstatsHelper.calculate_matchstats(event.matches)
        if any(v != {} for v in matchstats_dict.values()):
            event.matchstats_json = json.dumps(matchstats_dict)
            event.dirty = True  # TODO: hacky
            EventManipulator.createOrUpdate(event)
        else:
            logging.warn(
                "Matchstat calculation for {} failed!".format(event_key))

        template_values = {
            'matchstats_dict': matchstats_dict,
        }

        path = os.path.join(os.path.dirname(__file__),
                            '../templates/math/event_matchstats_do.html')
        self.response.out.write(template.render(path, template_values))
    def get(self, event_key):
        event = Event.get_by_id(event_key)
        matchstats_dict = MatchstatsHelper.calculate_matchstats(event.matches, event.year)
        if not any(v != {} for v in matchstats_dict.values()):
            logging.warn("Matchstat calculation for {} failed!".format(event_key))
            matchstats_dict = None

        predictions_dict = None
        if (event.year in {2016, 2017, 2018} and event.event_type_enum in EventType.SEASON_EVENT_TYPES) or event.enable_predictions:
            sorted_matches = MatchHelper.play_order_sort_matches(event.matches)
            match_predictions, match_prediction_stats, stat_mean_vars = PredictionHelper.get_match_predictions(sorted_matches)
            ranking_predictions, ranking_prediction_stats = PredictionHelper.get_ranking_predictions(sorted_matches, match_predictions)

            predictions_dict = {
                'match_predictions': match_predictions,
                'match_prediction_stats': match_prediction_stats,
                'stat_mean_vars': stat_mean_vars,
                'ranking_predictions': ranking_predictions,
                'ranking_prediction_stats': ranking_prediction_stats
            }

        event_insights = EventInsightsHelper.calculate_event_insights(event.matches, event.year)

        event_details = EventDetails(
            id=event_key,
            matchstats=matchstats_dict,
            predictions=predictions_dict,
            insights=event_insights,
        )
        EventDetailsManipulator.createOrUpdate(event_details)

        template_values = {
            'matchstats_dict': matchstats_dict,
        }

        if 'X-Appengine-Taskname' not in self.request.headers:  # Only write out if not in taskqueue
            path = os.path.join(os.path.dirname(__file__), '../templates/math/event_matchstats_do.html')
            self.response.out.write(template.render(path, template_values))
    def get_match_predictions(cls, matches):
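        """Predict every match in play order using iterated OPR (ixOPR).

        Returns (predictions, prediction_stats): predicted scores and winners
        keyed by match key, plus accuracy/error benchmarks over played matches.
        """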
        if not matches:
            return None, None

        event_key = matches[0].event
        event = event_key.get()

        # Setup
        team_list, team_id_map = MatchstatsHelper.build_team_mapping(matches)
        last_event_stats = MatchstatsHelper.get_last_event_stats(team_list, event_key)
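        # Minv: precomputed inverse of the match matrix, reused to solve each stat's OPR system.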
        Minv = MatchstatsHelper.build_Minv_matrix(matches, team_id_map)

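        # Average each stat across teams' last events; used as the seed for teams with no prior data.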
        init_stats_sums = defaultdict(int)
        init_stats_totals = defaultdict(int)
        for _, stats in last_event_stats.items():
            for stat, stat_value in stats.items():
                init_stats_sums[stat] += stat_value
                init_stats_totals[stat] += 1

        init_stats_default = defaultdict(int)
        for stat, stat_sum in init_stats_sums.items():
            init_stats_default[stat] = float(stat_sum) / init_stats_totals[stat]

        relevant_stats = [
            'oprs',
            '2016autoPointsOPR',
            '2016crossingsOPR',
            '2016bouldersOPR'
        ]

        # Make predictions before each match
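        # Stats for match i use only matches [0, i) via limit_matches=i, so every prediction is made pre-match.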
        predictions = {}
        played_matches = 0
        played_matches_75 = 0
        correct_predictions = 0
        correct_predictions_75 = 0
        score_differences = []
        stats_sum = defaultdict(int)
        for i, match in enumerate(matches):
            # Calculate ixOPR
            all_ixoprs = {}
            for stat in relevant_stats:
                all_ixoprs[stat] = MatchstatsHelper.calc_stat(
                    matches, team_list, team_id_map, Minv, stat,
                    init_stats=last_event_stats,
                    init_stats_default=init_stats_default[stat],
                    limit_matches=i)
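            # Refine ixOPR: re-solve twice, seeding each pass with the previous results.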
            for _ in xrange(2):
                for stat in relevant_stats:
                    all_ixoprs[stat] = MatchstatsHelper.calc_stat(
                        matches, team_list, team_id_map, Minv, stat,
                        init_stats=all_ixoprs,
                        init_stats_default=init_stats_default[stat],
                        limit_matches=i)

            # Make prediction
            is_champs = event.event_type_enum in EventType.CMP_EVENT_TYPES
            prediction = cls._predict_match(match, all_ixoprs, is_champs)
            predictions[match.key.id()] = prediction

            # Benchmark prediction
            if match.has_been_played:
                played_matches += 1
                if prediction['prob'] > 75:
                    played_matches_75 += 1
                if match.winning_alliance == prediction['winning_alliance']:
                    correct_predictions += 1
                    if prediction['prob'] > 75:
                        correct_predictions_75 += 1
                    for alliance_color in ['red', 'blue']:
                        score_differences.append(abs(match.alliances[alliance_color]['score'] - prediction[alliance_color]['score']))

            # Update init_stats
            if match.has_been_played and match.score_breakdown:
                for alliance_color in ['red', 'blue']:
                    stats_sum['score'] += match.alliances[alliance_color]['score']

                    for stat in relevant_stats:
                        if stat == '2016autoPointsOPR':
                            stats_sum[stat] += match.score_breakdown[alliance_color]['autoPoints']
                        elif stat == '2016bouldersOPR':
                            stats_sum[stat] += (
                                match.score_breakdown[alliance_color]['autoBouldersLow'] +
                                match.score_breakdown[alliance_color]['autoBouldersHigh'] +
                                match.score_breakdown[alliance_color]['teleopBouldersLow'] +
                                match.score_breakdown[alliance_color]['teleopBouldersHigh'])
                        elif stat == '2016crossingsOPR':
                            stats_sum[stat] += (
                                match.score_breakdown[alliance_color]['position1crossings'] +
                                match.score_breakdown[alliance_color]['position2crossings'] +
                                match.score_breakdown[alliance_color]['position3crossings'] +
                                match.score_breakdown[alliance_color]['position4crossings'] +
                                match.score_breakdown[alliance_color]['position5crossings'])

            # Re-seed the defaults with the average per-robot contribution so far:
            # total / matches / 2 alliances / 3 teams = total / matches / 6
            init_stats_default['oprs'] = float(stats_sum['score']) / (i + 1) / 6
            for stat in relevant_stats:
                if stat != 'oprs':
                    init_stats_default[stat] = float(stats_sum[stat]) / (i + 1) / 6

        prediction_stats = {
            'wl_accuracy': None if played_matches == 0 else 100 * float(correct_predictions) / played_matches,
            'wl_accuracy_75': None if played_matches_75 == 0 else 100 * float(correct_predictions_75) / played_matches_75,
            'err_mean': np.mean(score_differences) if score_differences else None,
            'err_var': np.var(score_differences) if score_differences else None,
        }

        return predictions, prediction_stats
    def get_match_predictions(cls, matches):
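        """Predict every match using iterated OPR (ixOPR); returns (predictions, prediction_stats)."""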
        if not matches:
            return None, None

        event_key = matches[0].event
        event = event_key.get()

        # Setup
        team_list, team_id_map = MatchstatsHelper.build_team_mapping(matches)
        last_event_stats = MatchstatsHelper.get_last_event_stats(
            team_list, event_key)
        Minv = MatchstatsHelper.build_Minv_matrix(matches, team_id_map)

        init_stats_sums = defaultdict(int)
        init_stats_totals = defaultdict(int)
        for _, stats in last_event_stats.items():
            for stat, stat_value in stats.items():
                init_stats_sums[stat] += stat_value
                init_stats_totals[stat] += 1

        init_stats_default = defaultdict(int)
        for stat, stat_sum in init_stats_sums.items():
            init_stats_default[stat] = float(stat_sum) / init_stats_totals[stat]

        relevant_stats = [
            'oprs', '2016autoPointsOPR', '2016crossingsOPR', '2016bouldersOPR'
        ]

        # Make predictions before each match
        predictions = {}
        played_matches = 0
        played_matches_75 = 0
        correct_predictions = 0
        correct_predictions_75 = 0
        score_differences = []
        stats_sum = defaultdict(int)
        for i, match in enumerate(matches):
            # Calculate ixOPR
            all_ixoprs = {}
            for stat in relevant_stats:
                all_ixoprs[stat] = MatchstatsHelper.calc_stat(
                    matches, team_list, team_id_map, Minv, stat,
                    init_stats=last_event_stats,
                    init_stats_default=init_stats_default[stat],
                    limit_matches=i)
            for _ in xrange(2):
                for stat in relevant_stats:
                    all_ixoprs[stat] = MatchstatsHelper.calc_stat(
                        matches, team_list, team_id_map, Minv, stat,
                        init_stats=all_ixoprs,
                        init_stats_default=init_stats_default[stat],
                        limit_matches=i)

            # Make prediction
            is_champs = event.event_type_enum in EventType.CMP_EVENT_TYPES
            prediction = cls._predict_match(match, all_ixoprs, is_champs)
            predictions[match.key.id()] = prediction

            # Benchmark prediction
            if match.has_been_played:
                played_matches += 1
                if prediction['prob'] > 75:
                    played_matches_75 += 1
                if match.winning_alliance == prediction['winning_alliance']:
                    correct_predictions += 1
                    if prediction['prob'] > 75:
                        correct_predictions_75 += 1
                    for alliance_color in ['red', 'blue']:
                        score_differences.append(
                            abs(match.alliances[alliance_color]['score'] -
                                prediction[alliance_color]['score']))

            # Update init_stats
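            # score_breakdown can be missing for some matches; skip seed updates without it.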
            if match.has_been_played and match.score_breakdown:
                for alliance_color in ['red', 'blue']:
                    stats_sum['score'] += match.alliances[alliance_color]['score']

                    for stat in relevant_stats:
                        if stat == '2016autoPointsOPR':
                            stats_sum[stat] += match.score_breakdown[alliance_color]['autoPoints']
                        elif stat == '2016bouldersOPR':
                            stats_sum[stat] += (
                                match.score_breakdown[alliance_color]['autoBouldersLow'] +
                                match.score_breakdown[alliance_color]['autoBouldersHigh'] +
                                match.score_breakdown[alliance_color]['teleopBouldersLow'] +
                                match.score_breakdown[alliance_color]['teleopBouldersHigh'])
                        elif stat == '2016crossingsOPR':
                            stats_sum[stat] += (
                                match.score_breakdown[alliance_color]['position1crossings'] +
                                match.score_breakdown[alliance_color]['position2crossings'] +
                                match.score_breakdown[alliance_color]['position3crossings'] +
                                match.score_breakdown[alliance_color]['position4crossings'] +
                                match.score_breakdown[alliance_color]['position5crossings'])

            # Re-seed the defaults with the average per-robot contribution so far:
            # total / matches / 2 alliances / 3 teams = total / matches / 6
            init_stats_default['oprs'] = float(stats_sum['score']) / (i + 1) / 6
            for stat in relevant_stats:
                if stat != 'oprs':
                    init_stats_default[stat] = float(stats_sum[stat]) / (i + 1) / 6

        prediction_stats = {
            'wl_accuracy': None if played_matches == 0 else 100 * float(correct_predictions) / played_matches,
            'wl_accuracy_75': None if played_matches_75 == 0 else 100 * float(correct_predictions_75) / played_matches_75,
            'err_mean': np.mean(score_differences) if score_differences else None,
            'err_var': np.var(score_differences) if score_differences else None,
        }

        return predictions, prediction_stats