Beispiel #1
0
 def update_ranking_stats(self, ranking_id=None):
     """ Build ranking stats based on the ranking by calling into c++.

     :param ranking_id: id of the ranking to build stats for; defaults to
         the id of self.ranking when omitted.
     """
     if ranking_id is None: ranking_id = self.ranking.id
     # The c++ extension opens its own db connection using the db name.
     cpp = sc2.RankingData(self.db_name, Enums.INFO)
     cpp.load(ranking_id)
     # Stats are stamped with the current unix time.
     cpp.save_stats(ranking_id, to_unix(utcnow()))
     # Explicit release of the c++ object's resources (db connection/memory).
     cpp.release()
Beispiel #2
0
    def process_ladder(self, load=False, save=False, region=Region.EU, fetch_time=None,
                       mode=Mode.TEAM_1V1, version=Version.HOTS, league=League.GOLD, season=None, tier=0,
                       members=None, **kwargs):
        """ Update a ranking building single member with kwargs or use members if set. """

        # Fall back to defaults for anything the caller left unset/falsy.
        season = self.db.season if not season else season
        fetch_time = utcnow() if not fetch_time else fetch_time
        members = [gen_member(**kwargs)] if not members else members

        # Lazily create the c++ ranking-data wrapper on first use.
        if not getattr(self, 'cpp', None):
            self.cpp = sc2.RankingData(self.db.db_name, Enums.INFO)

        if load:
            self.load()

        self.cpp.update_with_ladder(
            0,  # bid
            0,  # source_id
            region,
            mode,
            league,
            tier,
            version,
            season.id,
            to_unix(fetch_time),
            fetch_time.date().isoformat(),
            Mode.team_size(mode),
            members,
        )

        if save:
            self.save_to_ranking()
Beispiel #3
0
    def run(self, args, logger):
        """ Re-save ranking data and stats for every ranking from season 28 onwards. """

        cpp = sc2.RankingData(get_db_name(), Enums.INFO)

        rankings = Ranking.objects.filter(season_id__gte=28)
        for ranking in rankings:
            cpp.load(ranking.id)
            cpp.save_data(ranking.id, ranking.season_id, to_unix(utcnow()))
            cpp.save_stats(ranking.id, to_unix(utcnow()))

        return 0
Beispiel #4
0
    def create_ranking_data(self, raw=True, **kwargs):
        """ Create a RankingData row (and raw c++ data) for the current ranking. """
        defaults = dict(ranking=self.ranking, updated=utcnow())
        kwargs = merge_args(defaults, kwargs)
        team_ranks = kwargs.pop('data', [])
        ranking = kwargs['ranking']

        # Normalize each team rank and link its source cache to the ranking.
        for team_rank in team_ranks:
            self._default_team_rank(team_rank)
            source = self.get(Cache, pk=team_rank['source_id'])
            ranking.sources.add(source)

        self.ranking_data = RankingData.objects.create(**kwargs)
        sc2.save_ranking_data_raw(self.db_name, ranking.id, 0, team_ranks, True)

        if not raw:
            # Round-trip through c++ to also persist the processed form.
            cpp = sc2.RankingData(self.db_name, Enums.INFO)
            cpp.load(ranking.id)
            cpp.save_data(ranking.id, ranking.season_id, to_unix(utcnow()))
            cpp.release()

        return self.ranking_data
Beispiel #5
0
def countinously_update(regions=None,
                        check_stop=None,
                        update_manager=None,
                        switch_hour=10):
    """ Continuously build rankings until check_stop signals a stop.

    Loops forever (until check_stop(throw=False) is truthy): switches to a
    new season when enough of its ladders have appeared, creates a fresh
    ranking once per day around switch_hour UTC (or after 48h regardless),
    and otherwise keeps updating the current ranking.

    :param regions: passed through to update_manager.update_until.
    :param check_stop: callable used both to poll (throw=False) and, inside
        update_until, presumably to abort; NOTE(review): no default is
        supplied, so calling with check_stop=None would fail — confirm all
        callers pass one.
    :param update_manager: manager used to save/update rankings; a default
        UpdateManager is created when omitted.
    :param switch_hour: UTC hour at which a new intra-season ranking may be
        created.
    """

    update_manager = update_manager or UpdateManager()

    # Resume from the most recent ranking; it must be fully complete.
    ranking = Ranking.objects.order_by('-id').first()

    if ranking.status != Ranking.COMPLETE_WITH_DATA:
        raise Exception("ranking %d is not in a good state, clean up" %
                        ranking.id)

    season = ranking.season

    cpp = sc2.RankingData(get_db_name(), Enums.INFO)

    while not check_stop(throw=False):

        # Check if we want to switch to new season.

        current_season = Season.get_current_season()
        if current_season.id != season.id:

            # Only ever expect to advance by exactly one season.
            if current_season.id != season.get_next().id:
                raise Exception(
                    "something is really wrong, current season is not next")

            # Require several good ladders before trusting the new season.
            if Ladder.objects.filter(season=current_season,
                                     strangeness=Ladder.GOOD).count() > 8:
                season = current_season
                logger.info(
                    "switching to rank new season %d multiple new season ladders was detected"
                    % season.id)

        # Do we want to create new ranking? We want to switch around switch_hour UTC every day but not if ranking is
        # too young. If too old, switch anyway.
        now = utcnow()

        if season.id != ranking.season_id:
            # Create new ranking based on new season.
            ranking = Ranking.objects.create(season=season,
                                             created=now,
                                             data_time=season.start_time(),
                                             min_data_time=season.start_time(),
                                             max_data_time=season.start_time(),
                                             status=Ranking.CREATED)
            logger.info("created new ranking %d based on new season %d" %
                        (ranking.id, season.id))

            # Start from an empty team-rank cache for the new season.
            cpp.clear_team_ranks()
            cpp.reconnect_db()
            update_manager.save_ranking(cpp, ranking, 0)

        elif ((ranking.created + timedelta(hours=48) < now or
               (ranking.created + timedelta(hours=12) < now
                and now.hour == switch_hour))
              and not ranking.season.near_start(now, days=4)):
            # Create a new ranking within the season.

            cpp.clear_team_ranks()
            cpp.reconnect_db()

            with transaction.atomic():
                new_ranking = Ranking.objects.create(
                    season=season,
                    created=now,
                    data_time=ranking.data_time,
                    min_data_time=ranking.min_data_time,
                    max_data_time=ranking.max_data_time,
                    status=Ranking.CREATED)
                # Copy all caches of old ranking to new ranking. Also remake the full ranking while doing so to get
                # rid of leave leaguers.

                logger.info(
                    "created new ranking %d basing it on copy of ranking %d, seaons %d"
                    % (new_ranking.id, ranking.id, season.id))

                count = ranking.sources.count()
                logger.info(
                    "copying %d cached ladders from ranking %d to ranking %d and adding them to ranking"
                    % (count, ranking.id, new_ranking.id))

                for i, lc in enumerate(ranking.sources.all(), start=1):
                    # pk = None + save() makes a copy of the cache row.
                    lc.pk = None
                    lc.created = utcnow()
                    lc.ladder = None
                    # NOTE(review): this assigns the OLD ranking to the copy
                    # while the copy is added to new_ranking.sources below —
                    # looks like it may be intended to be new_ranking; confirm.
                    lc.ranking = ranking
                    lc.save()

                    new_ranking.sources.add(lc)

                    ladder = Ladder.objects.get(region=lc.region, bid=lc.bid)
                    team_size = Mode.team_size(ladder.mode)
                    stats = cpp.update_with_ladder(
                        ladder.id, lc.id, ladder.region, ladder.mode,
                        ladder.league, ladder.tier, ladder.version,
                        ladder.season_id, to_unix(lc.updated), team_size,
                        ApiLadder(lc.data,
                                  lc.url).members_for_ranking(team_size))

                    # Periodic progress logging for long copies.
                    if i % 100 == 0:
                        logger.info(
                            "copied and added cache %d/%d, player cache size %d, team cache size %d"
                            % (i, count, stats['player_cache_size'],
                               stats['team_cache_size']))

            ranking = new_ranking
            update_manager.save_ranking(cpp, ranking, 0)
        else:
            logger.info("continuing with ranking %d, season %d" %
                        (ranking.id, season.id))
            cpp.reconnect_db()
            cpp.load(ranking.id)

        # Update until the next switch_hour boundary (today or tomorrow).
        now = utcnow()
        until = now.replace(hour=switch_hour, minute=0, second=0)
        if until < now:
            until += timedelta(hours=24)

        update_manager.update_until(ranking=ranking,
                                    cpp=cpp,
                                    regions=regions,
                                    until=until,
                                    check_stop=check_stop)
Beispiel #6
0
def refetch_past_season(season, now, check_stop, bnet_client):
    """ Refetch ladders for past seasons.

    Refetches every good ladder of the season that was last updated inside
    the refetch window, feeds the fresh data into the season's most recent
    ranking via c++, and finally re-saves the ranking data/stats if anything
    was fetched.

    :param season: Season to refetch.
    :param now: current (presumably UTC) datetime used for the age windows.
    :param check_stop: callable raising to abort between ladders.
    :param bnet_client: client used to fetch ladders from the battle.net API.
    """

    with transaction.atomic():
        # Use the ranking with the latest data_time as the refetch target.
        ranking = Ranking.objects.filter(
            season=season).order_by('-data_time').first()
        if ranking is None:
            logger.warning(
                "season %d has no ranking to check refetch past for, this is strange, skipping"
                % season.id)
            return

        # Ladders older than this are considered not worth refetching.
        too_old_limit = now - timedelta(
            days=Season.REFETCH_PAST_DAYS_AGE_LIMIT)

        # Ladders already updated after this point do not need a refetch.
        need_refetch_limit = season.end_time() + timedelta(
            days=Season.REFETCH_PAST_UNTIL_DAYS_AFTER_SEASON_END)

        ladders = list(
            Ladder.objects.filter(season=season,
                                  strangeness=Ladder.GOOD,
                                  updated__lt=need_refetch_limit,
                                  updated__gt=too_old_limit))

        logger.info("%d ladders to refetch for season %d" %
                    (len(ladders), season.id))

    if not ladders:
        return

    # This is kind of bad but since c++ works in it's own db connection we can't fetch ladders and update
    # ranking in same transaction, which in turn means that if the code fails here ranking needs to be repaired.

    cpp = sc2.RankingData(get_db_name(), Enums.INFO)
    cpp.load(ranking.id)

    # Stays 0 (falsy) if no ladder was successfully fetched.
    fetch_time = 0

    try:
        for i, ladder in enumerate(ladders, start=1):
            check_stop()

            with transaction.atomic(), LogContext(
                    region=Region.key_by_ids[ladder.region]):

                status, api_ladder, fetch_time, fetch_duration = \
                    bnet_client.fetch_ladder(ladder.region, ladder.bid, timeout=20)

                logger.info("fetched %s got %d in %.2fs, %s (%d/%d)" %
                            (api_ladder.url, status, fetch_duration,
                             ladder.info(), i, len(ladders)))

                # 503 means the api is unavailable; abort the whole season.
                if status == 503:
                    logger.warning(
                        "got 503, skipping refetch past for rest of this season"
                    )
                    raise SystemExit()

                if status != 200:
                    logger.info("refetching %d returned %d, skipping" %
                                (ladder.id, status))
                    continue

                update_ladder_cache(cpp, ranking, ladder, status, api_ladder,
                                    fetch_time)

                logger.info(
                    "saved updated ladder %d and added data to ranking %d" %
                    (ladder.id, ranking.id))

    except SystemExit:
        # Used as a controlled early-exit from the loop above.
        pass

    except Exception as e:
        raise Exception(
            "failure while refetching past, you will need to repair ranking %d:"
            % ranking.id) from e

    if fetch_time:
        logger.info("saving ranking data and ranking stats for ranking %d" %
                    ranking.id)
        cpp.save_data(ranking.id, ranking.season_id, to_unix(utcnow()))
        cpp.save_stats(ranking.id, to_unix(utcnow()))
        # Cap data_time at season end so it never points past the season.
        ranking.data_time = min(season.end_time(), fetch_time)
        ranking.save()
    def run(self, args, logger):
        """ Repair a ranking by rebuilding it from cached ladder data.

        Rebuilds ranking args.ranking_id by replaying every relevant cache
        through the c++ ranking builder, then re-saves the ranking's data
        and stats. Only rankings in CREATED or COMPLETE_WITH_DATA state can
        be repaired.
        """
        logger.info(
            "NOTE: fetching needs to be turned off if repairing latest rank")

        ranking = Ranking.objects.get(pk=args.ranking_id)

        if ranking.status not in [Ranking.CREATED, Ranking.COMPLETE_WITH_DATA]:
            raise Exception("ranking with status %s can not be repaired" %
                            ranking.status)

        # If last in season use all available ladders, not only those connected to ranking.
        last_in_season = Ranking.objects.filter(
            season=ranking.season).order_by('-id').first()
        if last_in_season == ranking:
            cursor = connection.cursor()
            # Pick the newest cache per (bid, region) for the season's good
            # ladders, ordered oldest-update first for replay.
            cursor.execute(
                "SELECT id FROM ("
                "  SELECT DISTINCT ON (c.bid, c.region) c.id, c.updated FROM cache c JOIN ladder l"
                "    ON c.bid = l.bid AND c.region = l.region"
                "    WHERE l.strangeness = %s AND l.season_id = %s"
                "    ORDER BY c.bid, c.region, c.updated DESC) s"
                " ORDER by updated", [Ladder.GOOD, ranking.season_id])
            cache_ids = [row[0] for row in cursor.fetchall()]
            # Detach all caches from the ranking; they are re-linked below.
            cursor.execute(
                "UPDATE cache SET ranking_id = NULL WHERE ranking_id = %s",
                [ranking.id])
        else:
            cache_ids = [
                c['id']
                for c in ranking.sources.values('id').order_by('updated')
            ]

        cpp = sc2.RankingData(get_db_name(), Enums.INFO)

        count = len(cache_ids)
        for i, id_ in enumerate(cache_ids, start=1):
            cache = Cache.objects.get(id=id_)
            self.check_stop()
            try:
                ladder = Ladder.objects.get(season=ranking.season,
                                            region=cache.region,
                                            bid=cache.bid)
            except Ladder.DoesNotExist:
                raise Exception(
                    "ladder region %s, bid %s missing in ladder table" %
                    (cache.region, cache.bid))

            if cache.ranking is None and cache.ladder is None:
                # Orphan cache: adopt it into this ranking.
                cache.ranking = ranking
                cache.save()
            elif cache.ranking != ranking:
                # Cache belongs to another ranking: save a copy for this one
                # (id = None + save() inserts a new row).
                logger.info("cache %s was not included in ranking copying" %
                            cache.id)
                cache.id = None
                cache.ladder = None
                cache.ranking = ranking
                cache.save()

            logger.info("adding cache %s, ladder %s, %d/%d" %
                        (cache.id, ladder.id, i, count))

            team_size = Mode.team_size(ladder.mode)
            cpp.update_with_ladder(
                ladder.id, cache.id, ladder.region, ladder.mode, ladder.league,
                ladder.tier, ladder.version, ladder.season_id,
                to_unix(cache.updated), team_size,
                ApiLadder(cache.data).members_for_ranking(team_size))

        ranking.set_data_time(ranking.season, cpp)
        ranking.save()
        self.check_stop()
        cpp.save_data(ranking.id, ranking.season_id, to_unix(utcnow()))
        self.check_stop()
        cpp.save_stats(ranking.id, to_unix(utcnow()))

        return 0
Beispiel #8
0
 def load(self):
     """ Create the c++ ranking-data wrapper and load the current ranking. """
     cpp = sc2.RankingData(self.db.db_name, Enums.INFO)
     cpp.load(self.db.ranking.id)
     self.cpp = cpp
Beispiel #9
0
def refetch_past_season(season, now, check_stop, bnet_client):
    """ Refetch ladders for past seasons.

    Refetches every good (non-SEA) ladder of the season that has not been
    refreshed recently, feeds the fresh data into the season's most recent
    ranking via c++, then re-saves the ranking data/stats if anything was
    actually fetched.

    :param season: Season to refetch.
    :param now: current (presumably UTC) datetime used for the age window.
    :param check_stop: callable raising to abort between ladders.
    :param bnet_client: client used to fetch ladders from the battle.net API.
    """

    start = perf_counter()

    need_refetch_limit = now - timedelta(
        days=Season.REFETCH_PAST_REFRESH_WHEN_OLDER_THAN_DAYS)

    with transaction.atomic():
        # Use the ranking with the latest data_time as the refetch target.
        ranking = Ranking.objects.filter(
            season=season).order_by('-data_time').first()
        if ranking is None:
            logger.warning(
                f"season {season.id} has no ranking to check refetch past for, this is strange, skipping"
            )
            return

        # Prevent refetch of sea since sea api endpoints are no longer present, the data will be purged eventually.
        ladders_query = Ladder.objects.filter(
            season=season, strangeness=Ladder.GOOD).exclude(region=Region.SEA)

        last_updated = ladders_query.aggregate(Min('updated'))['updated__min']

        # aggregate() returns None for an empty queryset — guard against it,
        # comparing a datetime against None raises TypeError.
        if last_updated is not None and need_refetch_limit < last_updated:
            logger.info(
                f"skipping refetch of season {season.id}, it was refetched {last_updated.date()}"
            )
            return

        ladders = list(ladders_query.filter(updated__lt=need_refetch_limit))

        ladders_count = ladders_query.count()

        logger.info(
            f"{len(ladders)} (of {ladders_count}) to refetch for season {season.id}"
        )

    # if not ladders:
    #     return

    # This is kind of bad but since c++ works in it's own db connection we can't fetch ladders and update
    # ranking in same transaction, which in turn means that if the code fails here ranking needs to be repaired.
    # TODO Move updating of cache to cpp? How is this done in update?

    cpp = sc2.RankingData(get_db_name(), Enums.INFO)
    cpp.load(ranking.id)

    # Stays 0 (falsy) if no ladder was successfully fetched.
    fetch_time = 0

    try:
        for i, ladder in enumerate(ladders, start=1):
            check_stop()

            with transaction.atomic(), LogContext(
                    region=Region.key_by_ids[ladder.region]):

                status, api_ladder, fetch_time, fetch_duration = \
                    bnet_client.fetch_ladder(ladder.region, ladder.bid, timeout=20)

                logger.info("fetched %s got %d in %.2fs, %s (%d/%d)" %
                            (api_ladder.url, status, fetch_duration,
                             ladder.info(), i, len(ladders)))

                # 503 means the api is unavailable; abort the whole season.
                if status == 503:
                    logger.warning(
                        "got 503, skipping refetch past for rest of this season"
                    )
                    raise SystemExit()

                if status != 200:
                    logger.info("refetching %d returned %d, skipping ladder" %
                                (ladder.id, status))
                    continue

                update_ladder_cache(cpp, ranking, ladder, status, api_ladder,
                                    fetch_time)

                logger.info(
                    "saved updated ladder %d and added data to ranking %d" %
                    (ladder.id, ranking.id))

    except SystemExit:
        # Used as a controlled early-exit from the loop above.
        pass

    except Exception as e:
        raise Exception(
            "failure while refetching past, you will need to repair ranking %d:"
            % ranking.id) from e

    if fetch_time:
        logger.info("saving ranking data and ranking stats for ranking %d" %
                    ranking.id)
        cpp.save_data(ranking.id, ranking.season_id, to_unix(utcnow()))
        cpp.save_stats(ranking.id, to_unix(utcnow()))
        ranking.set_data_time(season, cpp)
        ranking.save()
    else:
        logger.info(
            "skipping save of ranking data and ranking stats for ranking %d, nothing changed"
            % ranking.id)

    logger.info(
        f"completed refetch of season {season.id} in {human_i_split(int(perf_counter() - start))} seconds"
    )