Example #1
    def save_ranking(self, cpp, ranking, queue_length):
        ranking.set_data_time(ranking.season.reload(), cpp)

        logger.info(
            "saving ranking %d, %d updates left in queue not included, new data_time is %s"
            % (ranking.id, queue_length, ranking.data_time))
        cpp.save_data(ranking.id, ranking.season_id, to_unix(utcnow()))
        cpp.save_stats(ranking.id, to_unix(utcnow()))

        ranking.status = Ranking.COMPLETE_WITH_DATA
        ranking.save()

        # Ping server to reload ranking.
        try:
            raw = request_tcp('localhost',
                              4747,
                              json.dumps({
                                  'cmd': 'refresh'
                              }).encode('utf-8'),
                              timeout=self.server_ping_timeout)
            response = json.loads(raw.decode('utf-8'))
            code = response.get('code')
            if code == 'ok':
                logger.info("refresh ping returned ok")
            else:
                logger.warning("refresh ping returned %s" % code)

        except OSError as e:
            logger.warning("refresh ping to server failed: " + str(e))
Example #2
    def get(self, request, mode_id=None):
        mode_id = int(mode_id)
        if mode_id not in Mode.stat_v1_ids:
            return HttpResponse(status=404)

        last_updated = to_unix(
            cache_value("ranking_stats_last_modified", 600,
                        ranking_stats_last_modified))
        now = to_unix(utcnow())

        try:
            if_modified_since = parse_http_date(
                request.META['HTTP_IF_MODIFIED_SINCE'])
        except (ValueError, KeyError):
            if_modified_since = 0

        if if_modified_since >= last_updated:
            response = HttpResponse("",
                                    content_type="application/json",
                                    status=304)
        else:
            response = HttpResponse(cache_value("ranking_stats_%d" % mode_id,
                                                600, rankings_view_client,
                                                'ranking_stats', mode_id),
                                    content_type="application/json")

        response['Cache-Control'] = "max-age=86400"
        response['Date'] = http_date(now)
        response['Expires'] = http_date(now + 86400)
        response['Last-Modified'] = http_date(last_updated)
        return response
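
A small sketch of the conditional-GET decision above, with an illustrative timestamp: Django's http_date/parse_http_date round-trip the Last-Modified value, so a client that echoes it back lands in the 304 branch.

from django.utils.http import http_date, parse_http_date

last_updated = 1700000000                        # illustrative unix timestamp
header = http_date(last_updated)                 # what the view sends as Last-Modified
if_modified_since = parse_http_date(header)      # what a client echoes in If-Modified-Since
assert if_modified_since >= last_updated         # -> the view answers 304 with an empty body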
Example #3
 def _default_team_rank(self, team_rank):
     """ Update a team_rank dict with defaults. """
     for k, v in self.default_ranking_data__data.items():
         team_rank.setdefault(k, v)
     team_rank.setdefault("team_id", self.team.id)
     team_rank.setdefault("data_time", to_unix(self.ranking.data_time))
     team_rank.setdefault("version", Version.HOTS)
     team_rank.setdefault("region", Region.EU)
     team_rank.setdefault("mode", Mode.TEAM_1V1)
     team_rank.setdefault("league", League.GOLD)
     team_rank.setdefault("tier", 0)
     team_rank.setdefault("ladder_id", self.ladder.id)
     team_rank.setdefault("join_time", to_unix(self.ranking.data_time))
     team_rank.setdefault("source_id", self.cache.id)
     team_rank.setdefault("mmr", 1000)
     team_rank.setdefault("points", 100.0)
     team_rank.setdefault("wins", 10)
     team_rank.setdefault("losses", 10)
     team_rank.setdefault("race0", Race.ZERG)
     team_rank.setdefault("race1", Race.UNKNOWN)
     team_rank.setdefault("race2", Race.UNKNOWN)
     team_rank.setdefault("race3", Race.UNKNOWN)
     team_rank.setdefault("ladder_rank", 1)
     team_rank.setdefault("ladder_count", 1)
     team_rank.setdefault("league_rank", 1)
     team_rank.setdefault("league_count", 1)
     team_rank.setdefault("region_rank", 1)
     team_rank.setdefault("region_count", 1)
     team_rank.setdefault("world_rank", 1)
     team_rank.setdefault("world_count", 1)
Example #4
    def run(self, args, logger):

        cpp = sc2.RankingData(get_db_name(), Enums.INFO)

        for ranking in Ranking.objects.filter(season_id__gte=28):
            cpp.load(ranking.id)
            cpp.save_data(ranking.id, ranking.season_id, to_unix(utcnow()))
            cpp.save_stats(ranking.id, to_unix(utcnow()))

        return 0
Example #5
def gen_api_ladder(members=None,
                   team_size=1,
                   url='http://fake-url',
                   gd=True,
                   **kwargs):
    """ Generate api ladder object from members and other data. Can generalte legacy data or gamedata version. """
    if members is None:
        members = [gen_member(**kwargs)]

    if gd:
        return ApiLadder(gen_ladder_data(members, team_size=team_size), url)
    else:

        # Skipping races for now, since it is not needed for current set of tests.
        # races["favoriteRaceP%d" % (i + 1)] = Race.key_by_ids[m['race']].upper()

        return ApiLadder(
            {
                'ladderMembers': [{
                    "points": m['points'],
                    "previousRank": 0,
                    "wins": m['wins'],
                    "losses": m['losses'],
                    "highestRank": 0,
                    "joinTimestamp": to_unix(m['join_time']),
                    "character": {
                        "realm": m['realm'],
                        "profilePath": None,
                        "clanName": m['clan'],
                        "id": m['bid'],
                        "clanTag": m['tag'],
                        "displayName": m['name'],
                    }
                } for m in members]
            }, url)
Example #6
def _get_season_list():
    return json.dumps([{
        "id": season.id,
        "number": season.number,
        "year": season.year,
        "start": to_unix(season.start_time()),
        "end": to_unix(season.end_time()) if season.end_date else to_unix(utcnow()),
        "color": "#ff6666" if season.id % 2 == 0 else "#6666ff",
    } for season in Season.objects.filter(
        id__gt=14, start_date__isnull=False).order_by('id')])
Example #7
    def process_ladder(self, load=False, save=False, region=Region.EU, fetch_time=None,
                       mode=Mode.TEAM_1V1, version=Version.HOTS, league=League.GOLD, season=None, tier=0,
                       members=None, **kwargs):
        """ Update a ranking building single member with kwargs or use members if set. """

        season = season or self.db.season
        fetch_time = fetch_time or utcnow()
        members = members or [gen_member(**kwargs)]

        if not getattr(self, 'cpp', None):
            self.cpp = sc2.RankingData(self.db.db_name, Enums.INFO)

        if load:
            self.load()

        self.cpp.update_with_ladder(0,  # bid
                                    0,  # source_id
                                    region,
                                    mode,
                                    league,
                                    tier,
                                    version,
                                    season.id,
                                    to_unix(fetch_time),
                                    fetch_time.date().isoformat(),
                                    Mode.team_size(mode),
                                    members)
        if save:
            self.save_to_ranking()
Example #8
 def mock_current_season(self, status=200, season_id=None, start_time=None, fetch_time=None):
     self.bnet.fetch_current_season = \
         Mock(return_value=SeasonResponse(status,
                                          ApiSeason({'seasonId': season_id or self.db.season.id,
                                                     'startDate': to_unix(start_time or utcnow())},
                                                    'http://fake-url'),
                                          fetch_time or utcnow(), 0))
Example #9
 def update_ranking_stats(self, ranking_id=None):
     """ Will build ranking stats based of the ranking by calling c++. """
     if ranking_id is None: ranking_id = self.ranking.id
     cpp = sc2.RankingData(self.db_name, Enums.INFO)
     cpp.load(ranking_id)
     cpp.save_stats(ranking_id, to_unix(utcnow()))
     cpp.release()
Example #10
 def update_age(data):
     """ Update age of ladder data to present it as correctly as possible (to not be cached in outer layers). """
     now = to_unix(utcnow())
     for t in data['teams']:
         delta = now - int(t["data_time"])
         if delta < 3600:
             t["age"] = "%dm" % max((delta + 60) // 60, 1)
         else:
             t["age"] = "%dh" % (delta // 3600)
Example #11
def gen_member(**kwargs):
    """ Generate member as returned from ApiLadder and used by process_ladder. """
    return merge_args(
        {
            "bid": randint(1, 1e6),
            "realm": 0,
            "name": uniqueid(length=12),
            "clan": uniqueid(length=32),
            "tag": uniqueid(length=6),
            "join_time": int(to_unix(utcnow())),
            "points": float(randint(0, 2000)),
            "wins": randint(0, 200),
            "mmr": randint(1000, 5000),
            "losses": randint(0, 200),
            "race": choice([Race.ZERG, Race.PROTOSS, Race.TERRAN, Race.RANDOM])
        }, **kwargs)
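
A usage sketch (assuming merge_args lets keyword arguments override the generated defaults, as in the examples above): pin specific fields and let the rest be randomized.

member = gen_member(bid=1234, race=Race.ZERG)   # hypothetical overrides
assert member['bid'] == 1234
assert member['race'] == Race.ZERG
assert 0 <= member['wins'] <= 200               # the rest stays randomly generated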
Example #12
def update_ladder_cache(cpp, ranking, ladder, status, api_ladder, fetch_time):
    """
    Update the cache and link/unlink. The fetch is done for a specific ranking and ladder, both provided. Since this
    is a refetch of an already GOOD ladder (or one becoming GOOD), only 200 responses are allowed. A transaction
    should span this call so it can be aborted.
    """

    try:
        lc = ranking.sources.get(region=ladder.region, bid=ladder.bid, type=Cache.LADDER)

    except Cache.DoesNotExist:
        lc = None

    lc = lc or Cache(region=ladder.region,
                     bid=ladder.bid,
                     type=Cache.LADDER,
                     created=fetch_time)
    lc.ranking = ranking
    lc.data = api_ladder.to_text()
    lc.url = api_ladder.url
    lc.updated = fetch_time
    lc.status = status
    lc.save()

    ladder.updated = fetch_time
    ladder.last_join = api_ladder.last_join()
    ladder.max_points = api_ladder.max_points()
    ladder.member_count = api_ladder.member_count()
    ladder.strangeness = Ladder.GOOD
    ladder.save()

    team_size = Mode.team_size(ladder.mode)
    return cpp.update_with_ladder(ladder.id,
                                  lc.id,
                                  ladder.region,
                                  ladder.mode,
                                  ladder.league,
                                  ladder.tier,
                                  ladder.version,
                                  ladder.season_id,
                                  to_unix(lc.updated),
                                  lc.updated.date().isoformat(),
                                  team_size,
                                  api_ladder.members_for_ranking(team_size))
Example #13
    def create_ranking_data(self, raw=True, **kwargs):
        kwargs = merge_args(dict(ranking=self.ranking, updated=utcnow()),
                            kwargs)
        data = kwargs.pop('data', [])
        ranking = kwargs['ranking']

        for team_rank in data:
            self._default_team_rank(team_rank)
            ranking.sources.add(self.get(Cache, pk=team_rank['source_id']))

        self.ranking_data = RankingData.objects.create(**kwargs)
        sc2.save_ranking_data_raw(self.db_name, ranking.id, 0, data, True)
        if not raw:
            cpp = sc2.RankingData(self.db_name, Enums.INFO)
            cpp.load(ranking.id)
            cpp.save_data(ranking.id, ranking.season_id, to_unix(utcnow()))
            cpp.release()

        return self.ranking_data
Example #14
    def archive_unused_caches(self, check_stop=lambda: None):
        """ One off archive of cache data that is no lunger used or updated save each all data in a data dir file. """
    
        with transaction.atomic():
            filename = "%s/archive-ladder-caches-%d.gz" % (self.dir, to_unix(utcnow()))

            logger.info(f"archiving unused caches to {filename}")
            
            with gzip.open(filename, mode='wb') as file:
                def move_to_file(caches):
                    self.write_caches(file, caches, check_stop)
                    caches.delete()

                move_to_file(Cache.objects.filter(type=Cache.PLAYER_LADDERS))
                move_to_file(Cache.objects.filter(ladder__season_id__lt=LAST_AVAILABLE_SEASON, ranking__isnull=True,
                                                  type=Cache.LADDER))
                move_to_file(Cache.objects.filter(ladder__isnull=True, ranking__isnull=True, type=Cache.LADDER))
            
            return filename
Example #15
def purge_player_data(check_stop=lambda: None):
    rankings = list(
        Ranking.objects.filter(
            status__in=(Ranking.COMPLETE_WITH_DATA,
                        Ranking.COMPLETE_WITOUT_DATA)).order_by('id'))

    cpp_purger = sc2.Purger(get_db_name())

    for ranking in rankings[:-1]:
        if check_stop():
            return

        logger.info(f"Purge teams from ranking {ranking.id}.")

        cpp_purger.purge_removed_teams_from_ranking(
            ranking.id, to_unix(utcnow()),
            api_data_purge_date().isoformat())

    # Maybe it would be better to null here instead of delete; we will see what happens.
    Team.all_objects.filter(last_seen__lt=api_data_purge_date()).delete()
    Player.all_objects.filter(last_seen__lt=api_data_purge_date()).delete()
Example #16
def refetch_past_season(season, now, check_stop, bnet_client):
    """ Refetch ladders for past seasons. """

    start = perf_counter()

    need_refetch_limit = now - timedelta(
        days=Season.REFETCH_PAST_REFRESH_WHEN_OLDER_THAN_DAYS)

    with transaction.atomic():
        ranking = Ranking.objects.filter(
            season=season).order_by('-data_time').first()
        if ranking is None:
            logger.warning(
                f"season {season.id} has no ranking to check refetch past for, this is strange, skipping"
            )
            return

        # Prevent refetch of SEA since the SEA api endpoints are no longer present; the data will be purged eventually.
        ladders_query = Ladder.objects.filter(
            season=season, strangeness=Ladder.GOOD).exclude(region=Region.SEA)

        last_updated = ladders_query.aggregate(Min('updated'))['updated__min']

        if need_refetch_limit < last_updated:
            logger.info(
                f"skipping refetch of season {season.id}, it was refetched {last_updated.date()}"
            )
            return

        ladders = list(ladders_query.filter(updated__lt=need_refetch_limit))

        ladders_count = ladders_query.count()

        logger.info(
            f"{len(ladders)} (of {ladders_count}) to refetch for season {season.id}"
        )

    # if not ladders:
    #     return

    # This is kind of bad, but since the c++ code works in its own db connection we can't fetch ladders and update the
    # ranking in the same transaction, which in turn means that if the code fails here the ranking needs to be repaired.
    # TODO Move updating of cache to cpp? How is this done in update?

    cpp = sc2.RankingData(get_db_name(), Enums.INFO)
    cpp.load(ranking.id)

    fetch_time = 0

    try:
        for i, ladder in enumerate(ladders, start=1):
            check_stop()

            with transaction.atomic(), LogContext(
                    region=Region.key_by_ids[ladder.region]):

                status, api_ladder, fetch_time, fetch_duration = \
                    bnet_client.fetch_ladder(ladder.region, ladder.bid, timeout=20)

                logger.info("fetched %s got %d in %.2fs, %s (%d/%d)" %
                            (api_ladder.url, status, fetch_duration,
                             ladder.info(), i, len(ladders)))

                if status == 503:
                    logger.warning(
                        "got 503, skipping refetch past for rest of this season"
                    )
                    raise SystemExit()

                if status != 200:
                    logger.info("refetching %d returned %d, skipping ladder" %
                                (ladder.id, status))
                    continue

                update_ladder_cache(cpp, ranking, ladder, status, api_ladder,
                                    fetch_time)

                logger.info(
                    "saved updated ladder %d and added data to ranking %d" %
                    (ladder.id, ranking.id))

    except SystemExit:
        pass

    except Exception as e:
        raise Exception(
            "failure while refetching past, you will need to repair ranking %d:"
            % ranking.id) from e

    if fetch_time:
        logger.info("saving ranking data and ranking stats for ranking %d" %
                    ranking.id)
        cpp.save_data(ranking.id, ranking.season_id, to_unix(utcnow()))
        cpp.save_stats(ranking.id, to_unix(utcnow()))
        ranking.set_data_time(season, cpp)
        ranking.save()
    else:
        logger.info(
            "skipping save of ranking data and ranking stats for ranking %d, nothing changed"
            % ranking.id)

    logger.info(
        f"completed refetch of season {season.id} in {human_i_split(int(perf_counter() - start))} seconds"
    )
Example #17
 def save_to_ranking(self):
     self.cpp.save_data(self.db.ranking.id, self.db.ranking.season_id,
                        to_unix(utcnow()))
Example #18
 def unix_time(self, **kwargs):
     return to_unix(self.now + timedelta(**kwargs))
Example #19
def refetch_past_season(season, now, check_stop, bnet_client):
    """ Refetch ladders for past seasons. """

    with transaction.atomic():
        ranking = Ranking.objects.filter(
            season=season).order_by('-data_time').first()
        if ranking is None:
            logger.warning(
                "season %d has no ranking to check refetch past for, this is strange, skipping"
                % season.id)
            return

        too_old_limit = now - timedelta(
            days=Season.REFETCH_PAST_DAYS_AGE_LIMIT)

        need_refetch_limit = season.end_time() + timedelta(
            days=Season.REFETCH_PAST_UNTIL_DAYS_AFTER_SEASON_END)

        ladders = list(
            Ladder.objects.filter(season=season,
                                  strangeness=Ladder.GOOD,
                                  updated__lt=need_refetch_limit,
                                  updated__gt=too_old_limit))

        logger.info("%d ladders to refetch for season %d" %
                    (len(ladders), season.id))

    if not ladders:
        return

    # This is kind of bad, but since the c++ code works in its own db connection we can't fetch ladders and update the
    # ranking in the same transaction, which in turn means that if the code fails here the ranking needs to be repaired.

    cpp = sc2.RankingData(get_db_name(), Enums.INFO)
    cpp.load(ranking.id)

    fetch_time = 0

    try:
        for i, ladder in enumerate(ladders, start=1):
            check_stop()

            with transaction.atomic(), LogContext(
                    region=Region.key_by_ids[ladder.region]):

                status, api_ladder, fetch_time, fetch_duration = \
                    bnet_client.fetch_ladder(ladder.region, ladder.bid, timeout=20)

                logger.info("fetched %s got %d in %.2fs, %s (%d/%d)" %
                            (api_ladder.url, status, fetch_duration,
                             ladder.info(), i, len(ladders)))

                if status == 503:
                    logger.warning(
                        "got 503, skipping refetch past for rest of this season"
                    )
                    raise SystemExit()

                if status != 200:
                    logger.info("refetching %d returned %d, skipping" %
                                (ladder.id, status))
                    continue

                update_ladder_cache(cpp, ranking, ladder, status, api_ladder,
                                    fetch_time)

                logger.info(
                    "saved updated ladder %d and added data to ranking %d" %
                    (ladder.id, ranking.id))

    except SystemExit:
        pass

    except Exception as e:
        raise Exception(
            "failure while refetching past, you will need to repair ranking %d:"
            % ranking.id) from e

    if fetch_time:
        logger.info("saving ranking data and ranking stats for ranking %d" %
                    ranking.id)
        cpp.save_data(ranking.id, ranking.season_id, to_unix(utcnow()))
        cpp.save_stats(ranking.id, to_unix(utcnow()))
        ranking.data_time = min(season.end_time(), fetch_time)
        ranking.save()
Example #20
def countinously_update(regions=None,
                        check_stop=None,
                        update_manager=None,
                        switch_hour=10):

    update_manager = update_manager or UpdateManager()

    ranking = Ranking.objects.order_by('-id').first()

    if ranking.status != Ranking.COMPLETE_WITH_DATA:
        raise Exception("ranking %d is not in a good state, clean up" %
                        ranking.id)

    season = ranking.season

    cpp = sc2.RankingData(get_db_name(), Enums.INFO)

    while not check_stop(throw=False):

        # Check if we want to switch to a new season.

        current_season = Season.get_current_season()
        if current_season.id != season.id:

            if current_season.id != season.get_next().id:
                raise Exception(
                    "something is really wrong, current season is not next")

            if Ladder.objects.filter(season=current_season,
                                     strangeness=Ladder.GOOD).count() > 8:
                season = current_season
                logger.info(
                    "switching to rank new season %d multiple new season ladders was detected"
                    % season.id)

        # Do we want to create a new ranking? We want to switch around switch_hour UTC every day, but not if the
        # ranking is too young. If it is too old, switch anyway.
        now = utcnow()

        if season.id != ranking.season_id:
            # Create new ranking based on new season.
            ranking = Ranking.objects.create(season=season,
                                             created=now,
                                             data_time=season.start_time(),
                                             min_data_time=season.start_time(),
                                             max_data_time=season.start_time(),
                                             status=Ranking.CREATED)
            logger.info("created new ranking %d based on new season %d" %
                        (ranking.id, season.id))

            cpp.clear_team_ranks()
            cpp.reconnect_db()
            update_manager.save_ranking(cpp, ranking, 0)

        elif ((ranking.created + timedelta(hours=48) < now or
               (ranking.created + timedelta(hours=12) < now
                and now.hour == switch_hour))
              and not ranking.season.near_start(now, days=4)):
            # Create a new ranking within the season.

            cpp.clear_team_ranks()
            cpp.reconnect_db()

            with transaction.atomic():
                new_ranking = Ranking.objects.create(
                    season=season,
                    created=now,
                    data_time=ranking.data_time,
                    min_data_time=ranking.min_data_time,
                    max_data_time=ranking.max_data_time,
                    status=Ranking.CREATED)
                # Copy all caches of the old ranking to the new ranking. Also remake the full ranking while doing so
                # to get rid of teams that have left the league.

                logger.info(
                    "created new ranking %d basing it on copy of ranking %d, seaons %d"
                    % (new_ranking.id, ranking.id, season.id))

                count = ranking.sources.count()
                logger.info(
                    "copying %d cached ladders from ranking %d to ranking %d and adding them to ranking"
                    % (count, ranking.id, new_ranking.id))

                for i, lc in enumerate(ranking.sources.all(), start=1):
                    lc.pk = None
                    lc.created = utcnow()
                    lc.ladder = None
                    lc.ranking = ranking
                    lc.save()

                    new_ranking.sources.add(lc)

                    ladder = Ladder.objects.get(region=lc.region, bid=lc.bid)
                    team_size = Mode.team_size(ladder.mode)
                    stats = cpp.update_with_ladder(
                        ladder.id, lc.id, ladder.region, ladder.mode,
                        ladder.league, ladder.tier, ladder.version,
                        ladder.season_id, to_unix(lc.updated), team_size,
                        ApiLadder(lc.data,
                                  lc.url).members_for_ranking(team_size))

                    if i % 100 == 0:
                        logger.info(
                            "copied and added cache %d/%d, player cache size %d, team cache size %d"
                            % (i, count, stats['player_cache_size'],
                               stats['team_cache_size']))

            ranking = new_ranking
            update_manager.save_ranking(cpp, ranking, 0)
        else:
            logger.info("continuing with ranking %d, season %d" %
                        (ranking.id, season.id))
            cpp.reconnect_db()
            cpp.load(ranking.id)

        now = utcnow()
        until = now.replace(hour=switch_hour, minute=0, second=0)
        if until < now:
            until += timedelta(hours=24)

        update_manager.update_until(ranking=ranking,
                                    cpp=cpp,
                                    regions=regions,
                                    until=until,
                                    check_stop=check_stop)
Example #21
    def run(self, args, logger):
        logger.info(
            "NOTE: fetching needs to be turned off if repairing latest rank")

        ranking = Ranking.objects.get(pk=args.ranking_id)

        if ranking.status not in [Ranking.CREATED, Ranking.COMPLETE_WITH_DATA]:
            raise Exception("ranking with status %s can not be repaired" %
                            ranking.status)

        # If last in season use all available ladders, not only those connected to ranking.
        last_in_season = Ranking.objects.filter(
            season=ranking.season).order_by('-id').first()
        if last_in_season == ranking:
            cursor = connection.cursor()
            cursor.execute(
                "SELECT id FROM ("
                "  SELECT DISTINCT ON (c.bid, c.region) c.id, c.updated FROM cache c JOIN ladder l"
                "    ON c.bid = l.bid AND c.region = l.region"
                "    WHERE l.strangeness = %s AND l.season_id = %s"
                "    ORDER BY c.bid, c.region, c.updated DESC) s"
                " ORDER by updated", [Ladder.GOOD, ranking.season_id])
            cache_ids = [row[0] for row in cursor.fetchall()]
            cursor.execute(
                "UPDATE cache SET ranking_id = NULL WHERE ranking_id = %s",
                [ranking.id])
        else:
            cache_ids = [
                c['id']
                for c in ranking.sources.values('id').order_by('updated')
            ]

        cpp = sc2.RankingData(get_db_name(), Enums.INFO)

        count = len(cache_ids)
        for i, id_ in enumerate(cache_ids, start=1):
            cache = Cache.objects.get(id=id_)
            self.check_stop()
            try:
                ladder = Ladder.objects.get(season=ranking.season,
                                            region=cache.region,
                                            bid=cache.bid)
            except Ladder.DoesNotExist:
                raise Exception(
                    "ladder region %s, bid %s missing in ladder table" %
                    (cache.region, cache.bid))

            if cache.ranking is None and cache.ladder is None:
                cache.ranking = ranking
                cache.save()
            elif cache.ranking != ranking:
                logger.info("cache %s was not included in ranking copying" %
                            cache.id)
                cache.id = None
                cache.ladder = None
                cache.ranking = ranking
                cache.save()

            logger.info("adding cache %s, ladder %s, %d/%d" %
                        (cache.id, ladder.id, i, count))

            team_size = Mode.team_size(ladder.mode)
            cpp.update_with_ladder(
                ladder.id, cache.id, ladder.region, ladder.mode, ladder.league,
                ladder.tier, ladder.version, ladder.season_id,
                to_unix(cache.updated), team_size,
                ApiLadder(cache.data).members_for_ranking(team_size))

        ranking.set_data_time(ranking.season, cpp)
        ranking.save()
        self.check_stop()
        cpp.save_data(ranking.id, ranking.season_id, to_unix(utcnow()))
        self.check_stop()
        cpp.save_stats(ranking.id, to_unix(utcnow()))

        return 0