def refetch_past_season(season, now, check_stop, bnet_client):
    """ Refetch ladders for past seasons. """

    with transaction.atomic():
        ranking = Ranking.objects.filter(season=season).order_by('-data_time').first()
        if ranking is None:
            logger.warning("season %d has no ranking to check refetch past for, this is strange, skipping" %
                           season.id)
            return

        too_old_limit = now - timedelta(days=Season.REFETCH_PAST_DAYS_AGE_LIMIT)
        need_refetch_limit = season.end_time() + timedelta(days=Season.REFETCH_PAST_UNTIL_DAYS_AFTER_SEASON_END)

        ladders = list(Ladder.objects.filter(season=season,
                                             strangeness=Ladder.GOOD,
                                             updated__lt=need_refetch_limit,
                                             updated__gt=too_old_limit))

    logger.info("%d ladders to refetch for season %d" % (len(ladders), season.id))

    if not ladders:
        return

    # This is kind of bad, but since the c++ code works in its own db connection we can't fetch ladders and
    # update the ranking in the same transaction, which in turn means that if the code fails here the ranking
    # needs to be repaired.
    cpp = sc2.RankingData(get_db_name(), Enums.INFO)
    cpp.load(ranking.id)

    fetch_time = 0  # Stays 0 if nothing was fetched, used to skip the final save.
    try:
        for i, ladder in enumerate(ladders, start=1):
            check_stop()

            with transaction.atomic(), LogContext(region=Region.key_by_ids[ladder.region]):
                status, api_ladder, fetch_time, fetch_duration = \
                    bnet_client.fetch_ladder(ladder.region, ladder.bid, timeout=20)

                logger.info("fetched %s got %d in %.2fs, %s (%d/%d)" %
                            (api_ladder.url, status, fetch_duration, ladder.info(), i, len(ladders)))

                if status == 503:
                    logger.warning("got 503, skipping refetch past for rest of this season")
                    # Break out of the loop but still save what was fetched so far.
                    raise SystemExit()

                if status != 200:
                    logger.info("refetching %d returned %d, skipping" % (ladder.id, status))
                    continue

                update_ladder_cache(cpp, ranking, ladder, status, api_ladder, fetch_time)

                logger.info("saved updated ladder %d and added data to ranking %d" % (ladder.id, ranking.id))

    except SystemExit:
        pass
    except Exception as e:
        raise Exception("failure while refetching past, you will need to repair ranking %d:" % ranking.id) from e

    if fetch_time:
        logger.info("saving ranking data and ranking stats for ranking %d" % ranking.id)
        cpp.save_data(ranking.id, ranking.season_id, to_unix(utcnow()))
        cpp.save_stats(ranking.id, to_unix(utcnow()))
        ranking.data_time = min(season.end_time(), fetch_time)
        ranking.save()
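# The selection above only refetches ladders whose `updated` time falls strictly between too_old_limit and
# need_refetch_limit. A minimal standalone sketch of that window check, with made-up values standing in for
# the Season.REFETCH_PAST_* constants (the real values live on the Season model and are not shown here):

from datetime import datetime, timedelta, timezone

REFETCH_PAST_DAYS_AGE_LIMIT = 30                # hypothetical value
REFETCH_PAST_UNTIL_DAYS_AFTER_SEASON_END = 40   # hypothetical value

def in_refetch_window(updated, season_end, now):
    """ True if a ladder last updated at <updated> is eligible for refetch for a season ending at <season_end>. """
    too_old_limit = now - timedelta(days=REFETCH_PAST_DAYS_AGE_LIMIT)
    need_refetch_limit = season_end + timedelta(days=REFETCH_PAST_UNTIL_DAYS_AFTER_SEASON_END)
    return too_old_limit < updated < need_refetch_limit

now = datetime(2017, 6, 1, tzinfo=timezone.utc)
season_end = datetime(2017, 5, 1, tzinfo=timezone.utc)
print(in_refetch_window(datetime(2017, 5, 20, tzinfo=timezone.utc), season_end, now))  # True
print(in_refetch_window(datetime(2017, 4, 20, tzinfo=timezone.utc), season_end, now))  # False, older than age limit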
def update_until(self, ranking=None, cpp=None, regions=None, until=None, check_stop=None,
                 fetch_manager=None, bnet_client=None):
    """ Update until the time <until> (utc) has passed; code outside will decide if season and/or ranking
    switching should be done. """

    bnet_client = bnet_client or BnetClient()
    fetch_manager = fetch_manager or FetchManager(ranking, regions, bnet_client)
    try:
        logger.info("updating season %d, ranking %d, regions %s, until %s" %
                    (ranking.season_id, ranking.id, regions, until))

        last_save = utcnow()
        last_season_check = utcnow()

        # Start these far in the past so the first loop iterations trigger fetches immediately.
        last_gm = utcnow(days=-20)
        last_plat = utcnow(days=-20)
        last_rest = utcnow(days=-20)

        while not check_stop(throw=False):
            now = utcnow()

            if now > until:
                logger.info("we reached until time %s, pausing to switch season/ranking" % until)
                break

            if now - last_season_check > timedelta(minutes=30):
                last_season_check = now
                if ranking.season_id != Season.get_current_season().id:
                    logger.info("current season %d is closed, pausing to give chance for season switch" %
                                ranking.season_id)
                    break

            if now - last_save > timedelta(seconds=60):
                self.save_ranking(cpp, ranking, len(fetch_manager.fetched_queue))
                last_save = utcnow()  # This can take a long time, so get a new now again.

            if now - last_gm > timedelta(seconds=60):
                last_gm = now
                fetch_manager.start_gm()

            if now - last_plat > timedelta(minutes=10):
                last_plat = now
                fetch_manager.start_plat()

            if now - last_rest > timedelta(minutes=60):
                last_rest = now
                fetch_manager.start_rest()

            try:
                ladder, status, api_ladder, fetch_time = fetch_manager.pop()
                with transaction.atomic():
                    stats = update_ladder_cache(cpp, ranking, ladder, status, api_ladder, fetch_time)

                    with LogContext(region=ladder.region):
                        logger.info("saved updated ladder %d and added data to ranking %d, "
                                    "updated %d players %d teams, inserted %d players %d teams, "
                                    "cache sizes %d players %d teams" %
                                    (ladder.id,
                                     ranking.id,
                                     stats["updated_player_count"],
                                     stats["updated_team_count"],
                                     stats["inserted_player_count"],
                                     stats["inserted_team_count"],
                                     stats["player_cache_size"],
                                     stats["team_cache_size"]))
            except IndexError:
                # Nothing fetched yet, wait a little before polling the fetch manager again.
                sleep(0.04)

        logger.info("stopped fetching, saving")
        fetch_manager.stop()
        fetch_manager.join()
        self.save_ranking(cpp, ranking, len(fetch_manager.fetched_queue))
    except Exception:
        fetch_manager.stop()
        raise

    cpp.release()
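# The docstring of update_until leaves season and ranking switching to the caller. A minimal sketch of such
# an outer driver, run in hour-long slices. switch_season_or_ranking_if_needed is a hypothetical helper (not
# part of the original code), and the cpp load simply mirrors how the ranking cache is loaded elsewhere here:

def run_update_loop(updater, ranking, regions, check_stop):
    while not check_stop(throw=False):
        # Load the c++ ranking cache for the current ranking.
        cpp = sc2.RankingData(get_db_name(), Enums.INFO)
        cpp.load(ranking.id)

        # Update for at most one hour, then return here to re-evaluate; update_until releases cpp itself.
        updater.update_until(ranking=ranking, cpp=cpp, regions=regions,
                             until=utcnow() + timedelta(hours=1), check_stop=check_stop)

        # Hypothetical hook: switch to a new ranking and/or season if the current one closed.
        ranking = switch_season_or_ranking_if_needed(ranking)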
def refetch_past_season(season, now, check_stop, bnet_client):
    """ Refetch ladders for past seasons. """

    start = perf_counter()

    need_refetch_limit = now - timedelta(days=Season.REFETCH_PAST_REFRESH_WHEN_OLDER_THAN_DAYS)

    with transaction.atomic():
        ranking = Ranking.objects.filter(season=season).order_by('-data_time').first()
        if ranking is None:
            logger.warning(f"season {season.id} has no ranking to check refetch past for, this is strange, skipping")
            return

        # Prevent refetch of sea since the sea api endpoints are no longer present, the data will be purged
        # eventually.
        ladders_query = Ladder.objects.filter(season=season, strangeness=Ladder.GOOD).exclude(region=Region.SEA)

        last_updated = ladders_query.aggregate(Min('updated'))['updated__min']
        if need_refetch_limit < last_updated:
            logger.info(f"skipping refetch of season {season.id}, it was refetched {last_updated.date()}")
            return

        ladders = list(ladders_query.filter(updated__lt=need_refetch_limit))
        ladders_count = ladders_query.count()

    logger.info(f"{len(ladders)} (of {ladders_count}) to refetch for season {season.id}")

    # if not ladders:
    #     return

    # This is kind of bad, but since the c++ code works in its own db connection we can't fetch ladders and
    # update the ranking in the same transaction, which in turn means that if the code fails here the ranking
    # needs to be repaired.
    # TODO Move updating of cache to cpp? How is this done in update?
    cpp = sc2.RankingData(get_db_name(), Enums.INFO)
    cpp.load(ranking.id)

    fetch_time = 0  # Stays 0 if nothing was fetched, used to skip the final save.
    try:
        for i, ladder in enumerate(ladders, start=1):
            check_stop()

            with transaction.atomic(), LogContext(region=Region.key_by_ids[ladder.region]):
                status, api_ladder, fetch_time, fetch_duration = \
                    bnet_client.fetch_ladder(ladder.region, ladder.bid, timeout=20)

                logger.info("fetched %s got %d in %.2fs, %s (%d/%d)" %
                            (api_ladder.url, status, fetch_duration, ladder.info(), i, len(ladders)))

                if status == 503:
                    logger.warning("got 503, skipping refetch past for rest of this season")
                    # Break out of the loop but still save what was fetched so far.
                    raise SystemExit()

                if status != 200:
                    logger.info("refetching %d returned %d, skipping ladder" % (ladder.id, status))
                    continue

                update_ladder_cache(cpp, ranking, ladder, status, api_ladder, fetch_time)

                logger.info("saved updated ladder %d and added data to ranking %d" % (ladder.id, ranking.id))

    except SystemExit:
        pass
    except Exception as e:
        raise Exception("failure while refetching past, you will need to repair ranking %d:" % ranking.id) from e

    if fetch_time:
        logger.info("saving ranking data and ranking stats for ranking %d" % ranking.id)
        cpp.save_data(ranking.id, ranking.season_id, to_unix(utcnow()))
        cpp.save_stats(ranking.id, to_unix(utcnow()))
        ranking.set_data_time(season, cpp)
        ranking.save()
    else:
        logger.info("skipping save of ranking data and ranking stats for ranking %d, nothing changed" % ranking.id)

    logger.info(f"completed refetch of season {season.id} in {human_i_split(int(perf_counter() - start))} seconds")
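# A minimal sketch of how the newer refetch_past_season could be driven for every closed season. The Season
# query below and the default BnetClient construction are assumptions for illustration, not taken from the
# original module; refetch_past_season itself skips seasons whose ladders were all refreshed recently via the
# Min('updated') check above.

def refetch_all_past_seasons(check_stop, bnet_client=None):
    bnet_client = bnet_client or BnetClient()
    now = utcnow()
    current_id = Season.get_current_season().id
    for season in Season.objects.filter(id__lt=current_id).order_by('-id'):
        check_stop()
        refetch_past_season(season, now, check_stop, bnet_client)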