def load_unlocked_problems(tid):
    """Gets the list of all unlocked problems for a team.

    First check for 'unlocked_<tid>' in the cache, if it exists return it
    otherwise rebuild the unlocked list. Query all problems from the database
    as well as all submissions from the current team. Cycle over all problems
    while looking at their weightmap, check to see if problems in the
    weightmap are solved. Increment the threshold counter for solved
    weightmap problems. If the threshold counter is higher than the problem
    threshold then add the problem to the return list (ret).
    """
    # Cached value is a JSON string keyed per team.
    unlocked = cache.get('unlocked_' + tid)  # Get the teams list of unlocked problems from the cache
    if unlocked is not None:  # Return this if it is not empty in the cache
        return json.loads(unlocked)
    unlocked = []
    team = db.teams.find_one({'tid': tid})
    # Lazily backfill 'probinstance' for team records created before
    # auto-generated problem instances existed.
    if 'probinstance' not in team.keys():
        db.teams.update({'tid': tid}, {'$set': {'probinstance': {}}})
        team['probinstance'] = dict()
    # Set of pids this team has answered correctly.
    correctPIDs = {p['pid'] for p in list(db.submissions.find({"tid": tid, "correct": True}))}
    for p in list(db.problems.find()):
        # A problem is unlocked when it has no weightmap/threshold at all, or
        # when the summed weights of its solved prerequisites reach the threshold.
        if 'weightmap' not in p or 'threshold' not in p or sum([p['weightmap'][pid] for pid in correctPIDs if pid in p['weightmap']]) >= p['threshold']:
            unlocked.append({'pid': p['pid'],
                             'displayname': p.get('displayname', None),
                             'hint': p.get('hint', None),
                             'basescore': p.get('basescore', None),
                             'correct': True if p['pid'] in correctPIDs else False,
                             # Autogenerated problems read their description from the
                             # per-team instance, building one on demand when missing.
                             'desc': p.get('desc') if not p.get('autogen', False) else team['probinstance'][p['pid']].get('desc', None) if p['pid'] in team.get('probinstance', dict()) else build_problem_instance(p, tid)})
    # Cheapest problems first; entries without a basescore sort last.
    unlocked.sort(key=lambda k: k['basescore'] if 'basescore' in k else 99999)
    cache.set('unlocked_' + tid, json.dumps(unlocked), 60 * 60)
    return unlocked
def run(self):
    """Thread entry point: run the simulation, streaming intermediate
    results into the cache under this thread's uuid.

    Registers itself in ``process_pool`` for the duration of the run and
    always removes that entry on exit.
    """
    from common import cache
    process_pool[self.ident] = self
    self.last_results = None
    print('%s: run process' % self.uuid)

    def step_callback(df, force=False):
        # Publish intermediate results at most once per 0.5 s unless forced.
        # Returning False tells the simulation to abort (kill flag was set).
        now = time.time()
        if force or self.last_results is None or now - self.last_results > 0.5:
            cache.set('thread-%s-results' % self.uuid, df, timeout=30)
            print('%s: step callback' % self.uuid)
            self.last_results = now
        if cache.get('thread-%s-kill' % self.uuid):
            return False
        return True

    try:
        try:
            df = simulate_individuals(step_callback=step_callback, variable_store=self.variables)
        except ExecutionInterrupted:
            print('%s: process cancelled' % self.uuid)
        else:
            print('%s: computation finished' % self.uuid)
            step_callback(df, force=True)
            cache.set('thread-%s-finished' % self.uuid, True)
        print('%s: process finished' % self.uuid)
    finally:
        # FIX: the pool entry used to leak if simulate_individuals raised
        # anything other than ExecutionInterrupted; always clean up.
        del process_pool[self.ident]
def run_simulation_callback(n_clicks, simulation_days):
    """Dash callback: serve cached simulation results when available,
    otherwise start a background simulation thread and return a polling
    placeholder layout."""
    print('run simulation (days %d)' % simulation_days)
    print(generate_cache_key(simulate_individuals))
    set_variable('simulation_days', simulation_days)
    if n_clicks:
        # Reuse the click counter as the per-run random seed.
        set_variable('random_seed', n_clicks)

    # Fast path: a finished result may already sit in the cache.
    cached_df = simulate_individuals(only_if_in_cache=True)
    if cached_df is not None:
        return render_results(cached_df)

    if settings.RESTRICT_TO_PRESET_SCENARIOS:
        return [html.Div('Palvelussa ruuhkaa, osa simulaatiotoiminnallisuuksista on pois käytöstä')]

    # Ask any previous run owned by this session to stop.
    previous_thread = session.get('thread_id', None)
    if previous_thread:
        cache.set('thread-%s-kill' % previous_thread, True)

    sim_thread = SimulationThread(variables=session.copy())
    session['thread_id'] = sim_thread.uuid
    sim_thread.start()

    return [
        dcc.Interval(id='simulation-output-interval', interval=500, max_intervals=60),
        html.Div(id='simulation-output-results'),
    ]
def run(self):
    """Thread entry point: run the simulation, publishing results under the
    function-hash cache key so identical runs share results.

    Registers itself in ``process_pool`` and always removes the entry on
    exit, even when the simulation fails.
    """
    from common import cache
    process_pool[self.ident] = self
    func_hash = generate_cache_key(simulate_individuals, var_store=self.variables)
    self.last_results = None
    print('%s: run process (func hash %s)' % (self.uuid, func_hash))

    def step_callback(df, force=False):
        # Throttled publisher: at most one cache write per 0.5 s unless forced.
        now = time.time()
        if force or self.last_results is None or now - self.last_results > 0.5:
            print('%s: set results to %s' % (self.uuid, func_hash))
            cache.set('%s-results' % func_hash, df, timeout=30)
            self.last_results = now
        return True

    try:
        try:
            df = simulate_individuals(step_callback=step_callback, variable_store=self.variables)
        except ExecutionInterrupted:
            print('%s: process cancelled' % self.uuid)
        else:
            print('%s: computation finished' % self.uuid)
            step_callback(df, force=True)
            cache.set('%s-finished' % func_hash, True)
        print('%s: process finished' % self.uuid)
    finally:
        # FIX: previously `del process_pool[self.ident]` was skipped when
        # simulate_individuals raised an unexpected exception, leaking the
        # pool entry for a dead thread.
        del process_pool[self.ident]
def get_teams_scoreboard_cached(teams, cache_key):
    """Gets the cached scoreboard of teams.

    Kind of a hack, tells the front end to look for a static page scoreboard
    rather than sending a 2000+ length array that the front end must parse.

    On a cache miss rebuilds the scoreboard (problem list, team names, scores
    and solved-problem column indices, ordered by score descending with the
    time penalty as tie breaker) and caches it as JSON for an hour.
    """
    scoreboard = cache.get(cache_key)
    if scoreboard is None:
        scoreboard = dict()
        problems = problem.load_problems()
        problems = [{
            'pid': p['pid'],
            'displayname': p['displayname']
        } for p in problems]
        # FIX: map pid -> column index once instead of calling list.index()
        # for every solved problem of every team, which made rebuilds
        # O(teams * solved * problems).
        pid_index = {p['pid']: i for i, p in enumerate(problems)}
        team_scores = [{
            "teamname": t['teamname'],
            "score": load_team_score(t['tid']),
            "solved": [pid_index[p] for p in problem.get_solved_problems(t['tid'])]
        } for t in teams]
        # Highest score first; equal scores broken by earliest time penalty.
        team_scores.sort(key=lambda x: (-x['score']['score'], x['score']['time_penalty']))
        scoreboard['problems'] = problems
        scoreboard['teamname'] = [ts['teamname'] for ts in team_scores]
        scoreboard['score'] = [ts['score']['score'] for ts in team_scores]
        scoreboard['solved'] = [ts['solved'] for ts in team_scores]
        cache.set(cache_key, json.dumps(scoreboard), 60 * 60)
    else:
        scoreboard = json.loads(scoreboard)
    return scoreboard
def load_team_score(tid):
    """Get the score for a team.

    Looks for a cached team score; if not found, sums the basescores of the
    team's solved problems and computes the time penalty (timestamp of the
    latest correct submission inside the CTF window). Caches the result as
    JSON for an hour.
    """
    score = cache.get('teamscore_' + tid)
    if score is None:
        problems = problem.load_problems()
        # FIX: tolerate problems with no basescore (was a KeyError).
        pscore = {p['pid']: p.get('basescore', 0) for p in problems}
        solved = problem.get_solved_problems(tid)
        score = dict()
        # FIX: a solved pid may belong to a problem that has since been
        # disabled and is absent from load_problems(); score it as 0 instead
        # of raising KeyError.
        score['score'] = sum(pscore.get(pid, 0) for pid in solved)
        # TODO: calculate time penalty
        submission = list(db.submissions.find(
            {
                "tid": tid,
                "correct": True,
                "pid": {"$ne": "wait_re"},
                # FIX: the original dict literal had two "timestamp" keys, so
                # the {"$gt": ctf_start} bound was silently discarded; both
                # bounds now apply to the query.
                "timestamp": {"$gt": ctf_start, "$lt": ctf_end}
            },
            {
                "_id": 0, "pid": 1, "timestamp": 1
            }))
        # Latest correct submission inside the window; 0 when there are none.
        time_penalty = max([0] + [s['timestamp'] for s in submission])
        score['time_penalty'] = time_penalty
        cache.set('teamscore_' + tid, json.dumps(score), 60 * 60)
    else:
        score = json.loads(score)
    return score
def run_simulation_callback(n_clicks, simulation_days):
    """Dash callback: return cached simulation results if present, otherwise
    launch a background simulation thread and return a polling layout."""
    from flask import session
    from common import cache

    print('run simulation (days %d)' % simulation_days)
    set_variable('simulation_days', simulation_days)
    if n_clicks:
        set_variable('random_seed', n_clicks)

    df = simulate_individuals(only_if_in_cache=True)
    if df is not None:
        return render_results(df)

    old_thread = session.get('thread_id', None)
    if old_thread:
        # Signal the stale worker to abort before launching a fresh one.
        cache.set('thread-%s-kill' % old_thread, True)

    worker = SimulationThread(variables=session.copy())
    session['thread_id'] = worker.uuid
    worker.start()

    return [
        dcc.Interval(id='simulation-output-interval', interval=500, max_intervals=60),
        html.Div(id='simulation-output-results'),
    ]
def step_callback(df, force=False):
    """Throttled publisher for intermediate results (closure over
    self/func_hash/cache): writes to the cache at most once per 0.5 s
    unless force=True; always tells the simulation to continue."""
    now = time.time()
    last = self.last_results
    should_publish = force or last is None or now - last > 0.5
    if should_publish:
        print('%s: set results to %s' % (self.uuid, func_hash))
        cache.set('%s-results' % func_hash, df, timeout=30)
        self.last_results = now
    return True
def step_callback(total, age_groups=None, by_variant=None, force=False):
    """Throttled publisher for intermediate simulation results (closure over
    self/cache/logger): bundles the series into one dict and caches it at
    most once per 0.5 s unless force=True; always continues the run."""
    now = time.time()
    payload = dict(total=total, age_groups=age_groups, by_variant=by_variant)
    last = self.last_results
    if force or last is None or now - last > 0.5:
        logger.debug('%s: set results to %s' % (self.uuid, self.cache_key))
        cache.set('%s-results' % self.cache_key, payload, timeout=self.cache_expiration)
        self.last_results = now
    return True
def step_callback(df, force=False):
    """Throttled publisher for intermediate results (closure over
    self/cache). Returns False to abort the simulation when this thread's
    kill flag has been set in the cache."""
    now = time.time()
    stale = self.last_results is None or now - self.last_results > 0.5
    if force or stale:
        cache.set('thread-%s-results' % self.uuid, df, timeout=30)
        print('%s: step callback' % self.uuid)
        self.last_results = now
    # Truthy kill flag -> stop; otherwise keep going.
    return not cache.get('thread-%s-kill' % self.uuid)
def start(self):
    """Start the worker unless an equivalent computation is already tracked
    under this cache key, in which case do nothing."""
    logger.info('%s: start process' % self.uuid)
    if cache.get('%s-finished' % self.cache_key) is not None:
        logger.info('%s: already running in another process (%s)' % (self.uuid, self.cache_key))
        return
    # Race condition here, but it is of little consequence
    # FIXME: Probably should use SETNX instead
    cache.set('%s-error' % self.cache_key, None, self.cache_expiration)
    cache.set('%s-finished' % self.cache_key, False, timeout=self.cache_expiration)
    super().start()
def load_unlocked_problems(tid):
    """Return the list of problems this team has unlocked.

    Serves the JSON-cached copy under 'unlocked_<tid>' when present;
    otherwise rebuilds it from the problems collection and the team's
    correct submissions, caches it for an hour, and returns it.
    """
    cache_key = 'unlocked_' + tid
    cached = cache.get(cache_key)
    if cached is not None:
        return json.loads(cached)

    team = db.teams.find_one({'tid': tid})
    if 'probinstance' not in team.keys():
        # Backfill the per-team instance map for older team records.
        db.teams.update({'tid': tid}, {'$set': {'probinstance': {}}})
        team['probinstance'] = dict()

    # pids this team has answered correctly.
    solved = {s['pid'] for s in db.submissions.find({"tid": tid, "correct": True})}

    def is_unlocked(prob):
        # A problem with no prerequisites is always available; otherwise the
        # summed weights of solved prerequisites must reach the threshold.
        if 'weightmap' not in prob or 'threshold' not in prob:
            return True
        weight = sum(prob['weightmap'][pid] for pid in solved if pid in prob['weightmap'])
        return weight >= prob['threshold']

    def description(prob):
        # Autogenerated problems read their description from the per-team
        # instance, building one on demand when missing.
        if not prob.get('autogen', False):
            return prob.get('desc')
        if prob['pid'] in team.get('probinstance', dict()):
            return team['probinstance'][prob['pid']].get('desc', None)
        return build_problem_instance(prob, tid)

    unlocked = []
    for prob in db.problems.find():
        if not is_unlocked(prob):
            continue
        unlocked.append({
            'pid': prob['pid'],
            'displayname': prob.get('displayname', None),
            'hint': prob.get('hint', None),
            'basescore': prob.get('basescore', None),
            'correct': prob['pid'] in solved,
            'desc': description(prob),
        })

    # Cheapest problems first; entries without a basescore sort last.
    unlocked.sort(key=lambda entry: entry['basescore'] if 'basescore' in entry else 99999)
    cache.set(cache_key, json.dumps(unlocked), 60 * 60)
    return unlocked
def load_team_score(tid):
    """Get the score for a team.

    Returns the cached score when present; otherwise sums the basescores of
    all problems the team has correctly submitted and caches the total for
    one minute.
    """
    cached = cache.get('teamscore_' + tid)
    if cached is not None:
        return cached
    # pids of all correct submissions by this team.
    solved_pids = {sub['pid'] for sub in db.submissions.find({"tid": tid, "correct": True})}
    # ,#"timestamp": {"$lt": end}}))}
    total = 0
    for prob in db.problems.find({'pid': {"$in": list(solved_pids)}}):
        total += prob['basescore'] if 'basescore' in prob else 0
    cache.set('teamscore_' + tid, total, 1 * 60)
    return total
def load_team_score(tid):
    """Get the score for a team.

    Returns the cached score when present; otherwise sums the basescores of
    all problems the team has correctly submitted and caches the total for
    an hour.
    """
    score = cache.get('teamscore_' + tid)
    if score is not None:
        return score
    correct_subs = db.submissions.find({"tid": tid, "correct": True})
    # ,#"timestamp": {"$lt": end}}))}
    solved = {doc['pid'] for doc in correct_subs}
    matching = db.problems.find({'pid': {"$in": list(solved)}})
    # Problems without a basescore contribute nothing.
    score = sum(prob.get('basescore', 0) for prob in matching)
    cache.set('teamscore_' + tid, score, 60 * 60)
    return score
def load_news():
    """Get news to populate the news page.

    Queries the database for all news articles, loads them into a json
    document and returns them ordered by their date. Newest articles are at
    the beginning of the list to appear at the top of the news page.
    Results are cached for two minutes.
    """
    news = cache.get('news')
    if news is not None:
        return json.loads(news)
    # FIX: the conditional used to sit inside the subscript
    # (n['articlehtml' if 'articlehtml' in n else None]), so an article
    # without 'articlehtml' raised KeyError(None) instead of yielding None.
    news = sorted([{'date': str(n['date']) if 'date' in n else "2000-01-01",
                    'header': n['header'] if 'header' in n else None,
                    'articlehtml': n['articlehtml'] if 'articlehtml' in n else None}
                   for n in list(db.news.find())],
                  key=lambda k: k['date'], reverse=True)
    cache.set('news', json.dumps(news), 60 * 2)
    return news
def wrap_calc_func(*args, **kwargs):
    # Closure produced by a calcfunc decorator: memoizes parameterless calls
    # of `func` in the shared cache and injects the declared variables and
    # datasets as keyword arguments.
    pc = PerfCounter('%s.%s' % (func.__module__, func.__name__))
    pc.display('enter')
    hash_data = _get_func_hash_data(func)
    cache_key = _calculate_cache_key(hash_data)
    # Callers must not pass these; they are injected below.
    assert 'variables' not in kwargs
    assert 'datasets' not in kwargs
    # Only zero-argument invocations are cacheable — the cache key does not
    # incorporate call arguments.
    if not args and not kwargs:
        should_cache_func = True
    else:
        should_cache_func = False
        print('not caching func %s.%s' % (func.__module__, func.__name__))
    if should_cache_func:
        ret = cache.get(cache_key)
        if ret is not None:  # calcfuncs must not return None
            pc.display('cache hit')
            return ret
    if variables is not None:
        # Resolve each declared variable name to its current value.
        kwargs['variables'] = {x: get_variable(y) for x, y in variables.items()}
    if datasets is not None:
        # Load only the datasets missing from the process-level cache.
        datasets_to_load = set(list(datasets.values())) - set(_dataset_cache.keys())
        if datasets_to_load:
            loaded_datasets = []
            for dataset_name in datasets_to_load:
                ds_pc = PerfCounter('dataset %s' % dataset_name)
                df = load_datasets(dataset_name)
                ds_pc.display('loaded')
                loaded_datasets.append(df)
                del ds_pc
            for dataset_name, dataset in zip(datasets_to_load, loaded_datasets):
                _dataset_cache[dataset_name] = dataset
        # Hand the function its datasets keyed by the declared local names.
        kwargs['datasets'] = {ds_name: _dataset_cache[ds_url] for ds_name, ds_url in datasets.items()}
    ret = func(*args, **kwargs)
    pc.display('func ret')
    if should_cache_func:
        assert ret is not None
        cache.set(cache_key, ret, timeout=600)
    return ret
def get_verified_teams_public():
    """Get list of email-verified teams public

    Do a cached query (one hour) excluding zju.edu.cn addresses.
    """
    cached = cache.get('verified_teams_public')
    if cached is not None:
        return json.loads(cached)
    query = {
        "email_verified": True,
        "email": {"$not": re.compile(".*zju\.edu\.cn$")}
    }
    projection = {"_id": 0, "teamname": 1, "tid": 1}
    verified_teams = list(db.teams.find(query, projection))
    cache.set('verified_teams_public', json.dumps(verified_teams), 60 * 60)
    return verified_teams
def load_problems():
    """Gets the list of all enabled problems.

    Serves the JSON-cached copy under 'problems' when present; otherwise
    queries every problem whose 'enabled' flag is not False, projecting only
    the public fields (pid, category, displayname, hint, basescore, desc),
    sorts them cheapest-first (ties broken by pid, missing basescore last)
    and caches the result for an hour.
    """
    # FIX: dropped a large commented-out rebuild loop that was retained as a
    # no-op triple-quoted string expression, and replaced the docstring that
    # had been copied verbatim from load_unlocked_problems.
    problems = cache.get('problems')
    if problems is not None:
        return json.loads(problems)
    problems = list(db.problems.find(
        {
            "enabled": {"$ne": False}
        },
        {
            "_id": 0, "pid": 1, "category": 1, "displayname": 1,
            "hint": 1, "basescore": 1, "desc": 1
        }))
    problems.sort(key=lambda k: (k['basescore'] if 'basescore' in k else 99999, k['pid']))
    cache.set('problems', json.dumps(problems), 60 * 60)
    return problems
def get_verified_teams_zju():
    """Get list of email-verified teams zju

    Do a cached query (one hour) matching zju.edu.cn addresses only.
    """
    cached = cache.get('verified_teams_zju')
    if cached is None:
        rows = db.teams.find(
            {"email_verified": True, "email": {"$regex": r".*zju\.edu\.cn$"}},
            {"_id": 0, "teamname": 1, "tid": 1})
        teams = [row for row in rows]
        cache.set('verified_teams_zju', json.dumps(teams), 60 * 60)
        return teams
    return json.loads(cached)
def load_problems_tid(tid):
    """Gets the list of all problems annotated with this team's
    solved/unsolved status.

    Serves the JSON-cached copy under 'problems_<tid>' when present;
    otherwise takes load_problems() and marks each entry's 'correct' flag
    from the team's solved set, caching the result for an hour.
    """
    key = 'problems_' + tid
    cached = cache.get(key)
    if cached is not None:
        return json.loads(cached)
    solved = get_solved_problems(tid)
    problems = load_problems()
    for prob in problems:
        prob['correct'] = prob['pid'] in solved
    cache.set(key, json.dumps(problems), 60 * 60)
    return problems
def get_solved_problems(tid):
    """Returns a list of all problems the team has solved.

    Serves the JSON-cached copy under 'solved_<tid>' when present; otherwise
    joins the team's correct submissions against the problems collection and
    returns pid/displayname/basescore entries, highest basescore first,
    caching the result for an hour.
    """
    cached = cache.get('solved_' + tid)
    if cached is not None:
        return json.loads(cached)
    correct_pids = {sub['pid'] for sub in db.submissions.find({"tid": tid, "correct": True})}
    matching = db.problems.find({"pid": {"$in": list(correct_pids)}},
                                {'pid': 1, 'displayname': 1, 'basescore': 1})
    solved = [{'pid': prob['pid'],
               'displayname': prob.get('displayname', None),
               'basescore': prob.get('basescore', None)}
              for prob in matching]
    solved.sort(key=lambda entry: entry['basescore'] if 'basescore' in entry else 99999,
                reverse=True)
    cache.set('solved_' + tid, json.dumps(solved), 60 * 60)
    return solved
def run(self):
    # Worker entry point: runs the simulation and streams results into the
    # cache under self.cache_key so concurrent watchers can poll them.
    self.last_results = None
    logger.info('%s: run process (cache key %s)' % (self.uuid, self.cache_key))

    def step_callback(total, age_groups=None, by_variant=None, force=False):
        # Publish intermediate results, throttled to at most one cache write
        # per 0.5 s unless force=True. Returning True continues the run.
        now = time.time()
        res = dict(total=total, age_groups=age_groups, by_variant=by_variant)
        if force or self.last_results is None or now - self.last_results > 0.5:
            logger.debug('%s: set results to %s' % (self.uuid, self.cache_key))
            cache.set('%s-results' % self.cache_key, res, timeout=self.cache_expiration)
            self.last_results = now
        return True

    try:
        df, adf = simulate_individuals(step_callback=step_callback, variable_store=self.variables)
    except ExecutionInterrupted:
        # Cancelled from outside. NOTE(review): the '-finished' flag is not
        # set on this path — confirm watchers handle a cancelled run.
        logger.error('%s: process cancelled' % self.uuid)
    except Exception as e:
        # Record the failure so pollers stop waiting, then re-raise.
        cache.set('%s-finished' % self.cache_key, True, self.cache_expiration)
        cache.set('%s-error' % self.cache_key, str(e), self.cache_expiration)
        raise
    else:
        logger.info('%s: computation finished' % self.uuid)
        # Force-publish the final result before flagging completion.
        step_callback(df, age_groups=adf, force=True)
        cache.set('%s-finished' % self.cache_key, True, self.cache_expiration)
    logger.info('%s: process finished' % self.uuid)
def load_group_scoreboard(group):
    """Build the scoreboard for an entire group of teams.

    Gets the team names, tids and affiliations of all group members, then
    for each team fetches (or rebuilds, cached for 30 s) the date of its
    last correct submission, used as a tie breaker. Teams are sorted by that
    date and then by score; Python's sort is stable, so equal scores stay
    ordered by last submission date. Only teams with a positive score are
    kept. The finished scoreboard is cached as JSON for 30 seconds.
    """
    teams = [
        {'tid': t['tid'],
         'teamname': t['teamname'],
         'affiliation': t['affiliation'] if 'affiliation' in t else None}
        for t in list(db.teams.find({'tid': {'$in': group['members']}},
                                    {'tid': 1, 'teamname': 1, 'affiliation': 1}))]
    for t in teams:
        lastsubdate = cache.get('lastsubdate_' + t['tid'])
        if lastsubdate is None:
            subs = list(db.submissions.find({'tid': t['tid'], 'correct': True,
                                             'timestamp': {"$lt": end}}))
            if len(subs) == 0:
                # FIX: was datetime(2000, 01, 01) — integer literals with
                # leading zeros are octal in Python 2 and a SyntaxError in
                # Python 3.
                lastsubdate = str(datetime(2000, 1, 1))
            else:
                sortedsubs = sorted(subs, key=lambda k: str(k['timestamp']), reverse=True)
                lastsubdate = str(sortedsubs[0]['timestamp'])
            cache.set('lastsubdate_' + t['tid'], lastsubdate, 1 * 30)
        t['lastsubdate'] = lastsubdate
    teams.sort(key=lambda k: k['lastsubdate'])
    top_scores = [x for x in sorted(
        [{'teamname': esc(t['teamname']),
          'affiliation': esc(t['affiliation']),
          'score': load_team_score(t['tid'])}
         for t in teams],
        key=lambda k: k['score'], reverse=True) if x['score'] > 0]
    cache.set('groupscoreboard_' + str(group['name']),
              json.dumps({'group': group['name'], 'scores': top_scores}), 1 * 30)
def get_solved_problems(tid):
    """Returns a list of all problems the team has solved.

    Serves the JSON-cached copy under 'solved_<tid>' when present; otherwise
    collects the pids of the team's correct submissions made before the CTF
    end and caches them for an hour.
    """
    cached = cache.get('solved_' + tid)
    if cached is not None:
        return json.loads(cached)
    cursor = db.submissions.find(
        {
            "tid": tid,
            "correct": True,
            "timestamp": {"$lt": scoreboard.ctf_end}
        },
        {
            "pid": 1
        })
    solved = [doc['pid'] for doc in cursor]
    cache.set('solved_' + tid, json.dumps(solved), 60 * 60)
    return solved
def get_solved_problems(tid):
    """Returns a list of all problems the team has solved.

    Serves the JSON-cached copy under 'solved_<tid>' when present; otherwise
    joins the team's correct submissions against the problems collection and
    returns pid/displayname/basescore entries, highest basescore first,
    caching the result for an hour.
    """
    cached = cache.get('solved_' + tid)
    if cached is not None:
        return json.loads(cached)
    submissions = db.submissions.find({"tid": tid, "correct": True})
    pids = {sub['pid'] for sub in submissions}
    problems = db.problems.find({"pid": {"$in": list(pids)}},
                                {'pid': 1, 'displayname': 1, 'basescore': 1})
    entries = []
    for prob in problems:
        entries.append({'pid': prob['pid'],
                        'displayname': prob.get('displayname', None),
                        'basescore': prob.get('basescore', None)})
    entries.sort(key=lambda e: e['basescore'] if 'basescore' in e else 99999,
                 reverse=True)
    cache.set('solved_' + tid, json.dumps(entries), 60 * 60)
    return entries
def load_group_scoreboard(group):
    """Build the scoreboard for an entire group of teams.

    Gets the team names, tids and affiliations of all group members, then
    for each team fetches (or rebuilds, cached for 30 min) the date of its
    last correct submission, used as a tie breaker. Teams are sorted by that
    date and then by score; Python's sort is stable, so equal scores stay
    ordered by last submission date. Only teams with a positive score are
    kept. The finished scoreboard is cached as JSON for 30 minutes.
    """
    teams = [
        {'tid': t['tid'],
         'teamname': t['teamname'],
         'affiliation': t['affiliation'] if 'affiliation' in t else None}
        for t in list(db.teams.find({'tid': {'$in': group['members']}},
                                    {'tid': 1, 'teamname': 1, 'affiliation': 1}))]
    for t in teams:
        lastsubdate = cache.get('lastsubdate_' + t['tid'])
        if lastsubdate is None:
            subs = list(db.submissions.find({'tid': t['tid'], 'correct': True,
                                             'timestamp': {"$lt": end}}))
            if len(subs) == 0:
                # FIX: was datetime(2000, 01, 01) — integer literals with
                # leading zeros are octal in Python 2 and a SyntaxError in
                # Python 3.
                lastsubdate = str(datetime(2000, 1, 1))
            else:
                sortedsubs = sorted(subs, key=lambda k: str(k['timestamp']), reverse=True)
                lastsubdate = str(sortedsubs[0]['timestamp'])
            cache.set('lastsubdate_' + t['tid'], lastsubdate, 60 * 30)
        t['lastsubdate'] = lastsubdate
    teams.sort(key=lambda k: k['lastsubdate'])
    top_scores = [x for x in sorted(
        [{'teamname': esc(t['teamname']),
          'affiliation': esc(t['affiliation']),
          'score': load_team_score(t['tid'])}
         for t in teams],
        key=lambda k: k['score'], reverse=True) if x['score'] > 0]
    cache.set('groupscoreboard_' + str(group['name']),
              json.dumps({'group': group['name'], 'scores': top_scores}), 60 * 30)
def wrap_calc_func(*args, **kwargs):
    # Closure produced by a calcfunc decorator: memoizes cacheable calls of
    # `func` in the shared cache and injects the declared variables/datasets.
    # Profiling output is gated on the PROFILE_CALC environment variable.
    should_profile = os.environ.get('PROFILE_CALC', '').lower() in ('1', 'true', 'yes')
    # Control kwargs are consumed here and never passed through to func.
    only_if_in_cache = kwargs.pop('only_if_in_cache', False)
    skip_cache = kwargs.pop('skip_cache', False)
    var_store = kwargs.pop('variable_store', None)
    if should_profile:
        pc = PerfCounter('%s.%s' % (func.__module__, func.__name__))
        pc.display('enter')
    hash_data = _get_func_hash_data(func, None)
    cache_key = _calculate_cache_key(func, hash_data, var_store=var_store)
    # Callers must not pass these; they are injected below.
    assert 'variables' not in kwargs
    assert 'datasets' not in kwargs
    # 'step_callback' is the only pass-through kwarg that keeps the call
    # cacheable; anything else makes the result argument-dependent.
    unknown_kwargs = set(kwargs.keys()) - set(['step_callback'])
    if not args and not unknown_kwargs and not skip_cache:
        should_cache_func = True
    else:
        should_cache_func = False
    if should_cache_func:
        ret = cache.get(cache_key)
        if ret is not None:  # calcfuncs must not return None
            if should_profile:
                pc.display('cache hit (%s)' % cache_key)
            return ret
    if only_if_in_cache:
        # Caller only wanted a cached result; do not compute on a miss.
        if should_profile:
            pc.display('cache miss so leaving as requested (%s)' % cache_key)
        return None
    if variables is not None:
        # Resolve each declared variable name against the supplied store.
        kwargs['variables'] = {x: get_variable(y, var_store=var_store) for x, y in variables.items()}
    if datasets is not None:
        # Load only the datasets missing from the process-level cache.
        datasets_to_load = set(list(datasets.values())) - set(_dataset_cache.keys())
        if datasets_to_load:
            loaded_datasets = []
            for dataset_name in datasets_to_load:
                if should_profile:
                    ds_pc = PerfCounter('dataset %s' % dataset_name)
                df = load_datasets(dataset_name)
                if should_profile:
                    ds_pc.display('loaded')
                    del ds_pc
                loaded_datasets.append(df)
            for dataset_name, dataset in zip(datasets_to_load, loaded_datasets):
                _dataset_cache[dataset_name] = dataset
        # Hand the function its datasets keyed by the declared local names.
        kwargs['datasets'] = {ds_name: _dataset_cache[ds_url] for ds_name, ds_url in datasets.items()}
    ret = func(*args, **kwargs)
    if should_profile:
        pc.display('func ret')
    if should_cache_func:
        assert ret is not None
        cache.set(cache_key, ret, timeout=3600)
    return ret
def submit_problem(tid, request, is_zju_user):
    """Handle problem submission.

    Gets the key and pid from the submitted problem, calls the respective
    grading function if the values aren't empty. If correct all relevant
    cache values are cleared. The submission is then inserted into the
    database (an attempt is made). A relevant message is returned if the
    problem has already been solved or the answer has been tried.
    """
    # Nginx Configuration Fixed --libmaru
    """
    import common
    common.log('Hello, '+request.remote_addr, 'ERROR')
    """
    """
    response = captcha.submit(
        request.form.get('recaptcha_challenge', ''),
        request.form.get('recaptcha_response', ''),
        '6LcPFPESAAAAAIkncbbAOfUi6sTSrMMxKVA9EcMq',
        request.remote_addr
    )
    if not response.is_valid:
        return {"status": 0, "points": 0, "message": "验证码不正确."}
    """
    # Rate limit: at most one submission per t_interval seconds per team,
    # enforced via an expiring cache flag.
    t_interval = 10
    last_submitted = cache.get('last_submitted_' + tid)
    if not last_submitted:
        cache.set('last_submitted_' + tid, True, t_interval)
    else:
        return {"status": 0, "points": 0, "message": "相邻提交之间隔须多于%d秒, 请稍后再试." % t_interval}
    pid = request.form.get('pid', '')
    key = request.form.get('key', '')
    if pid == '':
        return {"status": 0, "points": 0, "message": "题目名字不能为空."}
    if key == '':
        return {"status": 0, "points": 0, "message": "答案不能为空."}
    #if pid not in [p['pid'] for p in load_unlocked_problems(tid)]:
    #    return {"status": 0, "points": 0, "message": "You cannot submit problems you have not unlocked."}
    pid = pid.encode('utf8').strip()
    # key = key.encode('utf8').strip()
    # Problem documents are cached per pid for an hour.
    prob = cache.get('problem_' + pid)
    if prob is None:
        prob = db.problems.find_one({"pid": pid})
        if prob is None:
            return {"status": 0, "points": 0, "message": "未找到题目'%s'." % pid}
        # Drop the non-JSON-serializable ObjectId before caching.
        del prob['_id']
        cache.set('problem_' + pid, json.dumps(prob), 60 * 60)
    else:
        prob = json.loads(prob)
    correct = False
    # 'file' graders load a module from ./graders and delegate to its
    # grade(); the default 'key' grader is a straight string comparison.
    grader_type = prob.get('grader-type', 'key')
    if grader_type == 'file':
        (correct, message) = imp.load_source(prob['grader'][:-3], "./graders/" + prob['grader']).grade(tid, key)
    elif grader_type == 'key':
        correct = prob['key'] == key
        message = prob.get('correct_msg', '回答正确!') if correct else prob.get('wrong_msg', '回答错误!')
    message = message.encode('utf8')
    tstamp = utilities.timestamp(datetime.utcnow())
    submission = {'tid': tid,
                  'timestamp': tstamp,
                  'pid': pid,
                  'ip': request.headers.get('X-Real-IP', None),
                  'key': key,
                  'correct': correct}
    if correct:
        # Invalidate every cache entry derived from this team's solves; a
        # duplicate insert means this exact problem was already solved.
        #cache.delete('unlocked_' + tid)  # Clear the unlocked problem cache as it needs updating
        cache.delete('solved_' + tid)  # Clear the list of solved problems
        cache.delete('problems_' + tid)
        if is_zju_user:
            cache.delete('scoreboard_zju')
        else:
            cache.delete('scoreboard_public')
        cache.delete('teamscore_' + tid)  # Clear the team's cached score
        cache.delete('lastsubdate_' + tid)
        try:
            db.submissions.insert(submission)
        except DuplicateKeyError:
            return {"status": 0, "points": 0, "message": "你已解决此题!"}
    else:
        # A duplicate insert here means this wrong answer was tried before.
        try:
            db.submissions.insert(submission)
        except DuplicateKeyError:
            return {"status": 0, "points": 0, "message": "你已提交过这一错误答案!"}
    return {"status": 1 if correct else 0, "points": prob.get('basescore', 0), "message": message}