def submission_enqueue_operations(self, submission):
    """Push in queue the operations required by a submission.

    submission (Submission): a submission.

    return (int): the number of actually enqueued operations.

    """
    total_enqueued = 0
    for dataset in get_datasets_to_judge(submission.task):
        result = submission.get_result(dataset)
        any_operation = False
        for op, prio, ts in submission_get_operations(
                result, submission, dataset):
            any_operation = True
            if self.enqueue(op, prio, ts):
                total_enqueued += 1

        # No operation was produced, yet the result still has to be
        # evaluated: all evaluations are already in, so the only thing
        # left is to finalize the evaluation outcome.
        if not any_operation and submission_to_evaluate(result):
            logger.info("Result %d(%d) has already all evaluations, "
                        "finalizing it.", submission.id, dataset.id)
            result.set_evaluation_outcome()
            result.sa_session.commit()
            self.evaluation_ended(result)

    return total_enqueued
def submission_enqueue_operations(self, submission):
    """Push in queue the operations required by a submission.

    submission (Submission): a submission.

    return (int): the number of actually enqueued operations.

    """
    new_operations = 0
    for dataset in get_datasets_to_judge(submission.task):
        submission_result = submission.get_result(dataset)
        number_of_operations = 0
        for operation, priority, timestamp in submission_get_operations(
                submission_result, submission, dataset):
            number_of_operations += 1
            # enqueue presumably returns a falsy value when the
            # operation is already queued — TODO confirm against the
            # enqueue implementation.
            if self.enqueue(operation, priority, timestamp):
                new_operations += 1

        # If we got 0 operations, but the submission result is to
        # evaluate, it means that we just need to finalize the
        # evaluation.
        if number_of_operations == 0 and submission_to_evaluate(
                submission_result):
            logger.info("Result %d(%d) has already all evaluations, "
                        "finalizing it.", submission.id, dataset.id)
            submission_result.set_evaluation_outcome()
            # Persist the outcome before notifying, so listeners see
            # the committed state.
            submission_result.sa_session.commit()
            self.evaluation_ended(submission_result)

    return new_operations
def user_test_enqueue_operations(self, user_test):
    """Push in queue the operations required by a user test.

    user_test (UserTest): a user test.

    return (int): the number of actually enqueued operations.

    """
    # Count one for each operation that enqueue actually accepts;
    # the generator still calls enqueue for every operation found.
    return sum(
        1
        for dataset in get_datasets_to_judge(user_test.task)
        for operation, priority, timestamp in user_test_get_operations(
            user_test, dataset)
        if self.enqueue(operation, priority, timestamp))
def search_jobs_not_done(self):
    """Look in the database for submissions that have not been
    scored for no good reasons. Put the missing job in the queue.

    Periodic task: it always returns True so the service keeps
    rescheduling it ("run forever").

    return (bool): always True.

    """
    # Do this only if we are not still loading old submissions
    # (from the start of the service).
    if self.scoring_old_submission:
        return True

    with SessionGen(commit=False) as session:
        contest = Contest.get_from_id(self.contest_id, session)

        new_submission_results_to_score = set()
        new_submissions_to_token = set()

        for submission in contest.get_submissions():
            for dataset in get_datasets_to_judge(submission.task):
                sr = submission.get_result(dataset)
                sr_id = (submission.id, dataset.id)
                # A result is scoreable once it is fully evaluated or
                # its compilation definitively failed; skip those we
                # already scored.
                if sr is not None and \
                        (sr.evaluated() or
                         sr.compilation_outcome == "fail") and \
                        sr_id not in self.submission_results_scored:
                    new_submission_results_to_score.add(sr_id)
            if submission.tokened() and \
                    submission.id not in self.submissions_tokened:
                new_submissions_to_token.add(submission.id)

        new_s = len(new_submission_results_to_score)
        old_s = len(self.submission_results_to_score)
        new_t = len(new_submissions_to_token)
        old_t = len(self.submissions_to_token)
        # Lazy %-style args (consistent with the rest of the file)
        # instead of eager string interpolation.
        logger.info("Submissions found to score/token: %d, %d.",
                    new_s, new_t)

        if new_s + new_t > 0:
            self.submission_results_to_score |= \
                new_submission_results_to_score
            self.submissions_to_token |= new_submissions_to_token
            # Kick off the catch-up loop only if it was idle before
            # this pass added work.
            if old_s + old_t == 0:
                self.add_timeout(self.score_old_submissions, None,
                                 0.5, immediately=False)

    # Run forever.
    return True
def get_user_test_operations(self, user_test):
    """Return the operations required by a user test.

    Build (without enqueueing) the operations needed to judge the
    user test on every dataset still to judge.

    user_test (UserTest): a user test.

    return ([[ESOperation, int, datetime, dict]]): operations to
        enqueue, together with priority, timestamp and the
        corresponding Job exported to dict.

    """
    operations = []
    for dataset in get_datasets_to_judge(user_test.task):
        for operation, priority, timestamp in user_test_get_operations(
                user_test, dataset):
            job = Job.from_operation(
                operation, user_test, dataset).export_to_dict()
            operations.append([operation, priority, timestamp, job])
    return operations
def get_submission_operations(self, submission, dataset=None):
    """Return the operations required by a submission.

    Build (without enqueueing) the operations needed to judge the
    submission; as a side effect, finalize any submission result
    whose evaluations are already all present.

    submission (Submission): a submission.
    dataset (Dataset|None): if given, restrict the operations to
        this single dataset; otherwise consider all datasets to
        judge for the submission's task.

    return ([[ESOperation, int, datetime, dict]]): operations to
        enqueue, together with priority, timestamp and the
        corresponding Job exported to dict.

    """
    operations = []
    if dataset is None:
        datasets = get_datasets_to_judge(submission.task)
    else:
        datasets = [dataset]

    for dataset in datasets:
        submission_result = submission.get_result(dataset)
        number_of_operations = 0
        for operation, priority, timestamp in submission_get_operations(
                submission_result, submission, dataset):
            number_of_operations += 1
            job = Job.from_operation(
                operation, submission, dataset).export_to_dict()
            operations.append([operation, priority, timestamp, job])

        # If we got 0 operations, but the submission result is to
        # evaluate, it means that we just need to finalize the
        # evaluation.
        if number_of_operations == 0 and submission_to_evaluate(
                submission_result):
            logger.info("Result %d(%d) has already all evaluations, "
                        "finalizing it.", submission.id, dataset.id)
            submission_result.set_evaluation_outcome()
            submission_result.sa_session.commit()
            self.evaluation_ended(submission_result)

    return operations