def import_from_dict(cls, data):
    """Build an instance of cls from the dict made by export_to_dict.

    data (dict): serialized job data; the 'files', 'managers' and
        'executables' entries map codenames to digests and are replaced
        in place with the corresponding DB objects.

    return (cls): the deserialized object.

    """
    # Use .items() (valid on both Python 2 and 3) instead of the
    # Python 2-only dict.iteritems(); the sibling implementation of
    # this method in this codebase already uses .items().
    data['files'] = {
        k: File(k, v) for k, v in data['files'].items()}
    data['managers'] = {
        k: Manager(k, v) for k, v in data['managers'].items()}
    data['executables'] = {
        k: Executable(k, v) for k, v in data['executables'].items()}
    return cls(**data)
def import_from_dict(cls, data):
    """Create a Job from the output of export_to_dict."""
    # Rebuild the ESOperation object, if one was serialized.
    if data['operation'] is not None:
        data['operation'] = ESOperation.from_dict(data['operation'])
    # Re-wrap each plain {codename: digest} mapping into the
    # corresponding DB object type.
    for key, wrapper in (('files', File),
                         ('managers', Manager),
                         ('executables', Executable)):
        data[key] = {k: wrapper(k, v) for k, v in data[key].items()}
    return cls(**data)
def add_file(self, submission=None, **kwargs):
    """Create a file, add it to the session and return it.

    A fresh submission is created when none is supplied; any default
    field value can be overridden through kwargs.
    """
    parent = submission if submission is not None else self.add_submission()
    defaults = {
        "submission": parent,
        "filename": unique_unicode_id(),
        "digest": unique_digest(),
    }
    defaults.update(kwargs)
    new_file = File(**defaults)
    self.session.add(new_file)
    return new_file
def add_submission(contest_id, username, task_name, timestamp, files):
    """Create a submission for the given user on the given task.

    contest_id (int): id of the contest.
    username (str): name of the user who is submitting.
    task_name (str): name of the task to submit on.
    timestamp (int): submission time, as a UNIX timestamp.
    files ({str: str}): map from element of the submission format to
        the path of the file to submit for it.

    return (bool): True if the submission was stored successfully,
        False otherwise (errors are logged).

    """
    file_cacher = FileCacher()
    with SessionGen() as session:
        participation = session.query(Participation)\
            .join(Participation.user)\
            .filter(Participation.contest_id == contest_id)\
            .filter(User.username == username)\
            .first()
        if participation is None:
            # Use the module-level logger consistently; the original
            # mixed root-logger `logging.*` calls with `logger.*`.
            logger.critical("User `%s' does not exists or "
                            "does not participate in the contest.", username)
            return False
        task = session.query(Task)\
            .filter(Task.contest_id == contest_id)\
            .filter(Task.name == task_name)\
            .first()
        if task is None:
            logger.critical("Unable to find task `%s'.", task_name)
            return False

        # `fmt` instead of `format` to avoid shadowing the builtin.
        elements = [fmt.filename for fmt in task.submission_format]

        for file_ in files:
            if file_ not in elements:
                logger.critical("File `%s' is not in the submission format "
                                "for the task.", file_)
                return False

        if any(element not in files for element in elements):
            logger.warning("Not all files from the submission format were "
                           "provided.")

        # files and elements now coincide. We compute the language for
        # each file and check that they do not mix.
        language = None
        for file_ in files:
            this_language = filename_to_language(files[file_])
            if this_language is None and "%l" in file_:
                logger.critical("Cannot recognize language for file `%s'.",
                                file_)
                return False

            if language is None:
                language = this_language
            elif this_language is not None and language != this_language:
                logger.critical("Mixed-language submission detected.")
                return False

        # Store all files from the arguments, and obtain their digests.
        file_digests = {}
        try:
            for file_ in files:
                digest = file_cacher.put_file_from_path(
                    files[file_],
                    "Submission file %s sent by %s at %d." %
                    (file_, username, timestamp))
                file_digests[file_] = digest
        except Exception:
            # Narrowed from a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit.
            logger.critical("Error while storing submission's file.",
                            exc_info=True)
            return False

        # Create objects in the DB.
        submission = Submission(make_datetime(timestamp), language,
                                participation=participation, task=task)
        for filename, digest in file_digests.items():
            session.add(File(filename, digest, submission=submission))
        session.add(submission)
        session.commit()

    return True
def accept_submission(sql_session, file_cacher, participation, task, timestamp,
                      tornado_files, language_name, official):
    """Process a contestant's request to submit a submission.

    Parse and validate the data that a contestant sent for a submission
    and, if all checks and operations succeed, add the result to the
    database and return it.

    sql_session (Session): the DB session to use to fetch and add data.
    file_cacher (FileCacher): the file cacher to use to store the files.
    participation (Participation): the contestant who is submitting.
    task (Task): the task on which they are submitting.
    timestamp (datetime): the moment in time they submitted at.
    tornado_files ({str: [tornado.httputil.HTTPFile]}): the files they
        sent in.
    language_name (str|None): the language they declared their files are
        in (None means unknown and thus auto-detect).
    official (bool): whether the submission was sent in during a regular
        contest phase (and should be counted towards the score/rank) or
        during the analysis mode.

    return (Submission): the resulting submission, if all went well.

    raise (UnacceptableSubmission): if the contestant wasn't allowed to
        hand in a submission, if the provided data was invalid, if there
        were critical failures in the process.

    """
    contest = participation.contest
    assert task.contest is contest

    # Check whether the contestant is allowed to submit: per-contest and
    # per-task caps on the number of submissions, then per-contest and
    # per-task minimum intervals between consecutive submissions.
    if not check_max_number(sql_session, contest.max_submission_number,
                            participation, contest=contest):
        raise UnacceptableSubmission(
            N_("Too many submissions!"),
            N_("You have reached the maximum limit of "
               "at most %d submissions among all tasks.")
            % contest.max_submission_number)

    if not check_max_number(
            sql_session, task.max_submission_number, participation,
            task=task):
        raise UnacceptableSubmission(
            N_("Too many submissions!"),
            N_("You have reached the maximum limit of "
               "at most %d submissions on this task.")
            % task.max_submission_number)

    if not check_min_interval(sql_session, contest.min_submission_interval,
                              timestamp, participation, contest=contest):
        raise UnacceptableSubmission(
            N_("Submissions too frequent!"),
            N_("Among all tasks, you can submit again "
               "after %d seconds from last submission.")
            % contest.min_submission_interval.total_seconds())

    if not check_min_interval(sql_session, task.min_submission_interval,
                              timestamp, participation, task=task):
        raise UnacceptableSubmission(
            N_("Submissions too frequent!"),
            N_("For this task, you can submit again "
               "after %d seconds from last submission.")
            % task.min_submission_interval.total_seconds())

    # Process the data we received and ensure it's valid.
    required_codenames = set(task.submission_format)

    try:
        received_files = extract_files_from_tornado(tornado_files)
    except InvalidArchive:
        raise UnacceptableSubmission(
            N_("Invalid archive format!"),
            N_("The submitted archive could not be opened."))

    try:
        files, language = match_files_and_language(
            received_files, language_name, required_codenames,
            contest.languages)
    except InvalidFilesOrLanguage:
        raise UnacceptableSubmission(
            N_("Invalid submission format!"),
            N_("Please select the correct files."))

    # NOTE(review): iterkeys/itervalues/iteritems below presumably come
    # from a py2/3 compatibility layer imported at the top of the file
    # (python-future or six) — confirm before porting.
    digests = dict()
    missing_codenames = required_codenames.difference(iterkeys(files))
    if len(missing_codenames) > 0:
        # Some required files were not sent: recover them from the
        # previous submission if the task type allows partial
        # submissions, otherwise reject.
        if task.active_dataset.task_type_object.ALLOW_PARTIAL_SUBMISSION:
            digests = fetch_file_digests_from_previous_submission(
                sql_session, participation, task, language,
                missing_codenames)
        else:
            raise UnacceptableSubmission(
                N_("Invalid submission format!"),
                N_("Please select the correct files."))

    if any(len(content) > config.max_submission_length
           for content in itervalues(files)):
        raise UnacceptableSubmission(
            N_("Submission too big!"),
            N_("Each source file must be at most %d bytes long.")
            % config.max_submission_length)

    # All checks done, submission accepted.

    # Best-effort local backup of the submission; a failure here is
    # logged but does not abort the submission.
    if config.submit_local_copy:
        try:
            store_local_copy(config.submit_local_copy_path, participation,
                             task, timestamp, files)
        except StorageFailed:
            logger.error("Submission local copy failed.", exc_info=True)

    # We now have to send all the files to the destination...
    try:
        for codename, content in iteritems(files):
            digest = file_cacher.put_file_content(
                content,
                "Submission file %s sent by %s at %d." % (
                    codename, participation.user.username,
                    make_timestamp(timestamp)))
            digests[codename] = digest
    # In case of error, the server aborts the submission
    except Exception as error:
        logger.error("Storage failed! %s", error)
        raise UnacceptableSubmission(
            N_("Submission storage failed!"),
            N_("Please try again."))

    # All the files are stored, ready to submit!
    logger.info("All files stored for submission sent by %s",
                participation.user.username)

    submission = Submission(
        timestamp=timestamp,
        language=language.name if language is not None else None,
        task=task,
        participation=participation,
        official=official)
    sql_session.add(submission)

    for codename, digest in iteritems(digests):
        sql_session.add(File(
            filename=codename, digest=digest, submission=submission))

    return submission
def add_submissions(contest_name, task_name, username, items):
    """Add submissions from the given user to the given task in the
    given contest. Each item corresponds to a submission, and should
    contain a dictionary which maps formatted file names to paths. For
    example, in batch tasks the format is "Task.%l", so one submission
    would be {"Task.%l": "path/to/task.cpp"}.

    raise (Exception): if an item contains a file outside the task's
        submission format, or a path that is not a regular file.

    """
    # We connect to evaluation service to try and notify it about
    # the new submissions. Otherwise, it will pick it up only on
    # the next sweep for missed operations.
    rs = RemoteServiceClient(ServiceCoord("EvaluationService", 0))
    rs.connect()

    with SessionGen() as session:
        user = get_user(session, username)
        contest = get_contest(session, contest_name)
        participation = get_participation(session, contest, user)
        task = get_task(session, task_name, contest)
        elements = set(format_element.filename
                       for format_element in task.submission_format)
        file_cacher = FileCacher()

        # We go over all submissions twice. First we validate the
        # submission format.
        for submission_dict in items:
            # .items() instead of the Python 2-only .iteritems(): the
            # rest of this function already iterates with .items(), and
            # .iteritems() does not exist on Python 3 dicts.
            for (format_file_name, path) in submission_dict.items():
                if format_file_name not in elements:
                    raise Exception("Unexpected submission file: %s. "
                                    "Expected elements: %s" %
                                    (format_file_name, elements))
                if not os.path.isfile(path):
                    raise Exception("File not found: %s" % path)

        # Now add to database.
        for submission_dict in items:
            if not submission_dict:
                continue

            timestamp = time.time()
            file_digests = {}
            language_name = None
            for (format_file_name, path) in submission_dict.items():
                digest = file_cacher.put_file_from_path(
                    path,
                    "Submission file %s sent by %s at %d."
                    % (path, username, timestamp))
                file_digests[format_file_name] = digest

                # The language is inferred from the last file that
                # yields one.
                current_language = filename_to_language(path)
                if current_language is not None:
                    language_name = current_language.name

            submission = Submission(make_datetime(timestamp), language_name,
                                    participation=participation, task=task)
            for filename, digest in file_digests.items():
                session.add(File(filename, digest, submission=submission))
            session.add(submission)
            session.commit()
            rs.new_submission(submission_id=submission.id)

    rs.disconnect()
def post(self, task_name):
    """Handle a contestant's POST of a new submission for `task_name`.

    Enforces submission-count and submission-frequency limits, validates
    the uploaded files (possibly extracting an archive), detects the
    language, stores the files, records the Submission in the DB and
    notifies the evaluation service. On any validation failure a
    notification is added for the user and they are redirected back to
    the task's submissions page.

    NOTE(review): this handler uses Python 2-only constructs
    (`error.message`, `self.request.files.keys()[0]`, `.iteritems()`) —
    it predates a py3 port.
    """
    participation = self.current_user

    try:
        task = self.contest.get_task(task_name)
    except KeyError:
        raise tornado.web.HTTPError(404)

    # Alias for easy access
    contest = self.contest

    # Enforce maximum number of submissions
    try:
        if contest.max_submission_number is not None:
            # Count this user's submissions across the whole contest.
            submission_c = self.sql_session\
                .query(func.count(Submission.id))\
                .join(Submission.task)\
                .filter(Task.contest == contest)\
                .filter(Submission.participation == participation)\
                .scalar()
            if submission_c >= contest.max_submission_number and \
                    not self.current_user.unrestricted:
                raise ValueError(
                    self._("You have reached the maximum limit of "
                           "at most %d submissions among all tasks.") %
                    contest.max_submission_number)
        if task.max_submission_number is not None:
            # Count this user's submissions on this task only.
            submission_t = self.sql_session\
                .query(func.count(Submission.id))\
                .filter(Submission.task == task)\
                .filter(Submission.participation == participation)\
                .scalar()
            if submission_t >= task.max_submission_number and \
                    not self.current_user.unrestricted:
                raise ValueError(
                    self._("You have reached the maximum limit of "
                           "at most %d submissions on this task.") %
                    task.max_submission_number)
    except ValueError as error:
        self.application.service.add_notification(
            participation.user.username,
            self.timestamp,
            self._("Too many submissions!"),
            error.message,
            NOTIFICATION_ERROR)
        self.redirect("/tasks/%s/submissions" % quote(task.name, safe=''))
        return

    # Enforce minimum time between submissions
    try:
        if contest.min_submission_interval is not None:
            last_submission_c = self.sql_session.query(Submission)\
                .join(Submission.task)\
                .filter(Task.contest == contest)\
                .filter(Submission.participation == participation)\
                .order_by(Submission.timestamp.desc())\
                .first()
            if last_submission_c is not None and \
                    self.timestamp - last_submission_c.timestamp < \
                    contest.min_submission_interval and \
                    not self.current_user.unrestricted:
                raise ValueError(
                    self._("Among all tasks, you can submit again "
                           "after %d seconds from last submission.") %
                    contest.min_submission_interval.total_seconds())
        # We get the last submission even if we may not need it
        # for min_submission_interval because we may need it later,
        # in case this is a ALLOW_PARTIAL_SUBMISSION task.
        last_submission_t = self.sql_session.query(Submission)\
            .filter(Submission.task == task)\
            .filter(Submission.participation == participation)\
            .order_by(Submission.timestamp.desc())\
            .first()
        if task.min_submission_interval is not None:
            if last_submission_t is not None and \
                    self.timestamp - last_submission_t.timestamp < \
                    task.min_submission_interval and \
                    not self.current_user.unrestricted:
                raise ValueError(
                    self._("For this task, you can submit again "
                           "after %d seconds from last submission.") %
                    task.min_submission_interval.total_seconds())
    except ValueError as error:
        self.application.service.add_notification(
            participation.user.username,
            self.timestamp,
            self._("Submissions too frequent!"),
            error.message,
            NOTIFICATION_ERROR)
        self.redirect("/tasks/%s/submissions" % quote(task.name, safe=''))
        return

    # Ensure that the user did not submit multiple files with the
    # same name.
    if any(len(filename) != 1
           for filename in self.request.files.values()):
        self.application.service.add_notification(
            participation.user.username,
            self.timestamp,
            self._("Invalid submission format!"),
            self._("Please select the correct files."),
            NOTIFICATION_ERROR)
        self.redirect("/tasks/%s/submissions" % quote(task.name, safe=''))
        return

    # If the user submitted an archive, extract it and use content
    # as request.files.
    if len(self.request.files) == 1 and \
            self.request.files.keys()[0] == "submission":
        archive_data = self.request.files["submission"][0]
        del self.request.files["submission"]

        # Create the archive.
        archive = Archive.from_raw_data(archive_data["body"])

        if archive is None:
            self.application.service.add_notification(
                participation.user.username,
                self.timestamp,
                self._("Invalid archive format!"),
                self._("The submitted archive could not be opened."),
                NOTIFICATION_ERROR)
            self.redirect("/tasks/%s/submissions" %
                          quote(task.name, safe=''))
            return

        # Extract the archive.
        # NOTE(review): the extracted files are read without a `with`
        # block, so the handles are closed only by the GC.
        unpacked_dir = archive.unpack()
        for name in archive.namelist():
            filename = os.path.basename(name)
            body = open(os.path.join(unpacked_dir, filename), "r").read()
            self.request.files[filename] = [{
                'filename': filename,
                'body': body
            }]

        archive.cleanup()

    # This ensure that the user sent one file for every name in
    # submission format and no more. Less is acceptable if task
    # type says so.
    task_type = get_task_type(dataset=task.active_dataset)
    required = set([sfe.filename for sfe in task.submission_format])
    provided = set(self.request.files.keys())
    if not (required == provided or (task_type.ALLOW_PARTIAL_SUBMISSION
                                     and required.issuperset(provided))):
        self.application.service.add_notification(
            participation.user.username,
            self.timestamp,
            self._("Invalid submission format!"),
            self._("Please select the correct files."),
            NOTIFICATION_ERROR)
        self.redirect("/tasks/%s/submissions" % quote(task.name, safe=''))
        return

    # Add submitted files. After this, files is a dictionary indexed
    # by *our* filenames (something like "output01.txt" or
    # "taskname.%l", and whose value is a couple
    # (user_assigned_filename, content).
    files = {}
    for uploaded, data in self.request.files.iteritems():
        files[uploaded] = (data[0]["filename"], data[0]["body"])

    # If we allow partial submissions, implicitly we recover the
    # non-submitted files from the previous submission. And put them
    # in file_digests (i.e. like they have already been sent to FS).
    submission_lang = None
    file_digests = {}
    if task_type.ALLOW_PARTIAL_SUBMISSION and \
            last_submission_t is not None:
        for filename in required.difference(provided):
            if filename in last_submission_t.files:
                # If we retrieve a language-dependent file from
                # last submission, we take not that language must
                # be the same.
                if "%l" in filename:
                    submission_lang = last_submission_t.language
                file_digests[filename] = \
                    last_submission_t.files[filename].digest

    # We need to ensure that everytime we have a .%l in our
    # filenames, the user has the extension of an allowed
    # language, and that all these are the same (i.e., no
    # mixed-language submissions).
    error = None
    for our_filename in files:
        user_filename = files[our_filename][0]
        if our_filename.find(".%l") != -1:
            lang = filename_to_language(user_filename)
            if lang is None:
                error = self._("Cannot recognize submission's language.")
                break
            elif submission_lang is not None and \
                    submission_lang != lang:
                error = self._("All sources must be in the same language.")
                break
            elif lang not in contest.languages:
                error = self._(
                    "Language %s not allowed in this contest." % lang)
                break
            else:
                submission_lang = lang
    if error is not None:
        self.application.service.add_notification(
            participation.user.username,
            self.timestamp,
            self._("Invalid submission!"),
            error,
            NOTIFICATION_ERROR)
        self.redirect("/tasks/%s/submissions" % quote(task.name, safe=''))
        return

    # Check if submitted files are small enough.
    if any([len(f[1]) > config.max_submission_length
            for f in files.values()]):
        self.application.service.add_notification(
            participation.user.username,
            self.timestamp,
            self._("Submission too big!"),
            self._("Each source file must be at most %d bytes long.") %
            config.max_submission_length,
            NOTIFICATION_ERROR)
        self.redirect("/tasks/%s/submissions" % quote(task.name, safe=''))
        return

    # All checks done, submission accepted.

    # Attempt to store the submission locally to be able to
    # recover a failure.
    if config.submit_local_copy:
        try:
            path = os.path.join(
                config.submit_local_copy_path.replace("%s",
                                                      config.data_dir),
                participation.user.username)
            if not os.path.exists(path):
                os.makedirs(path)
            # Pickle in ASCII format produces str, not unicode,
            # therefore we open the file in binary mode.
            with io.open(
                    os.path.join(path,
                                 "%d" % make_timestamp(self.timestamp)),
                    "wb") as file_:
                pickle.dump((self.contest.id,
                             participation.user.id,
                             task.id,
                             files), file_)
        except Exception as error:
            # Best effort only: a failed local copy does not abort the
            # submission.
            logger.warning("Submission local copy failed.", exc_info=True)

    # We now have to send all the files to the destination...
    try:
        for filename in files:
            digest = self.application.service.file_cacher.put_file_content(
                files[filename][1],
                "Submission file %s sent by %s at %d." % (
                    filename,
                    participation.user.username,
                    make_timestamp(self.timestamp)))
            file_digests[filename] = digest
    # In case of error, the server aborts the submission
    except Exception as error:
        logger.error("Storage failed! %s", error)
        self.application.service.add_notification(
            participation.user.username,
            self.timestamp,
            self._("Submission storage failed!"),
            self._("Please try again."),
            NOTIFICATION_ERROR)
        self.redirect("/tasks/%s/submissions" % quote(task.name, safe=''))
        return

    # All the files are stored, ready to submit!
    logger.info("All files stored for submission sent by %s",
                participation.user.username)
    submission = Submission(self.timestamp,
                            submission_lang,
                            task=task,
                            participation=participation)

    for filename, digest in file_digests.items():
        self.sql_session.add(File(filename,
                                  digest,
                                  submission=submission))
    self.sql_session.add(submission)
    self.sql_session.commit()
    self.application.service.evaluation_service.new_submission(
        submission_id=submission.id)
    self.application.service.add_notification(
        participation.user.username,
        self.timestamp,
        self._("Submission received"),
        self._("Your submission has been received "
               "and is currently being evaluated."),
        NOTIFICATION_SUCCESS)
    # The argument (encripted submission id) is not used by CWS
    # (nor it discloses information to the user), but it is useful
    # for automatic testing to obtain the submission id).
    # FIXME is it actually used by something?
    self.redirect(
        "/tasks/%s/submissions?%s" % (quote(task.name, safe=''),
                                      encrypt_number(submission.id)))
from future.builtins.disabled import * # noqa from future.builtins import * # noqa import unittest from mock import MagicMock, call, ANY from cms.db import File, Manager, Executable from cms.grading.Job import CompilationJob, EvaluationJob from cms.grading.tasktypes.Batch import Batch from cmstestsuite.unit_tests.grading.tasktypes.tasktypetestutils import \ COMPILATION_COMMAND_1, COMPILATION_COMMAND_2, EVALUATION_COMMAND_1, \ LANG_1, LANG_2, OUTCOME, STATS_OK, STATS_RE, TEXT, \ TaskTypeTestMixin, fake_compilation_commands, fake_evaluation_commands FILE_FOO_L1 = File(digest="digest of foo.l1", filename="foo.%l") FILE_BAR_L1 = File(digest="digest of bar.l1", filename="bar.%l") GRADER_L1 = Manager(digest="digest of grader.l1", filename="grader.l1") GRADER_L2 = Manager(digest="digest of grader.l2", filename="grader.l2") HEADER_L1 = Manager(digest="digest of grader.hl1", filename="graderl.hl1") EXE_FOO = Executable(digest="digest of foo", filename="foo") class TestGetCompilationCommands(TaskTypeTestMixin, unittest.TestCase): """Tests for get_compilation_commands().""" def setUp(self): super(TestGetCompilationCommands, self).setUp() self.setUpMocks("Batch") self.languages.update({LANG_1, LANG_2})
def submission_handler(self):
    """Dispatch a submission-related request on local.data['action'].

    Supported actions: 'list' (all submissions of the current user on
    a task), 'details' (full data of one submission), 'new' (accept and
    store a new submission). Results are written into local.resp; on
    failure an error string is returned, on success None.

    NOTE(review): this handler uses Python 2-only `.iteritems()` calls
    throughout — it predates a py3 port.
    """
    if local.data['action'] == 'list':
        task = local.session.query(Task)\
            .filter(Task.name == local.data['task_name']).first()
        if task is None:
            return 'Not found'
        if local.user is None:
            return 'Unauthorized'
        subs = local.session.query(Submission)\
            .filter(Submission.participation_id == local.participation.id)\
            .filter(Submission.task_id == task.id)\
            .order_by(desc(Submission.timestamp)).all()
        submissions = []
        for s in subs:
            submission = dict()
            submission['id'] = s.id
            submission['task_id'] = s.task_id
            submission['timestamp'] = make_timestamp(s.timestamp)
            submission['files'] = []
            for name, f in s.files.iteritems():
                fi = dict()
                # Substitute the language placeholder in the codename.
                if s.language is None:
                    fi['name'] = name
                else:
                    fi['name'] = name.replace('%l', s.language)
                fi['digest'] = f.digest
                submission['files'].append(fi)
            result = s.get_result()
            # getattr with default None also covers result being None.
            for i in ['compilation_outcome', 'evaluation_outcome']:
                submission[i] = getattr(result, i, None)
            if result is not None and result.score is not None:
                submission['score'] = round(result.score, 2)
            submissions.append(submission)
        local.resp['submissions'] = submissions
    elif local.data['action'] == 'details':
        s = local.session.query(Submission)\
            .filter(Submission.id == local.data['id']).first()
        if s is None:
            return 'Not found'
        if local.user is None or \
                s.participation_id != local.participation.id:
            return 'Unauthorized'
        submission = dict()
        submission['id'] = s.id
        submission['task_id'] = s.task_id
        submission['timestamp'] = make_timestamp(s.timestamp)
        submission['language'] = s.language
        submission['files'] = []
        for name, f in s.files.iteritems():
            fi = dict()
            if s.language is None:
                fi['name'] = name
            else:
                fi['name'] = name.replace('%l', s.language)
            fi['digest'] = f.digest
            submission['files'].append(fi)
        result = s.get_result()
        for i in ['compilation_outcome', 'evaluation_outcome',
                  'compilation_stdout', 'compilation_stderr',
                  'compilation_time', 'compilation_memory']:
            submission[i] = getattr(result, i, None)
        if result is not None and result.score is not None:
            submission['score'] = round(result.score, 2)
        if result is not None and result.score_details is not None:
            tmp = json.loads(result.score_details)
            # If the details are a flat list of testcases, wrap them in
            # a single synthetic subtask.
            if len(tmp) > 0 and 'text' in tmp[0]:
                subt = dict()
                subt['testcases'] = tmp
                subt['score'] = submission['score']
                subt['max_score'] = 100
                submission['score_details'] = [subt]
            else:
                submission['score_details'] = tmp
            # Render each testcase's text template with its arguments.
            for subtask in submission['score_details']:
                for testcase in subtask['testcases']:
                    data = json.loads(testcase['text'])
                    testcase['text'] = data[0] % tuple(data[1:])
        else:
            submission['score_details'] = None
        # NOTE(review): unlike 'list', this replaces local.resp wholesale
        # instead of setting a key — confirm callers expect that.
        local.resp = submission
    elif local.data['action'] == 'new':
        if local.user is None:
            return 'Unauthorized'
        # Rate limit: at most one submission every 20 seconds.
        lastsub = local.session.query(Submission)\
            .filter(Submission.participation_id == local.participation.id)\
            .order_by(desc(Submission.timestamp)).first()
        if lastsub is not None and \
                make_datetime() - lastsub.timestamp < timedelta(seconds=20):
            return 'Too frequent submissions!'

        try:
            task = local.session.query(Task)\
                .join(SocialTask)\
                .filter(Task.name == local.data['task_name'])\
                .filter(SocialTask.access_level >=
                        local.access_level).first()
        except KeyError:
            return 'Not found'

        def decode_file(f):
            # Strip the data-URI prefix (if any) and base64-decode the
            # payload into 'body'.
            f['data'] = f['data'].split(',')[-1]
            f['body'] = b64decode(f['data'])
            del f['data']
            return f

        if len(local.data['files']) == 1 and \
                'submission' in local.data['files']:
            archive_data = decode_file(local.data['files']['submission'])
            del local.data['files']['submission']

            # Create the archive.
            archive = Archive.from_raw_data(archive_data["body"])
            if archive is None:
                return 'Invalid archive!'

            # Extract the archive.
            unpacked_dir = archive.unpack()
            for name in archive.namelist():
                filename = os.path.basename(name)
                body = open(os.path.join(unpacked_dir, filename),
                            "r").read()
                local.data['files'][filename] = {
                    'filename': filename,
                    'body': body
                }

            files_sent = local.data['files']

            archive.cleanup()
        else:
            files_sent = \
                dict([(k, decode_file(v))
                      for k, v in local.data['files'].iteritems()])

        # TODO: implement partial submissions (?)

        # Detect language
        files = []
        sub_lang = None
        for sfe in task.submission_format:
            f = files_sent.get(sfe.filename)
            if f is None:
                return 'Some files are missing!'
            if len(f['body']) > config.get("core",
                                           "max_submission_length"):
                return 'The files you sent are too big!'
            f['name'] = sfe.filename
            files.append(f)
            if sfe.filename.endswith('.%l'):
                # Infer the language from the user-side file extension
                # and require every source to agree on it.
                language = None
                for ext, l in SOURCE_EXT_TO_LANGUAGE_MAP.iteritems():
                    if f['filename'].endswith(ext):
                        language = l
                if language is None:
                    return 'The language of the files you sent is not ' + \
                        'recognized!'
                elif sub_lang is not None and sub_lang != language:
                    return 'The files you sent are in different languages!'
                else:
                    sub_lang = language

        # Add the submission
        timestamp = make_datetime()
        submission = Submission(timestamp,
                                sub_lang,
                                participation=local.participation,
                                task=task)
        for f in files:
            digest = self.file_cacher.put_file_content(
                f['body'],
                'Submission file %s sent by %s at %d.' % (
                    f['name'], local.user.username,
                    make_timestamp(timestamp)))
            local.session.add(File(f['name'], digest,
                                   submission=submission))
        local.session.add(submission)
        local.session.commit()

        # Notify ES
        self.evaluation_service.new_submission(
            submission_id=submission.id
        )

        # Answer with submission data
        local.resp['id'] = submission.id
        local.resp['task_id'] = submission.task_id
        local.resp['timestamp'] = make_timestamp(submission.timestamp)
        local.resp['compilation_outcome'] = None
        local.resp['evaluation_outcome'] = None
        local.resp['score'] = None
        local.resp['files'] = []
        for name, f in submission.files.iteritems():
            fi = dict()
            if submission.language is None:
                fi['name'] = name
            else:
                fi['name'] = name.replace('%l', submission.language)
            fi['digest'] = f.digest
            local.resp['files'].append(fi)
    else:
        return 'Bad request'
# # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """Tests for the OutputOnly task type.""" import unittest from unittest.mock import MagicMock from cms.db import File from cms.grading.Job import EvaluationJob from cms.grading.tasktypes.OutputOnly import OutputOnly from cms.service.esoperations import ESOperation from cmstestsuite.unit_tests.grading.tasktypes.tasktypetestutils import \ OUTCOME, TEXT, TaskTypeTestMixin FILE_001 = File(digest="digest of 001", filename="output_001.txt") FILE_023 = File(digest="digest of 023", filename="output_023.txt") class TestEvaluate(TaskTypeTestMixin, unittest.TestCase): """Tests for evaluate(). prepare() creates a task type and a job with the given arguments, and in addition sets up successful return values for eval_output. """ def setUp(self): super().setUp() self.setUpMocks("OutputOnly") self.file_cacher = MagicMock()
def test_testcases(base_dir, soluzione, language, assume=None):
    """Run the given solution on all testcases of the task in base_dir.

    base_dir (str): path of the task directory (loaded via YamlLoader).
    soluzione (str): path of the solution, relative to base_dir; for
        OutputOnly tasks it is executed to generate the outputs,
        otherwise it is stored and evaluated as an executable.
    language (str|None): language of the solution (unused for
        OutputOnly tasks).
    assume (str|None): if given, the pre-canned answer to the
        "consider everything a timeout?" prompt; otherwise the user is
        asked interactively (via the Python 2-only `raw_input`).

    return (zip): the (points, comment, info) triple for each testcase.

    The loaded task and the FileCacher are cached in module-level
    globals across calls.
    """
    global task, file_cacher

    # Use a disabled FileCacher with a FSBackend in order to avoid to fill
    # the database with junk and to save up space.
    if file_cacher is None:
        file_cacher = FileCacher(path=os.path.join(config.cache_dir,
                                                   'cmsMake'),
                                 enabled=False)

    # Load the task
    if task is None:
        loader = YamlLoader(
            os.path.realpath(os.path.join(base_dir, "..")),
            file_cacher)
        # Normally we should import the contest before, but YamlLoader
        # accepts get_task() even without previous get_contest() calls
        task = loader.get_task(os.path.split(os.path.realpath(base_dir))[1])

    # Prepare the EvaluationJob
    dataset = task.active_dataset
    if dataset.task_type != "OutputOnly":
        # Compiled-solution task types: store the solution as the
        # executable for every testcase's job.
        digest = file_cacher.put_file_from_path(
            os.path.join(base_dir, soluzione),
            "Solution %s for task %s" % (soluzione, task.name))
        executables = {task.name: Executable(filename=task.name,
                                             digest=digest)}
        jobs = [(t, EvaluationJob(
            language=language,
            task_type=dataset.task_type,
            task_type_parameters=json.loads(
                dataset.task_type_parameters),
            managers=dict(dataset.managers),
            executables=executables,
            input=dataset.testcases[t].input,
            output=dataset.testcases[t].output,
            time_limit=dataset.time_limit,
            memory_limit=dataset.memory_limit))
            for t in dataset.testcases]
        tasktype = get_task_type(dataset=dataset)
    else:
        # OutputOnly: run the solution locally on each input to produce
        # the output files, then hand them to the jobs.
        print("Generating outputs...", end='')
        files = {}
        for t in sorted(dataset.testcases.keys()):
            with file_cacher.get_file(dataset.testcases[t].input) as fin:
                with TemporaryFile() as fout:
                    print(str(t), end='')
                    call(soluzione, stdin=fin, stdout=fout, cwd=base_dir)
                    fout.seek(0)
                    digest = file_cacher.put_file_from_fobj(fout)
            outname = "output_%s.txt" % t
            files[outname] = File(filename=outname, digest=digest)
        jobs = [(t, EvaluationJob(
            task_type=dataset.task_type,
            task_type_parameters=json.loads(
                dataset.task_type_parameters),
            managers=dict(dataset.managers),
            files=files,
            input=dataset.testcases[t].input,
            output=dataset.testcases[t].output,
            time_limit=dataset.time_limit,
            memory_limit=dataset.memory_limit))
            for t in dataset.testcases]
        for k, job in jobs:
            job._key = k
        tasktype = get_task_type(dataset=dataset)
        print()

    ask_again = True
    last_status = "ok"
    status = "ok"
    stop = False
    info = []
    points = []
    comments = []
    tcnames = []
    for jobinfo in sorted(jobs):
        print(jobinfo[0], end='')
        sys.stdout.flush()
        job = jobinfo[1]
        # Skip the testcase if we decide to consider everything to
        # timeout
        if stop:
            info.append("Time limit exceeded")
            points.append(0.0)
            comments.append("Timeout.")
            continue

        # Evaluate testcase
        last_status = status
        tasktype.evaluate(job, file_cacher)
        if dataset.task_type != "OutputOnly":
            status = job.plus["exit_status"]
            info.append("Time: %5.3f Wall: %5.3f Memory: %s" %
                        (job.plus["execution_time"],
                         job.plus["execution_wall_clock_time"],
                         mem_human(job.plus["execution_memory"])))
        else:
            status = "ok"
            info.append("N/A")
        points.append(float(job.outcome))
        comments.append(format_status_text(job.text))
        tcnames.append(jobinfo[0])

        # If we saw two consecutive timeouts, ask wether we want to
        # consider everything to timeout
        if ask_again and status == "timeout" and last_status == "timeout":
            print()
            print("Want to stop and consider everything to timeout? [y/N]",
                  end='')
            if assume is not None:
                print(assume)
                tmp = assume
            else:
                tmp = raw_input().lower()
            if tmp in ['y', 'yes']:
                stop = True
            else:
                ask_again = False

    # Result pretty printing
    print()
    clen = max(len(c) for c in comments)
    ilen = max(len(i) for i in info)
    for (i, p, c, b) in zip(tcnames, points, comments, info):
        print("%s) %5.2lf --- %s [%s]" %
              (i, p, c.ljust(clen), b.center(ilen)))

    return zip(points, comments, info)
def add_submission(contest_id, username, task_name, timestamp, files):
    """Create a submission for a user in the database.

    contest_id (int): id of the contest the user participates in.
    username (str): username of the submitting user.
    task_name (str): name of the task the submission is for.
    timestamp (int): Unix timestamp to record for the submission.
    files ({str: str}): map from element of the submission format to
        the path of the file to submit for it.

    return (bool): True if the submission was stored, False on any
        validation or storage error (logged, not raised).

    """
    file_cacher = FileCacher()
    with SessionGen() as session:
        participation = session.query(Participation)\
            .join(Participation.user)\
            .filter(Participation.contest_id == contest_id)\
            .filter(User.username == username)\
            .first()
        if participation is None:
            # Use the module logger (not the root "logging" module) so the
            # message goes through the configured handlers like the rest
            # of this function's messages.
            logger.critical("User `%s' does not exist or "
                            "does not participate in the contest.", username)
            return False
        task = session.query(Task)\
            .filter(Task.contest_id == contest_id)\
            .filter(Task.name == task_name)\
            .first()
        if task is None:
            logger.critical("Unable to find task `%s'.", task_name)
            return False

        elements = set(task.submission_format)

        # Every provided file must belong to the submission format...
        for file_ in files:
            if file_ not in elements:
                logger.critical("File `%s' is not in the submission format "
                                "for the task.", file_)
                return False

        # ...but providing only a subset of the format is allowed.
        if any(element not in files for element in elements):
            logger.warning("Not all files from the submission format were "
                           "provided.")

        # files is now a subset of elements.
        # We ensure we can infer a language if the task requires it.
        language = None
        need_lang = any(element.find(".%l") != -1 for element in elements)
        if need_lang:
            try:
                language = language_from_submitted_files(files)
            except ValueError as e:
                logger.critical(e)
                return False
            if language is None:
                # This might happen in case not all files were provided.
                logger.critical("Unable to infer language from submission.")
                return False
        language_name = None if language is None else language.name

        # Store all files from the arguments, and obtain their digests.
        file_digests = {}
        try:
            for file_ in files:
                digest = file_cacher.put_file_from_path(
                    files[file_],
                    "Submission file %s sent by %s at %d." % (
                        file_, username, timestamp))
                file_digests[file_] = digest
        except Exception as e:
            logger.critical("Error while storing submission's file: %s.", e)
            return False

        # Create objects in the DB.
        submission = Submission(make_datetime(timestamp), language_name,
                                participation=participation, task=task)
        for filename, digest in file_digests.items():
            session.add(File(filename, digest, submission=submission))
        session.add(submission)
        session.commit()
        maybe_send_notification(submission.id)

    return True
# # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """Tests for the OutputOnly task type.""" import unittest from unittest.mock import MagicMock from cms.db import File from cms.grading.Job import EvaluationJob from cms.grading.tasktypes.OutputOnly import OutputOnly from cms.service.esoperations import ESOperation from cmstestsuite.unit_tests.grading.tasktypes.tasktypetestutils import \ OUTCOME, TEXT, TaskTypeTestMixin FILE_001 = File(digest="digest of 001", filename="001.out") FILE_023 = File(digest="digest of 023", filename="023.out") class TestEvaluate(TaskTypeTestMixin, unittest.TestCase): """Tests for evaluate(). prepare() creates a task type and a job with the given arguments, and in addition sets up successful return values for eval_output. """ def setUp(self): super().setUp() self.setUpMocks("OutputOnly") self.file_cacher = MagicMock()
from future.builtins.disabled import * # noqa from future.builtins import * # noqa import unittest from mock import MagicMock, call, ANY from cms.db import File, Manager, Executable from cms.grading.Job import CompilationJob, EvaluationJob from cms.grading.tasktypes.Batch import Batch from cmstestsuite.unit_tests.grading.tasktypes.tasktypetestutils import \ COMPILATION_COMMAND_1, COMPILATION_COMMAND_2, EVALUATION_COMMAND_1, \ LANG_1, LANG_2, OUTCOME, STATS_OK, STATS_RE, TEXT, \ TaskTypeTestMixin, fake_compilation_commands, fake_evaluation_commands FILE_FOO_L1 = File(digest="digest of foo.l1", filename="foo.%l") GRADER_L1 = Manager(digest="digest of grader.l1", filename="grader.l1") GRADER_L2 = Manager(digest="digest of grader.l2", filename="grader.l2") HEADER_L1 = Manager(digest="digest of grader.hl1", filename="graderl.hl1") EXE_FOO = Executable(digest="digest of foo", filename="foo") class TestGetCompilationCommands(TaskTypeTestMixin, unittest.TestCase): """Tests for get_compilation_commands().""" def setUp(self): super(TestGetCompilationCommands, self).setUp() self.setUpMocks("Batch") self.languages.update({LANG_1, LANG_2}) def test_alone(self):
def post(self, task_name):
    """Handle the submission of a solution for the given task.

    Validates submission-count and submission-interval limits, the set
    of submitted files (optionally unpacking an archive), size limits
    and the programming language; then stores the files and creates the
    Submission in the database, notifying the evaluation service.

    task_name (str): name of the task, from the URL.

    """
    participation = self.current_user

    try:
        task = self.contest.get_task(task_name)
    except KeyError:
        raise tornado.web.HTTPError(404)

    self.fallback_page = ["tasks", task.name, "submissions"]

    # Alias for easy access
    contest = self.contest

    # Enforce maximum number of submissions
    try:
        if contest.max_submission_number is not None:
            submission_c = self.sql_session\
                .query(func.count(Submission.id))\
                .join(Submission.task)\
                .filter(Task.contest == contest)\
                .filter(Submission.participation == participation)\
                .scalar()
            if submission_c >= contest.max_submission_number and \
                    not self.current_user.unrestricted:
                raise ValueError(
                    self._("You have reached the maximum limit of "
                           "at most %d submissions among all tasks.") %
                    contest.max_submission_number)

        if task.max_submission_number is not None:
            submission_t = self.sql_session\
                .query(func.count(Submission.id))\
                .filter(Submission.task == task)\
                .filter(Submission.participation == participation)\
                .scalar()
            if submission_t >= task.max_submission_number and \
                    not self.current_user.unrestricted:
                raise ValueError(
                    self._("You have reached the maximum limit of "
                           "at most %d submissions on this task.") %
                    task.max_submission_number)
    except ValueError as error:
        self._send_error(self._("Too many submissions!"), str(error))
        return

    # Enforce minimum time between submissions
    try:
        if contest.min_submission_interval is not None:
            last_submission_c = self.sql_session.query(Submission)\
                .join(Submission.task)\
                .filter(Task.contest == contest)\
                .filter(Submission.participation == participation)\
                .order_by(Submission.timestamp.desc())\
                .first()
            if last_submission_c is not None and \
                    self.timestamp - last_submission_c.timestamp < \
                    contest.min_submission_interval and \
                    not self.current_user.unrestricted:
                raise ValueError(
                    self._("Among all tasks, you can submit again "
                           "after %d seconds from last submission.") %
                    contest.min_submission_interval.total_seconds())

        # We get the last submission even if we may not need it
        # for min_submission_interval because we may need it later,
        # in case this is a ALLOW_PARTIAL_SUBMISSION task.
        last_submission_t = self.sql_session.query(Submission)\
            .filter(Submission.task == task)\
            .filter(Submission.participation == participation)\
            .order_by(Submission.timestamp.desc())\
            .first()
        if task.min_submission_interval is not None:
            if last_submission_t is not None and \
                    self.timestamp - last_submission_t.timestamp < \
                    task.min_submission_interval and \
                    not self.current_user.unrestricted:
                raise ValueError(
                    self._("For this task, you can submit again "
                           "after %d seconds from last submission.") %
                    task.min_submission_interval.total_seconds())
    except ValueError as error:
        self._send_error(self._("Submissions too frequent!"), str(error))
        return

    # Required files from the user.
    required = set(sfe.filename for sfe in task.submission_format)

    # Ensure that the user did not submit multiple files with the
    # same name.
    if any(len(filename) != 1
           for filename in itervalues(self.request.files)):
        self._send_error(self._("Invalid submission format!"),
                         self._("Please select the correct files."))
        return

    # If the user submitted an archive, extract it and use content
    # as request.files. But only valid for "output only" (i.e.,
    # not for submissions requiring a programming language
    # identification).
    if len(self.request.files) == 1 and \
            next(iterkeys(self.request.files)) == "submission":
        if any(filename.endswith(".%l") for filename in required):
            self._send_error(self._("Invalid submission format!"),
                             self._("Please select the correct files."),
                             task)
            return
        archive_data = self.request.files["submission"][0]
        del self.request.files["submission"]

        # Create the archive.
        archive = Archive.from_raw_data(archive_data["body"])

        if archive is None:
            self._send_error(
                self._("Invalid archive format!"),
                self._("The submitted archive could not be opened."))
            return

        # Extract the archive.
        unpacked_dir = archive.unpack()
        for name in archive.namelist():
            filename = os.path.basename(name)
            # Read in binary mode (archive members need not be text)
            # and close the handle deterministically via "with".
            with open(os.path.join(unpacked_dir, filename), "rb") as member:
                body = member.read()
            self.request.files[filename] = [{
                'filename': filename,
                'body': body
            }]

        archive.cleanup()

    # This ensure that the user sent one file for every name in
    # submission format and no more. Less is acceptable if task
    # type says so.
    task_type = get_task_type(dataset=task.active_dataset)
    provided = set(iterkeys(self.request.files))
    if not (required == provided or
            (task_type.ALLOW_PARTIAL_SUBMISSION and
             required.issuperset(provided))):
        self._send_error(self._("Invalid submission format!"),
                         self._("Please select the correct files."))
        return

    # Add submitted files. After this, files is a dictionary indexed
    # by *our* filenames (something like "output01.txt" or
    # "taskname.%l", and whose value is a couple
    # (user_assigned_filename, content).
    files = {}
    for uploaded, data in iteritems(self.request.files):
        files[uploaded] = (data[0]["filename"], data[0]["body"])

    # Read the submission language provided in the request; we
    # integrate it with the language fetched from the previous
    # submission (if we use it) and later make sure it is
    # recognized and allowed.
    submission_lang = self.get_argument("language", None)
    need_lang = any(our_filename.find(".%l") != -1
                    for our_filename in files)

    # If we allow partial submissions, we implicitly recover the
    # non-submitted files from the previous submission (if it has
    # the same programming language of the current one), and put
    # them in file_digests (since they are already in FS).
    file_digests = {}
    if task_type.ALLOW_PARTIAL_SUBMISSION and \
            last_submission_t is not None and \
            (submission_lang is None or
             submission_lang == last_submission_t.language):
        submission_lang = last_submission_t.language
        for filename in required.difference(provided):
            if filename in last_submission_t.files:
                file_digests[filename] = \
                    last_submission_t.files[filename].digest

    # Throw an error if task needs a language, but we don't have
    # it or it is not allowed / recognized.
    if need_lang:
        error = None
        if submission_lang is None:
            error = self._("Cannot recognize the submission language.")
        elif submission_lang not in contest.languages:
            error = self._("Language %s not allowed in this contest.") \
                % submission_lang
        if error is not None:
            self._send_error(self._("Invalid submission!"), error)
            return

    # Check if submitted files are small enough.
    if any(len(f[1]) > config.max_submission_length
           for f in itervalues(files)):
        self._send_error(
            self._("Submission too big!"),
            self._("Each source file must be at most %d bytes long.") %
            config.max_submission_length)
        return

    # All checks done, submission accepted.

    # Attempt to store the submission locally to be able to
    # recover a failure.
    if config.submit_local_copy:
        try:
            path = os.path.join(
                config.submit_local_copy_path.replace("%s",
                                                      config.data_dir),
                participation.user.username)
            if not os.path.exists(path):
                os.makedirs(path)
            # Pickle in ASCII format produces str, not unicode,
            # therefore we open the file in binary mode.
            with io.open(
                    os.path.join(path,
                                 "%d" % make_timestamp(self.timestamp)),
                    "wb") as file_:
                pickle.dump((self.contest.id,
                             participation.user.id,
                             task.id,
                             files), file_)
        except Exception:
            # Best-effort: a failed local copy must not block the
            # submission itself.
            logger.warning("Submission local copy failed.", exc_info=True)

    # We now have to send all the files to the destination...
    try:
        for filename in files:
            digest = self.service.file_cacher.put_file_content(
                files[filename][1],
                "Submission file %s sent by %s at %d." % (
                    filename, participation.user.username,
                    make_timestamp(self.timestamp)))
            file_digests[filename] = digest

    # In case of error, the server aborts the submission
    except Exception as error:
        logger.error("Storage failed! %s", error)
        self._send_error(self._("Submission storage failed!"),
                         self._("Please try again."))
        return

    # All the files are stored, ready to submit!
    logger.info("All files stored for submission sent by %s",
                participation.user.username)

    # Only set the official bit when the user can compete and we are not in
    # analysis mode.
    official = self.r_params["actual_phase"] == 0

    submission = Submission(self.timestamp,
                            submission_lang,
                            task=task,
                            participation=participation,
                            official=official)

    for filename, digest in iteritems(file_digests):
        self.sql_session.add(File(filename,
                                  digest,
                                  submission=submission))
    self.sql_session.add(submission)
    self.sql_session.commit()

    self.service.evaluation_service.new_submission(
        submission_id=submission.id)

    self.service.add_notification(
        participation.user.username,
        self.timestamp,
        self._("Submission received"),
        self._("Your submission has been received "
               "and is currently being evaluated."),
        NOTIFICATION_SUCCESS)

    # The argument (encripted submission id) is not used by CWS
    # (nor it discloses information to the user), but it is useful
    # for automatic testing to obtain the submission id).
    self.redirect(self.contest_url(*self.fallback_page,
                                   submission_id=encrypt_number(
                                       submission.id, config.secret_key)))