def compile(self, job, file_cacher):
    """See TaskType.compile."""
    language = get_language(job.language)
    source_ext = language.source_extension

    if not check_files_number(job, 1, or_more=True):
        return

    # Prepare the files to copy in the sandbox and to add to the
    # compilation command.
    files_to_get = {}
    source_filenames = []

    # The stub, that must have been provided (copy and add to compilation).
    stub_filename = self.STUB_BASENAME + source_ext
    if not check_manager_present(job, stub_filename):
        return
    source_filenames.append(stub_filename)
    files_to_get[stub_filename] = job.managers[stub_filename].digest

    # User's submitted file(s) (copy and add to compilation).
    for codename, file_ in iteritems(job.files):
        source_filename = codename.replace(".%l", source_ext)
        source_filenames.append(source_filename)
        files_to_get[source_filename] = file_.digest

    # Any other useful manager (just copy).
    for filename, manager in iteritems(job.managers):
        if is_manager_for_compilation(filename, language):
            files_to_get[filename] = manager.digest

    # Prepare the compilation command.
    executable_filename = self._executable_filename(iterkeys(job.files))
    commands = language.get_compilation_commands(
        source_filenames, executable_filename)

    # Create the sandbox.
    sandbox = create_sandbox(file_cacher, name="compile")
    job.sandboxes.append(sandbox.get_root_path())

    # Copy all required files in the sandbox.
    for filename, digest in iteritems(files_to_get):
        sandbox.create_file_from_storage(filename, digest)

    # Run the compilation.
    box_success, compilation_success, text, stats = \
        compilation_step(sandbox, commands)

    # Retrieve the compiled executables.
    job.success = box_success
    job.compilation_success = compilation_success
    job.text = text
    job.plus = stats
    if box_success and compilation_success:
        digest = sandbox.get_file_to_storage(
            executable_filename,
            "Executable %s for %s" % (executable_filename, job.info))
        job.executables[executable_filename] = \
            Executable(executable_filename, digest)

    # Cleanup.
    delete_sandbox(sandbox, job.success)
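
# Illustrative sketch (not from the original source): how the ".%l"
# placeholder in submission codenames is expanded with the language's
# source extension, as done by compile() above. Values are made up.
def _demo_expand_placeholder():
    source_ext = ".cpp"  # e.g. language.source_extension for C++
    codename = "solution.%l"
    assert codename.replace(".%l", source_ext) == "solution.cpp"
    # Stripping the placeholder yields the executable name.
    assert codename.replace(".%l", "") == "solution"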
def import_from_dict(cls, data):
    """Create a Job from the output of export_to_dict."""
    data['files'] = dict(
        (k, File(k, v)) for k, v in data['files'].iteritems())
    data['managers'] = dict(
        (k, Manager(k, v)) for k, v in data['managers'].iteritems())
    data['executables'] = dict(
        (k, Executable(k, v)) for k, v in data['executables'].iteritems())
    return cls(**data)
def import_from_dict(cls, data):
    """Create a Job from the output of export_to_dict."""
    if data['operation'] is not None:
        data['operation'] = ESOperation.from_dict(data['operation'])
    data['files'] = dict(
        (k, File(k, v)) for k, v in data['files'].items())
    data['managers'] = dict(
        (k, Manager(k, v)) for k, v in data['managers'].items())
    data['executables'] = dict(
        (k, Executable(k, v)) for k, v in data['executables'].items())
    return cls(**data)
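
# Illustrative sketch (not from the original source): the
# {filename: digest} -> {filename: File(filename, digest)} conversion
# that import_from_dict() performs, shown standalone with a stand-in
# namedtuple instead of cms.db.File.
from collections import namedtuple

def _demo_files_conversion():
    File = namedtuple("File", ["filename", "digest"])
    raw = {"foo.%l": "abc123", "bar.%l": "def456"}
    files = dict((k, File(k, v)) for k, v in raw.items())
    assert files["foo.%l"].digest == "abc123"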
def compile(self, job, file_cacher):
    """See TaskType.compile."""
    language = get_language(job.language)
    source_ext = language.source_extension

    if not check_files_number(job, 1):
        return

    user_file_format = next(iterkeys(job.files))
    user_source_filename = user_file_format.replace(".%l", source_ext)
    executable_filename = user_file_format.replace(".%l", "")

    # Create the list of filenames to be passed to the compiler. If we use
    # a grader, it needs to be in first position in the command line, and
    # we check that it exists.
    source_filenames = [user_source_filename]
    if self._uses_grader():
        grader_source_filename = Batch.GRADER_BASENAME + source_ext
        if not check_manager_present(job, grader_source_filename):
            return
        source_filenames.insert(0, grader_source_filename)

    # Prepare the compilation command.
    commands = language.get_compilation_commands(source_filenames,
                                                 executable_filename)

    # Create the sandbox.
    sandbox = create_sandbox(file_cacher, name="compile")
    job.sandboxes.append(sandbox.path)

    # Copy required files in the sandbox (includes the grader if present).
    sandbox.create_file_from_storage(user_source_filename,
                                     job.files[user_file_format].digest)
    for filename, manager in iteritems(job.managers):
        if is_manager_for_compilation(filename, language):
            sandbox.create_file_from_storage(filename, manager.digest)

    # Run the compilation.
    box_success, compilation_success, text, stats = \
        compilation_step(sandbox, commands)

    # Retrieve the compiled executables.
    job.success = box_success
    job.compilation_success = compilation_success
    job.text = text
    job.plus = stats
    if box_success and compilation_success:
        digest = sandbox.get_file_to_storage(
            executable_filename,
            "Executable %s for %s" % (executable_filename, job.info))
        job.executables[executable_filename] = \
            Executable(executable_filename, digest)

    # Cleanup.
    delete_sandbox(sandbox, job.success)
def add_executable(self, submission_result=None, **kwargs):
    """Create an executable and add it to the session."""
    submission_result = submission_result \
        if submission_result is not None \
        else self.add_submission_result()
    args = {
        "submission_result": submission_result,
        "digest": unique_digest(),
        "filename": unique_unicode_id(),
    }
    args.update(kwargs)
    executable = Executable(**args)
    self.session.add(executable)
    return executable
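
# Illustrative sketch (not from the original source): the
# defaults-then-override pattern used by add_executable() above.
# Explicit keyword arguments win over the generated defaults.
def _demo_default_override(**kwargs):
    args = {"digest": "default-digest", "filename": "default-name"}
    args.update(kwargs)  # caller-supplied values replace the defaults
    return args

assert _demo_default_override(filename="custom")["filename"] == "custom"
assert _demo_default_override()["digest"] == "default-digest"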
def compile(self, job, file_cacher):
    """See TaskType.compile."""
    # Detect the submission's language. The checks about the
    # formal correctness of the submission are done in CWS,
    # before accepting it.
    language = get_language(job.language)
    source_ext = language.source_extension

    # Create the sandbox.
    sandbox = create_sandbox(file_cacher, job.multithreaded_sandbox)
    job.sandboxes.append(sandbox.path)

    # Prepare the source files in the sandbox.
    files_to_get = {}
    source_filenames = []

    # Stub.
    stub_filename = "stub%s" % source_ext
    source_filenames.append(stub_filename)
    files_to_get[stub_filename] = job.managers[stub_filename].digest

    # User's submission.
    for filename, fileinfo in job.files.iteritems():
        source_filename = filename.replace(".%l", source_ext)
        source_filenames.append(source_filename)
        files_to_get[source_filename] = fileinfo.digest

    # Also copy all managers that might be useful during compilation.
    for filename in job.managers.iterkeys():
        if any(filename.endswith(header) for header in HEADER_EXTS):
            files_to_get[filename] = job.managers[filename].digest
        elif any(filename.endswith(source) for source in SOURCE_EXTS):
            files_to_get[filename] = job.managers[filename].digest
        elif any(filename.endswith(obj) for obj in OBJECT_EXTS):
            files_to_get[filename] = job.managers[filename].digest

    for filename, digest in files_to_get.iteritems():
        sandbox.create_file_from_storage(filename, digest)

    # Prepare the compilation command.
    executable_filename = "_".join(pattern.replace(".%l", "")
                                   for pattern in job.files.keys())
    commands = language.get_compilation_commands(source_filenames,
                                                 executable_filename)

    # Run the compilation.
    operation_success, compilation_success, text, plus = \
        compilation_step(sandbox, commands)

    # Retrieve the compiled executables.
    job.success = operation_success
    job.compilation_success = compilation_success
    job.plus = plus
    job.text = text
    if operation_success and compilation_success:
        digest = sandbox.get_file_to_storage(
            executable_filename,
            "Executable %s for %s" % (executable_filename, job.info))
        job.executables[executable_filename] = \
            Executable(executable_filename, digest)

    # Cleanup.
    delete_sandbox(sandbox, job.success)
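
# Illustrative sketch (not from the original source): how the snippet
# above derives a single executable name from multiple submission
# codenames by stripping ".%l" and joining with "_". Values are made up.
def _demo_executable_name():
    codenames = ["encoder.%l", "decoder.%l"]
    name = "_".join(p.replace(".%l", "") for p in codenames)
    assert name == "encoder_decoder"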
def compile(self, job, file_cacher):
    """See TaskType.compile."""
    language = get_language(job.language)
    source_ext = language.source_extension
    header_ext = language.header_extension

    if not check_files_number(job, 2):
        return

    files_to_get = {}
    source_filenames = []

    # Manager.
    manager_filename = "manager%s" % source_ext
    if not check_manager_present(job, manager_filename):
        return
    source_filenames.append(manager_filename)
    files_to_get[manager_filename] = \
        job.managers[manager_filename].digest

    # Manager's header.
    if header_ext is not None:
        manager_filename = "manager%s" % header_ext
        if not check_manager_present(job, manager_filename):
            return
        source_filenames.append(manager_filename)
        files_to_get[manager_filename] = \
            job.managers[manager_filename].digest

    # User's submissions and headers.
    for filename, file_ in job.files.items():
        source_filename = filename.replace(".%l", source_ext)
        source_filenames.append(source_filename)
        files_to_get[source_filename] = file_.digest
        # Headers (fixing compile error again here).
        if header_ext is not None:
            header_filename = filename.replace(".%l", header_ext)
            if not check_manager_present(job, header_filename):
                return
            source_filenames.append(header_filename)
            files_to_get[header_filename] = \
                job.managers[header_filename].digest

    # Get compilation command.
    executable_filename = "manager"
    commands = language.get_compilation_commands(source_filenames,
                                                 executable_filename)

    # Create the sandbox and put the required files in it.
    sandbox = create_sandbox(file_cacher, name="compile")
    job.sandboxes.append(sandbox.get_root_path())
    for filename, digest in files_to_get.items():
        sandbox.create_file_from_storage(filename, digest)

    # Run the compilation.
    box_success, compilation_success, text, stats = \
        compilation_step(sandbox, commands)

    # Retrieve the compiled executables.
    job.success = box_success
    job.compilation_success = compilation_success
    job.text = text
    job.plus = stats
    if box_success and compilation_success:
        digest = sandbox.get_file_to_storage(
            executable_filename,
            "Executable %s for %s" % (executable_filename, job.info))
        job.executables[executable_filename] = \
            Executable(executable_filename, digest)

    # Cleanup.
    delete_sandbox(sandbox, job.success, job.keep_sandbox)
import unittest

from mock import MagicMock, call, ANY

from cms.db import File, Manager, Executable
from cms.grading.Job import CompilationJob, EvaluationJob
from cms.grading.tasktypes.Batch import Batch
from cmstestsuite.unit_tests.grading.tasktypes.tasktypetestutils import \
    COMPILATION_COMMAND_1, COMPILATION_COMMAND_2, EVALUATION_COMMAND_1, \
    LANG_1, LANG_2, OUTCOME, STATS_OK, STATS_RE, TEXT, \
    TaskTypeTestMixin, fake_compilation_commands, fake_evaluation_commands

FILE_FOO_L1 = File(digest="digest of foo.l1", filename="foo.%l")
FILE_BAR_L1 = File(digest="digest of bar.l1", filename="bar.%l")
GRADER_L1 = Manager(digest="digest of grader.l1", filename="grader.l1")
GRADER_L2 = Manager(digest="digest of grader.l2", filename="grader.l2")
HEADER_L1 = Manager(digest="digest of grader.hl1", filename="graderl.hl1")
EXE_FOO = Executable(digest="digest of foo", filename="foo")


class TestGetCompilationCommands(TaskTypeTestMixin, unittest.TestCase):
    """Tests for get_compilation_commands()."""

    def setUp(self):
        super(TestGetCompilationCommands, self).setUp()
        self.setUpMocks("Batch")
        self.languages.update({LANG_1, LANG_2})

    def test_alone(self):
        tt = Batch(["alone", ["", ""], "diff"])
        cc = tt.get_compilation_commands(["foo.%l"])
        self.assertEqual(cc, {
            "L1": fake_compilation_commands(
def compile(self, job, file_cacher):
    """See TaskType.compile."""
    # Detect the submission's language. The checks about the
    # formal correctness of the submission are done in CWS,
    # before accepting it.
    language = job.language
    source_ext = LANGUAGE_TO_SOURCE_EXT_MAP[language]

    # TODO: here we are sure that submission.files are the same as
    # task.submission_format. The following check shouldn't be
    # here, but in the definition of the task, since this actually
    # checks that task's task type and submission format agree.
    if len(job.files) != 1:
        job.success = True
        job.compilation_success = False
        job.text = [N_("Invalid files in submission")]
        logger.error("Submission contains %d files, expecting 1",
                     len(job.files), extra={"operation": job.info})
        return True

    # Create the sandbox.
    sandbox = create_sandbox(file_cacher)
    job.sandboxes.append(sandbox.path)

    # Prepare the source files in the sandbox.
    files_to_get = {}
    format_filename = job.files.keys()[0]
    source_filenames = []
    # Stub.
    source_filenames.append("stub%s" % source_ext)
    files_to_get[source_filenames[-1]] = \
        job.managers["stub%s" % source_ext].digest
    # User's submission.
    source_filenames.append(format_filename.replace(".%l", source_ext))
    files_to_get[source_filenames[-1]] = \
        job.files[format_filename].digest

    # Also copy all managers that might be useful during compilation.
    for filename in job.managers.iterkeys():
        if any(filename.endswith(header)
               for header in LANGUAGE_TO_HEADER_EXT_MAP.itervalues()):
            files_to_get[filename] = job.managers[filename].digest
        elif any(filename.endswith(source)
                 for source in LANGUAGE_TO_SOURCE_EXT_MAP.itervalues()):
            files_to_get[filename] = job.managers[filename].digest
        elif any(filename.endswith(obj)
                 for obj in LANGUAGE_TO_OBJ_EXT_MAP.itervalues()):
            files_to_get[filename] = job.managers[filename].digest

    for filename, digest in files_to_get.iteritems():
        sandbox.create_file_from_storage(filename, digest)

    # Prepare the compilation command.
    executable_filename = format_filename.replace(".%l", "")
    commands = get_compilation_commands(language,
                                        source_filenames,
                                        executable_filename)

    # Run the compilation.
    operation_success, compilation_success, text, plus = \
        compilation_step(sandbox, commands)

    # Retrieve the compiled executables.
    job.success = operation_success
    job.compilation_success = compilation_success
    job.plus = plus
    job.text = text
    if operation_success and compilation_success:
        digest = sandbox.get_file_to_storage(
            executable_filename,
            "Executable %s for %s" % (executable_filename, job.info))
        job.executables[executable_filename] = \
            Executable(executable_filename, digest)

    # Cleanup.
    delete_sandbox(sandbox)
def test_testcases(base_dir, soluzione, language, assume=None):
    global task, file_cacher

    # Use a FileCacher with a NullBackend in order to avoid filling
    # the database with junk.
    if file_cacher is None:
        file_cacher = FileCacher(null=True)

    # Load the task.
    # TODO - This implies copying a lot of data to the FileCacher,
    # which is annoying if you have to do it continuously; it would be
    # better to use a persistent cache (although local, possibly
    # filesystem-based instead of database-based) and somehow detect
    # when the task has already been loaded.
    if task is None:
        loader = YamlLoader(
            os.path.realpath(os.path.join(base_dir, "..")),
            file_cacher)
        # Normally we should import the contest before, but YamlLoader
        # accepts get_task() even without previous get_contest() calls.
        task = loader.get_task(os.path.split(os.path.realpath(base_dir))[1])

    # Prepare the EvaluationJob.
    dataset = task.active_dataset
    digest = file_cacher.put_file_from_path(
        os.path.join(base_dir, soluzione),
        "Solution %s for task %s" % (soluzione, task.name))
    executables = {task.name: Executable(filename=task.name, digest=digest)}
    jobs = [(t, EvaluationJob(
        language=language,
        task_type=dataset.task_type,
        task_type_parameters=json.loads(dataset.task_type_parameters),
        managers=dict(dataset.managers),
        executables=executables,
        input=dataset.testcases[t].input,
        output=dataset.testcases[t].output,
        time_limit=dataset.time_limit,
        memory_limit=dataset.memory_limit)) for t in dataset.testcases]
    tasktype = get_task_type(dataset=dataset)

    ask_again = True
    last_status = "ok"
    status = "ok"
    stop = False
    info = []
    points = []
    comments = []
    tcnames = []
    for jobinfo in sorted(jobs):
        print(jobinfo[0], end='')
        sys.stdout.flush()
        job = jobinfo[1]
        # Skip the testcase if we decide to consider everything to
        # timeout.
        if stop:
            info.append("Time limit exceeded")
            points.append(0.0)
            comments.append("Timeout.")
            continue

        # Evaluate testcase.
        last_status = status
        tasktype.evaluate(job, file_cacher)
        status = job.plus["exit_status"]
        info.append("Time: %5.3f Wall: %5.3f Memory: %s" %
                    (job.plus["execution_time"],
                     job.plus["execution_wall_clock_time"],
                     mem_human(job.plus["execution_memory"])))
        points.append(float(job.outcome))
        comments.append(format_status_text(job.text))
        tcnames.append(jobinfo[0])

        # If we saw two consecutive timeouts, ask whether we want to
        # consider everything to timeout.
        if ask_again and status == "timeout" and last_status == "timeout":
            print()
            print("Want to stop and consider everything to timeout? [y/N]",
                  end='')
            if assume is not None:
                print(assume)
                tmp = assume
            else:
                tmp = raw_input().lower()
            if tmp in ['y', 'yes']:
                stop = True
            else:
                ask_again = False

    # Result pretty printing.
    print()
    clen = max(len(c) for c in comments)
    ilen = max(len(i) for i in info)
    for (i, p, c, b) in zip(tcnames, points, comments, info):
        print("%s) %5.2lf --- %s [%s]" % (i, p, c.ljust(clen),
                                          b.center(ilen)))

    return zip(points, comments, info)
def compile(self, job, file_cacher):
    """See TaskType.compile."""
    language = get_language(job.language)
    source_ext = language.source_extension

    if not check_files_number(job, 1, or_more=True):
        return

    # Create the list of filenames to be passed to the compiler. If we use
    # a grader, it needs to be in first position in the command line, and
    # we check that it exists.
    filenames_to_compile = []
    filenames_and_digests_to_get = {}

    # The grader, that must have been provided (copy and add to
    # compilation).
    if self._uses_grader():
        grader_filename = self.GRADER_BASENAME + source_ext
        if not check_manager_present(job, grader_filename):
            return
        filenames_to_compile.append(grader_filename)
        filenames_and_digests_to_get[grader_filename] = \
            job.managers[grader_filename].digest

    # User's submitted file(s) (copy and add to compilation).
    for codename, file_ in job.files.items():
        filename = codename.replace(".%l", source_ext)
        filenames_to_compile.append(filename)
        filenames_and_digests_to_get[filename] = file_.digest

    # Any other useful manager (just copy).
    for filename, manager in job.managers.items():
        if is_manager_for_compilation(filename, language):
            filenames_and_digests_to_get[filename] = manager.digest

    # Prepare the compilation command.
    executable_filename = self._executable_filename(job.files.keys())
    commands = language.get_compilation_commands(filenames_to_compile,
                                                 executable_filename)

    # Create the sandbox.
    sandbox = create_sandbox(file_cacher, name="compile")
    job.sandboxes.append(sandbox.get_root_path())

    # Copy required files in the sandbox (includes the grader if present).
    for filename, digest in filenames_and_digests_to_get.items():
        sandbox.create_file_from_storage(filename, digest)

    # Run the compilation.
    box_success, compilation_success, text, stats = \
        compilation_step(sandbox, commands)

    # Retrieve the compiled executables.
    job.success = box_success
    job.compilation_success = compilation_success
    job.text = text
    job.plus = stats
    if box_success and compilation_success:
        digest = sandbox.get_file_to_storage(
            executable_filename,
            "Executable %s for %s" % (executable_filename, job.info))
        job.executables[executable_filename] = \
            Executable(executable_filename, digest)

    # Cleanup.
    delete_sandbox(sandbox, job.success, job.keep_sandbox)
def test_testcases(base_dir, solution, language, assume=None):
    global task, file_cacher

    # Use a FileCacher with a NullBackend in order to avoid filling
    # the database with junk.
    if file_cacher is None:
        file_cacher = FileCacher(null=True)

    cmscontrib.loaders.italy_yaml.logger = NullLogger()
    # Load the task.
    # TODO - This implies copying a lot of data to the FileCacher,
    # which is annoying if you have to do it continuously; it would be
    # better to use a persistent cache (although local, possibly
    # filesystem-based instead of database-based) and somehow detect
    # when the task has already been loaded.
    if task is None:
        loader = cmscontrib.loaders.italy_yaml.YamlLoader(base_dir,
                                                          file_cacher)
        task = loader.get_task(get_statement=False)

    # Prepare the EvaluationJob.
    dataset = task.active_dataset
    digest = file_cacher.put_file_from_path(
        os.path.join(base_dir, solution),
        "Solution %s for task %s" % (solution, task.name))
    executables = {task.name: Executable(filename=task.name, digest=digest)}
    jobs = [
        (t, EvaluationJob(
            operation=ESOperation(ESOperation.EVALUATION,
                                  None,
                                  dataset.id,
                                  dataset.testcases[t].codename).to_dict(),
            language=language,
            task_type=dataset.task_type,
            task_type_parameters=json.loads(dataset.task_type_parameters),
            managers=dict(dataset.managers),
            executables=executables,
            input=dataset.testcases[t].input,
            output=dataset.testcases[t].output,
            time_limit=dataset.time_limit,
            memory_limit=dataset.memory_limit))
        for t in dataset.testcases
    ]
    tasktype = get_task_type(dataset=dataset)

    ask_again = True
    last_status = "ok"
    status = "ok"
    stop = False
    info = []
    points = []
    comments = []
    tcnames = []
    for jobinfo in sorted(jobs):
        print(jobinfo[0])
        sys.stdout.flush()
        job = jobinfo[1]
        # Skip the testcase if we decide to consider everything to
        # timeout.
        if stop:
            info.append("Time limit exceeded")
            points.append(0.0)
            comments.append("Timeout.")
            move_cursor(directions.UP, erase=True)
            continue

        # Evaluate testcase.
        last_status = status
        tasktype.evaluate(job, file_cacher)
        status = job.plus.get("exit_status")
        info.append((job.plus.get("execution_time"),
                     job.plus.get("execution_memory")))
        points.append(float(job.outcome))
        # Avoid printing an unneeded newline.
        job.text = [t.rstrip() for t in job.text]
        comments.append(format_status_text(job.text))
        tcnames.append(jobinfo[0])

        # If we saw two consecutive timeouts, ask whether we want to
        # consider everything to timeout.
        if ask_again and status == "timeout" and last_status == "timeout":
            print("Want to stop and consider everything to timeout? [y/N] ",
                  end='')
            sys.stdout.flush()
            if assume is not None:
                tmp = assume
                print(tmp)
            else:
                # User input with a timeout of 5 seconds, at the end of
                # which we automatically say "n". ready will be a list of
                # input ready for reading, or an empty list if the timeout
                # expired. See: http://stackoverflow.com/a/2904057
                ready, _, _ = select.select([sys.stdin], [], [], 5)
                if ready:
                    tmp = sys.stdin.readline().strip().lower()
                else:
                    tmp = 'n'
                    print(tmp)
            if tmp in ['y', 'yes']:
                stop = True
            else:
                ask_again = False

        print()
        move_cursor(directions.UP, erase=True)

    # Subtasks scoring.
    subtasks = json.loads(dataset.score_type_parameters)
    if not isinstance(subtasks, list) or len(subtasks) == 0:
        subtasks = [[100, len(info)]]

    if dataset.score_type == 'GroupMin':
        scoreFun = min
    else:
        if dataset.score_type != 'Sum':
            logger.warning("Score type %s not yet supported! Using Sum"
                           % dataset.score_type)

        def scoreFun(x):
            return sum(x) / len(x)

    pos = 0
    sts = []

    # For each subtask, generate the list of testcases it owns, the score
    # gained and the highest time and memory usage.
    for i in subtasks:
        stscores = []
        stsdata = []
        worst = [0, 0]
        try:
            for _ in xrange(i[1]):
                stscores.append(points[pos])
                stsdata.append((tcnames[pos], points[pos],
                                comments[pos], info[pos]))
                if info[pos][0] > worst[0]:
                    worst[0] = info[pos][0]
                if info[pos][1] > worst[1]:
                    worst[1] = info[pos][1]
                pos += 1
            sts.append((scoreFun(stscores) * i[0], i[0], stsdata, worst))
        except:
            sts.append((0, i[0], stsdata, [0, 0]))

    # Result pretty printing.
    # Strip sol/ and _EVAL from the solution's name.
    solution = solution[4:-5]
    print()
    clen = max(len(c) for c in comments)
    for st, d in enumerate(sts):
        print(
            "Subtask %d:" % st,
            add_color_to_string(
                "%5.2f/%d" % (d[0], d[1]),
                colors.RED if abs(d[0] - d[1]) > 0.01 else colors.GREEN,
                bold=True))
        for (i, p, c, w) in d[2]:
            print(
                "%s)" % i,
                add_color_to_string(
                    "%5.2lf" % p,
                    colors.RED if abs(p - 1) > 0.01 else colors.BLACK),
                "--- %s [Time:" % c.ljust(clen),
                add_color_to_string(
                    ("%5.3f" % w[0]) if w[0] is not None else "N/A",
                    colors.BLUE if w[0] is not None
                    and w[0] >= 0.95 * d[3][0] else colors.BLACK),
                "Memory:",
                add_color_to_string(
                    "%5s" % mem_human(w[1]) if w[1] is not None else "N/A",
                    colors.BLUE if w[1] is not None
                    and w[1] >= 0.95 * d[3][1] else colors.BLACK),
                end="]")
            move_cursor(directions.RIGHT, 1000)
            move_cursor(directions.LEFT, len(solution) - 1)
            print(add_color_to_string(solution, colors.BLACK, bold=True))
    print()

    sols.append((solution, sum([st[0] for st in sts])))

    global tested_something
    if not tested_something:
        tested_something = True
        atexit.register(print_at_exit)

    return zip(points, comments, info)
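
# Illustrative sketch (not from the original source): the two subtask
# scoring modes handled above, standalone. GroupMin multiplies the
# subtask's max score by the minimum testcase outcome; the Sum fallback
# uses the average. Values are made up.
def _demo_subtask_scoring():
    max_score, outcomes = 100, [1.0, 0.5, 1.0]
    group_min = max_score * min(outcomes)                  # 50.0
    sum_avg = max_score * (sum(outcomes) / len(outcomes))  # ~83.33
    assert group_min == 50.0
    assert abs(sum_avg - 83.33) < 0.01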
def compile(self, job, file_cacher):
    """See TaskType.compile."""
    # Detect the submission's language. The checks about the
    # formal correctness of the submission are done in CWS,
    # before accepting it.
    language = job.language
    source_ext = LANGUAGE_TO_SOURCE_EXT_MAP[language]

    # TODO: here we are sure that submission.files are the same as
    # task.submission_format. The following check shouldn't be
    # here, but in the definition of the task, since this actually
    # checks that task's task type and submission format agree.
    if len(job.files) != 1:
        job.success = True
        job.compilation_success = False
        job.text = [N_("Invalid files in submission")]
        logger.error("Submission contains %d files, expecting 1" %
                     len(job.files), extra={"operation": job.info})
        return True

    # Create the sandbox.
    sandbox = create_sandbox(file_cacher)
    job.sandboxes.append(sandbox.path)

    # Prepare the source files in the sandbox.
    files_to_get = {}
    format_filename = job.files.keys()[0]
    source_filenames = []
    source_filenames.append(format_filename.replace(".%l", source_ext))
    files_to_get[source_filenames[0]] = \
        job.files[format_filename].digest

    # If a grader is specified, we add to the command line (and to
    # the files to get) the corresponding manager. The grader must
    # be the first file in source_filenames.
    if self.parameters[0] == "grader":
        source_filenames.insert(0, "grader%s" % source_ext)
        files_to_get["grader%s" % source_ext] = \
            job.managers["grader%s" % source_ext].digest

    # Also copy all *.h and *lib.pas graders.
    for filename in job.managers.iterkeys():
        if filename.endswith('.h') or \
                filename.endswith('lib.pas'):
            files_to_get[filename] = \
                job.managers[filename].digest

    for filename, digest in files_to_get.iteritems():
        sandbox.create_file_from_storage(filename, digest)

    # Prepare the compilation command.
    executable_filename = format_filename.replace(".%l", "")
    commands = get_compilation_commands(language,
                                        source_filenames,
                                        executable_filename)

    # Run the compilation.
    operation_success, compilation_success, text, plus = \
        compilation_step(sandbox, commands)

    # Retrieve the compiled executables.
    job.success = operation_success
    job.compilation_success = compilation_success
    job.plus = plus
    job.text = text
    if operation_success and compilation_success:
        digest = sandbox.get_file_to_storage(
            executable_filename,
            "Executable %s for %s" % (executable_filename, job.info))
        job.executables[executable_filename] = \
            Executable(executable_filename, digest)

    # Cleanup.
    delete_sandbox(sandbox)
def compile(self, job, file_cacher):
    """See TaskType.compile."""
    # Detect the submission's language. The checks about the
    # formal correctness of the submission are done in CWS,
    # before accepting it.
    language = get_language(job.language)
    source_ext = language.source_extension
    header_ext = language.header_extension

    # TODO: here we are sure that submission.files are the same as
    # task.submission_format. The following check shouldn't be
    # here, but in the definition of the task, since this actually
    # checks that task's task type and submission format agree.
    if len(job.files) != 2:
        job.success = True
        job.compilation_success = False
        job.text = [N_("Invalid files in submission")]
        logger.error("Submission contains %d files, expecting 2",
                     len(job.files), extra={"operation": job.info})
        return

    # First and only one compilation.
    sandbox = create_sandbox(file_cacher,
                             multithreaded=job.multithreaded_sandbox,
                             name="compile")
    job.sandboxes.append(sandbox.path)
    files_to_get = {}

    source_filenames = []

    # Manager.
    manager_filename = "manager%s" % source_ext
    source_filenames.append(manager_filename)
    files_to_get[manager_filename] = \
        job.managers[manager_filename].digest
    # Manager's header.
    if header_ext is not None:
        manager_filename = "manager%s" % header_ext
        source_filenames.append(manager_filename)
        files_to_get[manager_filename] = \
            job.managers[manager_filename].digest

    # User's submissions and headers.
    for filename, file_ in iteritems(job.files):
        source_filename = filename.replace(".%l", source_ext)
        source_filenames.append(source_filename)
        files_to_get[source_filename] = file_.digest
        # Headers (fixing compile error again here).
        if header_ext is not None:
            header_filename = filename.replace(".%l", header_ext)
            source_filenames.append(header_filename)
            files_to_get[header_filename] = \
                job.managers[header_filename].digest

    for filename, digest in iteritems(files_to_get):
        sandbox.create_file_from_storage(filename, digest)

    # Get compilation command and compile.
    executable_filename = "manager"
    commands = language.get_compilation_commands(source_filenames,
                                                 executable_filename)
    operation_success, compilation_success, text, plus = \
        compilation_step(sandbox, commands)

    # Retrieve the compiled executables.
    job.success = operation_success
    job.compilation_success = compilation_success
    job.plus = plus
    job.text = text
    if operation_success and compilation_success:
        digest = sandbox.get_file_to_storage(
            executable_filename,
            "Executable %s for %s" % (executable_filename, job.info))
        job.executables[executable_filename] = \
            Executable(executable_filename, digest)

    # Cleanup.
    delete_sandbox(sandbox, job.success)
def compile(self, job, file_cacher):
    """See TaskType.compile."""
    # Detect the submission's language. The checks about the
    # formal correctness of the submission are done in CWS,
    # before accepting it.
    language = get_language(job.language)
    source_ext = language.source_extension

    # TODO: here we are sure that submission.files are the same as
    # task.submission_format. The following check shouldn't be
    # here, but in the definition of the task, since this actually
    # checks that task's task type and submission format agree.
    if len(job.files) != 1:
        job.success = True
        job.compilation_success = False
        job.text = [N_("Invalid files in submission")]
        job.plus = {}
        logger.error("Submission contains %d files, expecting 1",
                     len(job.files), extra={"operation": job.info})
        return True

    # Create the sandbox.
    sandbox = create_sandbox(file_cacher, job.multithreaded_sandbox)
    job.sandboxes.append(sandbox.path)

    # Prepare the source files in the sandbox.
    files_to_get = {}
    format_filename = job.files.keys()[0]
    source_filenames = []
    source_filenames.append(format_filename.replace(".%l", source_ext))
    files_to_get[source_filenames[0]] = \
        job.files[format_filename].digest

    # If a grader is specified, we add to the command line (and to
    # the files to get) the corresponding manager. The grader must
    # be the first file in source_filenames.
    compile_command = []
    if self._uses_grader():
        files_to_get["grader%s" % source_ext] = \
            job.managers["grader%s" % source_ext].digest
        # For solutions using C or C++, we first compile the grader
        # source file and then delete it from the sandbox, to prevent
        # the user's solution files from including it.
        try:
            compile_command = language.get_compilation_no_link_command(
                ["grader%s" % source_ext])
            compile_command += [["/bin/rm", "grader%s" % source_ext]]
        except NotImplementedError:
            compile_command = []
            source_filenames.insert(0, "grader%s" % source_ext)
        else:
            source_filenames.insert(0,
                                    "grader%s" % language.object_extension)

    # Also copy all managers that might be useful during compilation.
    for filename in job.managers.iterkeys():
        if any(filename.endswith(header)
               for header in language.header_extensions):
            files_to_get[filename] = job.managers[filename].digest
        elif any(filename.endswith(source)
                 for source in language.source_extensions):
            files_to_get[filename] = job.managers[filename].digest
        elif any(filename.endswith(obj)
                 for obj in language.object_extensions):
            files_to_get[filename] = job.managers[filename].digest

    for filename, digest in files_to_get.iteritems():
        sandbox.create_file_from_storage(filename, digest)

    # Prepare the compilation command.
    executable_filename = format_filename.replace(".%l", "")
    commands = compile_command + language.get_compilation_commands(
        source_filenames, executable_filename)

    # Run the compilation.
    operation_success, compilation_success, text, plus = \
        compilation_step(sandbox, commands)

    # Retrieve the compiled executables.
    job.success = operation_success
    job.compilation_success = compilation_success
    job.plus = plus
    job.text = text
    if operation_success and compilation_success:
        digest = sandbox.get_file_to_storage(
            executable_filename,
            "Executable %s for %s" % (executable_filename, job.info))
        job.executables[executable_filename] = \
            Executable(executable_filename, digest)

    # Cleanup.
    delete_sandbox(sandbox, job.success)
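
# Illustrative sketch (not from the original source): the command-list
# concatenation used above when the grader is pre-compiled without
# linking. Each command is an argv list; the compiler invocations are
# made-up examples, only the "/bin/rm" step appears in the snippet.
def _demo_command_concatenation():
    compile_command = [
        ["/usr/bin/g++", "-c", "grader.cpp"],  # hypothetical no-link step
        ["/bin/rm", "grader.cpp"],             # hide grader from user code
    ]
    link_commands = [
        ["/usr/bin/g++", "grader.o", "sol.cpp", "-o", "sol"],
    ]
    commands = compile_command + link_commands
    assert len(commands) == 3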
def compile(self, job, file_cacher):
    """See TaskType.compile."""
    # Detect the submission's language. The checks about the
    # formal correctness of the submission are done in CWS,
    # before accepting it.
    language = get_language(job.language)
    source_ext = language.source_extension

    # TODO: here we are sure that submission.files are the same as
    # task.submission_format. The following check shouldn't be
    # here, but in the definition of the task, since this actually
    # checks that task's task type and submission format agree.
    if len(job.files) != 1:
        job.success = True
        job.compilation_success = False
        job.text = [N_("Invalid files in submission")]
        logger.error("Submission contains %d files, expecting 1",
                     len(job.files), extra={"operation": job.info})
        return

    # Create the sandbox.
    sandbox = create_sandbox(file_cacher,
                             multithreaded=job.multithreaded_sandbox,
                             name="compile")
    job.sandboxes.append(sandbox.path)

    user_file_format = next(iterkeys(job.files))
    user_source_filename = user_file_format.replace(".%l", source_ext)
    executable_filename = user_file_format.replace(".%l", "")

    # Copy required files in the sandbox (includes the grader if present).
    sandbox.create_file_from_storage(user_source_filename,
                                     job.files[user_file_format].digest)
    for filename in iterkeys(job.managers):
        if Batch._is_manager_for_compilation(filename):
            sandbox.create_file_from_storage(
                filename, job.managers[filename].digest)

    # Create the list of filenames to be passed to the compiler. If we use
    # a grader, it needs to be in first position in the command line.
    source_filenames = [user_source_filename]
    if self._uses_grader():
        grader_source_filename = Batch.GRADER_BASENAME + source_ext
        source_filenames.insert(0, grader_source_filename)

    # Prepare the compilation command.
    commands = language.get_compilation_commands(source_filenames,
                                                 executable_filename)

    # Run the compilation.
    operation_success, compilation_success, text, plus = \
        compilation_step(sandbox, commands)

    # Retrieve the compiled executables.
    job.success = operation_success
    job.compilation_success = compilation_success
    job.plus = plus
    job.text = text
    if operation_success and compilation_success:
        digest = sandbox.get_file_to_storage(
            executable_filename,
            "Executable %s for %s" % (executable_filename, job.info))
        job.executables[executable_filename] = \
            Executable(executable_filename, digest)

    # Cleanup.
    delete_sandbox(sandbox, job.success)
def test_testcases(base_dir, soluzione, language, assume=None):
    global task, file_cacher

    # Use a disabled FileCacher with a FSBackend in order to avoid
    # filling the database with junk and to save space.
    if file_cacher is None:
        file_cacher = FileCacher(path=os.path.join(config.cache_dir,
                                                   'cmsMake'),
                                 enabled=False)

    # Load the task.
    if task is None:
        loader = YamlLoader(os.path.realpath(os.path.join(base_dir, "..")),
                            file_cacher)
        # Normally we should import the contest before, but YamlLoader
        # accepts get_task() even without previous get_contest() calls.
        task = loader.get_task(os.path.split(os.path.realpath(base_dir))[1])

    # Prepare the EvaluationJob.
    dataset = task.active_dataset
    if dataset.task_type != "OutputOnly":
        digest = file_cacher.put_file_from_path(
            os.path.join(base_dir, soluzione),
            "Solution %s for task %s" % (soluzione, task.name))
        executables = {
            task.name: Executable(filename=task.name, digest=digest)
        }
        jobs = [(t, EvaluationJob(
            language=language,
            task_type=dataset.task_type,
            task_type_parameters=json.loads(dataset.task_type_parameters),
            managers=dict(dataset.managers),
            executables=executables,
            input=dataset.testcases[t].input,
            output=dataset.testcases[t].output,
            time_limit=dataset.time_limit,
            memory_limit=dataset.memory_limit)) for t in dataset.testcases]
        tasktype = get_task_type(dataset=dataset)
    else:
        print("Generating outputs...", end='')
        files = {}
        for t in sorted(dataset.testcases.keys()):
            with file_cacher.get_file(dataset.testcases[t].input) as fin:
                with TemporaryFile() as fout:
                    print(str(t), end='')
                    call(soluzione, stdin=fin, stdout=fout, cwd=base_dir)
                    fout.seek(0)
                    digest = file_cacher.put_file_from_fobj(fout)
            outname = "output_%s.txt" % t
            files[outname] = File(filename=outname, digest=digest)
        jobs = [(t, EvaluationJob(
            task_type=dataset.task_type,
            task_type_parameters=json.loads(dataset.task_type_parameters),
            managers=dict(dataset.managers),
            files=files,
            input=dataset.testcases[t].input,
            output=dataset.testcases[t].output,
            time_limit=dataset.time_limit,
            memory_limit=dataset.memory_limit)) for t in dataset.testcases]
        for k, job in jobs:
            job._key = k
        tasktype = get_task_type(dataset=dataset)
        print()

    ask_again = True
    last_status = "ok"
    status = "ok"
    stop = False
    info = []
    points = []
    comments = []
    tcnames = []
    for jobinfo in sorted(jobs):
        print(jobinfo[0], end='')
        sys.stdout.flush()
        job = jobinfo[1]
        # Skip the testcase if we decide to consider everything to
        # timeout.
        if stop:
            info.append("Time limit exceeded")
            points.append(0.0)
            comments.append("Timeout.")
            continue

        # Evaluate testcase.
        last_status = status
        tasktype.evaluate(job, file_cacher)
        if dataset.task_type != "OutputOnly":
            status = job.plus["exit_status"]
            info.append("Time: %5.3f Wall: %5.3f Memory: %s" %
                        (job.plus["execution_time"],
                         job.plus["execution_wall_clock_time"],
                         mem_human(job.plus["execution_memory"])))
        else:
            status = "ok"
            info.append("N/A")
        points.append(float(job.outcome))
        comments.append(format_status_text(job.text))
        tcnames.append(jobinfo[0])

        # If we saw two consecutive timeouts, ask whether we want to
        # consider everything to timeout.
        if ask_again and status == "timeout" and last_status == "timeout":
            print()
            print("Want to stop and consider everything to timeout? [y/N]",
                  end='')
            if assume is not None:
                print(assume)
                tmp = assume
            else:
                tmp = raw_input().lower()
            if tmp in ['y', 'yes']:
                stop = True
            else:
                ask_again = False

    # Result pretty printing.
    print()
    clen = max(len(c) for c in comments)
    ilen = max(len(i) for i in info)
    for (i, p, c, b) in zip(tcnames, points, comments, info):
        print("%s) %5.2lf --- %s [%s]" % (i, p, c.ljust(clen),
                                          b.center(ilen)))

    return zip(points, comments, info)