def language_from_submitted_files(files):
    """Return the language inferred from the submitted files.

    files ({str: str}): dictionary mapping the expected filename to
        a path in the file system.

    return (Language|None): the language inferred from the files.

    raise (ValueError): if different files point to different
        languages, or if it is impossible to extract the language
        from a file when it should be.

    """
    # TODO: deduplicate with the code in SubmitHandler.
    language = None
    for filename, path in files.items():
        this_language = filename_to_language(path)
        if this_language is None and ".%l" in filename:
            raise ValueError(
                "Cannot recognize language for file `%s'." % filename)

        if language is None:
            language = this_language
        elif this_language is not None and language != this_language:
            raise ValueError("Mixed-language submission detected.")
    return language
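# A minimal sketch of the helper above in use; the filenames and paths are
# hypothetical. A name containing ".%l" must map to a recognized language,
# and all recognized languages must agree, otherwise ValueError is raised.
def _example_language_inference():
    # Single recognizable source: returns the matching Language object.
    lang = language_from_submitted_files({"mytask.%l": "sol/mytask.cpp"})
    try:
        # Sources in two different languages: rejected.
        language_from_submitted_files({"mytask.%l": "sol/mytask.cpp",
                                       "grader.%l": "sol/grader.py"})
    except ValueError:
        pass  # "Mixed-language submission detected."
    return lang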
def _prepare(self):
    """Select a random solution and prepare it for submission.

    If task/ is the task directory, it may contain files (only when
    the submission format consists of a single file) and directories.
    If the chosen entry is a file, it is assumed to be the only
    element of the submission format, named after the file's basename
    without extension. If it is a directory, every file inside it is
    assumed to be an element of the submission format, again named
    after its basename without extension.

    """
    GenericRequest._prepare(self)

    # Select a random directory or file inside the task directory.
    task_path = os.path.join(self.submissions_path, self.task[1])
    sources = os.listdir(task_path)
    source = random.choice(sources)
    lang = filename_to_language(source)
    if lang is not None:
        self.data["language"] = lang.name
    self.source_path = os.path.join(task_path, source)

    # Compose the submission format.
    self.files = []
    if os.path.isdir(self.source_path):
        submission_formats = os.listdir(self.source_path)
        self.files = [("%s.%%l" % os.path.splitext(sf)[0],
                       os.path.join(self.source_path, sf))
                      for sf in submission_formats]
    else:
        submission_format = os.path.splitext(source)[0]
        self.files = [("%s.%%l" % submission_format, self.source_path)]
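# A sketch of the directory layout _prepare() expects; the task and file
# names are hypothetical. Either a lone source file or a directory of
# sources may sit under the task directory:
#
#   <submissions_path>/
#     mytask/
#       mytask.cpp            # single-file format: element "mytask.%l"
#       multi/
#         encoder.cpp         # multi-file format: "encoder.%l" ...
#         decoder.cpp         # ... and "decoder.%l"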
def __init__(self, browser, task, submission_format, filenames,
             language=None, base_url=None):
    GenericRequest.__init__(self, browser, base_url)
    self.url = "%s/tasks/%s/test" % (self.base_url, task[1])
    self.task = task
    self.submission_format = submission_format
    self.filenames = filenames
    self.data = {}

    # If not passed, try to recover the language from the filenames.
    if language is None:
        for filename in filenames:
            lang = filename_to_language(filename)
            if lang is not None:
                language = lang.name
                break

    # Only send the language in the request if not None.
    if language is not None:
        self.data = {"language": language}
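# A minimal sketch of the constructor above in use; the class name
# (TestRequest) and all values are hypothetical. An explicit `language`
# argument wins; otherwise the first filename that filename_to_language()
# recognizes determines the language sent with the request.
def _example_test_request(browser):
    req = TestRequest(browser, task=(1, "mytask"),
                      submission_format=["mytask.%l"],
                      filenames=["sol/mytask.cpp"])
    # req.data now holds {"language": <name of the C++ language plugin>}.
    return req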
def build_checker_list(base_dir, task_type):
    check_dir = os.path.join(base_dir, CHECK_DIRNAME)
    actions = []

    if os.path.exists(check_dir):
        for src in (os.path.join(CHECK_DIRNAME, x)
                    for x in os.listdir(check_dir)
                    if endswith2(x, SOL_EXTS)):
            exe, ext = basename2(src, CHECK_EXTS)
            lang = filename_to_language(src)

            # Bind lang explicitly through the partial: relying on the
            # closure would capture it late, so every action would
            # compile with the language of the last checker in the loop.
            def compile_check(src, exe, lang, assume=None):
                commands = lang.get_compilation_commands([src], exe)
                for command in commands:
                    call(base_dir, command)

            actions.append(([src], [exe],
                            functools.partial(compile_check, src, exe, lang),
                            'compile checker'))

    return actions
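# The actions built above follow a make-like convention: each entry is a
# (dependencies, targets, callable, description) tuple. A minimal sketch of
# a runner for such tuples; the mtime-based freshness check is an assumption
# about how cmsMake consumes them, and phony targets (such as the test_*
# names below) never exist on disk, so they always re-run.
def run_actions(base_dir, actions):
    for deps, targets, command, description in actions:
        dep_times = [os.path.getmtime(os.path.join(base_dir, d))
                     for d in deps
                     if os.path.exists(os.path.join(base_dir, d))]
        tgt_times = [os.path.getmtime(os.path.join(base_dir, t))
                     for t in targets
                     if os.path.exists(os.path.join(base_dir, t))]
        # Rebuild when a target is missing or older than some dependency.
        if len(tgt_times) < len(targets) or \
                (dep_times and min(tgt_times) < max(dep_times)):
            print("Running:", description)
            command()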
def build_gen_list(base_dir, task_type, yaml_conf):
    input_dir = os.path.join(base_dir, INPUT_DIRNAME)
    output_dir = os.path.join(base_dir, OUTPUT_DIRNAME)
    gen_dir = os.path.join(base_dir, GEN_DIRNAME)
    gen_exe = None
    validator_exe = None

    for src in (x for x in os.listdir(gen_dir) if endswith2(x, GEN_EXTS)):
        base, ext = basename2(src, GEN_EXTS)
        lang = filename_to_language(src)
        if base == GEN_BASENAME:
            gen_exe = os.path.join(GEN_DIRNAME, base)
            gen_src = os.path.join(GEN_DIRNAME, base + ext)
            gen_lang = lang
        elif base == VALIDATOR_BASENAME:
            validator_exe = os.path.join(GEN_DIRNAME, base)
            validator_src = os.path.join(GEN_DIRNAME, base + ext)
            validator_lang = lang

    if gen_exe is None:
        raise Exception("Couldn't find generator")
    if validator_exe is None:
        raise Exception("Couldn't find validator")

    gen_GEN = os.path.join(GEN_DIRNAME, GEN_GEN)
    sol_exe = os.path.join(SOL_DIRNAME, SOL_FILENAME)

    # Count non-trivial lines in GEN and establish which external
    # files are needed for input generation.
    testcases = list(iter_GEN(os.path.join(base_dir, gen_GEN)))
    testcase_num = len(testcases)
    copy_files = [x[1] for x in testcases if x[0]]

    def compile_src(src, exe, lang, assume=None):
        if lang.source_extension in ['.cpp', '.c', '.pas']:
            commands = lang.get_compilation_commands(
                [src], exe, for_evaluation=False)
            for command in commands:
                call(base_dir, command)
        elif lang.source_extension in ['.py', '.sh']:
            os.symlink(os.path.basename(src), exe)
        else:
            raise Exception("Wrong generator/validator language!")

    # Question: why do inputs, unlike outputs, have to be generated all
    # together, instead of selectively over those that have changed
    # since the last execution? This wastes time, since generating
    # inputs is usually slow. Answer: because the cmsMake architecture,
    # which is based on file timestamps, cannot tell which lines of
    # gen/GEN have changed. Ouch! We'll have to think this through
    # better for the new format we're developing.
    def make_input(assume=None):
        n = 0
        try:
            os.makedirs(input_dir)
        except OSError:
            pass
        for (is_copy, line, st) in testcases:
            print("Generating",
                  add_color_to_string("input # %d" % n, colors.BLACK,
                                      stream=sys.stderr, bold=True),
                  file=sys.stderr)
            new_input = os.path.join(input_dir, 'input%d.txt' % n)
            if is_copy:
                # Copy the file.
                print("> Copy input file from:", line)
                copy_input = os.path.join(base_dir, line)
                shutil.copyfile(copy_input, new_input)
            else:
                # Call the generator.
                with open(new_input, 'wb') as fout:
                    call(base_dir, [gen_exe] + line.split(), stdout=fout)
            command = [validator_exe, new_input]
            if st != 0:
                command.append("%s" % st)
            call(base_dir, command)
            n += 1
            for _ in range(3):
                move_cursor(directions.UP, erase=True, stream=sys.stderr)

    def make_output(n, assume=None):
        try:
            os.makedirs(output_dir)
        except OSError:
            pass
        print("Generating",
              add_color_to_string("output # %d" % n, colors.BLACK,
                                  stream=sys.stderr, bold=True),
              file=sys.stderr)

        temp_dir = tempfile.mkdtemp(prefix=os.path.join(base_dir, "tmp"))
        use_stdin = yaml_conf.get("infile") in {None, ""}
        use_stdout = yaml_conf.get("outfile") in {None, ""}

        # Names of the actual source and destination.
        infile = os.path.join(input_dir, 'input%d.txt' % n)
        outfile = os.path.join(output_dir, 'output%d.txt' % n)

        # Names of the input and output in the temporary directory.
        copied_infile = os.path.join(
            temp_dir,
            "input.txt" if use_stdin else yaml_conf.get("infile"))
        copied_outfile = os.path.join(
            temp_dir,
            "output.txt" if use_stdout else yaml_conf.get("outfile"))

        os.symlink(infile, copied_infile)
        fin = None
        fout = None
        try:
            if use_stdin:
                fin = open(copied_infile, "rb")
            if use_stdout:
                fout = open(copied_outfile, 'wb')

            shutil.copy(sol_exe, temp_dir)

            # If the task is of type Communication, then there is
            # nothing to put in the output files.
            if task_type != ['Communication', '']:
                call(temp_dir, [os.path.join(temp_dir, SOL_FILENAME)],
                     stdin=fin, stdout=fout)
                move_cursor(directions.UP, erase=True, stream=sys.stderr)
        finally:
            if fin is not None:
                fin.close()
            if fout is not None:
                fout.close()

        os.rename(copied_outfile, outfile)
        shutil.rmtree(temp_dir)
        move_cursor(directions.UP, erase=True, stream=sys.stderr)

    actions = []
    actions.append(([gen_src], [gen_exe],
                    functools.partial(compile_src, gen_src, gen_exe,
                                      gen_lang),
                    "compile the generator"))
    actions.append(([validator_src], [validator_exe],
                    functools.partial(compile_src, validator_src,
                                      validator_exe, validator_lang),
                    "compile the validator"))
    actions.append(([gen_GEN, gen_exe, validator_exe] + copy_files,
                    [os.path.join(INPUT_DIRNAME, 'input%d.txt' % x)
                     for x in range(testcase_num)],
                    make_input,
                    "input generation"))
    for n in range(testcase_num):
        actions.append(([os.path.join(INPUT_DIRNAME, 'input%d.txt' % n),
                         sol_exe],
                        [os.path.join(OUTPUT_DIRNAME, 'output%d.txt' % n)],
                        functools.partial(make_output, n),
                        "output generation"))

    in_out_files = \
        [os.path.join(INPUT_DIRNAME, 'input%d.txt' % n)
         for n in range(testcase_num)] + \
        [os.path.join(OUTPUT_DIRNAME, 'output%d.txt' % n)
         for n in range(testcase_num)]
    return actions, in_out_files
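# What iter_GEN yields for each non-trivial line of gen/GEN, as consumed by
# make_input() above: a triple (is_copy, line, st). When is_copy is true,
# `line` is a path relative to the task directory whose contents are copied
# verbatim as the input; otherwise `line` holds the generator's command-line
# arguments. A nonzero `st` is appended to the validator's command line
# (presumably the subtask number the input must satisfy).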
def build_sols_list(base_dir, task_type, in_out_files, yaml_conf):
    if yaml_conf.get('only_gen', False):
        return []

    sol_dir = os.path.join(base_dir, SOL_DIRNAME)
    actions = []
    test_actions = []
    for src in (os.path.join(SOL_DIRNAME, x)
                for x in os.listdir(sol_dir)
                if endswith2(x, SOL_EXTS)):
        exe, ext = basename2(src, SOL_EXTS)
        lang = filename_to_language(src)
        # Delete the dot.
        ext = ext[1:]
        exe_EVAL = "%s_EVAL" % exe

        # Ignore things known to be auxiliary files.
        if exe == os.path.join(SOL_DIRNAME, GRAD_BASENAME):
            continue
        if exe == os.path.join(SOL_DIRNAME, STUB_BASENAME):
            continue
        if ext == 'pas' and exe.endswith('lib'):
            continue

        srcs = []
        # The grader, when present, must be in the first position of srcs.
        if task_type == ['Batch', 'Grad'] or \
                task_type == ['Batch', 'GradComp']:
            srcs.append(os.path.join(SOL_DIRNAME,
                                     GRAD_BASENAME + '.%s' % ext))
        if task_type == ['Communication', '']:
            srcs.append(os.path.join(SOL_DIRNAME,
                                     STUB_BASENAME + '.%s' % ext))
        srcs.append(src)

        test_deps = [exe_EVAL] + in_out_files
        if task_type == ['Batch', 'Comp'] or \
                task_type == ['Batch', 'GradComp']:
            test_deps.append('cor/correttore')
        if task_type == ['Communication', '']:
            test_deps.append('cor/manager')

        def compile_src(srcs, exe, for_evaluation, lang, assume=None):
            # We put everything in a temporary directory to reproduce
            # the same conditions that we have when compiling a
            # submission.
            tempdir = tempfile.mkdtemp()
            try:
                task_name = detect_task_name(base_dir)
                grader_num = 1 if len(srcs) > 1 else 0
                new_srcs = []
                for grader in srcs[:grader_num]:
                    grader_name = os.path.basename(grader)
                    shutil.copyfile(os.path.join(base_dir, grader),
                                    os.path.join(tempdir, grader_name))
                    new_srcs.append(os.path.join(tempdir, grader_name))
                # For now, we assume we only have one non-grader source.
                source_name = task_name + lang.source_extension
                shutil.copyfile(os.path.join(base_dir, srcs[grader_num]),
                                os.path.join(tempdir, source_name))
                new_srcs.append(source_name)
                # Libraries are needed/used only for C/C++ and Pascal.
                header_extension = lang.header_extension
                if header_extension is not None:
                    lib_template = "%s" + header_extension
                    lib_filename = lib_template % task_name
                    lib_path = os.path.join(
                        base_dir, SOL_DIRNAME, lib_filename)
                    if os.path.exists(lib_path):
                        shutil.copyfile(
                            lib_path, os.path.join(tempdir, lib_filename))
                new_exe = os.path.join(tempdir, task_name)
                compilation_commands = lang.get_compilation_commands(
                    new_srcs, new_exe, for_evaluation=for_evaluation)
                for command in compilation_commands:
                    call(tempdir, command)
                    move_cursor(directions.UP, erase=True,
                                stream=sys.stderr)
                shutil.copyfile(os.path.join(tempdir, new_exe),
                                os.path.join(base_dir, exe))
                shutil.copymode(os.path.join(tempdir, new_exe),
                                os.path.join(base_dir, exe))
            finally:
                shutil.rmtree(tempdir)

        def test_src(exe, lang, assume=None):
            # Solution names begin with "sol/" and end with "_EVAL";
            # we strip both when printing.
            print("Testing solution",
                  add_color_to_string(exe[4:-5], colors.BLACK, bold=True))
            test_testcases(base_dir, exe, language=lang, assume=assume)

        actions.append(
            (srcs, [exe],
             functools.partial(compile_src, srcs, exe, False, lang),
             'compile solution'))
        actions.append(
            (srcs, [exe_EVAL],
             functools.partial(compile_src, srcs, exe_EVAL, True, lang),
             'compile solution with -DEVAL'))
        test_actions.append(
            (test_deps, ['test_%s' % os.path.split(exe)[1]],
             functools.partial(test_src, exe_EVAL, lang),
             'test solution (compiled with -DEVAL)'))

    return actions + test_actions
def add_submissions(contest_name, task_name, username, items):
    """Add submissions from the given user to the given task in the
    given contest. Each item corresponds to a submission, and should
    contain a dictionary which maps formatted file names to paths.
    For example, in batch tasks the format is "Task.%l", so one
    submission would be {"Task.%l": "path/to/task.cpp"}.

    """
    # We connect to the evaluation service to try and notify it about
    # the new submissions. Otherwise, it would pick them up only on
    # the next sweep for missed operations.
    rs = RemoteServiceClient(ServiceCoord("EvaluationService", 0))
    rs.connect()

    with SessionGen() as session:
        user = get_user(session, username)
        contest = get_contest(session, contest_name)
        participation = get_participation(session, contest, user)
        task = get_task(session, task_name, contest)
        elements = set(format_element.filename
                       for format_element in task.submission_format)
        file_cacher = FileCacher()

        # We go over all submissions twice. First we validate the
        # submission format.
        for submission_dict in items:
            for (format_file_name, path) in submission_dict.items():
                if format_file_name not in elements:
                    raise Exception("Unexpected submission file: %s. "
                                    "Expected elements: %s" %
                                    (format_file_name, elements))
                if not os.path.isfile(path):
                    raise Exception("File not found: %s" % path)

        # Now add to the database.
        for submission_dict in items:
            if not submission_dict:
                continue
            timestamp = time.time()
            file_digests = {}
            language_name = None
            for (format_file_name, path) in submission_dict.items():
                digest = file_cacher.put_file_from_path(
                    path,
                    "Submission file %s sent by %s at %d." %
                    (path, username, timestamp))
                file_digests[format_file_name] = digest

                current_language = filename_to_language(path)
                if current_language is not None:
                    language_name = current_language.name

            submission = Submission(make_datetime(timestamp),
                                    language_name,
                                    participation=participation,
                                    task=task)
            for filename, digest in file_digests.items():
                session.add(File(filename, digest, submission=submission))
            session.add(submission)
            session.commit()
            rs.new_submission(submission_id=submission.id)

    rs.disconnect()
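# A minimal usage sketch of add_submissions(), following the docstring's
# "Task.%l" example; the names and paths are hypothetical. Each dictionary
# in `items` becomes one submission for the given user on the given task.
def _example_add_submissions():
    add_submissions(
        contest_name="con_test",
        task_name="mytask",
        username="alice",
        items=[
            {"mytask.%l": "solutions/mytask.cpp"},    # first submission
            {"mytask.%l": "solutions/mytask_v2.py"},  # second submission
        ])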
def add_submission(contest_id, username, task_name, timestamp, files):
    file_cacher = FileCacher()
    with SessionGen() as session:
        participation = session.query(Participation)\
            .join(Participation.user)\
            .filter(Participation.contest_id == contest_id)\
            .filter(User.username == username)\
            .first()
        if participation is None:
            logger.critical("User `%s' does not exist or "
                            "does not participate in the contest.",
                            username)
            return False
        task = session.query(Task)\
            .filter(Task.contest_id == contest_id)\
            .filter(Task.name == task_name)\
            .first()
        if task is None:
            logger.critical("Unable to find task `%s'.", task_name)
            return False

        elements = [format.filename for format in task.submission_format]

        for file_ in files:
            if file_ not in elements:
                logger.critical("File `%s' is not in the submission "
                                "format for the task.", file_)
                return False

        if any(element not in files for element in elements):
            logger.warning("Not all files from the submission format were "
                           "provided.")

        # files and elements now coincide. We compute the language for
        # each file and check that they do not mix.
        language = None
        for file_ in files:
            this_language = filename_to_language(files[file_])
            if this_language is None and ".%l" in file_:
                logger.critical("Cannot recognize language for file `%s'.",
                                file_)
                return False

            if language is None:
                language = this_language
            elif this_language is not None and language != this_language:
                logger.critical("Mixed-language submission detected.")
                return False

        # Store all files from the arguments, and obtain their digests.
        file_digests = {}
        try:
            for file_ in files:
                digest = file_cacher.put_file_from_path(
                    files[file_],
                    "Submission file %s sent by %s at %d." %
                    (file_, username, timestamp))
                file_digests[file_] = digest
        except Exception:
            logger.critical("Error while storing submission's file.",
                            exc_info=True)
            return False

        # Create objects in the DB. Guard against a None language (e.g.,
        # for output-only submission formats), which has no .name.
        submission = Submission(make_datetime(timestamp),
                                language.name
                                if language is not None else None,
                                participation=participation,
                                task=task)
        for filename, digest in file_digests.items():
            session.add(File(filename, digest, submission=submission))
        session.add(submission)
        session.commit()

        return True
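# A minimal usage sketch of add_submission(); the identifiers and paths are
# hypothetical. Unlike add_submissions() above, this variant addresses the
# contest by id, reports problems through the logger, and returns a boolean.
def _example_add_submission():
    ok = add_submission(
        contest_id=1,
        username="alice",
        task_name="mytask",
        timestamp=time.time(),
        files={"mytask.%l": "solutions/mytask.cpp"})
    return ok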
def post(self, task_name):
    participation = self.current_user

    if not self.r_params["testing_enabled"]:
        raise tornado.web.HTTPError(404)

    try:
        task = self.contest.get_task(task_name)
    except KeyError:
        raise tornado.web.HTTPError(404)

    self.fallback_page = ["testing"]
    self.fallback_args = {"task_name": task.name}

    # Check that the task is testable.
    task_type = get_task_type(dataset=task.active_dataset)
    if not task_type.testable:
        logger.warning("User %s tried to make a test on task %s.",
                       participation.user.username, task_name)
        raise tornado.web.HTTPError(404)

    # Alias for easy access.
    contest = self.contest

    # Enforce maximum number of user_tests.
    try:
        if contest.max_user_test_number is not None:
            user_test_c = self.sql_session.query(func.count(UserTest.id))\
                .join(UserTest.task)\
                .filter(Task.contest == contest)\
                .filter(UserTest.participation == participation)\
                .scalar()
            if user_test_c >= contest.max_user_test_number and \
                    not self.current_user.unrestricted:
                raise ValueError(
                    self._("You have reached the maximum limit of "
                           "at most %d tests among all tasks.") %
                    contest.max_user_test_number)
        if task.max_user_test_number is not None:
            user_test_t = self.sql_session.query(func.count(UserTest.id))\
                .filter(UserTest.task == task)\
                .filter(UserTest.participation == participation)\
                .scalar()
            if user_test_t >= task.max_user_test_number and \
                    not self.current_user.unrestricted:
                raise ValueError(
                    self._("You have reached the maximum limit of "
                           "at most %d tests on this task.") %
                    task.max_user_test_number)
    except ValueError as error:
        self._send_error(self._("Too many tests!"), str(error))
        return

    # Enforce minimum time between user_tests.
    try:
        if contest.min_user_test_interval is not None:
            last_user_test_c = self.sql_session.query(UserTest)\
                .join(UserTest.task)\
                .filter(Task.contest == contest)\
                .filter(UserTest.participation == participation)\
                .order_by(UserTest.timestamp.desc())\
                .first()
            if last_user_test_c is not None and \
                    self.timestamp - last_user_test_c.timestamp < \
                    contest.min_user_test_interval and \
                    not self.current_user.unrestricted:
                raise ValueError(
                    self._("Among all tasks, you can test again "
                           "after %d seconds from last test.") %
                    contest.min_user_test_interval.total_seconds())
        # We get the last user_test even if we may not need it for
        # min_user_test_interval, because we may need it later, in
        # case this is an ALLOW_PARTIAL_SUBMISSION task.
        last_user_test_t = self.sql_session.query(UserTest)\
            .filter(UserTest.participation == participation)\
            .filter(UserTest.task == task)\
            .order_by(UserTest.timestamp.desc())\
            .first()
        if task.min_user_test_interval is not None:
            if last_user_test_t is not None and \
                    self.timestamp - last_user_test_t.timestamp < \
                    task.min_user_test_interval and \
                    not self.current_user.unrestricted:
                raise ValueError(
                    self._("For this task, you can test again "
                           "after %d seconds from last test.") %
                    task.min_user_test_interval.total_seconds())
    except ValueError as error:
        self._send_error(self._("Tests too frequent!"), str(error))
        return

    submission_lang = self.get_argument("language", None)
    required_managers = [
        name for name
        in task_type.get_user_managers(task.submission_format)
        if name.endswith(".%l")
        or filename_to_language(name) is None
        or submission_lang in filename_to_language_names(name)]

    # Required files from the user.
    required = set([sfe.filename for sfe in task.submission_format] +
                   required_managers + ["input"])

    # Ensure that the user did not submit multiple files with the
    # same name (each value is the list of uploads for that field).
    if any(len(uploaded) != 1
           for uploaded in self.request.files.values()):
        self._send_error(self._("Invalid test format!"),
                         self._("Please select the correct files."))
        return

    # If the user submitted an archive, extract it and use its content
    # as request.files. This is only valid for "output only" tasks
    # (i.e., not for submissions requiring a programming language
    # identification).
    if len(self.request.files) == 1 and \
            next(iter(self.request.files)) == "submission":
        if any(filename.endswith(".%l") for filename in required):
            self._send_error(self._("Invalid test format!"),
                             self._("Please select the correct files."))
            return
        archive_data = self.request.files["submission"][0]
        del self.request.files["submission"]

        # Create the archive.
        archive = Archive.from_raw_data(archive_data["body"])

        if archive is None:
            self._send_error(
                self._("Invalid archive format!"),
                self._("The submitted archive could not be opened."))
            return

        # Extract the archive.
        unpacked_dir = archive.unpack()
        for name in archive.namelist():
            filename = os.path.basename(name)
            # Read in binary mode: the bodies in request.files are bytes.
            with open(os.path.join(unpacked_dir, filename), "rb") as f:
                body = f.read()
            self.request.files[filename] = [{
                'filename': filename,
                'body': body,
            }]

        archive.cleanup()

    # This ensures that the user sent one file for every name in the
    # submission format and no more. Less is acceptable if the task
    # type says so.
    provided = set(self.request.files.keys())
    if not (required == provided or
            (task_type.ALLOW_PARTIAL_SUBMISSION and
             required.issuperset(provided))):
        self._send_error(self._("Invalid test format!"),
                         self._("Please select the correct files."))
        return

    # Add submitted files. After this, files is a dictionary indexed
    # by *our* filenames (something like "output01.txt" or
    # "taskname.%l") and whose value is a couple
    # (user_assigned_filename, content).
    files = {}
    for uploaded, data in self.request.files.items():
        files[uploaded] = (data[0]["filename"], data[0]["body"])

    # Read the submission language provided in the request; we
    # integrate it with the language fetched from the previous
    # submission (if we use it) and later make sure it is
    # recognized and allowed.
    need_lang = any(our_filename.find(".%l") != -1
                    for our_filename in files)

    # If we allow partial submissions, we implicitly recover the
    # non-submitted files from the previous user test, and put them
    # in file_digests (i.e., as if they had already been sent to FS).
    file_digests = {}
    if task_type.ALLOW_PARTIAL_SUBMISSION and \
            last_user_test_t is not None and \
            (submission_lang is None or
             submission_lang == last_user_test_t.language):
        submission_lang = last_user_test_t.language
        for filename in required.difference(provided):
            if filename in last_user_test_t.files:
                file_digests[filename] = \
                    last_user_test_t.files[filename].digest

    # Throw an error if the task needs a language, but we don't have
    # it or it is not allowed / recognized.
    if need_lang:
        error = None
        if submission_lang is None:
            error = self._("Cannot recognize the user test language.")
        elif submission_lang not in contest.languages:
            error = self._("Language %s not allowed in this contest.") \
                % submission_lang
        if error is not None:
            self._send_error(self._("Invalid test!"), error)
            return

    # Check if the submitted files are small enough.
    if any(len(f[1]) > config.max_submission_length
           for n, f in files.items() if n != "input"):
        self._send_error(
            self._("Test too big!"),
            self._("Each source file must be at most %d bytes long.") %
            config.max_submission_length)
        return
    if len(files["input"][1]) > config.max_input_length:
        self._send_error(
            self._("Input too big!"),
            self._("The input file must be at most %d bytes long.") %
            config.max_input_length)
        return

    # All checks done, submission accepted.

    # Attempt to store the submission locally to be able to
    # recover from a failure.
    if config.tests_local_copy:
        try:
            path = os.path.join(
                config.tests_local_copy_path.replace("%s",
                                                     config.data_dir),
                participation.user.username)
            if not os.path.exists(path):
                os.makedirs(path)
            # Pickle in ASCII format produces str, not unicode,
            # therefore we open the file in binary mode.
            with io.open(
                    os.path.join(path,
                                 "%d" % make_timestamp(self.timestamp)),
                    "wb") as file_:
                pickle.dump((self.contest.id,
                             participation.user.id,
                             task.id,
                             files), file_)
        except Exception:
            logger.error("Test local copy failed.", exc_info=True)

    # We now have to send all the files to the destination...
    try:
        for filename in files:
            digest = \
                self.application.service.file_cacher.put_file_content(
                    files[filename][1],
                    "Test file %s sent by %s at %d." % (
                        filename, participation.user.username,
                        make_timestamp(self.timestamp)))
            file_digests[filename] = digest

    # In case of error, the server aborts the submission.
    except Exception as error:
        logger.error("Storage failed! %s", error)
        self._send_error(self._("Test storage failed!"),
                         self._("Please try again."))
        return

    # All the files are stored, ready to submit!
    logger.info("All files stored for test sent by %s",
                participation.user.username)

    user_test = UserTest(self.timestamp,
                         submission_lang,
                         file_digests["input"],
                         participation=participation,
                         task=task)

    for filename in [sfe.filename for sfe in task.submission_format]:
        digest = file_digests[filename]
        self.sql_session.add(
            UserTestFile(filename, digest, user_test=user_test))
    for filename in required_managers:
        digest = file_digests[filename]
        if submission_lang is not None:
            extension = get_language(submission_lang).source_extension
            filename = filename.replace(".%l", extension)
        self.sql_session.add(
            UserTestManager(filename, digest, user_test=user_test))

    self.sql_session.add(user_test)
    self.sql_session.commit()

    try:
        random_service(self.application.service.evaluation_services)\
            .new_user_test(user_test_id=user_test.id)
    except IndexError:
        logger.error("No evaluation services found. "
                     "Leaving the submission to be "
                     "discovered by sweep.")
    self.application.service.add_notification(
        participation.user.username,
        self.timestamp,
        self._("Test received"),
        self._("Your test has been received "
               "and is currently being executed."),
        NOTIFICATION_SUCCESS)

    # The argument (the encrypted user test id) is not used by CWS
    # (nor does it disclose information to the user), but it is
    # useful for automated testing to obtain the user test id.
    self.redirect(
        self.contest_url(*self.fallback_page,
                         user_test_id=encrypt_number(user_test.id),
                         **self.fallback_args))
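# A minimal sketch (hypothetical host, task name, language name and session
# handling; authentication and XSRF tokens are omitted) of the request the
# handler above expects: one multipart field per element of the submission
# format, plus the required managers and the mandatory "input" file, with
# the language passed as an ordinary form argument.
import requests

def _example_post_user_test(base_url, session_cookies):
    files = {
        "mytask.%l": ("mytask.cpp", open("sol/mytask.cpp", "rb")),
        "input": ("input.txt", open("input.txt", "rb")),
    }
    return requests.post(
        "%s/tasks/%s/test" % (base_url, "mytask"),
        data={"language": "C++11 / g++"},  # name depends on the plugins
        files=files,
        cookies=session_cookies)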