def get_submission_format(self, dest):
    """Parse the submission format.

    Using the two arguments "submission_format_choice" and
    "submission_format", set the "submission_format" item of the
    given dictionary.

    dest (dict): a place to store the result.

    """
    choice = self.get_argument("submission_format_choice", "other")
    if choice == "simple":
        filename = "%s.%%l" % dest["name"]
        format_ = [SubmissionFormatElement(filename)]
    elif choice == "other":
        value = self.get_argument("submission_format", "[]")
        if value == "":
            value = "[]"
        format_ = []
        try:
            for filename in json.loads(value):
                format_ += [SubmissionFormatElement(filename)]
        except ValueError:
            raise ValueError("Submission format not recognized.")
    else:
        raise ValueError("Submission format not recognized.")
    dest["submission_format"] = format_
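
# Illustrative sketch (not part of the handler; the filenames are made up):
# with the "other" choice, the "submission_format" argument is expected to
# be a JSON-encoded list of filenames, e.g.
#
#     '["mytask.%l", "grader.cpp"]'
#
# which the loop above turns into two SubmissionFormatElement entries,
# while choice == "simple" for dest["name"] == "mytask" yields just
# ["mytask.%l"].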
def put_task_submission_format(self, args):
    """Put the task's submission format in the given args."""
    if self.task_type == "Batch":
        # Batch programs are always named Task.cpp, Task.java, etc.
        # Note that in Java this means the class must be "Task".
        args["submission_format"] = [SubmissionFormatElement("Task.%l")]
    elif self.task_type == "OutputOnly":
        # Output files must always be in the form "output_01.01.txt",
        # "output_01.02.txt", and so on.
        args["submission_format"] = []
        for (subtask_index, subtask) in enumerate(self.subtasks):
            for testcase_index in xrange(len(subtask["testcases"])):
                args["submission_format"] += [
                    SubmissionFormatElement(
                        "output_%02d.%02d.txt"
                        % (subtask_index + 1, testcase_index + 1))]
    elif self.task_type == "TwoSteps":
        # TwoSteps files are always "encoder" and "decoder".
        args["submission_format"] = [
            SubmissionFormatElement("encoder.%l"),
            SubmissionFormatElement("decoder.%l")]
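
# Illustrative example: for a hypothetical OutputOnly task with two
# subtasks of two testcases each, the branch above produces the format
#
#     output_01.01.txt, output_01.02.txt,
#     output_02.01.txt, output_02.02.txt
#
# i.e. one required file per (subtask, testcase) pair, both 1-based.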
def post(self):
    fallback_page = "/tasks/add"

    try:
        attrs = dict()

        self.get_string(attrs, "name", empty=None)
        self.get_string(attrs, "category")

        assert attrs.get("name") is not None, "No task name specified."
        attrs["title"] = attrs["name"]

        # Set default submission format as ["taskname.%l"].
        attrs["submission_format"] = \
            [SubmissionFormatElement("%s.%%l" % attrs["name"])]

        # Create the task.
        task = Task(**attrs)
        self.sql_session.add(task)

    except Exception as error:
        self.application.service.add_notification(
            make_datetime(), "Invalid field(s)", repr(error))
        self.redirect(fallback_page)
        return

    try:
        attrs = dict()

        # Create its first dataset.
        attrs["description"] = "Default"
        attrs["autojudge"] = True
        attrs["task_type"] = "Batch"
        attrs["task_type_parameters"] = '["alone", ["", ""], "diff"]'
        attrs["score_type"] = "Sum"
        attrs["score_type_parameters"] = '100'
        attrs["task"] = task
        dataset = Dataset(**attrs)
        self.sql_session.add(dataset)

        # Make the dataset active. Life works better that way.
        task.active_dataset = dataset

    except Exception as error:
        self.application.service.add_notification(
            make_datetime(), "Invalid field(s)", repr(error))
        self.redirect(fallback_page)
        return

    if self.try_commit():
        # Create the task on RWS.
        self.application.service.proxy_service.reinitialize()
        self.redirect("/task/%s" % task.id)
    else:
        self.redirect(fallback_page)
def add_submission_format_element(self, task=None, **kwargs):
    """Create a submission format element and add it to the session."""
    task = task if task is not None else self.add_task()
    args = {
        "task": task,
        "filename": unique_unicode_id(),
    }
    args.update(kwargs)
    sfe = SubmissionFormatElement(**args)
    self.session.add(sfe)
    return sfe
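
# Illustrative usage in a test (the filename is hypothetical; any keyword
# overrides the generated default):
#
#     task = self.add_task()
#     sfe = self.add_submission_format_element(task=task,
#                                              filename="solution.%l")
#     self.session.flush()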
def get_task(self, name):
    """See docstring in class Loader.

    """
    try:
        num = self.tasks_order[name]

    # Here we expose an undocumented behavior, so that cmsMake can
    # import a task even without the whole contest; this is not to
    # be relied upon in general.
    except AttributeError:
        num = 1

    task_path = os.path.join(self.path, name)

    # We first look for the yaml file inside the task folder, and
    # eventually fall back to a yaml file in its parent folder.
    try:
        conf = yaml.safe_load(
            io.open(os.path.join(task_path, "task.yaml"),
                    "rt", encoding="utf-8"))
    except IOError:
        conf = yaml.safe_load(
            io.open(os.path.join(self.path, name + ".yaml"),
                    "rt", encoding="utf-8"))

    logger.info("Loading parameters for task %s." % name)

    # Here we update the time of the last import.
    touch(os.path.join(task_path, ".itime"))
    # If this file is not deleted, then the import failed.
    touch(os.path.join(task_path, ".import_error"))

    args = {}

    args["num"] = num
    load(conf, args, ["name", "nome_breve"])
    load(conf, args, ["title", "nome"])

    assert name == args["name"]

    if args["name"] == args["title"]:
        logger.warning("Short name equals long name (title). "
                       "Please check.")

    primary_language = load(conf, None, "primary_language")
    if primary_language is None:
        primary_language = 'it'
    paths = [os.path.join(task_path, "statement", "statement.pdf"),
             os.path.join(task_path, "testo", "testo.pdf")]
    for path in paths:
        if os.path.exists(path):
            digest = self.file_cacher.put_file_from_path(
                path,
                "Statement for task %s (lang: %s)" %
                (name, primary_language))
            break
    else:
        logger.critical("Couldn't find any task statement, aborting...")
        sys.exit(1)
    args["statements"] = [Statement(primary_language, digest)]

    args["primary_statements"] = '["%s"]' % (primary_language)

    args["attachments"] = []  # FIXME Use auxiliary

    args["submission_format"] = [
        SubmissionFormatElement("%s.%%l" % name)]

    # Use the new token settings format if detected.
    if "token_mode" in conf:
        load(conf, args, "token_mode")
        load(conf, args, "token_max_number")
        load(conf, args, "token_min_interval", conv=make_timedelta)
        load(conf, args, "token_gen_initial")
        load(conf, args, "token_gen_number")
        load(conf, args, "token_gen_interval", conv=make_timedelta)
        load(conf, args, "token_gen_max")
    # Otherwise fall back on the old one.
    else:
        logger.warning(
            "%s.yaml uses a deprecated format for token settings which "
            "will soon stop being supported, you're advised to update it.",
            name)

        # Determine the mode.
        if conf.get("token_initial", None) is None:
            args["token_mode"] = "disabled"
        elif conf.get("token_gen_number", 0) > 0 and \
                conf.get("token_gen_time", 0) == 0:
            args["token_mode"] = "infinite"
        else:
            args["token_mode"] = "finite"
        # Set the old default values.
        args["token_gen_initial"] = 0
        args["token_gen_number"] = 0
        args["token_gen_interval"] = timedelta()
        # Copy the parameters to their new names.
        load(conf, args, "token_total", "token_max_number")
        load(conf, args, "token_min_interval", conv=make_timedelta)
        load(conf, args, "token_initial", "token_gen_initial")
        load(conf, args, "token_gen_number")
        load(conf, args, "token_gen_time", "token_gen_interval",
             conv=make_timedelta)
        load(conf, args, "token_max", "token_gen_max")
        # Remove some corner cases.
if args["token_gen_initial"] is None: args["token_gen_initial"] = 0 if args["token_gen_interval"].total_seconds() == 0: args["token_gen_interval"] = timedelta(minutes=1) load(conf, args, "max_submission_number") load(conf, args, "max_user_test_number") load(conf, args, "min_submission_interval", conv=make_timedelta) load(conf, args, "min_user_test_interval", conv=make_timedelta) # Attachments args["attachments"] = [] if os.path.exists(os.path.join(task_path, "att")): for filename in os.listdir(os.path.join(task_path, "att")): digest = self.file_cacher.put_file_from_path( os.path.join(task_path, "att", filename), "Attachment %s for task %s" % (filename, name)) args["attachments"] += [Attachment(filename, digest)] task = Task(**args) args = {} args["task"] = task args["description"] = conf.get("version", "Default") args["autojudge"] = False load(conf, args, ["time_limit", "timeout"], conv=float) load(conf, args, ["memory_limit", "memlimit"]) # Builds the parameters that depend on the task type args["managers"] = [] infile_param = conf.get("infile", "input.txt") outfile_param = conf.get("outfile", "output.txt") # If there is sol/grader.%l for some language %l, then, # presuming that the task type is Batch, we retrieve graders # in the form sol/grader.%l graders = False for lang in LANGUAGES: if os.path.exists(os.path.join( task_path, "sol", "grader.%s" % lang)): graders = True break if graders: # Read grader for each language for lang in LANGUAGES: grader_filename = os.path.join( task_path, "sol", "grader.%s" % lang) if os.path.exists(grader_filename): digest = self.file_cacher.put_file_from_path( grader_filename, "Grader for task %s and language %s" % (name, lang)) args["managers"] += [ Manager("grader.%s" % lang, digest)] else: logger.warning("Grader for language %s not found " % lang) # Read managers with other known file extensions for other_filename in os.listdir(os.path.join(task_path, "sol")): if other_filename.endswith('.h') or \ other_filename.endswith('lib.pas'): digest = self.file_cacher.put_file_from_path( os.path.join(task_path, "sol", other_filename), "Manager %s for task %s" % (other_filename, name)) args["managers"] += [ Manager(other_filename, digest)] compilation_param = "grader" else: compilation_param = "alone" # If there is check/checker (or equivalent), then, presuming # that the task type is Batch or OutputOnly, we retrieve the # comparator paths = [os.path.join(task_path, "check", "checker"), os.path.join(task_path, "cor", "correttore")] for path in paths: if os.path.exists(path): digest = self.file_cacher.put_file_from_path( path, "Manager for task %s" % name) args["managers"] += [ Manager("checker", digest)] evaluation_param = "comparator" break else: evaluation_param = "diff" # Detect subtasks by checking GEN gen_filename = os.path.join(task_path, 'gen', 'GEN') try: with io.open(gen_filename, "rt", encoding="utf-8") as gen_file: subtasks = [] testcases = 0 points = None for line in gen_file: line = line.strip() splitted = line.split('#', 1) if len(splitted) == 1: # This line represents a testcase, otherwise it's # just a blank if splitted[0] != '': testcases += 1 else: testcase, comment = splitted testcase_detected = False subtask_detected = False if testcase.strip() != '': testcase_detected = True comment = comment.strip() if comment.startswith('ST:'): subtask_detected = True if testcase_detected and subtask_detected: raise Exception("No testcase and subtask in the" " same line allowed") # This line represents a testcase and contains a # comment, but the comment 
doesn't start a new # subtask if testcase_detected: testcases += 1 # This line starts a new subtask if subtask_detected: # Close the previous subtask if points is None: assert(testcases == 0) else: subtasks.append([points, testcases]) # Open the new one testcases = 0 points = int(comment[3:].strip()) # Close last subtask (if no subtasks were defined, just # fallback to Sum) if points is None: args["score_type"] = "Sum" total_value = float(conf.get("total_value", 100.0)) input_value = 0.0 n_input = testcases if n_input != 0: input_value = total_value / n_input args["score_type_parameters"] = "%s" % input_value else: subtasks.append([points, testcases]) assert(100 == sum([int(st[0]) for st in subtasks])) n_input = sum([int(st[1]) for st in subtasks]) args["score_type"] = "GroupMin" args["score_type_parameters"] = "%s" % subtasks if "n_input" in conf: assert int(conf['n_input']) == n_input # If gen/GEN doesn't exist, just fallback to Sum except IOError: args["score_type"] = "Sum" total_value = float(conf.get("total_value", 100.0)) input_value = 0.0 n_input = int(conf['n_input']) if n_input != 0: input_value = total_value / n_input args["score_type_parameters"] = "%s" % input_value # If output_only is set, then the task type is OutputOnly if conf.get('output_only', False): args["task_type"] = "OutputOnly" args["time_limit"] = None args["memory_limit"] = None args["task_type_parameters"] = '["%s"]' % evaluation_param task.submission_format = [ SubmissionFormatElement("output_%03d.txt" % i) for i in xrange(n_input)] # If there is check/manager (or equivalent), then the task # type is Communication else: paths = [os.path.join(task_path, "check", "manager"), os.path.join(task_path, "cor", "manager")] for path in paths: if os.path.exists(path): args["task_type"] = "Communication" args["task_type_parameters"] = '[]' digest = self.file_cacher.put_file_from_path( path, "Manager for task %s" % name) args["managers"] += [ Manager("manager", digest)] for lang in LANGUAGES: stub_name = os.path.join( task_path, "sol", "stub.%s" % lang) if os.path.exists(stub_name): digest = self.file_cacher.put_file_from_path( stub_name, "Stub for task %s and language %s" % (name, lang)) args["managers"] += [ Manager("stub.%s" % lang, digest)] else: logger.warning("Stub for language %s not " "found." % lang) break # Otherwise, the task type is Batch else: args["task_type"] = "Batch" args["task_type_parameters"] = \ '["%s", ["%s", "%s"], "%s"]' % \ (compilation_param, infile_param, outfile_param, evaluation_param) args["testcases"] = [] for i in xrange(n_input): input_digest = self.file_cacher.put_file_from_path( os.path.join(task_path, "input", "input%d.txt" % i), "Input %d for task %s" % (i, name)) output_digest = self.file_cacher.put_file_from_path( os.path.join(task_path, "output", "output%d.txt" % i), "Output %d for task %s" % (i, name)) args["testcases"] += [ Testcase("%03d" % i, False, input_digest, output_digest)] if args["task_type"] == "OutputOnly": task.attachments += [ Attachment("input_%03d.txt" % i, input_digest)] public_testcases = load(conf, None, ["public_testcases", "risultati"], conv=lambda x: "" if x is None else x) if public_testcases != "": for x in public_testcases.split(","): args["testcases"][int(x.strip())].public = True dataset = Dataset(**args) task.active_dataset = dataset # Import was successful os.remove(os.path.join(task_path, ".import_error")) logger.info("Task parameters loaded.") return task
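
# Illustrative gen/GEN sketch for the parser above (the generator
# commands are made up): each non-comment line is one testcase command,
# and a "# ST: <points>" comment line opens a new subtask.
#
#     # ST: 0
#     1 10
#     # ST: 60
#     2 1000
#     3 1000
#     # ST: 40
#     4 1000000
#
# The points of all subtasks must sum to 100, or the assert above fails;
# with no "ST:" lines at all, the loader falls back to the Sum score type.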
def get_task(self, get_statement=True):
    """See docstring in class Loader.

    """
    json_src = os.path.join(self.path, 'problem.json')
    if not os.path.exists(json_src):
        logger.error('No task found.')
        return None
    with open(json_src) as json_file:
        data = json.load(json_file)

    name = data['code']
    logger.info("Loading parameters for task %s.", name)

    args = {}

    # Here we update the time of the last import.
    touch(os.path.join(self.path, ".itime"))
    # If this file is not deleted, then the import failed.
    touch(os.path.join(self.path, ".import_error"))

    args["name"] = name
    args["title"] = data['name']

    # Statements
    if get_statement:
        statements_dir = os.path.join(self.path, 'statements')
        if os.path.exists(statements_dir):
            statements = [
                filename
                for filename in os.listdir(statements_dir)
                if filename[-4:] == ".pdf"]
            if len(statements) > 0:
                args['statements'] = dict()
                logger.info('Statements found')
                for statement in statements:
                    language = statement[:-4]
                    if language == "en_US":
                        args["primary_statements"] = '["en_US"]'
                    digest = self.file_cacher.put_file_from_path(
                        os.path.join(statements_dir, statement),
                        "Statement for task %s (lang: %s)" %
                        (name, language))
                    args['statements'][language] = \
                        Statement(language, digest)

    # Attachments
    args["attachments"] = dict()
    attachments_dir = os.path.join(self.path, 'attachments')
    if os.path.exists(attachments_dir):
        logger.info("Attachments found")
        for filename in os.listdir(attachments_dir):
            digest = self.file_cacher.put_file_from_path(
                os.path.join(attachments_dir, filename),
                "Attachment %s for task %s" % (filename, name))
            args["attachments"][filename] = Attachment(filename, digest)

    data["task_type"] = \
        data["task_type"][0].upper() + data["task_type"][1:]

    # Setting the submission format:
    # obtaining the testcases' codenames.
    testcases_dir = os.path.join(self.path, 'tests')
    if not os.path.exists(testcases_dir):
        logger.warning('Testcase folder was not found')
        testcase_codenames = []
    else:
        testcase_codenames = sorted([
            filename[:-3]
            for filename in os.listdir(testcases_dir)
            if filename[-3:] == '.in'])

    if data["task_type"] == 'OutputOnly':
        args["submission_format"] = list()
        for codename in testcase_codenames:
            args["submission_format"].append(
                SubmissionFormatElement("%s.out" % codename))
    elif data["task_type"] == 'Notice':
        args["submission_format"] = list()
    else:
        args["submission_format"] = [
            SubmissionFormatElement("%s.%%l" % name)]

    # These options cannot be configured in the CPS format.
    # Uncomment the following to set specific values for them.
    # args['max_submission_number'] = 100
    # args['max_user_test_number'] = 100
    # args['min_submission_interval'] = make_timedelta(60)
    # args['min_user_test_interval'] = make_timedelta(60)

    # args['max_user_test_number'] = 10
    # args['min_user_test_interval'] = make_timedelta(60)

    # args['token_mode'] = 'infinite'
    # args['token_max_number'] = 100
    # args['token_min_interval'] = make_timedelta(60)
    # args['token_gen_initial'] = 1
    # args['token_gen_number'] = 1
    # args['token_gen_interval'] = make_timedelta(1800)
    # args['token_gen_max'] = 2

    if "score_precision" in data:
        args['score_precision'] = int(data["score_precision"])
    else:
        args['score_precision'] = 2
    args['max_submission_number'] = 50
    args['max_user_test_number'] = 50
    if data["task_type"] == 'OutputOnly':
        args['max_submission_number'] = 100
        args['max_user_test_number'] = 100
        args['min_submission_interval'] = make_timedelta(60)
        args['min_user_test_interval'] = make_timedelta(60)

    task = Task(**args)

    args = dict()

    args["task"] = task
    args["description"] = "Default"
    args["autojudge"] = True

    if data['task_type'] != 'OutputOnly' \
            and data['task_type'] != 'Notice':
        args["time_limit"] = float(data['time_limit'])
        args["memory_limit"] = int(data['memory_limit'])

    args["managers"] = {}

    # Checker
    checker_dir = os.path.join(self.path, "checker")
    checker_src = os.path.join(checker_dir, "checker.cpp")

    if os.path.exists(checker_src):
        logger.info("Checker found, compiling")
        checker_exe = os.path.join(checker_dir, "checker")
        os.system("g++ -x c++ -std=gnu++14 -O2 -static -o %s %s"
                  % (checker_exe, checker_src))
        digest = self.file_cacher.put_file_from_path(
            checker_exe,
            "Manager for task %s" % name)
        args["managers"]['checker'] = Manager("checker", digest)
        evaluation_param = "comparator"
    else:
        logger.info("Checker not found, using diff if necessary")
        evaluation_param = "diff"

    args["task_type"] = data['task_type']
    if data['task_type'] != 'Notice':
        args["task_type"] += '2017'
    args["task_type_parameters"] = \
        self._get_task_type_parameters(
            data, data['task_type'], evaluation_param)

    # Graders
    graders_dir = os.path.join(self.path, 'graders')

    if data['task_type'] == 'TwoSteps':
        pas_manager = name + 'lib.pas'
        pas_manager_path = os.path.join(graders_dir, pas_manager)
        if not os.path.exists(pas_manager_path):
            digest = self.file_cacher.put_file_content(
                ''.encode('utf-8'),
                'Pascal manager for task %s' % name)
            args["managers"][pas_manager] = Manager(pas_manager, digest)

    if not os.path.exists(graders_dir):
        logger.warning('Grader folder was not found')
        graders_list = []
    else:
        graders_list = \
            [filename
             for filename in os.listdir(graders_dir)
             if filename != 'manager.cpp']
    for grader_name in graders_list:
        grader_src = os.path.join(graders_dir, grader_name)
        digest = self.file_cacher.put_file_from_path(
            grader_src,
            "Manager for task %s" % name)
        args["managers"][grader_name] = Manager(grader_name, digest)

    # Manager
    manager_src = os.path.join(graders_dir, 'manager.cpp')

    if os.path.exists(manager_src):
        logger.info("Manager found, compiling")
        manager_exe = os.path.join(graders_dir, "manager")
        os.system("cat %s | g++ -x c++ -O2 -static -o %s -"
                  % (manager_src, manager_exe))
        digest = self.file_cacher.put_file_from_path(
            manager_exe,
            "Manager for task %s" % name)
        args["managers"]["manager"] = Manager("manager", digest)

    # Testcases
    args["testcases"] = {}

    for codename in testcase_codenames:
        infile = os.path.join(testcases_dir, "%s.in" % codename)
        outfile = os.path.join(testcases_dir, "%s.out" % codename)
        if not os.path.exists(outfile):
            logger.critical(
                'Could not find the output file for testcase %s'
                % codename)
            logger.critical('Aborting...')
            return

        input_digest = self.file_cacher.put_file_from_path(
            infile,
            "Input %s for task %s" % (codename, name))
        output_digest = self.file_cacher.put_file_from_path(
            outfile,
            "Output %s for task %s" % (codename, name))
        testcase = Testcase(codename, True,
                            input_digest, output_digest)
        args["testcases"][codename] = testcase

    # Score Type
    subtasks_dir = os.path.join(self.path, 'subtasks')
    if not os.path.exists(subtasks_dir):
        logger.warning('Subtask folder was not found')
        subtasks = []
    else:
        subtasks = sorted(os.listdir(subtasks_dir))

    if len(subtasks) == 0:
        number_tests = max(len(testcase_codenames), 1)
        args["score_type"] = "Sum"
        args["score_type_parameters"] = str(100 / number_tests)
    else:
        args["score_type"] = "GroupMinWithMaxScore"
        parsed_data = [100, ]
        subtask_no = -1
        add_optional_name = False
        for subtask in subtasks:
            subtask_no += 1
            with open(os.path.join(subtasks_dir, subtask)) \
                    as subtask_json:
                subtask_data = json.load(subtask_json)
                score = int(subtask_data["score"])
                testcases = "|".join(
                    re.escape(testcase)
                    for testcase in subtask_data["testcases"])
                optional_name = "Subtask %d" % subtask_no
                if subtask_no == 0 and score == 0:
                    add_optional_name = True
                    optional_name = "Samples"
                if add_optional_name:
                    parsed_data.append(
                        [score, testcases, optional_name])
                else:
                    parsed_data.append([score, testcases])
        args["score_type_parameters"] = json.dumps(parsed_data)

    args["description"] = \
        datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S %Z%z")

    dataset = Dataset(**args)
    task.active_dataset = dataset

    os.remove(os.path.join(self.path, ".import_error"))

    logger.info("Task parameters loaded.")

    return task
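
# Illustrative sketch of one file in subtasks/ as read above (the values
# are made up; only the "score" and "testcases" keys are actually used):
#
#     {
#         "score": 30,
#         "testcases": ["1-01", "1-02", "1-03"]
#     }
#
# A first subtask with score 0 is labelled "Samples"; once that happens,
# every following group also gets an explicit "Subtask %d" name.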
def get_task(self, name):
    """See docstring in class Loader.

    """
    try:
        num = self.tasks_order[name]

    # Here we expose an undocumented behavior, so that cmsMake can
    # import a task even without the whole contest; this is not to
    # be relied upon in general.
    except AttributeError:
        num = 1

    task_path = os.path.join(self.path, "problems", name)

    logger.info("Loading parameters for task %s.", name)

    args = {}

    # Here we update the time of the last import.
    touch(os.path.join(task_path, ".itime"))
    # If this file is not deleted, then the import failed.
    touch(os.path.join(task_path, ".import_error"))

    args["num"] = num

    # Get alphabetical task index for use in title.
    index = None
    contest_tree = ET.parse(os.path.join(self.path, "contest.xml"))
    contest_root = contest_tree.getroot()
    for problem in contest_root.find('problems'):
        if os.path.basename(problem.attrib['url']) == name:
            index = problem.attrib['index']

    tree = ET.parse(os.path.join(task_path, "problem.xml"))
    root = tree.getroot()

    args["name"] = name
    if index is not None:
        args["title"] = index.upper() + '. '
    else:
        args["title"] = ''
    args["title"] += root.find('names') \
        .find("name[@language='%s']" % self.primary_language) \
        .attrib['value']

    args["statements"] = []
    args["primary_statements"] = []
    for language in self.languages:
        path = os.path.join(task_path, 'statements',
                            '.pdf', language, 'problem.pdf')
        if os.path.exists(path):
            lang = LANGUAGE_MAP[language]
            digest = self.file_cacher.put_file_from_path(
                path,
                "Statement for task %s (lang: %s)" % (name, language))
            args["statements"].append(Statement(lang, digest))
            args["primary_statements"].append(lang)
    args["primary_statements"] = \
        '["%s"]' % '","'.join(args["primary_statements"])

    args["submission_format"] = [SubmissionFormatElement("%s.%%l" % name)]

    # These options cannot be configured in the Polygon format.
    # Uncomment the following to set specific values for them.
    # args['max_submission_number'] = 100
    # args['max_user_test_number'] = 100
    # args['min_submission_interval'] = make_timedelta(60)
    # args['min_user_test_interval'] = make_timedelta(60)

    # args['max_user_test_number'] = 10
    # args['min_user_test_interval'] = make_timedelta(60)

    # args['token_mode'] = 'infinite'
    # args['token_max_number'] = 100
    # args['token_min_interval'] = make_timedelta(60)
    # args['token_gen_initial'] = 1
    # args['token_gen_number'] = 1
    # args['token_gen_interval'] = make_timedelta(1800)
    # args['token_gen_max'] = 2

    task_cms_conf_path = os.path.join(task_path, 'files')
    task_cms_conf = None
    if os.path.exists(os.path.join(task_cms_conf_path, 'cms_conf.py')):
        sys.path.append(task_cms_conf_path)
        logger.info("Found additional CMS options for task %s.", name)
        task_cms_conf = __import__('cms_conf')
        # TODO: probably should find more clever way to get rid of
        # caching
        task_cms_conf = reload(task_cms_conf)
        sys.path.pop()
    if task_cms_conf is not None and hasattr(task_cms_conf, "general"):
        args.update(task_cms_conf.general)

    task = Task(**args)

    judging = root.find('judging')
    testset = None
    for testset in judging:
        testset_name = testset.attrib["name"]

        args = {}
        args["task"] = task
        args["description"] = testset_name
        args["autojudge"] = False

        tl = float(testset.find('time-limit').text)
        ml = float(testset.find('memory-limit').text)
        args["time_limit"] = tl * 0.001
        args["memory_limit"] = int(ml / (1024 * 1024))

        args["managers"] = []
        infile_param = judging.attrib['input-file']
        outfile_param = judging.attrib['output-file']

        checker_src = os.path.join(task_path, "files", "check.cpp")
        if os.path.exists(checker_src):
            logger.info("Checker found, compiling")
            checker_exe = os.path.join(task_path, "files", "checker")
            testlib_path = "/usr/local/include/cms/testlib.h"
            if not config.installed:
                testlib_path = os.path.join(os.path.dirname(__file__),
                                            "polygon", "testlib.h")
            os.system("cat %s | \
                sed 's$testlib.h$%s$' | \
                g++ -x c++ -O2 -static -o %s -" %
                      (checker_src, testlib_path, checker_exe))
            digest = self.file_cacher.put_file_from_path(
                checker_exe,
                "Manager for task %s" % name)
            args["managers"] += [Manager("checker", digest)]
            evaluation_param = "comparator"
        else:
            logger.info("Checker not found, using diff")
            evaluation_param = "diff"

        args["task_type"] = "Batch"
        args["task_type_parameters"] = \
            '["%s", ["%s", "%s"], "%s"]' % \
            ("alone", infile_param, outfile_param, evaluation_param)

        args["score_type"] = "Sum"
        total_value = 100.0
        input_value = 0.0

        testcases = int(testset.find('test-count').text)

        n_input = testcases
        if n_input != 0:
            input_value = total_value / n_input
        args["score_type_parameters"] = str(input_value)

        args["testcases"] = []

        for i in xrange(testcases):
            infile = os.path.join(task_path, testset_name,
                                  "%02d" % (i + 1))
            outfile = os.path.join(task_path, testset_name,
                                   "%02d.a" % (i + 1))
            if self.dos2unix_found:
                os.system('dos2unix -q %s' % (infile, ))
                os.system('dos2unix -q %s' % (outfile, ))
            input_digest = self.file_cacher.put_file_from_path(
                infile,
                "Input %d for task %s" % (i, name))
            output_digest = self.file_cacher.put_file_from_path(
                outfile,
                "Output %d for task %s" % (i, name))
            testcase = Testcase("%03d" % (i, ), False,
                                input_digest, output_digest)
            testcase.public = True
            args["testcases"] += [testcase]

        if task_cms_conf is not None and \
                hasattr(task_cms_conf, "datasets") and \
                testset_name in task_cms_conf.datasets:
            args.update(task_cms_conf.datasets[testset_name])

        dataset = Dataset(**args)
        if testset_name == "tests":
            task.active_dataset = dataset

    os.remove(os.path.join(task_path, ".import_error"))

    logger.info("Task parameters loaded.")

    return task
def get_task(self, get_statement=True):
    """See docstring in class Loader.

    """
    logger.info("Checking dos2unix presence")
    i = os.system('dos2unix -V 2>/dev/null')
    self.dos2unix_found = (i == 0)
    if not self.dos2unix_found:
        logger.error("dos2unix not found - tests will not be converted!")

    name = os.path.basename(self.path)
    logger.info("Loading parameters for task %s.", name)

    args = {}

    # Here we update the time of the last import.
    touch(os.path.join(self.path, ".itime"))
    # If this file is not deleted, then the import failed.
    touch(os.path.join(self.path, ".import_error"))

    # Get alphabetical task index for use in title.

    tree = ET.parse(os.path.join(self.path, "problem.xml"))
    root = tree.getroot()

    args["name"] = name
    args["title"] = root.find('names').find("name").attrib['value']

    if get_statement:
        args["statements"] = {}
        args["primary_statements"] = []
        for language, lang in iteritems(LANGUAGE_MAP):
            path = os.path.join(self.path, 'statements',
                                '.pdf', language, 'problem.pdf')
            if os.path.exists(path):
                digest = self.file_cacher.put_file_from_path(
                    path,
                    "Statement for task %s (lang: %s)" %
                    (name, language))
                args["statements"][lang] = Statement(lang, digest)
                args["primary_statements"].append(lang)

    args["submission_format"] = [SubmissionFormatElement("%s.%%l" % name)]

    # These options cannot be configured in the Polygon format.
    # Uncomment the following to set specific values for them.

    # args['max_submission_number'] = 100
    # args['max_user_test_number'] = 100
    # args['min_submission_interval'] = make_timedelta(60)
    # args['min_user_test_interval'] = make_timedelta(60)

    # args['max_user_test_number'] = 10
    # args['min_user_test_interval'] = make_timedelta(60)

    # args['token_mode'] = 'infinite'
    # args['token_max_number'] = 100
    # args['token_min_interval'] = make_timedelta(60)
    # args['token_gen_initial'] = 1
    # args['token_gen_number'] = 1
    # args['token_gen_interval'] = make_timedelta(1800)
    # args['token_gen_max'] = 2

    task_cms_conf_path = os.path.join(self.path, 'files', 'cms_conf.py')
    task_cms_conf = None
    if os.path.exists(task_cms_conf_path):
        logger.info("Found additional CMS options for task %s.", name)
        with open(task_cms_conf_path, 'r') as f:
            task_cms_conf = imp.load_module('cms_conf', f,
                                            task_cms_conf_path,
                                            ('.py', 'r', imp.PY_SOURCE))
    if task_cms_conf is not None and hasattr(task_cms_conf, "general"):
        args.update(task_cms_conf.general)

    task = Task(**args)

    judging = root.find('judging')
    testset = None
    for testset in judging:
        testset_name = testset.attrib["name"]

        args = {}
        args["task"] = task
        args["description"] = testset_name
        args["autojudge"] = False

        tl = float(testset.find('time-limit').text)
        ml = float(testset.find('memory-limit').text)
        args["time_limit"] = tl * 0.001
        args["memory_limit"] = ml // (1024 * 1024)

        args["managers"] = {}
        infile_param = judging.attrib['input-file']
        outfile_param = judging.attrib['output-file']

        # The checker can be in either of these two locations.
        checker_src = os.path.join(self.path, "files", "check.cpp")
        if not os.path.exists(checker_src):
            checker_src = os.path.join(self.path, "check.cpp")

        if os.path.exists(checker_src):
            logger.info("Checker found, compiling")
            checker_exe = os.path.join(os.path.dirname(checker_src),
                                       "checker")
            testlib_path = "/usr/local/include/cms/testlib.h"
            if not config.installed:
                testlib_path = os.path.join(os.path.dirname(__file__),
                                            "polygon", "testlib.h")
            os.system("cat %s | \
                sed 's$testlib.h$%s$' | \
                g++ -x c++ -O2 -static -o %s -" %
                      (checker_src, testlib_path, checker_exe))
            digest = self.file_cacher.put_file_from_path(
                checker_exe,
                "Manager for task %s" % name)
            args["managers"]["checker"] = Manager("checker", digest)
            evaluation_param = "comparator"
        else:
            logger.info("Checker not found, using diff")
            evaluation_param = "diff"

        args["task_type"] = "Batch"
        args["task_type_parameters"] = \
            ["alone", [infile_param, outfile_param], evaluation_param]

        args["score_type"] = "Sum"
        total_value = 100.0
        input_value = 0.0

        testcases = int(testset.find('test-count').text)

        n_input = testcases
        if n_input != 0:
            input_value = total_value / n_input
        args["score_type_parameters"] = input_value

        args["testcases"] = {}

        for i in range(testcases):
            infile = os.path.join(self.path, testset_name,
                                  "%02d" % (i + 1))
            outfile = os.path.join(self.path, testset_name,
                                   "%02d.a" % (i + 1))
            if self.dos2unix_found:
                os.system('dos2unix -q %s' % (infile, ))
                os.system('dos2unix -q %s' % (outfile, ))
            input_digest = self.file_cacher.put_file_from_path(
                infile,
                "Input %d for task %s" % (i, name))
            output_digest = self.file_cacher.put_file_from_path(
                outfile,
                "Output %d for task %s" % (i, name))
            testcase = Testcase("%03d" % (i, ), False,
                                input_digest, output_digest)
            testcase.public = True
            args["testcases"][testcase.codename] = testcase

        if task_cms_conf is not None and \
                hasattr(task_cms_conf, "datasets") and \
                testset_name in task_cms_conf.datasets:
            args.update(task_cms_conf.datasets[testset_name])

        dataset = Dataset(**args)
        if testset_name == "tests":
            task.active_dataset = dataset

    os.remove(os.path.join(self.path, ".import_error"))

    logger.info("Task parameters loaded.")

    return task
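
# Illustrative problem.xml sketch covering just the fields read above
# (element names beyond those accesses, e.g. the <testset> tag, are
# assumed from the usual Polygon conventions; the values are made up):
#
#     <problem>
#       <names>
#         <name language="english" value="Example Problem"/>
#       </names>
#       <judging input-file="" output-file="">
#         <testset name="tests">
#           <time-limit>1000</time-limit>            <!-- milliseconds -->
#           <memory-limit>268435456</memory-limit>   <!-- bytes -->
#           <test-count>20</test-count>
#         </testset>
#       </judging>
#     </problem>
#
# Hence the conversions above: tl * 0.001 (ms to seconds) and
# ml // (1024 * 1024) (bytes to MiB).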
def get_task(self, get_statement=True):
    """See docstring in class TaskLoader."""
    name = os.path.split(self.path)[1]

    if (not os.path.exists(os.path.join(self.path, "task.yaml"))) and \
            (not os.path.exists(os.path.join(
                self.path, "..", name + ".yaml"))):
        logger.critical("File missing: \"task.yaml\"")
        return None

    # We first look for the yaml file inside the task folder, and
    # eventually fall back to a yaml file in its parent folder.
    try:
        conf = yaml.safe_load(
            io.open(os.path.join(self.path, "task.yaml"),
                    "rt", encoding="utf-8"))
    except IOError as err:
        try:
            deprecated_path = os.path.join(self.path, "..",
                                           name + ".yaml")
            conf = yaml.safe_load(
                io.open(deprecated_path, "rt", encoding="utf-8"))

            logger.warning("You're using a deprecated location for the "
                           "task.yaml file. You're advised to move %s to "
                           "%s.", deprecated_path,
                           os.path.join(self.path, "task.yaml"))
        except IOError:
            # Since both task.yaml and the (deprecated)
            # "../taskname.yaml" are missing, we will only warn the user
            # that task.yaml is missing (to avoid encouraging the use of
            # the deprecated one).
            raise err

    # Here we update the time of the last import.
    touch(os.path.join(self.path, ".itime"))
    # If this file is not deleted, then the import failed.
    touch(os.path.join(self.path, ".import_error"))

    args = {}

    load(conf, args, ["name", "nome_breve"])
    load(conf, args, ["title", "nome"])

    if name != args["name"]:
        logger.info("The task name (%s) and the directory name (%s) are "
                    "different. The former will be used.",
                    args["name"], name)

    if args["name"] == args["title"]:
        logger.warning("Short name equals long name (title). "
                       "Please check.")

    name = args["name"]

    logger.info("Loading parameters for task %s.", name)

    if get_statement:
        primary_language = load(conf, None, "primary_language")
        if primary_language is None:
            primary_language = 'it'
        paths = [os.path.join(self.path, "statement", "statement.pdf"),
                 os.path.join(self.path, "testo", "testo.pdf")]
        for path in paths:
            if os.path.exists(path):
                digest = self.file_cacher.put_file_from_path(
                    path,
                    "Statement for task %s (lang: %s)" %
                    (name, primary_language))
                break
        else:
            logger.critical("Couldn't find any task statement, "
                            "aborting.")
            sys.exit(1)
        args["statements"] = {
            primary_language: Statement(primary_language, digest)}
        args["primary_statements"] = [primary_language]

    args["submission_format"] = [SubmissionFormatElement("%s.%%l" % name)]

    if conf.get("score_mode", None) == SCORE_MODE_MAX:
        args["score_mode"] = SCORE_MODE_MAX
    elif conf.get("score_mode", None) == SCORE_MODE_MAX_TOKENED_LAST:
        args["score_mode"] = SCORE_MODE_MAX_TOKENED_LAST

    # Use the new token settings format if detected.
    if "token_mode" in conf:
        load(conf, args, "token_mode")
        load(conf, args, "token_max_number")
        load(conf, args, "token_min_interval", conv=make_timedelta)
        load(conf, args, "token_gen_initial")
        load(conf, args, "token_gen_number")
        load(conf, args, "token_gen_interval", conv=make_timedelta)
        load(conf, args, "token_gen_max")
    # Otherwise fall back on the old one.
    else:
        logger.warning(
            "task.yaml uses a deprecated format for token settings "
            "which will soon stop being supported, you're advised to "
            "update it.")

        # Determine the mode.
        if conf.get("token_initial", None) is None:
            args["token_mode"] = "disabled"
        elif conf.get("token_gen_number", 0) > 0 and \
                conf.get("token_gen_time", 0) == 0:
            args["token_mode"] = "infinite"
        else:
            args["token_mode"] = "finite"
        # Set the old default values.
        args["token_gen_initial"] = 0
        args["token_gen_number"] = 0
        args["token_gen_interval"] = timedelta()
        # Copy the parameters to their new names.
load(conf, args, "token_total", "token_max_number") load(conf, args, "token_min_interval", conv=make_timedelta) load(conf, args, "token_initial", "token_gen_initial") load(conf, args, "token_gen_number") load(conf, args, "token_gen_time", "token_gen_interval", conv=make_timedelta) load(conf, args, "token_max", "token_gen_max") # Remove some corner cases. if args["token_gen_initial"] is None: args["token_gen_initial"] = 0 if args["token_gen_interval"].total_seconds() == 0: args["token_gen_interval"] = timedelta(minutes=1) load(conf, args, "max_submission_number") load(conf, args, "max_user_test_number") load(conf, args, "min_submission_interval", conv=make_timedelta) load(conf, args, "min_user_test_interval", conv=make_timedelta) # Attachments args["attachments"] = dict() if os.path.exists(os.path.join(self.path, "att")): for filename in os.listdir(os.path.join(self.path, "att")): digest = self.file_cacher.put_file_from_path( os.path.join(self.path, "att", filename), "Attachment %s for task %s" % (filename, name)) args["attachments"][filename] = Attachment(filename, digest) task = Task(**args) args = {} args["task"] = task args["description"] = conf.get("version", "Default") args["autojudge"] = False load(conf, args, ["time_limit", "timeout"], conv=float) load(conf, args, ["memory_limit", "memlimit"]) # Builds the parameters that depend on the task type args["managers"] = [] infile_param = conf.get("infile", "input.txt") outfile_param = conf.get("outfile", "output.txt") # If there is sol/grader.%l for some language %l, then, # presuming that the task type is Batch, we retrieve graders # in the form sol/grader.%l graders = False for lang in LANGUAGES: if os.path.exists( os.path.join(self.path, "sol", "grader%s" % lang.source_extension)): graders = True break if graders: # Read grader for each language for lang in LANGUAGES: extension = lang.source_extension grader_filename = os.path.join(self.path, "sol", "grader%s" % extension) if os.path.exists(grader_filename): digest = self.file_cacher.put_file_from_path( grader_filename, "Grader for task %s and language %s" % (task.name, lang)) args["managers"] += [ Manager("grader%s" % extension, digest) ] else: logger.warning("Grader for language %s not found ", lang) # Read managers with other known file extensions for other_filename in os.listdir(os.path.join(self.path, "sol")): if any( other_filename.endswith(header) for header in HEADER_EXTS): digest = self.file_cacher.put_file_from_path( os.path.join(self.path, "sol", other_filename), "Manager %s for task %s" % (other_filename, task.name)) args["managers"] += [Manager(other_filename, digest)] compilation_param = "grader" else: compilation_param = "alone" # If there is check/checker (or equivalent), then, presuming # that the task type is Batch or OutputOnly, we retrieve the # comparator paths = [ os.path.join(self.path, "check", "checker"), os.path.join(self.path, "cor", "correttore") ] for path in paths: if os.path.exists(path): digest = self.file_cacher.put_file_from_path( path, "Manager for task %s" % task.name) args["managers"] += [Manager("checker", digest)] evaluation_param = "comparator" break else: evaluation_param = "diff" # Detect subtasks by checking GEN gen_filename = os.path.join(self.path, 'gen', 'GEN') try: with io.open(gen_filename, "rt", encoding="utf-8") as gen_file: subtasks = [] testcases = 0 points = None for line in gen_file: line = line.strip() splitted = line.split('#', 1) if len(splitted) == 1: # This line represents a testcase, otherwise # it's just a blank if 
splitted[0] != '': testcases += 1 else: testcase, comment = splitted testcase = testcase.strip() comment = comment.strip() testcase_detected = len(testcase) > 0 copy_testcase_detected = comment.startswith("COPY:") subtask_detected = comment.startswith('ST:') flags = [ testcase_detected, copy_testcase_detected, subtask_detected ] if len([x for x in flags if x]) > 1: raise Exception("No testcase and command in" " the same line allowed") # This line represents a testcase and contains a # comment, but the comment doesn't start a new # subtask if testcase_detected or copy_testcase_detected: testcases += 1 # This line starts a new subtask if subtask_detected: # Close the previous subtask if points is None: assert (testcases == 0) else: subtasks.append([points, testcases]) # Open the new one testcases = 0 points = int(comment[3:].strip()) # Close last subtask (if no subtasks were defined, just # fallback to Sum) if points is None: args["score_type"] = "Sum" total_value = float(conf.get("total_value", 100.0)) input_value = 0.0 n_input = testcases if n_input != 0: input_value = total_value / n_input args["score_type_parameters"] = input_value else: subtasks.append([points, testcases]) assert (100 == sum([int(st[0]) for st in subtasks])) n_input = sum([int(st[1]) for st in subtasks]) args["score_type"] = "GroupMin" args["score_type_parameters"] = subtasks if "n_input" in conf: assert int(conf['n_input']) == n_input # If gen/GEN doesn't exist, just fallback to Sum except IOError: args["score_type"] = "Sum" total_value = float(conf.get("total_value", 100.0)) input_value = 0.0 n_input = int(conf['n_input']) if n_input != 0: input_value = total_value / n_input args["score_type_parameters"] = input_value # If output_only is set, then the task type is OutputOnly if conf.get('output_only', False): args["task_type"] = "OutputOnly" args["time_limit"] = None args["memory_limit"] = None args["task_type_parameters"] = [evaluation_param] task.submission_format = [ SubmissionFormatElement("output_%03d.txt" % i) for i in range(n_input) ] # If there is check/manager (or equivalent), then the task # type is Communication else: paths = [ os.path.join(self.path, "check", "manager"), os.path.join(self.path, "cor", "manager") ] for path in paths: if os.path.exists(path): num_processes = load(conf, None, "num_processes") if num_processes is None: num_processes = 1 logger.info("Task type Communication") args["task_type"] = "Communication" args["task_type_parameters"] = [num_processes] digest = self.file_cacher.put_file_from_path( path, "Manager for task %s" % task.name) args["managers"] += [Manager("manager", digest)] for lang in LANGUAGES: stub_name = os.path.join( self.path, "sol", "stub%s" % lang.source_extension) if os.path.exists(stub_name): digest = self.file_cacher.put_file_from_path( stub_name, "Stub for task %s and language %s" % (task.name, lang.name)) args["managers"] += [ Manager("stub%s" % lang.source_extension, digest) ] else: logger.warning( "Stub for language %s not " "found.", lang.name) for other_filename in os.listdir( os.path.join(self.path, "sol")): if any( other_filename.endswith(header) for header in HEADER_EXTS): digest = self.file_cacher.put_file_from_path( os.path.join(self.path, "sol", other_filename), "Stub %s for task %s" % (other_filename, task.name)) args["managers"] += [ Manager(other_filename, digest) ] break # Otherwise, the task type is Batch else: args["task_type"] = "Batch" args["task_type_parameters"] = \ [compilation_param, [infile_param, outfile_param], evaluation_param] 
args["testcases"] = [] for i in range(n_input): input_digest = self.file_cacher.put_file_from_path( os.path.join(self.path, "input", "input%d.txt" % i), "Input %d for task %s" % (i, task.name)) output_digest = self.file_cacher.put_file_from_path( os.path.join(self.path, "output", "output%d.txt" % i), "Output %d for task %s" % (i, task.name)) args["testcases"] += [ Testcase("%03d" % i, False, input_digest, output_digest) ] if args["task_type"] == "OutputOnly": task.attachments.set( Attachment("input_%03d.txt" % i, input_digest)) public_testcases = load(conf, None, ["public_testcases", "risultati"], conv=lambda x: "" if x is None else x) if public_testcases == "all": for t in args["testcases"]: t.public = True elif len(public_testcases) > 0: for x in public_testcases.split(","): args["testcases"][int(x.strip())].public = True args["testcases"] = dict((tc.codename, tc) for tc in args["testcases"]) args["managers"] = dict((mg.filename, mg) for mg in args["managers"]) dataset = Dataset(**args) task.active_dataset = dataset # Import was successful os.remove(os.path.join(self.path, ".import_error")) logger.info("Task parameters loaded.") return task
def get_task(self, get_statement=True):
    # Name
    name = os.path.split(self.path)[1]

    # Check for required files.
    if not self.__require_file("problem.json"):
        return None

    # Load the JSON description.
    with open(os.path.join(self.path, 'problem.json'), 'r') as json_file:
        problem_json = json.loads(json_file.read())
    problem = problem_json['problem']

    # Load info
    args = {}
    args['name'] = name
    args['title'] = problem['name']

    logger.info("Loading parameters for task %s.", name)

    # Load statement
    if get_statement:
        language = 'ru'
        path = os.path.join(self.path, '..', '..', 'statements',
                            name + '.pdf')
        if os.path.exists(path):
            digest = self.file_cacher.put_file_from_path(
                path,
                "Statement for task %s (lang: %s)" % (name, language))
            args['statements'] = [Statement(language, digest)]
            args['primary_statements'] = '["%s"]' % (language)
        else:
            logger.error('No statements found for problem "%s"' % (name))

    # Load other properties
    args['submission_format'] = [
        SubmissionFormatElement('%s.%%l' % name)]
    self.__load_token_submission_info(
        os.path.join(self.path, '..', '..'), args)
    args['score_mode'] = SCORE_MODE_MAX_TOKENED_LAST

    contest_mode = self.__get_contest_mode(
        os.path.join(self.path, '..', '..'))
    if contest_mode != 'running' and contest_mode != 'final':
        logger.critical('Invalid contest mode')
        return None

    task = Task(**args)

    # Load dataset info
    args = {}
    args['task'] = task
    args['description'] = ''
    args['autojudge'] = False
    args['time_limit'] = problem['timeLimit']
    args['memory_limit'] = problem['memoryLimit']
    args['managers'] = []

    # Add checker
    checker_src = os.path.join(self.path, 'checker.cpp')
    checker_exe = os.path.join(self.path, 'checker')
    if os.path.exists(checker_src):
        logger.info("Checker found, compiling")
        os.system("g++ -x c++ -O2 -static -DCMS -o %s %s"
                  % (checker_exe, checker_src))
        digest = self.file_cacher.put_file_from_path(
            checker_exe,
            "Manager for task %s" % name)
        args['managers'] += [Manager('checker', digest)]
        evaluation_param = 'comparator'
    else:
        logger.info("Checker not found, using diff")
        evaluation_param = 'diff'

    # Add testcases
    args['testcases'] = []
    pretest_cnt = self.__add_tests('pretests', task, args, 0,
                                   True, contest_mode)
    self.__add_tests('tests', task, args, pretest_cnt,
                     False, contest_mode)

    # Add input/output
    infile_param = problem['input']
    outfile_param = problem['output']

    args["task_type"] = "Batch"
    args["task_type_parameters"] = \
        '["%s", ["%s", "%s"], "%s"]' % \
        ("alone", infile_param, outfile_param, evaluation_param)

    if problem['scoreType'] == 'subtask':
        subtasks = problem['subtasks']
        if contest_mode == 'running':
            subtasks = [[1, 1]] * pretest_cnt + subtasks
        else:
            subtasks = [[0, pretest_cnt]] + subtasks
        args['score_type'] = 'GroupMin'
        args['score_type_parameters'] = str(subtasks)
    elif problem['scoreType'] == 'byTest':
        args['score_type'] = 'Sum'
        args['score_type_parameters'] = str(problem['cost'])
    else:
        logger.critical('Unknown scoring type: %s'
                        % problem['scoreType'])
        return None

    # Finalize dataset
    dataset = Dataset(**args)
    task.active_dataset = dataset

    # Import was successful
    logger.info("Task parameters loaded.")

    return task
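
# Illustrative problem.json sketch for this loader, restricted to the keys
# actually read above (all values, and the [points, testcase_count] shape
# of "subtasks", are assumptions for illustration only):
#
#     {
#         "problem": {
#             "name": "Example Problem",
#             "timeLimit": 1.0,
#             "memoryLimit": 256,
#             "input": "",
#             "output": "",
#             "scoreType": "subtask",
#             "subtasks": [[30, 10], [70, 25]]
#         }
#     }
#
# With "scoreType": "byTest", a "cost" key is read instead of "subtasks".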