Example #1
    def get_task(self, get_statement):
        """
        See docstring in base_loader.
        """

        # Cannot import a task that has generation errors.
        if os.path.isfile(self.task_error_mark):
            raise Exception("Task has an error mark: %s" %
                            self.task_error_mark)
        if not os.path.isfile(self.task_ok_mark):
            raise Exception("Task does not have an okay mark: %s" %
                            self.task_ok_mark)

        # Mark this import as an error until we finish.
        touch(self.contest_error_mark)
        if os.path.isfile(self.contest_ok_mark):
            os.remove(self.contest_ok_mark)

        args = {}

        self.put_names(args)
        if get_statement:
            self.put_statements(args)
        self.put_score_mode(args)
        self.put_task_submission_format(args)
        self.put_attachments(args)

        task = Task(**args)
        task.active_dataset = self.create_dataset(task)

        # Success - mark this task as okay.
        touch(self.contest_ok_mark)
        os.remove(self.contest_error_mark)
        return task
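All of these loaders guard the import with marker files: an error mark is created up front and is swapped for an okay mark only if the import finishes, so a crash leaves the error mark behind for the next run to detect. A minimal sketch of a touch helper in that spirit (CMS ships its own; this version is an assumption):

    import os

    def touch(path):
        # Create the file if it is missing, then bump its modification time.
        # Opening in append mode never truncates an existing file.
        with open(path, "a"):
            os.utime(path, None)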
Example #2
    def get_task(self, get_statement=True):
        """See docstring in class Loader.

        """

        logger.info("Checking dos2unix presence")
        i = os.system('dos2unix -V 2>/dev/null')
        self.dos2unix_found = (i == 0)
        if not self.dos2unix_found:
            logger.error("dos2unix not found - tests will not be converted!")

        name = os.path.basename(self.path)
        logger.info("Loading parameters for task %s.", name)

        args = {}

        # Here we update the time of the last import.
        touch(os.path.join(self.path, ".itime"))
        # If this file is not deleted, then the import failed.
        touch(os.path.join(self.path, ".import_error"))

        # Parse the problem description from problem.xml.
        tree = ET.parse(os.path.join(self.path, "problem.xml"))
        root = tree.getroot()

        args["name"] = name
        args["title"] = str(root.find('names').find("name").attrib['value'])

        if get_statement:
            args["statements"] = {}
            args["primary_statements"] = []
            for language, lang in iteritems(LANGUAGE_MAP):
                path = os.path.join(self.path, 'statements', '.pdf', language,
                                    'problem.pdf')
                if os.path.exists(path):
                    digest = self.file_cacher.put_file_from_path(
                        path,
                        "Statement for task %s (lang: %s)" % (name, language))
                    args["statements"][lang] = Statement(lang, digest)
                    args["primary_statements"].append(lang)

        args["submission_format"] = ["%s.%%l" % name]

        # These options cannot be configured in the Polygon format.
        # Uncomment the following to set specific values for them.

        # args['max_submission_number'] = 100
        # args['max_user_test_number'] = 100
        # args['min_submission_interval'] = make_timedelta(60)
        # args['min_user_test_interval'] = make_timedelta(60)

        # args['max_user_test_number'] = 10
        # args['min_user_test_interval'] = make_timedelta(60)

        # args['token_mode'] = 'infinite'
        # args['token_max_number'] = 100
        # args['token_min_interval'] = make_timedelta(60)
        # args['token_gen_initial'] = 1
        # args['token_gen_number'] = 1
        # args['token_gen_interval'] = make_timedelta(1800)
        # args['token_gen_max'] = 2

        task_cms_conf_path = os.path.join(self.path, 'files', 'cms_conf.py')
        task_cms_conf = None
        if os.path.exists(task_cms_conf_path):
            logger.info("Found additional CMS options for task %s.", name)
            with io.open(task_cms_conf_path, 'rb') as f:
                task_cms_conf = imp.load_module('cms_conf', f,
                                                task_cms_conf_path,
                                                ('.py', 'r', imp.PY_SOURCE))
        if task_cms_conf is not None and hasattr(task_cms_conf, "general"):
            args.update(task_cms_conf.general)

        task = Task(**args)

        judging = root.find('judging')
        testset = None
        for testset in judging:
            testset_name = testset.attrib["name"]

            args = {}
            args["task"] = task
            args["description"] = str(testset_name)
            args["autojudge"] = False

            tl = float(testset.find('time-limit').text)
            ml = int(testset.find('memory-limit').text)
            args["time_limit"] = tl * 0.001
            args["memory_limit"] = ml // (1024 * 1024)

            args["managers"] = {}
            infile_param = judging.attrib['input-file']
            outfile_param = judging.attrib['output-file']

            # Checker can be in any of these two locations.
            checker_src = os.path.join(self.path, "files", "check.cpp")
            if not os.path.exists(checker_src):
                checker_src = os.path.join(self.path, "check.cpp")

            if os.path.exists(checker_src):
                logger.info("Checker found, compiling")
                checker_exe = os.path.join(os.path.dirname(checker_src),
                                           "checker")
                testlib_path = "/usr/local/include/cms"
                testlib_include = os.path.join(testlib_path, "testlib.h")
                if not config.installed:
                    testlib_path = os.path.join(os.path.dirname(__file__),
                                                "polygon")
                code = subprocess.call([
                    "g++", "-x", "c++", "-O2", "-static", "-DCMS", "-I",
                    testlib_path, "-include", testlib_include, "-o",
                    checker_exe, checker_src
                ])
                if code != 0:
                    logger.critical("Could not compile checker")
                    return None
                digest = self.file_cacher.put_file_from_path(
                    checker_exe, "Manager for task %s" % name)
                args["managers"]["checker"] = Manager("checker", digest)
                evaluation_param = "comparator"
            else:
                logger.info("Checker not found, using diff")
                evaluation_param = "diff"

            args["task_type"] = "Batch"
            args["task_type_parameters"] = \
                ["alone", [infile_param, outfile_param], evaluation_param]

            args["score_type"] = "Sum"
            total_value = 100.0
            input_value = 0.0

            testcases = int(testset.find('test-count').text)

            n_input = testcases
            if n_input != 0:
                input_value = total_value / n_input
            args["score_type_parameters"] = input_value

            args["testcases"] = {}

            for i in range(testcases):
                infile = os.path.join(self.path, testset_name,
                                      "%02d" % (i + 1))
                outfile = os.path.join(self.path, testset_name,
                                       "%02d.a" % (i + 1))
                if self.dos2unix_found:
                    os.system('dos2unix -q %s' % (infile, ))
                    os.system('dos2unix -q %s' % (outfile, ))
                input_digest = self.file_cacher.put_file_from_path(
                    infile, "Input %d for task %s" % (i, name))
                output_digest = self.file_cacher.put_file_from_path(
                    outfile, "Output %d for task %s" % (i, name))
                testcase = Testcase("%03d" % (i, ), False, input_digest,
                                    output_digest)
                testcase.public = True
                args["testcases"][testcase.codename] = testcase

            if task_cms_conf is not None and \
               hasattr(task_cms_conf, "datasets") and \
               testset_name in task_cms_conf.datasets:
                args.update(task_cms_conf.datasets[testset_name])

            dataset = Dataset(**args)
            if testset_name == "tests":
                task.active_dataset = dataset

        os.remove(os.path.join(self.path, ".import_error"))

        logger.info("Task parameters loaded.")
        return task
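Polygon's problem.xml stores the time limit in milliseconds and the memory limit in bytes, which the loader converts to CMS's seconds and MiB. A standalone sketch of just that conversion (the sample XML below is illustrative, with element names matching the snippet above):

    import xml.etree.ElementTree as ET

    XML = """
    <problem><judging input-file="in.txt" output-file="out.txt">
      <testset name="tests">
        <time-limit>2000</time-limit>
        <memory-limit>268435456</memory-limit>
        <test-count>10</test-count>
      </testset>
    </judging></problem>
    """

    root = ET.fromstring(XML)
    for testset in root.find('judging'):
        tl = float(testset.find('time-limit').text) * 0.001           # ms -> s
        ml = int(testset.find('memory-limit').text) // (1024 * 1024)  # B -> MiB
        print(testset.attrib['name'], tl, ml)                         # tests 2.0 256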
Example #3
    def get_task(self, name):
        """See docstring in class Loader.

        """
        try:
            num = self.tasks_order[name]

        # Here we expose an undocumented behavior, so that cmsMake can
        # import a task even without the whole contest; this is not to
        # be relied upon in general
        except AttributeError:
            num = 1

        task_path = os.path.join(self.path, name)

        # We first look for the yaml file inside the task folder,
        # and eventually fallback to a yaml file in its parent folder.
        try:
            conf = yaml.safe_load(
                io.open(os.path.join(task_path, "task.yaml"),
                        "rt", encoding="utf-8"))
        except IOError:
            conf = yaml.safe_load(
                io.open(os.path.join(self.path, name + ".yaml"),
                        "rt", encoding="utf-8"))

        logger.info("Loading parameters for task %s." % name)

        # Here we update the time of the last import
        touch(os.path.join(task_path, ".itime"))
        # If this file is not deleted, then the import failed
        touch(os.path.join(task_path, ".import_error"))

        args = {}

        args["num"] = num
        load(conf, args, ["name", "nome_breve"])
        load(conf, args, ["title", "nome"])

        assert name == args["name"]

        if args["name"] == args["title"]:
            logger.warning("Short name equals long name (title). "
                           "Please check.")

        primary_language = load(conf, None, "primary_language")
        if primary_language is None:
            primary_language = 'it'
        paths = [os.path.join(task_path, "statement", "statement.pdf"),
                 os.path.join(task_path, "testo", "testo.pdf")]
        for path in paths:
            if os.path.exists(path):
                digest = self.file_cacher.put_file_from_path(
                    path,
                    "Statement for task %s (lang: %s)" % (name,
                                                          primary_language))
                break
        else:
            logger.critical("Couldn't find any task statement, aborting...")
            sys.exit(1)
        args["statements"] = [Statement(primary_language, digest)]

        args["primary_statements"] = '["%s"]' % (primary_language)

        args["attachments"] = []  # FIXME Use auxiliary

        args["submission_format"] = [
            SubmissionFormatElement("%s.%%l" % name)]

        # Use the new token settings format if detected.
        if "token_mode" in conf:
            load(conf, args, "token_mode")
            load(conf, args, "token_max_number")
            load(conf, args, "token_min_interval", conv=make_timedelta)
            load(conf, args, "token_gen_initial")
            load(conf, args, "token_gen_number")
            load(conf, args, "token_gen_interval", conv=make_timedelta)
            load(conf, args, "token_gen_max")
        # Otherwise fall back on the old one.
        else:
            logger.warning(
                "%s.yaml uses a deprecated format for token settings which "
                "will soon stop being supported, you're advised to update it.",
                name)
            # Determine the mode.
            if conf.get("token_initial", None) is None:
                args["token_mode"] = "disabled"
            elif conf.get("token_gen_number", 0) > 0 and \
                    conf.get("token_gen_time", 0) == 0:
                args["token_mode"] = "infinite"
            else:
                args["token_mode"] = "finite"
            # Set the old default values.
            args["token_gen_initial"] = 0
            args["token_gen_number"] = 0
            args["token_gen_interval"] = timedelta()
            # Copy the parameters to their new names.
            load(conf, args, "token_total", "token_max_number")
            load(conf, args, "token_min_interval", conv=make_timedelta)
            load(conf, args, "token_initial", "token_gen_initial")
            load(conf, args, "token_gen_number")
            load(conf, args, "token_gen_time", "token_gen_interval",
                 conv=make_timedelta)
            load(conf, args, "token_max", "token_gen_max")
            # Remove some corner cases.
            if args["token_gen_initial"] is None:
                args["token_gen_initial"] = 0
            if args["token_gen_interval"].total_seconds() == 0:
                args["token_gen_interval"] = timedelta(minutes=1)

        load(conf, args, "max_submission_number")
        load(conf, args, "max_user_test_number")
        load(conf, args, "min_submission_interval", conv=make_timedelta)
        load(conf, args, "min_user_test_interval", conv=make_timedelta)

        # Attachments
        args["attachments"] = []
        if os.path.exists(os.path.join(task_path, "att")):
            for filename in os.listdir(os.path.join(task_path, "att")):
                digest = self.file_cacher.put_file_from_path(
                    os.path.join(task_path, "att", filename),
                    "Attachment %s for task %s" % (filename, name))
                args["attachments"] += [Attachment(filename, digest)]

        task = Task(**args)

        args = {}
        args["task"] = task
        args["description"] = conf.get("version", "Default")
        args["autojudge"] = False

        load(conf, args, ["time_limit", "timeout"], conv=float)
        load(conf, args, ["memory_limit", "memlimit"])

        # Builds the parameters that depend on the task type
        args["managers"] = []
        infile_param = conf.get("infile", "input.txt")
        outfile_param = conf.get("outfile", "output.txt")

        # If there is sol/grader.%l for some language %l, then,
        # presuming that the task type is Batch, we retrieve graders
        # in the form sol/grader.%l
        graders = False
        for lang in LANGUAGES:
            if os.path.exists(os.path.join(
                    task_path, "sol", "grader.%s" % lang)):
                graders = True
                break
        if graders:
            # Read grader for each language
            for lang in LANGUAGES:
                grader_filename = os.path.join(
                    task_path, "sol", "grader.%s" % lang)
                if os.path.exists(grader_filename):
                    digest = self.file_cacher.put_file_from_path(
                        grader_filename,
                        "Grader for task %s and language %s" % (name, lang))
                    args["managers"] += [
                        Manager("grader.%s" % lang, digest)]
                else:
                    logger.warning("Grader for language %s not found " % lang)
            # Read managers with other known file extensions
            for other_filename in os.listdir(os.path.join(task_path, "sol")):
                if other_filename.endswith('.h') or \
                        other_filename.endswith('lib.pas'):
                    digest = self.file_cacher.put_file_from_path(
                        os.path.join(task_path, "sol", other_filename),
                        "Manager %s for task %s" % (other_filename, name))
                    args["managers"] += [
                        Manager(other_filename, digest)]
            compilation_param = "grader"
        else:
            compilation_param = "alone"

        # If there is check/checker (or equivalent), then, presuming
        # that the task type is Batch or OutputOnly, we retrieve the
        # comparator
        paths = [os.path.join(task_path, "check", "checker"),
                 os.path.join(task_path, "cor", "correttore")]
        for path in paths:
            if os.path.exists(path):
                digest = self.file_cacher.put_file_from_path(
                    path,
                    "Manager for task %s" % name)
                args["managers"] += [
                    Manager("checker", digest)]
                evaluation_param = "comparator"
                break
        else:
            evaluation_param = "diff"

        # Detect subtasks by checking GEN
        gen_filename = os.path.join(task_path, 'gen', 'GEN')
        try:
            with io.open(gen_filename, "rt", encoding="utf-8") as gen_file:
                subtasks = []
                testcases = 0
                points = None
                for line in gen_file:
                    line = line.strip()
                    splitted = line.split('#', 1)

                    if len(splitted) == 1:
                        # This line represents a testcase, otherwise it's
                        # just a blank
                        if splitted[0] != '':
                            testcases += 1

                    else:
                        testcase, comment = splitted
                        testcase_detected = False
                        subtask_detected = False
                        if testcase.strip() != '':
                            testcase_detected = True
                        comment = comment.strip()
                        if comment.startswith('ST:'):
                            subtask_detected = True

                        if testcase_detected and subtask_detected:
                            raise Exception("No testcase and subtask in the"
                                            " same line allowed")

                        # This line represents a testcase and contains a
                        # comment, but the comment doesn't start a new
                        # subtask
                        if testcase_detected:
                            testcases += 1

                        # This line starts a new subtask
                        if subtask_detected:
                            # Close the previous subtask
                            if points is None:
                                assert(testcases == 0)
                            else:
                                subtasks.append([points, testcases])
                            # Open the new one
                            testcases = 0
                            points = int(comment[3:].strip())

                # Close last subtask (if no subtasks were defined, just
                # fallback to Sum)
                if points is None:
                    args["score_type"] = "Sum"
                    total_value = float(conf.get("total_value", 100.0))
                    input_value = 0.0
                    n_input = testcases
                    if n_input != 0:
                        input_value = total_value / n_input
                    args["score_type_parameters"] = "%s" % input_value
                else:
                    subtasks.append([points, testcases])
                    assert(100 == sum([int(st[0]) for st in subtasks]))
                    n_input = sum([int(st[1]) for st in subtasks])
                    args["score_type"] = "GroupMin"
                    args["score_type_parameters"] = "%s" % subtasks

                if "n_input" in conf:
                    assert int(conf['n_input']) == n_input

        # If gen/GEN doesn't exist, just fallback to Sum
        except IOError:
            args["score_type"] = "Sum"
            total_value = float(conf.get("total_value", 100.0))
            input_value = 0.0
            n_input = int(conf['n_input'])
            if n_input != 0:
                input_value = total_value / n_input
            args["score_type_parameters"] = "%s" % input_value

        # If output_only is set, then the task type is OutputOnly
        if conf.get('output_only', False):
            args["task_type"] = "OutputOnly"
            args["time_limit"] = None
            args["memory_limit"] = None
            args["task_type_parameters"] = '["%s"]' % evaluation_param
            task.submission_format = [
                SubmissionFormatElement("output_%03d.txt" % i)
                for i in xrange(n_input)]

        # If there is check/manager (or equivalent), then the task
        # type is Communication
        else:
            paths = [os.path.join(task_path, "check", "manager"),
                     os.path.join(task_path, "cor", "manager")]
            for path in paths:
                if os.path.exists(path):
                    args["task_type"] = "Communication"
                    args["task_type_parameters"] = '[]'
                    digest = self.file_cacher.put_file_from_path(
                        path,
                        "Manager for task %s" % name)
                    args["managers"] += [
                        Manager("manager", digest)]
                    for lang in LANGUAGES:
                        stub_name = os.path.join(
                            task_path, "sol", "stub.%s" % lang)
                        if os.path.exists(stub_name):
                            digest = self.file_cacher.put_file_from_path(
                                stub_name,
                                "Stub for task %s and language %s" % (name,
                                                                      lang))
                            args["managers"] += [
                                Manager("stub.%s" % lang, digest)]
                        else:
                            logger.warning("Stub for language %s not "
                                           "found." % lang)
                    break

            # Otherwise, the task type is Batch
            else:
                args["task_type"] = "Batch"
                args["task_type_parameters"] = \
                    '["%s", ["%s", "%s"], "%s"]' % \
                    (compilation_param, infile_param, outfile_param,
                     evaluation_param)

        args["testcases"] = []
        for i in xrange(n_input):
            input_digest = self.file_cacher.put_file_from_path(
                os.path.join(task_path, "input", "input%d.txt" % i),
                "Input %d for task %s" % (i, name))
            output_digest = self.file_cacher.put_file_from_path(
                os.path.join(task_path, "output", "output%d.txt" % i),
                "Output %d for task %s" % (i, name))
            args["testcases"] += [
                Testcase("%03d" % i, False, input_digest, output_digest)]
            if args["task_type"] == "OutputOnly":
                task.attachments += [
                    Attachment("input_%03d.txt" % i, input_digest)]
        public_testcases = load(conf, None, ["public_testcases", "risultati"],
                                conv=lambda x: "" if x is None else x)
        if public_testcases != "":
            for x in public_testcases.split(","):
                args["testcases"][int(x.strip())].public = True

        dataset = Dataset(**args)
        task.active_dataset = dataset

        # Import was successful
        os.remove(os.path.join(task_path, ".import_error"))

        logger.info("Task parameters loaded.")

        return task
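The gen/GEN scan above is the heart of subtask detection: non-comment lines count as testcases, and a comment beginning with ST: closes the current subtask and opens a new one. A self-contained sketch of the same logic (simplified: no COPY handling and no same-line validation):

    def parse_gen(lines):
        # Returns [points, testcase_count] pairs, or None if GEN defines
        # no subtasks (the caller then falls back to the Sum score type).
        subtasks, testcases, points = [], 0, None
        for line in lines:
            code, _, comment = line.strip().partition('#')
            if code.strip():
                testcases += 1
            comment = comment.strip()
            if comment.startswith('ST:'):
                if points is not None:
                    subtasks.append([points, testcases])
                testcases, points = 0, int(comment[3:].strip())
        if points is None:
            return None
        subtasks.append([points, testcases])
        return subtasks

    assert parse_gen(["# ST: 0", "1", "# ST: 60", "2", "3"]) == [[0, 1], [60, 2]]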
Example #4
File: cps.py Project: ioi-2017/cms
    def get_task(self, get_statement=True):
        """See docstring in class Loader.

        """

        json_src = os.path.join(self.path, 'problem.json')
        if not os.path.exists(json_src):
            logger.error('No task found.')
            return None
        with open(json_src) as json_file:
            data = json.load(json_file)

        name = data['code']
        logger.info("Loading parameters for task %s.", name)

        args = {}

        # Here we update the time of the last import.
        touch(os.path.join(self.path, ".itime"))
        # If this file is not deleted, then the import failed.
        touch(os.path.join(self.path, ".import_error"))

        args["name"] = name
        args["title"] = data['name']

        # Statements
        if get_statement:
            statements_dir = os.path.join(self.path, 'statements')
            if os.path.exists(statements_dir):
                statements = [
                    filename for filename in os.listdir(statements_dir)
                    if filename[-4:] == ".pdf"
                ]
                if len(statements) > 0:
                    args['statements'] = dict()
                    logger.info('Statements found')
                for statement in statements:
                    language = statement[:-4]
                    if language == "en_US":
                        args["primary_statements"] = '["en_US"]'
                    digest = self.file_cacher.put_file_from_path(
                        os.path.join(statements_dir, statement),
                        "Statement for task %s (lang: %s)" % (name, language))
                    args['statements'][language] = Statement(language, digest)

        # Attachments
        args["attachments"] = dict()
        attachments_dir = os.path.join(self.path, 'attachments')
        if os.path.exists(attachments_dir):
            logger.info("Attachments found")
            for filename in os.listdir(attachments_dir):
                digest = self.file_cacher.put_file_from_path(
                    os.path.join(attachments_dir, filename),
                    "Attachment %s for task %s" % (filename, name))
                args["attachments"][filename] = Attachment(filename, digest)

        data["task_type"] = data["task_type"][0].upper(
        ) + data["task_type"][1:]

        # Setting the submission format
        # Obtaining testcases' codename
        testcases_dir = os.path.join(self.path, 'tests')
        if not os.path.exists(testcases_dir):
            logger.warning('Testcase folder was not found')
            testcase_codenames = []
        else:
            testcase_codenames = sorted([
                filename[:-3] for filename in os.listdir(testcases_dir)
                if filename[-3:] == '.in'
            ])
        if data["task_type"] == 'OutputOnly':
            args["submission_format"] = list()
            for codename in testcase_codenames:
                args["submission_format"].append(
                    SubmissionFormatElement("%s.out" % codename))
        elif data["task_type"] == 'Notice':
            args["submission_format"] = list()
        else:
            args["submission_format"] = [
                SubmissionFormatElement("%s.%%l" % name)
            ]

        # These options cannot be configured in the CPS format.
        # Uncomment the following to set specific values for them.

        # args['max_submission_number'] = 100
        # args['max_user_test_number'] = 100
        # args['min_submission_interval'] = make_timedelta(60)
        # args['min_user_test_interval'] = make_timedelta(60)

        # args['max_user_test_number'] = 10
        # args['min_user_test_interval'] = make_timedelta(60)

        # args['token_mode'] = 'infinite'
        # args['token_max_number'] = 100
        # args['token_min_interval'] = make_timedelta(60)
        # args['token_gen_initial'] = 1
        # args['token_gen_number'] = 1
        # args['token_gen_interval'] = make_timedelta(1800)
        # args['token_gen_max'] = 2
        if "score_precision" in data:
            args['score_precision'] = int(data["score_precision"])
        else:
            args['score_precision'] = 2
        args['max_submission_number'] = 50
        args['max_user_test_number'] = 50
        if data["task_type"] == 'OutputOnly':
            args['max_submission_number'] = 100
            args['max_user_test_number'] = 100

        args['min_submission_interval'] = make_timedelta(60)
        args['min_user_test_interval'] = make_timedelta(60)

        task = Task(**args)

        args = dict()

        args["task"] = task
        args["description"] = "Default"
        args["autojudge"] = True

        if data['task_type'] not in ('OutputOnly', 'Notice'):
            args["time_limit"] = float(data['time_limit'])
            args["memory_limit"] = int(data['memory_limit'])

        args["managers"] = {}

        # Checker
        checker_dir = os.path.join(self.path, "checker")
        checker_src = os.path.join(checker_dir, "checker.cpp")

        if os.path.exists(checker_src):
            logger.info("Checker found, compiling")
            checker_exe = os.path.join(checker_dir, "checker")
            os.system("g++ -x c++ -std=gnu++14 -O2 -static -o %s %s" %
                      (checker_exe, checker_src))
            digest = self.file_cacher.put_file_from_path(
                checker_exe, "Manager for task %s" % name)
            args["managers"]['checker'] = Manager("checker", digest)
            evaluation_param = "comparator"
        else:
            logger.info("Checker not found, using diff if neccessary")
            evaluation_param = "diff"

        args["task_type"] = data['task_type']
        if data['task_type'] != 'Notice':
            args["task_type"] += '2017'
        args["task_type_parameters"] = \
            self._get_task_type_parameters(data, data['task_type'], evaluation_param)

        # Graders
        graders_dir = os.path.join(self.path, 'graders')

        if data['task_type'] == 'TwoSteps':
            pas_manager = name + 'lib.pas'
            pas_manager_path = os.path.join(graders_dir, pas_manager)
            if not os.path.exists(pas_manager_path):
                digest = self.file_cacher.put_file_content(
                    ''.encode('utf-8'), 'Pascal manager for task %s' % name)
                args["managers"][pas_manager] = Manager(pas_manager, digest)

        if not os.path.exists(graders_dir):
            logger.warning('Grader folder was not found')
            graders_list = []
        else:
            graders_list = [filename
                            for filename in os.listdir(graders_dir)
                            if filename != 'manager.cpp']
        for grader_name in graders_list:
            grader_src = os.path.join(graders_dir, grader_name)
            digest = self.file_cacher.put_file_from_path(
                grader_src, "Manager for task %s" % name)
            args["managers"][grader_name] = Manager(grader_name, digest)

        # Manager
        manager_src = os.path.join(graders_dir, 'manager.cpp')

        if os.path.exists(manager_src):
            logger.info("Manager found, compiling")
            manager_exe = os.path.join(graders_dir, "manager")
            os.system("cat %s | \
                            g++ -x c++ -O2 -static -o %s -" %
                      (manager_src, manager_exe))
            digest = self.file_cacher.put_file_from_path(
                manager_exe, "Manager for task %s" % name)
            args["managers"]["manager"] = Manager("manager", digest)

        # Testcases
        args["testcases"] = {}

        for codename in testcase_codenames:
            infile = os.path.join(testcases_dir, "%s.in" % codename)
            outfile = os.path.join(testcases_dir, "%s.out" % codename)
            if not os.path.exists(outfile):
                logger.critical(
                    'Could not find the output file for testcase %s',
                    codename)
                logger.critical('Aborting...')
                return

            input_digest = self.file_cacher.put_file_from_path(
                infile, "Input %s for task %s" % (codename, name))
            output_digest = self.file_cacher.put_file_from_path(
                outfile, "Output %s for task %s" % (codename, name))
            testcase = Testcase(codename, True, input_digest, output_digest)
            args["testcases"][codename] = testcase

        # Score Type
        subtasks_dir = os.path.join(self.path, 'subtasks')
        if not os.path.exists(subtasks_dir):
            logger.warning('Subtask folder was not found')
            subtasks = []
        else:
            subtasks = sorted(os.listdir(subtasks_dir))

        if len(subtasks) == 0:
            number_tests = max(len(testcase_codenames), 1)
            args["score_type"] = "Sum"
            args["score_type_parameters"] = str(100 / number_tests)
        else:
            args["score_type"] = "GroupMinWithMaxScore"
            parsed_data = [
                100,
            ]
            subtask_no = -1
            add_optional_name = False
            for subtask in subtasks:
                subtask_no += 1
                with open(os.path.join(subtasks_dir, subtask)) as subtask_json:
                    subtask_data = json.load(subtask_json)
                    score = int(subtask_data["score"])
                    testcases = "|".join(
                        re.escape(testcase)
                        for testcase in subtask_data["testcases"])
                    optional_name = "Subtask %d" % subtask_no
                    if subtask_no == 0 and score == 0:
                        add_optional_name = True
                        optional_name = "Samples"
                    if add_optional_name:
                        parsed_data.append([score, testcases, optional_name])
                    else:
                        parsed_data.append([score, testcases])
            args["score_type_parameters"] = json.dumps(parsed_data)
        args["description"] = datetime.utcnow()\
            .strftime("%Y-%m-%d %H:%M:%S %Z%z")

        dataset = Dataset(**args)
        task.active_dataset = dataset

        os.remove(os.path.join(self.path, ".import_error"))

        logger.info("Task parameters loaded.")

        return task
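The GroupMinWithMaxScore parameters built here are a JSON list whose first element caps the total score, followed by one [score, testcase-regex] entry per subtask (plus an optional display name for a zero-point samples group). A sketch with illustrative values:

    import json
    import re

    samples = {"score": 0, "testcases": ["01", "02"]}
    subtask1 = {"score": 40, "testcases": ["03", "04"]}

    def pattern(data):
        # Join the testcase codenames into one alternation regex.
        return "|".join(re.escape(tc) for tc in data["testcases"])

    parsed_data = [100,
                   [samples["score"], pattern(samples), "Samples"],
                   [subtask1["score"], pattern(subtask1)]]
    print(json.dumps(parsed_data))
    # [100, [0, "01|02", "Samples"], [40, "03|04"]]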
Example #5
    def get_task(self, name):
        """See docstring in class Loader.

        """
        try:
            num = self.tasks_order[name]

        # Here we expose an undocumented behavior, so that cmsMake can
        # import a task even without the whole contest; this is not to
        # be relied upon in general.
        except AttributeError:
            num = 1

        task_path = os.path.join(self.path, "problems", name)

        logger.info("Loading parameters for task %s.", name)

        args = {}

        # Here we update the time of the last import.
        touch(os.path.join(task_path, ".itime"))
        # If this file is not deleted, then the import failed.
        touch(os.path.join(task_path, ".import_error"))

        args["num"] = num

        # Get alphabetical task index for use in title.

        index = None
        contest_tree = ET.parse(os.path.join(self.path, "contest.xml"))
        contest_root = contest_tree.getroot()
        for problem in contest_root.find('problems'):
            if os.path.basename(problem.attrib['url']) == name:
                index = problem.attrib['index']

        tree = ET.parse(os.path.join(task_path, "problem.xml"))
        root = tree.getroot()

        args["name"] = name
        if index is not None:
            args["title"] = index.upper() + '. '
        else:
            args["title"] = ''
        args["title"] += root.find('names') \
            .find("name[@language='%s']" % self.primary_language) \
            .attrib['value']

        args["statements"] = []
        args["primary_statements"] = []
        for language in self.languages:
            path = os.path.join(task_path, 'statements', '.pdf', language,
                                'problem.pdf')
            if os.path.exists(path):
                lang = LANGUAGE_MAP[language]
                digest = self.file_cacher.put_file_from_path(
                    path,
                    "Statement for task %s (lang: %s)" % (name, language))
                args["statements"].append(Statement(lang, digest))
                args["primary_statements"].append(lang)
        args["primary_statements"] = '["%s"]' % \
            '","'.join(args["primary_statements"])
        args["submission_format"] = [SubmissionFormatElement("%s.%%l" % name)]

        # These options cannot be configured in the Polygon format.
        # Uncomment the following to set specific values for them.

        # args['max_submission_number'] = 100
        # args['max_user_test_number'] = 100
        # args['min_submission_interval'] = make_timedelta(60)
        # args['min_user_test_interval'] = make_timedelta(60)

        # args['max_user_test_number'] = 10
        # args['min_user_test_interval'] = make_timedelta(60)

        # args['token_mode'] = 'infinite'
        # args['token_max_number'] = 100
        # args['token_min_interval'] = make_timedelta(60)
        # args['token_gen_initial'] = 1
        # args['token_gen_number'] = 1
        # args['token_gen_interval'] = make_timedelta(1800)
        # args['token_gen_max'] = 2

        task_cms_conf_path = os.path.join(task_path, 'files')
        task_cms_conf = None
        if os.path.exists(os.path.join(task_cms_conf_path, 'cms_conf.py')):
            sys.path.append(task_cms_conf_path)
            logger.info("Found additional CMS options for task %s.", name)
            task_cms_conf = __import__('cms_conf')
            # TODO: we should probably find a cleverer way to avoid caching.
            task_cms_conf = reload(task_cms_conf)
            sys.path.pop()
        if task_cms_conf is not None and hasattr(task_cms_conf, "general"):
            args.update(task_cms_conf.general)

        task = Task(**args)

        judging = root.find('judging')
        testset = None
        for testset in judging:
            testset_name = testset.attrib["name"]

            args = {}
            args["task"] = task
            args["description"] = testset_name
            args["autojudge"] = False

            tl = float(testset.find('time-limit').text)
            ml = float(testset.find('memory-limit').text)
            args["time_limit"] = tl * 0.001
            args["memory_limit"] = int(ml / (1024 * 1024))

            args["managers"] = []
            infile_param = judging.attrib['input-file']
            outfile_param = judging.attrib['output-file']

            checker_src = os.path.join(task_path, "files", "check.cpp")
            if os.path.exists(checker_src):
                logger.info("Checker found, compiling")
                checker_exe = os.path.join(task_path, "files", "checker")
                testlib_path = "/usr/local/include/cms/testlib.h"
                if not config.installed:
                    testlib_path = os.path.join(os.path.dirname(__file__),
                                                "polygon", "testlib.h")
                os.system("cat %s | \
                    sed 's$testlib.h$%s$' | \
                    g++ -x c++ -O2 -static -o %s -" %
                          (checker_src, testlib_path, checker_exe))
                digest = self.file_cacher.put_file_from_path(
                    checker_exe, "Manager for task %s" % name)
                args["managers"] += [Manager("checker", digest)]
                evaluation_param = "comparator"
            else:
                logger.info("Checker not found, using diff")
                evaluation_param = "diff"

            args["task_type"] = "Batch"
            args["task_type_parameters"] = \
                '["%s", ["%s", "%s"], "%s"]' % \
                ("alone", infile_param, outfile_param, evaluation_param)

            args["score_type"] = "Sum"
            total_value = 100.0
            input_value = 0.0

            testcases = int(testset.find('test-count').text)

            n_input = testcases
            if n_input != 0:
                input_value = total_value / n_input
            args["score_type_parameters"] = str(input_value)

            args["testcases"] = []

            for i in xrange(testcases):
                infile = os.path.join(task_path, testset_name,
                                      "%02d" % (i + 1))
                outfile = os.path.join(task_path, testset_name,
                                       "%02d.a" % (i + 1))
                if self.dos2unix_found:
                    os.system('dos2unix -q %s' % (infile, ))
                    os.system('dos2unix -q %s' % (outfile, ))
                input_digest = self.file_cacher.put_file_from_path(
                    infile, "Input %d for task %s" % (i, name))
                output_digest = self.file_cacher.put_file_from_path(
                    outfile, "Output %d for task %s" % (i, name))
                testcase = Testcase("%03d" % (i, ), False, input_digest,
                                    output_digest)
                testcase.public = True
                args["testcases"] += [testcase]

            if task_cms_conf is not None and \
               hasattr(task_cms_conf, "datasets") and \
               testset_name in task_cms_conf.datasets:
                args.update(task_cms_conf.datasets[testset_name])

            dataset = Dataset(**args)
            if testset_name == "tests":
                task.active_dataset = dataset

        os.remove(os.path.join(task_path, ".import_error"))

        logger.info("Task parameters loaded.")
        return task
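The shell pipeline above streams the checker source through sed, rewriting the testlib.h include to an absolute path, and feeds the result to g++ on stdin. The same substitution can be sketched without a shell (paths are illustrative; note that str.replace rewrites every occurrence, while the sed above only rewrites the first per line):

    import subprocess

    def compile_checker(checker_src, checker_exe, testlib_path):
        with open(checker_src, encoding="utf-8") as f:
            # Redirect the testlib include to the absolute header path.
            source = f.read().replace("testlib.h", testlib_path)
        result = subprocess.run(
            ["g++", "-x", "c++", "-O2", "-static", "-o", checker_exe, "-"],
            input=source.encode())
        return result.returncode == 0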
Example #6
    def get_task(self, get_statement=True):
        """See docstring in class TaskLoader."""
        name = os.path.split(self.path)[1]

        if (not os.path.exists(os.path.join(self.path, "task.yaml"))) and \
           (not os.path.exists(os.path.join(self.path, "..", name + ".yaml"))):
            logger.critical("File missing: \"task.yaml\"")
            return None

        # We first look for the yaml file inside the task folder,
        # and eventually fallback to a yaml file in its parent folder.
        try:
            conf = load_yaml_from_path(os.path.join(self.path, "task.yaml"))
        except OSError as err:
            try:
                deprecated_path = os.path.join(self.path, "..", name + ".yaml")
                conf = load_yaml_from_path(deprecated_path)

                logger.warning("You're using a deprecated location for the "
                               "task.yaml file. You're advised to move %s to "
                               "%s.", deprecated_path,
                               os.path.join(self.path, "task.yaml"))
            except OSError:
                # Since both task.yaml and the (deprecated) "../taskname.yaml"
                # are missing, we will only warn the user that task.yaml is
                # missing (to avoid encouraging the use of the deprecated one)
                raise err

        # Here we update the time of the last import
        touch(os.path.join(self.path, ".itime"))
        # If this file is not deleted, then the import failed
        touch(os.path.join(self.path, ".import_error"))

        args = {}

        load(conf, args, ["name", "nome_breve"])
        load(conf, args, ["title", "nome"])

        if name != args["name"]:
            logger.info("The task name (%s) and the directory name (%s) are "
                        "different. The former will be used.", args["name"],
                        name)

        if args["name"] == args["title"]:
            logger.warning("Short name equals long name (title). "
                           "Please check.")

        name = args["name"]

        logger.info("Loading parameters for task %s.", name)

        if get_statement:
            primary_language = load(conf, None, "primary_language")
            if primary_language is None:
                primary_language = 'it'
            paths = [os.path.join(self.path, "statement", "statement.pdf"),
                     os.path.join(self.path, "testo", "testo.pdf")]
            for path in paths:
                if os.path.exists(path):
                    digest = self.file_cacher.put_file_from_path(
                        path,
                        "Statement for task %s (lang: %s)" %
                        (name, primary_language))
                    break
            else:
                logger.critical("Couldn't find any task statement, aborting.")
                sys.exit(1)
            args["statements"] = {
                primary_language: Statement(primary_language, digest)
            }

            args["primary_statements"] = [primary_language]

        args["submission_format"] = ["%s.%%l" % name]

        # Import the feedback level when explicitly set to full
        # (default behaviour is restricted)
        if conf.get("feedback_level", None) == FEEDBACK_LEVEL_FULL:
            args["feedback_level"] = FEEDBACK_LEVEL_FULL
        elif conf.get("feedback_level", None) == FEEDBACK_LEVEL_RESTRICTED:
            args["feedback_level"] = FEEDBACK_LEVEL_RESTRICTED

        if conf.get("score_mode", None) == SCORE_MODE_MAX:
            args["score_mode"] = SCORE_MODE_MAX
        elif conf.get("score_mode", None) == SCORE_MODE_MAX_SUBTASK:
            args["score_mode"] = SCORE_MODE_MAX_SUBTASK
        elif conf.get("score_mode", None) == SCORE_MODE_MAX_TOKENED_LAST:
            args["score_mode"] = SCORE_MODE_MAX_TOKENED_LAST

        # Use the new token settings format if detected.
        if "token_mode" in conf:
            load(conf, args, "token_mode")
            load(conf, args, "token_max_number")
            load(conf, args, "token_min_interval", conv=make_timedelta)
            load(conf, args, "token_gen_initial")
            load(conf, args, "token_gen_number")
            load(conf, args, "token_gen_interval", conv=make_timedelta)
            load(conf, args, "token_gen_max")
        # Otherwise fall back on the old one.
        else:
            logger.warning(
                "task.yaml uses a deprecated format for token settings which "
                "will soon stop being supported, you're advised to update it.")
            # Determine the mode.
            if conf.get("token_initial", None) is None:
                args["token_mode"] = TOKEN_MODE_DISABLED
            elif conf.get("token_gen_number", 0) > 0 and \
                    conf.get("token_gen_time", 0) == 0:
                args["token_mode"] = TOKEN_MODE_INFINITE
            else:
                args["token_mode"] = TOKEN_MODE_FINITE
            # Set the old default values.
            args["token_gen_initial"] = 0
            args["token_gen_number"] = 0
            args["token_gen_interval"] = timedelta()
            # Copy the parameters to their new names.
            load(conf, args, "token_total", "token_max_number")
            load(conf, args, "token_min_interval", conv=make_timedelta)
            load(conf, args, "token_initial", "token_gen_initial")
            load(conf, args, "token_gen_number")
            load(conf, args, "token_gen_time", "token_gen_interval",
                 conv=make_timedelta)
            load(conf, args, "token_max", "token_gen_max")
            # Remove some corner cases.
            if args["token_gen_initial"] is None:
                args["token_gen_initial"] = 0
            if args["token_gen_interval"].total_seconds() == 0:
                args["token_gen_interval"] = timedelta(minutes=1)

        load(conf, args, "max_submission_number")
        load(conf, args, "max_user_test_number")
        load(conf, args, "min_submission_interval", conv=make_timedelta)
        load(conf, args, "min_user_test_interval", conv=make_timedelta)

        # Attachments
        args["attachments"] = dict()
        if os.path.exists(os.path.join(self.path, "att")):
            for filename in os.listdir(os.path.join(self.path, "att")):
                digest = self.file_cacher.put_file_from_path(
                    os.path.join(self.path, "att", filename),
                    "Attachment %s for task %s" % (filename, name))
                args["attachments"][filename] = Attachment(filename, digest)

        task = Task(**args)

        args = {}
        args["task"] = task
        args["description"] = conf.get("version", "Default")
        args["autojudge"] = False

        load(conf, args, ["time_limit", "timeout"], conv=float)
        # The Italian YAML format specifies memory limits in MiB.
        load(conf, args, ["memory_limit", "memlimit"],
             conv=lambda mb: mb * 1024 * 1024)

        # Builds the parameters that depend on the task type
        args["managers"] = []
        infile_param = conf.get("infile", "input.txt")
        outfile_param = conf.get("outfile", "output.txt")

        # If there is sol/grader.%l for some language %l, then,
        # presuming that the task type is Batch, we retrieve graders
        # in the form sol/grader.%l
        graders = False
        for lang in LANGUAGES:
            if os.path.exists(os.path.join(
                    self.path, "sol", "grader%s" % lang.source_extension)):
                graders = True
                break
        if graders:
            # Read grader for each language
            for lang in LANGUAGES:
                extension = lang.source_extension
                grader_filename = os.path.join(
                    self.path, "sol", "grader%s" % extension)
                if os.path.exists(grader_filename):
                    digest = self.file_cacher.put_file_from_path(
                        grader_filename,
                        "Grader for task %s and language %s" %
                        (task.name, lang))
                    args["managers"] += [
                        Manager("grader%s" % extension, digest)]
                else:
                    logger.warning("Grader for language %s not found ", lang)
            # Read managers with other known file extensions
            for other_filename in os.listdir(os.path.join(self.path, "sol")):
                if any(other_filename.endswith(header)
                       for header in HEADER_EXTS):
                    digest = self.file_cacher.put_file_from_path(
                        os.path.join(self.path, "sol", other_filename),
                        "Manager %s for task %s" % (other_filename, task.name))
                    args["managers"] += [
                        Manager(other_filename, digest)]
            compilation_param = "grader"
        else:
            compilation_param = "alone"

        # If there is check/checker (or equivalent), then, presuming
        # that the task type is Batch or OutputOnly, we retrieve the
        # comparator
        paths = [os.path.join(self.path, "check", "checker"),
                 os.path.join(self.path, "cor", "correttore")]
        for path in paths:
            if os.path.exists(path):
                digest = self.file_cacher.put_file_from_path(
                    path,
                    "Manager for task %s" % task.name)
                args["managers"] += [
                    Manager("checker", digest)]
                evaluation_param = "comparator"
                break
        else:
            evaluation_param = "diff"

        # Detect subtasks by checking GEN
        gen_filename = os.path.join(self.path, 'gen', 'GEN')
        try:
            with open(gen_filename, "rt", encoding="utf-8") as gen_file:
                subtasks = []
                testcases = 0
                points = None
                for line in gen_file:
                    line = line.strip()
                    splitted = line.split('#', 1)

                    if len(splitted) == 1:
                        # This line represents a testcase, otherwise
                        # it's just a blank
                        if splitted[0] != '':
                            testcases += 1

                    else:
                        testcase, comment = splitted
                        testcase = testcase.strip()
                        comment = comment.strip()
                        testcase_detected = len(testcase) > 0
                        copy_testcase_detected = comment.startswith("COPY:")
                        subtask_detected = comment.startswith('ST:')

                        flags = [testcase_detected,
                                 copy_testcase_detected,
                                 subtask_detected]
                        if len([x for x in flags if x]) > 1:
                            raise Exception("No testcase and command in"
                                            " the same line allowed")

                        # This line represents a testcase and contains a
                        # comment, but the comment doesn't start a new
                        # subtask
                        if testcase_detected or copy_testcase_detected:
                            testcases += 1

                        # This line starts a new subtask
                        if subtask_detected:
                            # Close the previous subtask
                            if points is None:
                                assert(testcases == 0)
                            else:
                                subtasks.append([points, testcases])
                            # Open the new one
                            testcases = 0
                            points = int(comment[3:].strip())

                # Close last subtask (if no subtasks were defined, just
                # fallback to Sum)
                if points is None:
                    args["score_type"] = "Sum"
                    total_value = float(conf.get("total_value", 100.0))
                    input_value = 0.0
                    n_input = testcases
                    if n_input != 0:
                        input_value = total_value / n_input
                    args["score_type_parameters"] = input_value
                else:
                    subtasks.append([points, testcases])
                    assert(100 == sum([int(st[0]) for st in subtasks]))
                    n_input = sum([int(st[1]) for st in subtasks])
                    args["score_type"] = "GroupMin"
                    args["score_type_parameters"] = subtasks

                if "n_input" in conf:
                    assert int(conf['n_input']) == n_input

        # If gen/GEN doesn't exist, just fallback to Sum
        except OSError:
            args["score_type"] = "Sum"
            total_value = float(conf.get("total_value", 100.0))
            input_value = 0.0
            n_input = int(conf['n_input'])
            if n_input != 0:
                input_value = total_value / n_input
            args["score_type_parameters"] = input_value

        # Override score_type if explicitly specified
        if "score_type" in conf and "score_type_parameters" in conf:
            logger.info("Overriding 'score_type' and 'score_type_parameters' "
                        "as per task.yaml")
            load(conf, args, "score_type")
            load(conf, args, "score_type_parameters")
        elif "score_type" in conf or "score_type_parameters" in conf:
            logger.warning("To override score type data, task.yaml must "
                           "specify both 'score_type' and "
                           "'score_type_parameters'.")

        # If output_only is set, then the task type is OutputOnly
        if conf.get('output_only', False):
            args["task_type"] = "OutputOnly"
            args["time_limit"] = None
            args["memory_limit"] = None
            args["task_type_parameters"] = [evaluation_param]
            task.submission_format = \
                ["output_%03d.txt" % i for i in range(n_input)]

        # If there is check/manager (or equivalent), then the task
        # type is Communication
        else:
            paths = [os.path.join(self.path, "check", "manager"),
                     os.path.join(self.path, "cor", "manager")]
            for path in paths:
                if os.path.exists(path):
                    num_processes = load(conf, None, "num_processes")
                    if num_processes is None:
                        num_processes = 1
                    logger.info("Task type Communication")
                    args["task_type"] = "Communication"
                    args["task_type_parameters"] = \
                        [num_processes, "stub", "fifo_io"]
                    digest = self.file_cacher.put_file_from_path(
                        path,
                        "Manager for task %s" % task.name)
                    args["managers"] += [
                        Manager("manager", digest)]
                    for lang in LANGUAGES:
                        stub_name = os.path.join(
                            self.path, "sol", "stub%s" % lang.source_extension)
                        if os.path.exists(stub_name):
                            digest = self.file_cacher.put_file_from_path(
                                stub_name,
                                "Stub for task %s and language %s" % (
                                    task.name, lang.name))
                            args["managers"] += [
                                Manager(
                                    "stub%s" % lang.source_extension, digest)]
                        else:
                            logger.warning("Stub for language %s not "
                                           "found.", lang.name)
                    for other_filename in os.listdir(os.path.join(self.path,
                                                                  "sol")):
                        if any(other_filename.endswith(header)
                               for header in HEADER_EXTS):
                            digest = self.file_cacher.put_file_from_path(
                                os.path.join(self.path, "sol", other_filename),
                                "Stub %s for task %s" % (other_filename,
                                                         task.name))
                            args["managers"] += [
                                Manager(other_filename, digest)]
                    break

            # Otherwise, the task type is Batch
            else:
                args["task_type"] = "Batch"
                args["task_type_parameters"] = \
                    [compilation_param, [infile_param, outfile_param],
                     evaluation_param]

        args["testcases"] = []
        for i in range(n_input):
            input_digest = self.file_cacher.put_file_from_path(
                os.path.join(self.path, "input", "input%d.txt" % i),
                "Input %d for task %s" % (i, task.name))
            output_digest = self.file_cacher.put_file_from_path(
                os.path.join(self.path, "output", "output%d.txt" % i),
                "Output %d for task %s" % (i, task.name))
            args["testcases"] += [
                Testcase("%03d" % i, False, input_digest, output_digest)]
            if args["task_type"] == "OutputOnly":
                task.attachments.set(
                    Attachment("input_%03d.txt" % i, input_digest))
        public_testcases = load(conf, None, ["public_testcases", "risultati"],
                                conv=lambda x: "" if x is None else x)
        if public_testcases == "all":
            for t in args["testcases"]:
                t.public = True
        elif len(public_testcases) > 0:
            for x in public_testcases.split(","):
                args["testcases"][int(x.strip())].public = True
        args["testcases"] = dict((tc.codename, tc) for tc in args["testcases"])
        args["managers"] = dict((mg.filename, mg) for mg in args["managers"])

        dataset = Dataset(**args)
        task.active_dataset = dataset

        # Import was successful
        os.remove(os.path.join(self.path, ".import_error"))

        logger.info("Task parameters loaded.")

        return task
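
For reference, the two score-type configurations built above differ only in
the shape of score_type_parameters. A small illustrative sketch (the values
are made up, not taken from any real task):

# Sum: a single float, the value of every testcase (total_value / n_input).
sum_params = 100.0 / 8          # eight testcases worth 12.5 points each

# GroupMin: a list of [points, testcases] pairs, one pair per subtask;
# the points must total 100, which the assert in the loader enforces.
groupmin_params = [[20, 3], [30, 5], [50, 10]]
assert sum(points for points, _ in groupmin_params) == 100
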
Example No. 7
    def get_contest(self):
        """See docstring in class ContestLoader."""
        if not os.path.exists(os.path.join(self.path, "contest.yaml")):
            logger.critical("File missing: \"contest.yaml\"")
            return None

        conf = load_yaml_from_path(os.path.join(self.path, "contest.yaml"))

        # Here we update the time of the last import
        touch(os.path.join(self.path, ".itime_contest"))
        # If this file is not deleted, then the import failed
        touch(os.path.join(self.path, ".import_error_contest"))

        args = {}

        load(conf, args, ["name", "nome_breve"])
        load(conf, args, ["description", "nome"])

        logger.info("Loading parameters for contest %s.", args["name"])

        # Use the new token settings format if detected.
        if "token_mode" in conf:
            load(conf, args, "token_mode")
            load(conf, args, "token_max_number")
            load(conf, args, "token_min_interval", conv=make_timedelta)
            load(conf, args, "token_gen_initial")
            load(conf, args, "token_gen_number")
            load(conf, args, "token_gen_interval", conv=make_timedelta)
            load(conf, args, "token_gen_max")
        # Otherwise fall back on the old one.
        else:
            logger.warning(
                "contest.yaml uses a deprecated format for token settings "
                "which will soon stop being supported, you're advised to "
                "update it.")
            # Determine the mode.
            if conf.get("token_initial", None) is None:
                args["token_mode"] = TOKEN_MODE_DISABLED
            elif conf.get("token_gen_number", 0) > 0 and \
                    conf.get("token_gen_time", 0) == 0:
                args["token_mode"] = TOKEN_MODE_INFINITE
            else:
                args["token_mode"] = TOKEN_MODE_FINITE
            # Set the old default values.
            args["token_gen_initial"] = 0
            args["token_gen_number"] = 0
            args["token_gen_interval"] = timedelta()
            # Copy the parameters to their new names.
            load(conf, args, "token_total", "token_max_number")
            load(conf, args, "token_min_interval", conv=make_timedelta)
            load(conf, args, "token_initial", "token_gen_initial")
            load(conf, args, "token_gen_number")
            load(conf, args, "token_gen_time", "token_gen_interval",
                 conv=make_timedelta)
            load(conf, args, "token_max", "token_gen_max")
            # Remove some corner cases.
            if args["token_gen_initial"] is None:
                args["token_gen_initial"] = 0
            if args["token_gen_interval"].total_seconds() == 0:
                args["token_gen_interval"] = timedelta(minutes=1)

        load(conf, args, ["start", "inizio"], conv=make_datetime)
        load(conf, args, ["stop", "fine"], conv=make_datetime)
        load(conf, args, ["per_user_time"], conv=make_timedelta)
        load(conf, args, ["timezone"])

        load(conf, args, "max_submission_number")
        load(conf, args, "max_user_test_number")
        load(conf, args, "min_submission_interval", conv=make_timedelta)
        load(conf, args, "min_user_test_interval", conv=make_timedelta)

        tasks = load(conf, None, ["tasks", "problemi"])
        participations = load(conf, None, ["users", "utenti"])
        if participations is not None:
            for p in participations:
                p["password"] = build_password(p["password"])

        # Import was successful
        os.remove(os.path.join(self.path, ".import_error_contest"))

        logger.info("Contest parameters loaded.")

        return Contest(**args), tasks, participations
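
The load() helper that these loaders call everywhere is not part of the
excerpts. A plausible minimal reconstruction, assuming the usual semantics
implied by the call sites (first matching key wins, optional destination
rename, optional conversion); this is a sketch, not the actual cmscontrib
implementation:

def load(conf, args, keys, dest=None, conv=None):
    """Look up the first of `keys` present in `conf`, passing the value
    through `conv` if given. If `args` is None the value is returned,
    otherwise it is also stored in `args` under `dest` (default: the
    first key)."""
    if isinstance(keys, str):
        keys = [keys]
    for key in keys:
        if key in conf:
            value = conf[key] if conv is None else conv(conf[key])
            if args is not None:
                args[dest or keys[0]] = value
            return value
    return None
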
Example No. 8
File: polygon.py Project: Corea/cms
    def get_task(self, get_statement=True):
        """See docstring in class Loader.

        """

        logger.info("Checking dos2unix presence")
        i = os.system('dos2unix -V 2>/dev/null')
        self.dos2unix_found = (i == 0)
        if not self.dos2unix_found:
            logger.error("dos2unix not found - tests will not be converted!")

        name = os.path.basename(self.path)
        logger.info("Loading parameters for task %s.", name)

        args = {}

        # Here we update the time of the last import.
        touch(os.path.join(self.path, ".itime"))
        # If this file is not deleted, then the import failed.
        touch(os.path.join(self.path, ".import_error"))

        # Get alphabetical task index for use in title.

        tree = ET.parse(os.path.join(self.path, "problem.xml"))
        root = tree.getroot()

        args["name"] = name
        args["title"] = root.find('names').find("name").attrib['value']

        if get_statement:
            args["statements"] = []
            args["primary_statements"] = []
            for language, lang in LANGUAGE_MAP.iteritems():
                path = os.path.join(self.path, 'statements',
                                    '.pdf', language, 'problem.pdf')
                if os.path.exists(path):
                    digest = self.file_cacher.put_file_from_path(
                        path,
                        "Statement for task %s (lang: %s)" % (name,
                                                              language))
                    args["statements"].append(Statement(lang, digest))
                    args["primary_statements"].append(lang)
            args["primary_statements"] = json.dumps(args["primary_statements"])

        args["submission_format"] = [SubmissionFormatElement("%s.%%l" % name)]

        # These options cannot be configured in the Polygon format.
        # Uncomment the following to set specific values for them.

        # args['max_submission_number'] = 100
        # args['max_user_test_number'] = 100
        # args['min_submission_interval'] = make_timedelta(60)
        # args['min_user_test_interval'] = make_timedelta(60)

        # args['max_user_test_number'] = 10
        # args['min_user_test_interval'] = make_timedelta(60)

        # args['token_mode'] = 'infinite'
        # args['token_max_number'] = 100
        # args['token_min_interval'] = make_timedelta(60)
        # args['token_gen_initial'] = 1
        # args['token_gen_number'] = 1
        # args['token_gen_interval'] = make_timedelta(1800)
        # args['token_gen_max'] = 2

        task_cms_conf_path = os.path.join(self.path, 'files')
        task_cms_conf = None
        if os.path.exists(os.path.join(task_cms_conf_path, 'cms_conf.py')):
            sys.path.append(task_cms_conf_path)
            logger.info("Found additional CMS options for task %s.", name)
            task_cms_conf = __import__('cms_conf')
            # TODO: probably should find a cleaner way to avoid the
            # module cache returning a stale cms_conf.
            task_cms_conf = reload(task_cms_conf)
            sys.path.pop()
        if task_cms_conf is not None and hasattr(task_cms_conf, "general"):
            args.update(task_cms_conf.general)

        task = Task(**args)

        judging = root.find('judging')
        testset = None
        for testset in judging:
            testset_name = testset.attrib["name"]

            args = {}
            args["task"] = task
            args["description"] = testset_name
            args["autojudge"] = False

            tl = float(testset.find('time-limit').text)
            ml = float(testset.find('memory-limit').text)
            args["time_limit"] = tl * 0.001
            args["memory_limit"] = int(ml / (1024 * 1024))

            args["managers"] = []
            infile_param = judging.attrib['input-file']
            outfile_param = judging.attrib['output-file']

            checker_src = os.path.join(self.path, "files", "check.cpp")
            if os.path.exists(checker_src):
                logger.info("Checker found, compiling")
                checker_exe = os.path.join(self.path, "files", "checker")
                testlib_path = "/usr/local/include/cms/testlib.h"
                if not config.installed:
                    testlib_path = os.path.join(os.path.dirname(__file__),
                                                "polygon", "testlib.h")
                os.system("cat %s | \
                    sed 's$testlib.h$%s$' | \
                    g++ -x c++ -O2 -static -o %s -" %
                          (checker_src, testlib_path, checker_exe))
                digest = self.file_cacher.put_file_from_path(
                    checker_exe,
                    "Manager for task %s" % name)
                args["managers"] += [
                    Manager("checker", digest)]
                evaluation_param = "comparator"
            else:
                logger.info("Checker not found, using diff")
                evaluation_param = "diff"

            args["task_type"] = "Batch"
            args["task_type_parameters"] = \
                '["%s", ["%s", "%s"], "%s"]' % \
                ("alone", infile_param, outfile_param, evaluation_param)

            args["score_type"] = "Sum"
            total_value = 100.0
            input_value = 0.0

            testcases = int(testset.find('test-count').text)

            n_input = testcases
            if n_input != 0:
                input_value = total_value / n_input
            args["score_type_parameters"] = str(input_value)

            args["testcases"] = []

            for i in xrange(testcases):
                infile = os.path.join(self.path, testset_name,
                                      "%02d" % (i + 1))
                outfile = os.path.join(self.path, testset_name,
                                       "%02d.a" % (i + 1))
                if self.dos2unix_found:
                    os.system('dos2unix -q %s' % (infile, ))
                    os.system('dos2unix -q %s' % (outfile, ))
                input_digest = self.file_cacher.put_file_from_path(
                    infile,
                    "Input %d for task %s" % (i, name))
                output_digest = self.file_cacher.put_file_from_path(
                    outfile,
                    "Output %d for task %s" % (i, name))
                testcase = Testcase("%03d" % (i, ), False,
                                    input_digest, output_digest)
                testcase.public = True
                args["testcases"] += [testcase]

            if task_cms_conf is not None and \
               hasattr(task_cms_conf, "datasets") and \
               testset_name in task_cms_conf.datasets:
                args.update(task_cms_conf.datasets[testset_name])

            dataset = Dataset(**args)
            if testset_name == "tests":
                task.active_dataset = dataset

        os.remove(os.path.join(self.path, ".import_error"))

        logger.info("Task parameters loaded.")
        return task
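
The fields read above all come from Polygon's problem.xml. A trimmed,
hypothetical fragment together with the corresponding lookups (the unit
conversions match the ones used in the loader):

import xml.etree.ElementTree as ET

PROBLEM_XML = """\
<problem>
  <names><name language="english" value="A+B"/></names>
  <judging input-file="input.txt" output-file="output.txt">
    <testset name="tests">
      <time-limit>1000</time-limit>
      <memory-limit>268435456</memory-limit>
      <test-count>2</test-count>
    </testset>
  </judging>
</problem>
"""

root = ET.fromstring(PROBLEM_XML)
assert root.find('names').find('name').attrib['value'] == "A+B"
testset = root.find('judging').find('testset')
assert float(testset.find('time-limit').text) * 0.001 == 1.0           # s
assert int(testset.find('memory-limit').text) // (1024 * 1024) == 256  # MiB
assert int(testset.find('test-count').text) == 2
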
Example No. 9
File: polygon.py Project: Nyrio/cms
    def get_task(self, get_statement=True):
        """See docstring in class Loader.

        """

        logger.info("Checking dos2unix presence")
        i = os.system('dos2unix -V 2>/dev/null')
        self.dos2unix_found = (i == 0)
        if not self.dos2unix_found:
            logger.error("dos2unix not found - tests will not be converted!")

        name = os.path.basename(self.path)
        logger.info("Loading parameters for task %s.", name)

        args = {}

        # Here we update the time of the last import.
        touch(os.path.join(self.path, ".itime"))
        # If this file is not deleted, then the import failed.
        touch(os.path.join(self.path, ".import_error"))

        # Get alphabetical task index for use in title.

        tree = ET.parse(os.path.join(self.path, "problem.xml"))
        root = tree.getroot()

        args["name"] = name
        args["title"] = str(root.find('names').find("name").attrib['value'])

        if get_statement:
            args["statements"] = {}
            args["primary_statements"] = []
            for language, lang in iteritems(LANGUAGE_MAP):
                path = os.path.join(self.path, 'statements',
                                    '.pdf', language, 'problem.pdf')
                if os.path.exists(path):
                    digest = self.file_cacher.put_file_from_path(
                        path,
                        "Statement for task %s (lang: %s)" % (name,
                                                              language))
                    args["statements"][lang] = Statement(lang, digest)
                    args["primary_statements"].append(lang)

        args["submission_format"] = ["%s.%%l" % name]

        # These options cannot be configured in the Polygon format.
        # Uncomment the following to set specific values for them.

        # args['max_submission_number'] = 100
        # args['max_user_test_number'] = 100
        # args['min_submission_interval'] = make_timedelta(60)
        # args['min_user_test_interval'] = make_timedelta(60)

        # args['max_user_test_number'] = 10
        # args['min_user_test_interval'] = make_timedelta(60)

        # args['token_mode'] = 'infinite'
        # args['token_max_number'] = 100
        # args['token_min_interval'] = make_timedelta(60)
        # args['token_gen_initial'] = 1
        # args['token_gen_number'] = 1
        # args['token_gen_interval'] = make_timedelta(1800)
        # args['token_gen_max'] = 2

        task_cms_conf_path = os.path.join(self.path, 'files', 'cms_conf.py')
        task_cms_conf = None
        if os.path.exists(task_cms_conf_path):
            logger.info("Found additional CMS options for task %s.", name)
            with io.open(task_cms_conf_path, 'rb') as f:
                task_cms_conf = imp.load_module('cms_conf', f,
                                                task_cms_conf_path,
                                                ('.py', 'r', imp.PY_SOURCE))
        if task_cms_conf is not None and hasattr(task_cms_conf, "general"):
            args.update(task_cms_conf.general)

        task = Task(**args)

        judging = root.find('judging')
        testset = None
        for testset in judging:
            testset_name = testset.attrib["name"]

            args = {}
            args["task"] = task
            args["description"] = str(testset_name)
            args["autojudge"] = False

            tl = float(testset.find('time-limit').text)
            ml = int(testset.find('memory-limit').text)
            args["time_limit"] = tl * 0.001
            args["memory_limit"] = ml // (1024 * 1024)

            args["managers"] = {}
            infile_param = judging.attrib['input-file']
            outfile_param = judging.attrib['output-file']

            # Checker can be in any of these two locations.
            checker_src = os.path.join(self.path, "files", "check.cpp")
            if not os.path.exists(checker_src):
                checker_src = os.path.join(self.path, "check.cpp")

            if os.path.exists(checker_src):
                logger.info("Checker found, compiling")
                checker_exe = os.path.join(
                    os.path.dirname(checker_src), "checker")
                testlib_path = "/usr/local/include/cms"
                testlib_include = os.path.join(testlib_path, "testlib.h")
                if not config.installed:
                    testlib_path = os.path.join(os.path.dirname(__file__),
                                                "polygon")
                code = subprocess.call(["g++", "-x", "c++", "-O2", "-static",
                                        "-DCMS", "-I", testlib_path,
                                        "-include", testlib_include,
                                        "-o", checker_exe, checker_src])
                if code != 0:
                    logger.critical("Could not compile checker")
                    return None
                digest = self.file_cacher.put_file_from_path(
                    checker_exe,
                    "Manager for task %s" % name)
                args["managers"]["checker"] = Manager("checker", digest)
                evaluation_param = "comparator"
            else:
                logger.info("Checker not found, using diff")
                evaluation_param = "diff"

            args["task_type"] = "Batch"
            args["task_type_parameters"] = \
                ["alone", [infile_param, outfile_param], evaluation_param]

            args["score_type"] = "Sum"
            total_value = 100.0
            input_value = 0.0

            testcases = int(testset.find('test-count').text)

            n_input = testcases
            if n_input != 0:
                input_value = total_value / n_input
            args["score_type_parameters"] = input_value

            args["testcases"] = {}

            for i in range(testcases):
                infile = os.path.join(self.path, testset_name,
                                      "%02d" % (i + 1))
                outfile = os.path.join(self.path, testset_name,
                                       "%02d.a" % (i + 1))
                if self.dos2unix_found:
                    os.system('dos2unix -q %s' % (infile, ))
                    os.system('dos2unix -q %s' % (outfile, ))
                input_digest = self.file_cacher.put_file_from_path(
                    infile,
                    "Input %d for task %s" % (i, name))
                output_digest = self.file_cacher.put_file_from_path(
                    outfile,
                    "Output %d for task %s" % (i, name))
                testcase = Testcase("%03d" % (i, ), False,
                                    input_digest, output_digest)
                testcase.public = True
                args["testcases"][testcase.codename] = testcase

            if task_cms_conf is not None and \
               hasattr(task_cms_conf, "datasets") and \
               testset_name in task_cms_conf.datasets:
                args.update(task_cms_conf.datasets[testset_name])

            dataset = Dataset(**args)
            if testset_name == "tests":
                task.active_dataset = dataset

        os.remove(os.path.join(self.path, ".import_error"))

        logger.info("Task parameters loaded.")
        return task
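
Every loader above drops ".itime" / ".import_error" marker files through a
touch() helper that the excerpts never define. A minimal stand-in with the
usual semantics (create the file if absent, update its mtime otherwise):

import os

def touch(path):
    # Open in append mode so existing content is preserved, then bump
    # the modification time, mirroring the Unix touch utility.
    with open(path, "ab"):
        os.utime(path, None)
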
Example No. 10
    def get_contest(self):
        """See docstring in class ContestLoader."""
        if not os.path.exists(os.path.join(self.path, "contest.yaml")):
            logger.critical("File missing: \"contest.yaml\"")
            return None

        conf = yaml.safe_load(
            io.open(os.path.join(self.path, "contest.yaml"),
                    "rt",
                    encoding="utf-8"))

        # Here we update the time of the last import
        touch(os.path.join(self.path, ".itime_contest"))
        # If this file is not deleted, then the import failed
        touch(os.path.join(self.path, ".import_error_contest"))

        args = {}

        load(conf, args, ["name", "nome_breve"])
        load(conf, args, ["description", "nome"])
        load(conf, args, "presentation")
        load(conf, args, "path_to_logo")
        load(conf, args, "timezone")

        pr_temp = load(conf, None, "presentation_translations")
        if pr_temp:
            presentation_translations = {
                language: Presentation(language, pr_temp[language])
                for language in pr_temp
            }
        else:
            presentation_translations = {}
        args["presentation_translations"] = presentation_translations

        # Authentication methods
        oic_info = load(conf, None, "oic_info")
        if oic_info:
            args["auth_type"] = "OpenIDConnect"
            with open(os.path.join(self.path, oic_info), "r") as f:
                args["openidconnect_info"] = f.read()

        # To limit the interface to a few languages
        load(conf, args, "allowed_localizations")

        # Allowed programming languages
        plang = load(conf, None, "allowed_languages")
        if plang:
            args["languages"] = plang

        logger.info("Loading parameters for contest %s.", args["name"])

        # Use the new token settings format if detected.
        if "token_mode" in conf:
            load(conf, args, "token_mode")
            load(conf, args, "token_max_number")
            load(conf, args, "token_min_interval", conv=make_timedelta)
            load(conf, args, "token_gen_initial")
            load(conf, args, "token_gen_number")
            load(conf, args, "token_gen_interval", conv=make_timedelta)
            load(conf, args, "token_gen_max")
        # Otherwise fall back on the old one.
        else:
            logger.warning(
                "contest.yaml uses a deprecated format for token settings "
                "which will soon stop being supported, you're advised to "
                "update it.")
            # Determine the mode.
            if conf.get("token_initial", None) is None:
                args["token_mode"] = TOKEN_MODE_DISABLED
            elif conf.get("token_gen_number", 0) > 0 and \
                    conf.get("token_gen_time", 0) == 0:
                args["token_mode"] = TOKEN_MODE_INFINITE
            else:
                args["token_mode"] = TOKEN_MODE_FINITE
            # Set the old default values.
            args["token_gen_initial"] = 0
            args["token_gen_number"] = 0
            args["token_gen_interval"] = timedelta()
            # Copy the parameters to their new names.
            load(conf, args, "token_total", "token_max_number")
            load(conf, args, "token_min_interval", conv=make_timedelta)
            load(conf, args, "token_initial", "token_gen_initial")
            load(conf, args, "token_gen_number")
            load(conf,
                 args,
                 "token_gen_time",
                 "token_gen_interval",
                 conv=make_timedelta)
            load(conf, args, "token_max", "token_gen_max")
            # Remove some corner cases.
            if args["token_gen_initial"] is None:
                args["token_gen_initial"] = 0
            if args["token_gen_interval"].total_seconds() == 0:
                args["token_gen_interval"] = timedelta(minutes=1)

        load(conf, args, ["start", "inizio"], conv=make_datetime)
        load(conf, args, ["stop", "fine"], conv=make_datetime)
        load(conf, args, ["per_user_time"], conv=make_timedelta)

        load(conf, args, "max_submission_number")
        load(conf, args, "max_user_test_number")
        load(conf, args, "min_submission_interval", conv=make_timedelta)
        load(conf, args, "min_user_test_interval", conv=make_timedelta)

        tasks = load(conf, None, ["tasks", "problemi"])
        participations = load(conf, None, ["users", "utenti"])
        if participations is not None:
            for p in participations:
                p["password"] = build_password(p["password"])

        # Import was successful
        os.remove(os.path.join(self.path, ".import_error_contest"))

        logger.info("Contest parameters loaded.")

        return Contest(**args), tasks, participations
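
The deprecated-token fallback is easiest to follow on a concrete example.
Given a hypothetical old-style contest.yaml, the branch above yields the
following (make_timedelta is assumed here to take seconds; the stand-in
below only approximates the cmscommon helper):

from datetime import timedelta

def make_timedelta(t):
    # Stand-in; the real helper lives in cmscommon.
    return timedelta(seconds=t)

conf = {"token_initial": 2, "token_total": 10,
        "token_gen_number": 1, "token_gen_time": 30}

# token_initial is set and token_gen_time != 0, so the mode is finite;
# the old keys are then copied to their new names.
args = {
    "token_mode": "finite",
    "token_max_number": conf["token_total"],
    "token_gen_initial": conf["token_initial"],
    "token_gen_number": conf["token_gen_number"],
    "token_gen_interval": make_timedelta(conf["token_gen_time"]),
}
assert args["token_gen_interval"].total_seconds() == 30
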
Example No. 11
    def get_contest(self):
        """See docstring in class ContestLoader."""
        if not os.path.exists(os.path.join(self.path, "contest.yaml")):
            logger.critical("File missing: \"contest.yaml\"")
            return None

        conf = yaml.safe_load(
            io.open(os.path.join(self.path, "contest.yaml"),
                    "rt", encoding="utf-8"))

        # Here we update the time of the last import
        touch(os.path.join(self.path, ".itime_contest"))
        # If this file is not deleted, then the import failed
        touch(os.path.join(self.path, ".import_error_contest"))

        args = {}

        load(conf, args, ["name", "nome_breve"])
        load(conf, args, ["description", "nome"])

        logger.info("Loading parameters for contest %s.", args["name"])

        # Use the new token settings format if detected.
        if "token_mode" in conf:
            load(conf, args, "token_mode")
            load(conf, args, "token_max_number")
            load(conf, args, "token_min_interval", conv=make_timedelta)
            load(conf, args, "token_gen_initial")
            load(conf, args, "token_gen_number")
            load(conf, args, "token_gen_interval", conv=make_timedelta)
            load(conf, args, "token_gen_max")
        # Otherwise fall back on the old one.
        else:
            logger.warning(
                "contest.yaml uses a deprecated format for token settings "
                "which will soon stop being supported, you're advised to "
                "update it.")
            # Determine the mode.
            if conf.get("token_initial", None) is None:
                args["token_mode"] = "disabled"
            elif conf.get("token_gen_number", 0) > 0 and \
                    conf.get("token_gen_time", 0) == 0:
                args["token_mode"] = "infinite"
            else:
                args["token_mode"] = "finite"
            # Set the old default values.
            args["token_gen_initial"] = 0
            args["token_gen_number"] = 0
            args["token_gen_interval"] = timedelta()
            # Copy the parameters to their new names.
            load(conf, args, "token_total", "token_max_number")
            load(conf, args, "token_min_interval", conv=make_timedelta)
            load(conf, args, "token_initial", "token_gen_initial")
            load(conf, args, "token_gen_number")
            load(conf, args, "token_gen_time", "token_gen_interval",
                 conv=make_timedelta)
            load(conf, args, "token_max", "token_gen_max")
            # Remove some corner cases.
            if args["token_gen_initial"] is None:
                args["token_gen_initial"] = 0
            if args["token_gen_interval"].total_seconds() == 0:
                args["token_gen_interval"] = timedelta(minutes=1)

        load(conf, args, ["start", "inizio"], conv=make_datetime)
        load(conf, args, ["stop", "fine"], conv=make_datetime)
        load(conf, args, ["per_user_time"], conv=make_timedelta)

        load(conf, args, "max_submission_number")
        load(conf, args, "max_user_test_number")
        load(conf, args, "min_submission_interval", conv=make_timedelta)
        load(conf, args, "min_user_test_interval", conv=make_timedelta)

        tasks = load(conf, None, ["tasks", "problemi"])
        participations = load(conf, None, ["users", "utenti"])

        # Import was successful
        os.remove(os.path.join(self.path, ".import_error_contest"))

        logger.info("Contest parameters loaded.")

        return Contest(**args), tasks, participations
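
A caller consuming the (contest, tasks, participations) tuple returned by
get_contest() might look like the following sketch; the loader construction
is outside these excerpts, so `loader` is a stand-in for any of the
ContestLoader implementations shown above:

def import_contest(loader):
    """Hypothetical driver around a ContestLoader."""
    result = loader.get_contest()
    if result is None:
        raise SystemExit("contest.yaml missing or malformed")
    contest, task_names, participations = result
    print("Contest %r: %d tasks, %d participations" %
          (contest.name, len(task_names or []), len(participations or [])))
    return contest
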
Example No. 12
    def get_task(self, get_statement=True):
        """See docstring in class TaskLoader."""
        name = os.path.split(self.path)[1]

        if (not os.path.exists(os.path.join(self.path, "task.yaml"))) and \
           (not os.path.exists(os.path.join(self.path, "problema.yaml"))) and \
           (not os.path.exists(os.path.join(self.path, "..", name + ".yaml"))):
            logger.critical("File missing: \"task.yaml\"")
            return None

        # We first look for the yaml file inside the task folder,
        # and eventually fallback to a yaml file in its parent folder.
        try:
            conf = yaml.safe_load(
                io.open(os.path.join(self.path, "task.yaml"),
                        "rt",
                        encoding="utf-8"))
        except IOError as err:
            try:
                conf = yaml.safe_load(
                    io.open(os.path.join(self.path, "problema.yaml"),
                            "rt",
                            encoding="utf-8"))
            except IOError:
                try:
                    deprecated_path = os.path.join(self.path, "..",
                                                   name + ".yaml")
                    conf = yaml.safe_load(
                        io.open(deprecated_path, "rt", encoding="utf-8"))

                    logger.warning(
                        "You're using a deprecated location for the "
                        "task.yaml file. You're advised to move %s to "
                        "%s.", deprecated_path,
                        os.path.join(self.path, "task.yaml"))
                except IOError:
                    # Since task.yaml, problema.yaml and the (deprecated)
                    # "../taskname.yaml" are all missing, we only warn the
                    # user that task.yaml is missing (to avoid encouraging
                    # the use of the deprecated locations)
                    raise err

        # Here we update the time of the last import
        touch(os.path.join(self.path, ".itime"))
        # If this file is not deleted, then the import failed
        touch(os.path.join(self.path, ".import_error"))

        args = {}

        load(conf, args, ["name", "nome_breve"])
        load(conf, args, ["title", "nome"])
        load(conf, args, "hide_task_prefix")
        load(conf, args, "category")
        load(conf, args, "level")
        if "level" in args:
            args["level"] = unicode(args["level"])

        if name != args["name"]:
            logger.info(
                "The task name (%s) and the directory name (%s) are "
                "different. The former will be used.", args["name"], name)

        if args["name"] == args["title"]:
            logger.warning("Short name equals long name (title). "
                           "Please check.")

        name = args["name"]

        logger.info("Loading parameters for task %s.", name)

        if get_statement:
            primary_language = load(conf, None, "primary_language")
            if primary_language is None:
                primary_language = 'it'
            paths = [
                os.path.join(self.path, "statement", "statement.pdf"),
                os.path.join(self.path, "statement.pdf"),
                os.path.join(self.path, "enunciado.pdf"),
                os.path.join(self.path, args["name"] + ".pdf"),
                os.path.join(self.path, "testo", "testo.pdf")
            ]
            for path in paths:
                if os.path.exists(path):
                    digest = self.file_cacher.put_file_from_path(
                        path, "Statement for task %s (lang: %s)" %
                        (name, primary_language))
                    break
            else:
                logger.critical("Couldn't find any task statement, aborting.")
                sys.exit(1)
            args["statements"] = [Statement(primary_language, digest)]

            args["primary_statements"] = '["%s"]' % (primary_language)

        args["attachments"] = []  # FIXME Use auxiliary

        args["submission_format"] = [SubmissionFormatElement("%s.%%l" % name)]

        if conf.get("score_mode", None) == SCORE_MODE_MAX:
            args["score_mode"] = SCORE_MODE_MAX
        elif conf.get("score_mode", None) == SCORE_MODE_MAX_TOKENED_LAST:
            args["score_mode"] = SCORE_MODE_MAX_TOKENED_LAST

        # Use the new token settings format if detected.
        if "token_mode" in conf:
            load(conf, args, "token_mode")
            load(conf, args, "token_max_number")
            load(conf, args, "token_min_interval", conv=make_timedelta)
            load(conf, args, "token_gen_initial")
            load(conf, args, "token_gen_number")
            load(conf, args, "token_gen_interval", conv=make_timedelta)
            load(conf, args, "token_gen_max")
        # Otherwise fall back on the old one.
        else:
            logger.warning(
                "task.yaml uses a deprecated format for token settings which "
                "will soon stop being supported, you're advised to update it.")
            # Determine the mode.
            if conf.get("token_initial", None) is None:
                args["token_mode"] = "disabled"
            elif conf.get("token_gen_number", 0) > 0 and \
                    conf.get("token_gen_time", 0) == 0:
                args["token_mode"] = "infinite"
            else:
                args["token_mode"] = "finite"
            # Set the old default values.
            args["token_gen_initial"] = 0
            args["token_gen_number"] = 0
            args["token_gen_interval"] = timedelta()
            # Copy the parameters to their new names.
            load(conf, args, "token_total", "token_max_number")
            load(conf, args, "token_min_interval", conv=make_timedelta)
            load(conf, args, "token_initial", "token_gen_initial")
            load(conf, args, "token_gen_number")
            load(conf,
                 args,
                 "token_gen_time",
                 "token_gen_interval",
                 conv=make_timedelta)
            load(conf, args, "token_max", "token_gen_max")
            # Remove some corner cases.
            if args["token_gen_initial"] is None:
                args["token_gen_initial"] = 0
            if args["token_gen_interval"].total_seconds() == 0:
                args["token_gen_interval"] = timedelta(minutes=1)

        load(conf, args, "max_submission_number")
        load(conf, args, "max_user_test_number")
        load(conf, args, "min_submission_interval", conv=make_timedelta)
        load(conf, args, "min_user_test_interval", conv=make_timedelta)

        # Attachments
        args["attachments"] = []
        if os.path.exists(os.path.join(self.path, "att")):
            for filename in os.listdir(os.path.join(self.path, "att")):
                digest = self.file_cacher.put_file_from_path(
                    os.path.join(self.path, "att", filename),
                    "Attachment %s for task %s" % (filename, name))
                args["attachments"] += [Attachment(filename, digest)]

        task = Task(**args)

        args = {}
        args["task"] = task
        args["description"] = conf.get("version", "Default")
        args["autojudge"] = False

        load(conf, args, ["time_limit", "timeout"], conv=float)
        load(conf, args, ["memory_limit", "memlimit"])

        # Builds the parameters that depend on the task type
        args["managers"] = []
        infile_param = conf.get("infile", "input.txt")
        outfile_param = conf.get("outfile", "output.txt")

        # If there is sol/grader.%l for some language %l, then,
        # presuming that the task type is Batch, we retrieve graders
        # in the form sol/grader.%l
        graders = False
        for lang in LANGUAGES:
            if os.path.exists(
                    os.path.join(self.path, "sol",
                                 "grader%s" % lang.source_extension)):
                graders = True
                break
        if graders:
            # Read grader for each language
            for lang in LANGUAGES:
                extension = lang.source_extension
                grader_filename = os.path.join(self.path, "sol",
                                               "grader%s" % extension)
                if os.path.exists(grader_filename):
                    digest = self.file_cacher.put_file_from_path(
                        grader_filename, "Grader for task %s and language %s" %
                        (task.name, lang))
                    args["managers"] += [
                        Manager("grader%s" % extension, digest)
                    ]
                else:
                    logger.warning("Grader for language %s not found ", lang)
            # Read managers with other known file extensions
            for other_filename in os.listdir(os.path.join(self.path, "sol")):
                if any(
                        other_filename.endswith(header)
                        for header in HEADER_EXTS):
                    digest = self.file_cacher.put_file_from_path(
                        os.path.join(self.path, "sol", other_filename),
                        "Manager %s for task %s" % (other_filename, task.name))
                    args["managers"] += [Manager(other_filename, digest)]
            compilation_param = "grader"
        else:
            compilation_param = "alone"

        # If there is check/checker (or equivalent), then, presuming
        # that the task type is Batch or OutputOnly, we retrieve the
        # comparator
        paths = [
            os.path.join(self.path, "check", "checker"),
            os.path.join(self.path, "corrector.exe"),
            os.path.join(self.path, "cor", "correttore")
        ]
        for path in paths:
            if os.path.exists(path):
                digest = self.file_cacher.put_file_from_path(
                    path, "Manager for task %s" % task.name)
                args["managers"] += [Manager("checker", digest)]
                evaluation_param = "comparator"
                break
        else:
            evaluation_param = "diff"

        # Detect subtasks by checking GEN
        gen_filename = os.path.join(self.path, 'gen', 'GEN')
        try:
            with io.open(gen_filename, "rt", encoding="utf-8") as gen_file:
                subtasks = []
                testcases = 0
                points = None
                for line in gen_file:
                    line = line.strip()
                    splitted = line.split('#', 1)

                    if len(splitted) == 1:
                        # This line represents a testcase, otherwise
                        # it's just a blank
                        if splitted[0] != '':
                            testcases += 1

                    else:
                        testcase, comment = splitted
                        testcase = testcase.strip()
                        comment = comment.strip()
                        testcase_detected = testcase != ''
                        copy_testcase_detected = comment.startswith("COPY:")
                        subtask_detected = comment.startswith('ST:')

                        flags = [
                            testcase_detected, copy_testcase_detected,
                            subtask_detected
                        ]
                        if len([x for x in flags if x]) > 1:
                            raise Exception("No testcase and command in"
                                            " the same line allowed")

                        # This line represents a testcase and contains a
                        # comment, but the comment doesn't start a new
                        # subtask
                        if testcase_detected or copy_testcase_detected:
                            testcases += 1

                        # This line starts a new subtask
                        if subtask_detected:
                            # Close the previous subtask
                            if points is None:
                                assert testcases == 0
                            else:
                                subtasks.append([points, testcases])
                            # Open the new one
                            testcases = 0
                            points = int(comment[3:].strip())

                # Close the last subtask (if no subtasks were defined,
                # just fall back to Sum)
                if points is None:
                    args["score_type"] = "Sum"
                    total_value = float(conf.get("total_value", 100.0))
                    input_value = 0.0
                    n_input = testcases
                    if n_input != 0:
                        input_value = total_value / n_input
                    args["score_type_parameters"] = "%s" % input_value
                else:
                    subtasks.append([points, testcases])
                    assert sum(int(st[0]) for st in subtasks) == 100
                    n_input = sum(int(st[1]) for st in subtasks)
                    args["score_type"] = "GroupMin"
                    args["score_type_parameters"] = "%s" % subtasks

                if "n_input" in conf:
                    assert int(conf['n_input']) == n_input

        # If gen/GEN doesn't exist, just fall back to Sum
        except IOError:
            if 'n_input' not in conf:
                conf['n_input'] = 0
            n_input = int(conf['n_input'])
            if "score_type" in conf:
                args["score_type"] = conf["score_type"]
                if "score_type_parameters" in conf:
                    args["score_type_parameters"] = (
                        "%s" % conf["score_type_parameters"])
                    args["score_type_parameters"] = re.sub(
                        r'u\'([^\']+)\'', '\"\g<1>\"',
                        args["score_type_parameters"])
            else:
                args["score_type"] = "Sum"
                total_value = float(conf.get("total_value", 100.0))
                input_value = 0.0

                def count_testcases(folder):
                    c = 0
                    if os.path.isdir(folder):
                        for filename in sorted(os.listdir(folder)):
                            nombre, ext = os.path.splitext(filename)
                            if ext == ".in":
                                c += 1
                    return c

                casos = n_input + count_testcases(
                    os.path.join(self.path, "casos")) + count_testcases(
                        os.path.join(self.path, "casos", "generados"))
                if casos != 0:
                    input_value = total_value / casos
                args["score_type_parameters"] = "%s" % input_value

        # If output_only is set, then the task type is OutputOnly
        if conf.get('output_only', False):
            args["task_type"] = "OutputOnly"
            args["time_limit"] = None
            args["memory_limit"] = None
            args["task_type_parameters"] = '["%s"]' % evaluation_param
            task.submission_format = [
                SubmissionFormatElement("output_%03d.txt" % i)
                for i in xrange(n_input)
            ]

        # If there is check/manager (or equivalent), then the task
        # type is Communication
        else:
            paths = [
                os.path.join(self.path, "check", "manager"),
                os.path.join(self.path, "cor", "manager")
            ]
            for path in paths:
                if os.path.exists(path):
                    num_processes = load(conf, None, "num_processes")
                    if num_processes is None:
                        num_processes = 1
                    logger.info("Task type Communication")
                    args["task_type"] = "Communication"
                    args["task_type_parameters"] = '[%d]' % num_processes
                    digest = self.file_cacher.put_file_from_path(
                        path, "Manager for task %s" % task.name)
                    args["managers"] += [Manager("manager", digest)]
                    for lang in LANGUAGES:
                        stub_name = os.path.join(
                            self.path, "sol", "stub%s" % lang.source_extension)
                        if os.path.exists(stub_name):
                            digest = self.file_cacher.put_file_from_path(
                                stub_name, "Stub for task %s and language %s" %
                                (task.name, lang.name))
                            args["managers"] += [
                                Manager("stub%s" % lang.source_extension,
                                        digest)
                            ]
                        else:
                            logger.warning(
                                "Stub for language %s not "
                                "found.", lang.name)
                    for other_filename in os.listdir(
                            os.path.join(self.path, "sol")):
                        if any(
                                other_filename.endswith(header)
                                for header in HEADER_EXTS):
                            digest = self.file_cacher.put_file_from_path(
                                os.path.join(self.path, "sol", other_filename),
                                "Stub %s for task %s" %
                                (other_filename, task.name))
                            args["managers"] += [
                                Manager(other_filename, digest)
                            ]
                    break

            # Otherwise, the task type is Batch
            else:
                args["task_type"] = "Batch"
                args["task_type_parameters"] = \
                    '["%s", ["%s", "%s"], "%s"]' % \
                    (compilation_param, infile_param, outfile_param,
                     evaluation_param)

        args["testcases"] = []
        for i in xrange(n_input):
            input_digest = self.file_cacher.put_file_from_path(
                os.path.join(self.path, "input", "input%d.txt" % i),
                "Input %d for task %s" % (i, task.name))
            output_digest = self.file_cacher.put_file_from_path(
                os.path.join(self.path, "output", "output%d.txt" % i),
                "Output %d for task %s" % (i, task.name))
            args["testcases"] += [
                Testcase("%03d" % i, False, input_digest, output_digest)
            ]
            if args["task_type"] == "OutputOnly":
                task.attachments += [
                    Attachment("input_%03d.txt" % i, input_digest)
                ]

        def add_testcases_dir(folder):
            if os.path.isdir(folder):
                for filename in sorted(os.listdir(folder)):
                    nombre, ext = os.path.splitext(filename)
                    if ext == ".in":
                        input_digest = self.file_cacher.put_file_from_path(
                            os.path.join(folder, filename),
                            "Input %s for task %s" % (nombre, task.name))
                        output_digest = self.file_cacher.put_file_from_path(
                            os.path.join(folder, nombre + ".dat"),
                            "Output %s for task %s" % (nombre, task.name))
                        args["testcases"] += [
                            Testcase(nombre, False, input_digest,
                                     output_digest)
                        ]
                        if args["task_type"] == "OutputOnly":
                            task.attachments += [
                                Attachment(filename, input_digest)
                            ]

        add_testcases_dir(os.path.join(self.path, "casos"))
        add_testcases_dir(os.path.join(self.path, "casos", "generados"))

        public_testcases = load(conf,
                                None, ["public_testcases", "risultati"],
                                conv=lambda x: "" if x is None else x)
        if public_testcases == "all":
            for t in args["testcases"]:
                t.public = True
        elif public_testcases != "":
            for x in public_testcases.split(","):
                args["testcases"][int(x.strip())].public = True

        dataset = Dataset(**args)
        task.active_dataset = dataset

        # Import was successful
        os.remove(os.path.join(self.path, ".import_error"))

        logger.info("Task parameters loaded.")

        return task
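
The gen/GEN grammar that the subtask-detection block parses is easiest to
see on a made-up file. A self-contained sketch of the same state machine
(simplified: the one-command-per-line check and the COPY bookkeeping are
glossed over):

GEN = """\
# ST: 40
1 10
2 10
# ST: 60
3 100
4 100
5 100
"""

subtasks, testcases, points = [], 0, None
for line in GEN.splitlines():
    parts = line.strip().split('#', 1)
    if len(parts) == 1:
        if parts[0] != '':
            testcases += 1          # plain testcase line
    else:
        testcase, comment = parts[0].strip(), parts[1].strip()
        if comment.startswith('ST:'):
            if points is not None:  # close the previous subtask
                subtasks.append([points, testcases])
            testcases, points = 0, int(comment[3:].strip())
        elif testcase != '' or comment.startswith('COPY:'):
            testcases += 1
subtasks.append([points, testcases])  # close the last subtask

assert subtasks == [[40, 2], [60, 3]]   # -> GroupMin with 5 testcases
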
Example No. 13
    def get_task(self, get_statement=True):
        """See docstring in class TaskLoader."""
        name = os.path.split(self.path)[1]

        if (not os.path.exists(os.path.join(self.path, "task.yaml"))) and \
           (not os.path.exists(os.path.join(self.path, "..", name + ".yaml"))):
            logger.critical("File missing: \"task.yaml\"")
            return None

        # We first look for the yaml file inside the task folder,
        # and eventually fallback to a yaml file in its parent folder.
        try:
            conf = yaml.safe_load(
                io.open(os.path.join(self.path, "task.yaml"),
                        "rt", encoding="utf-8"))
        except IOError as err:
            try:
                deprecated_path = os.path.join(self.path, "..", name + ".yaml")
                conf = yaml.safe_load(io.open(deprecated_path, "rt",
                                              encoding="utf-8"))

                logger.warning("You're using a deprecated location for the "
                               "task.yaml file. You're advised to move %s to "
                               "%s.", deprecated_path,
                               os.path.join(self.path, "task.yaml"))
            except IOError:
                # Since both task.yaml and the (deprecated) "../taskname.yaml"
                # are missing, we will only warn the user that task.yaml is
                # missing (to avoid encouraging the use of the deprecated one)
                raise err

        # Here we update the time of the last import
        touch(os.path.join(self.path, ".itime"))
        # If this file is not deleted, then the import failed
        touch(os.path.join(self.path, ".import_error"))

        args = {}

        load(conf, args, ["name", "nome_breve"])
        load(conf, args, ["title", "nome"])

        if name != args["name"]:
            logger.info("The task name (%s) and the directory name (%s) are "
                        "different. The former will be used.", args["name"],
                        name)

        if args["name"] == args["title"]:
            logger.warning("Short name equals long name (title). "
                           "Please check.")

        name = args["name"]

        logger.info("Loading parameters for task %s.", name)

        if get_statement:
            primary_language = load(conf, None, "primary_language")
            if primary_language is None:
                primary_language = 'it'
            paths = [os.path.join(self.path, "statement", "statement.pdf"),
                     os.path.join(self.path, "testo", "testo.pdf")]
            for path in paths:
                if os.path.exists(path):
                    digest = self.file_cacher.put_file_from_path(
                        path,
                        "Statement for task %s (lang: %s)" %
                        (name, primary_language))
                    break
            else:
                logger.critical("Couldn't find any task statement, aborting.")
                sys.exit(1)
            args["statements"] = {
                primary_language: Statement(primary_language, digest)
            }

            args["primary_statements"] = [primary_language]

        args["submission_format"] = ["%s.%%l" % name]

        if conf.get("score_mode", None) == SCORE_MODE_MAX:
            args["score_mode"] = SCORE_MODE_MAX
        elif conf.get("score_mode", None) == SCORE_MODE_MAX_TOKENED_LAST:
            args["score_mode"] = SCORE_MODE_MAX_TOKENED_LAST

        # Use the new token settings format if detected.
        if "token_mode" in conf:
            load(conf, args, "token_mode")
            load(conf, args, "token_max_number")
            load(conf, args, "token_min_interval", conv=make_timedelta)
            load(conf, args, "token_gen_initial")
            load(conf, args, "token_gen_number")
            load(conf, args, "token_gen_interval", conv=make_timedelta)
            load(conf, args, "token_gen_max")
        # Otherwise fall back on the old one.
        else:
            logger.warning(
                "task.yaml uses a deprecated format for token settings which "
                "will soon stop being supported, you're advised to update it.")
            # Determine the mode.
            if conf.get("token_initial", None) is None:
                args["token_mode"] = TOKEN_MODE_DISABLED
            elif conf.get("token_gen_number", 0) > 0 and \
                    conf.get("token_gen_time", 0) == 0:
                args["token_mode"] = TOKEN_MODE_INFINITE
            else:
                args["token_mode"] = TOKEN_MODE_FINITE
            # Set the old default values.
            args["token_gen_initial"] = 0
            args["token_gen_number"] = 0
            args["token_gen_interval"] = timedelta()
            # Copy the parameters to their new names.
            load(conf, args, "token_total", "token_max_number")
            load(conf, args, "token_min_interval", conv=make_timedelta)
            load(conf, args, "token_initial", "token_gen_initial")
            load(conf, args, "token_gen_number")
            load(conf, args, "token_gen_time", "token_gen_interval",
                 conv=make_timedelta)
            load(conf, args, "token_max", "token_gen_max")
            # Remove some corner cases.
            if args["token_gen_initial"] is None:
                args["token_gen_initial"] = 0
            if args["token_gen_interval"].total_seconds() == 0:
                args["token_gen_interval"] = timedelta(minutes=1)

        load(conf, args, "max_submission_number")
        load(conf, args, "max_user_test_number")
        load(conf, args, "min_submission_interval", conv=make_timedelta)
        load(conf, args, "min_user_test_interval", conv=make_timedelta)

        # Attachments
        args["attachments"] = dict()
        if os.path.exists(os.path.join(self.path, "att")):
            for filename in os.listdir(os.path.join(self.path, "att")):
                digest = self.file_cacher.put_file_from_path(
                    os.path.join(self.path, "att", filename),
                    "Attachment %s for task %s" % (filename, name))
                args["attachments"][filename] = Attachment(filename, digest)

        task = Task(**args)

        args = {}
        args["task"] = task
        args["description"] = conf.get("version", "Default")
        args["autojudge"] = False

        load(conf, args, ["time_limit", "timeout"], conv=float)
        load(conf, args, ["memory_limit", "memlimit"])

        # Builds the parameters that depend on the task type
        args["managers"] = []
        infile_param = conf.get("infile", "input.txt")
        outfile_param = conf.get("outfile", "output.txt")

        # If there is sol/grader.%l for some language %l, then,
        # presuming that the task type is Batch, we retrieve graders
        # in the form sol/grader.%l
        graders = False
        for lang in LANGUAGES:
            if os.path.exists(os.path.join(
                    self.path, "sol", "grader%s" % lang.source_extension)):
                graders = True
                break
        if graders:
            # Read grader for each language
            for lang in LANGUAGES:
                extension = lang.source_extension
                grader_filename = os.path.join(
                    self.path, "sol", "grader%s" % extension)
                if os.path.exists(grader_filename):
                    digest = self.file_cacher.put_file_from_path(
                        grader_filename,
                        "Grader for task %s and language %s" %
                        (task.name, lang))
                    args["managers"] += [
                        Manager("grader%s" % extension, digest)]
                else:
                    logger.warning("Grader for language %s not found ", lang)
            # Read managers with other known file extensions
            for other_filename in os.listdir(os.path.join(self.path, "sol")):
                if any(other_filename.endswith(header)
                       for header in HEADER_EXTS):
                    digest = self.file_cacher.put_file_from_path(
                        os.path.join(self.path, "sol", other_filename),
                        "Manager %s for task %s" % (other_filename, task.name))
                    args["managers"] += [
                        Manager(other_filename, digest)]
            compilation_param = "grader"
        else:
            compilation_param = "alone"

        # If there is check/checker (or equivalent), then, presuming
        # that the task type is Batch or OutputOnly, we retrieve the
        # comparator
        paths = [os.path.join(self.path, "check", "checker"),
                 os.path.join(self.path, "cor", "correttore")]
        for path in paths:
            if os.path.exists(path):
                digest = self.file_cacher.put_file_from_path(
                    path,
                    "Manager for task %s" % task.name)
                args["managers"] += [
                    Manager("checker", digest)]
                evaluation_param = "comparator"
                break
        else:
            evaluation_param = "diff"

        # Detect subtasks by checking GEN
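        # A GEN file lists one generator invocation per line; a comment
        # of the form "# ST: <points>" opens a new subtask, while a
        # "# COPY: ..." comment marks a testcase copied from an existing
        # file.  A hypothetical example with two subtasks:
        #
        #     # ST: 0
        #     1 10
        #     # ST: 60
        #     2 1000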
        gen_filename = os.path.join(self.path, 'gen', 'GEN')
        try:
            with io.open(gen_filename, "rt", encoding="utf-8") as gen_file:
                subtasks = []
                testcases = 0
                points = None
                for line in gen_file:
                    line = line.strip()
                    splitted = line.split('#', 1)

                    if len(splitted) == 1:
                        # No comment on this line: if it is not blank,
                        # it represents a testcase
                        if splitted[0] != '':
                            testcases += 1

                    else:
                        testcase, comment = splitted
                        testcase = testcase.strip()
                        comment = comment.strip()
                        testcase_detected = len(testcase) > 0
                        copy_testcase_detected = comment.startswith("COPY:")
                        subtask_detected = comment.startswith('ST:')

                        flags = [testcase_detected,
                                 copy_testcase_detected,
                                 subtask_detected]
                        if sum(flags) > 1:
                            raise Exception("A testcase and a command cannot"
                                            " appear on the same line")

                        # This line represents a testcase and contains a
                        # comment, but the comment doesn't start a new
                        # subtask
                        if testcase_detected or copy_testcase_detected:
                            testcases += 1

                        # This line starts a new subtask
                        if subtask_detected:
                            # Close the previous subtask
                            if points is None:
                                assert testcases == 0
                            else:
                                subtasks.append([points, testcases])
                            # Open the new one
                            testcases = 0
                            points = int(comment[3:].strip())

                # Close the last subtask (if no subtasks were defined,
                # just fall back to Sum)
                if points is None:
                    args["score_type"] = "Sum"
                    total_value = float(conf.get("total_value", 100.0))
                    input_value = 0.0
                    n_input = testcases
                    if n_input != 0:
                        input_value = total_value / n_input
                    args["score_type_parameters"] = input_value
                else:
                    subtasks.append([points, testcases])
                    assert sum(int(st[0]) for st in subtasks) == 100
                    n_input = sum(int(st[1]) for st in subtasks)
                    args["score_type"] = "GroupMin"
                    args["score_type_parameters"] = subtasks

                if "n_input" in conf:
                    assert int(conf['n_input']) == n_input

        # If gen/GEN doesn't exist, just fall back to Sum
        except IOError:
            args["score_type"] = "Sum"
            total_value = float(conf.get("total_value", 100.0))
            input_value = 0.0
            n_input = int(conf['n_input'])
            if n_input != 0:
                input_value = total_value / n_input
            args["score_type_parameters"] = input_value

        # Override score_type if explicitly specified
        if "score_type" in conf and "score_type_parameters" in conf:
            logger.info("Overriding 'score_type' and 'score_type_parameters' "
                        "as per task.yaml")
            load(conf, args, "score_type")
            load(conf, args, "score_type_parameters")
        elif "score_type" in conf or "score_type_parameters" in conf:
            logger.warning("To override score type data, task.yaml must "
                           "specify both 'score_type' and "
                           "'score_type_parameters'.")

        # If output_only is set, then the task type is OutputOnly
        if conf.get('output_only', False):
            args["task_type"] = "OutputOnly"
            args["time_limit"] = None
            args["memory_limit"] = None
            args["task_type_parameters"] = [evaluation_param]
            task.submission_format = \
                ["output_%03d.txt" % i for i in range(n_input)]

        # If there is check/manager (or equivalent), then the task
        # type is Communication
        else:
            paths = [os.path.join(self.path, "check", "manager"),
                     os.path.join(self.path, "cor", "manager")]
            for path in paths:
                if os.path.exists(path):
                    num_processes = load(conf, None, "num_processes")
                    if num_processes is None:
                        num_processes = 1
                    logger.info("Task type Communication")
                    args["task_type"] = "Communication"
                    args["task_type_parameters"] = [num_processes]
                    digest = self.file_cacher.put_file_from_path(
                        path,
                        "Manager for task %s" % task.name)
                    args["managers"] += [
                        Manager("manager", digest)]
                    for lang in LANGUAGES:
                        stub_name = os.path.join(
                            self.path, "sol", "stub%s" % lang.source_extension)
                        if os.path.exists(stub_name):
                            digest = self.file_cacher.put_file_from_path(
                                stub_name,
                                "Stub for task %s and language %s" % (
                                    task.name, lang.name))
                            args["managers"] += [
                                Manager(
                                    "stub%s" % lang.source_extension, digest)]
                        else:
                            logger.warning("Stub for language %s not "
                                           "found.", lang.name)
                    for other_filename in os.listdir(os.path.join(self.path,
                                                                  "sol")):
                        if any(other_filename.endswith(header)
                               for header in HEADER_EXTS):
                            digest = self.file_cacher.put_file_from_path(
                                os.path.join(self.path, "sol", other_filename),
                                "Stub %s for task %s" % (other_filename,
                                                         task.name))
                            args["managers"] += [
                                Manager(other_filename, digest)]
                    break

            # Otherwise, the task type is Batch
            else:
                args["task_type"] = "Batch"
                args["task_type_parameters"] = \
                    [compilation_param, [infile_param, outfile_param],
                     evaluation_param]

        args["testcases"] = []
        for i in range(n_input):
            input_digest = self.file_cacher.put_file_from_path(
                os.path.join(self.path, "input", "input%d.txt" % i),
                "Input %d for task %s" % (i, task.name))
            output_digest = self.file_cacher.put_file_from_path(
                os.path.join(self.path, "output", "output%d.txt" % i),
                "Output %d for task %s" % (i, task.name))
            args["testcases"] += [
                Testcase("%03d" % i, False, input_digest, output_digest)]
            if args["task_type"] == "OutputOnly":
                task.attachments.set(
                    Attachment("input_%03d.txt" % i, input_digest))
        public_testcases = load(conf, None, ["public_testcases", "risultati"],
                                conv=lambda x: "" if x is None else x)
        if public_testcases == "all":
            for t in args["testcases"]:
                t.public = True
        elif len(public_testcases) > 0:
            for x in public_testcases.split(","):
                args["testcases"][int(x.strip())].public = True
        args["testcases"] = dict((tc.codename, tc) for tc in args["testcases"])
        args["managers"] = dict((mg.filename, mg) for mg in args["managers"])

        dataset = Dataset(**args)
        task.active_dataset = dataset

        # Import was successful
        os.remove(os.path.join(self.path, ".import_error"))

        logger.info("Task parameters loaded.")

        return task
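The subtask-detection loop above is the most intricate part of this loader, so
here is a minimal, self-contained sketch of the same GEN-parsing idea. The
function name parse_gen and the sample input are illustrative only, and the
"testcase and command on the same line" error check is omitted for brevity:

def parse_gen(lines):
    """Return ("Sum", n_testcases) or ("GroupMin", [[points, count], ...])."""
    subtasks = []
    testcases = 0
    points = None
    for line in lines:
        testcase, _, comment = line.strip().partition('#')
        testcase = testcase.strip()
        comment = comment.strip()
        if comment.startswith('ST:'):
            # A "# ST: <points>" comment closes the previous subtask
            # (if any) and opens a new one.
            if points is not None:
                subtasks.append([points, testcases])
            testcases = 0
            points = int(comment[3:].strip())
        elif testcase or comment.startswith('COPY:'):
            # Any non-blank generator line (or a COPY command) counts
            # as one testcase.
            testcases += 1
    if points is None:
        # No "ST:" markers at all: score with Sum over all testcases.
        return "Sum", testcases
    subtasks.append([points, testcases])
    return "GroupMin", subtasks

# A GEN file with two subtasks worth 0 and 100 points:
print(parse_gen(["# ST: 0", "1 10", "# ST: 100", "2 1000", "3 1000"]))
# -> ('GroupMin', [[0, 1], [100, 2]])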
Example No. 14
    def get_task(self, name):
        """See docstring in class Loader.

        """
        try:
            num = self.tasks_order[name]

        # Here we expose an undocumented behavior, so that cmsMake can
        # import a task even without the whole contest; this is not to
        # be relied upon in general
        except AttributeError:
            num = 1

        task_path = os.path.join(self.path, "problems", name)

        logger.info("Loading parameters for task %s." % name)

        args = {}

        # Here we update the time of the last import
        touch(os.path.join(task_path, ".itime"))
        # If this file is not deleted, then the import failed
        touch(os.path.join(task_path, ".import_error"))

        args["num"] = num

        # get alphabetical task index for use in title

        index = None
        contest_tree = ET.parse(os.path.join(self.path, "contest.xml"))
        contest_root = contest_tree.getroot()
        for problem in contest_root.find('problems'):
            if os.path.basename(problem.attrib['url']) == name:
                index = problem.attrib['index']

        tree = ET.parse(os.path.join(task_path, "problem.xml"))
        root = tree.getroot()

        args["name"] = name
        if index is not None:
            args["title"] = index.upper() + '. '
        else:
            args["title"] = ''
        args["title"] += root.find('names') \
            .find("name[@language='%s']" % self.primary_language) \
            .attrib['value']

        args["statements"] = []
        args["primary_statements"] = []
        for language in self.languages:
            path = os.path.join(task_path, 'statements',
                                '.pdf', language, 'problem.pdf')
            if os.path.exists(path):
                lang = LANGUAGE_MAP[language]
                digest = self.file_cacher.put_file_from_path(
                    path,
                    "Statement for task %s (lang: %s)" % (name,
                                                          language))
                args["statements"].append(Statement(lang, digest))
                args["primary_statements"].append(lang)
        args["primary_statements"] = '["%s"]' % \
            '","'.join(args["primary_statements"])
        args["submission_format"] = [SubmissionFormatElement("%s.%%l" % name)]

#        args['max_submission_number'] = 100
#        args['max_user_test_number'] = 100
#        args['min_submission_interval'] = make_timedelta(60)
#        args['min_user_test_interval'] = make_timedelta(60)

#        args['max_user_test_number'] = 10
#        args['min_user_test_interval'] = make_timedelta(60)

#        args['token_mode'] = 'infinite'
#        args['token_max_number'] = 100
#        args['token_min_interval'] = make_timedelta(60)
#        args['token_gen_initial'] = 1
#        args['token_gen_number'] = 1
#        args['token_gen_interval'] = make_timedelta(1800)
#        args['token_gen_max'] = 2

        task_cms_conf_path = os.path.join(task_path, 'files')
        task_cms_conf = None
        if os.path.exists(os.path.join(task_cms_conf_path, 'cms_conf.py')):
            sys.path.append(task_cms_conf_path)
            logger.info("Found additional CMS options for task %s." % name)
            task_cms_conf = __import__('cms_conf')
            # TODO: probably should find more clever way to get rid of caching
            task_cms_conf = reload(task_cms_conf)
            sys.path.pop()
        if task_cms_conf is not None and hasattr(task_cms_conf, "general"):
            args.update(task_cms_conf.general)

        task = Task(**args)

        judging = root.find('judging')
        testset = None
        for testset in judging:
            testset_name = testset.attrib["name"]

            args = {}
            args["task"] = task
            args["description"] = testset_name
            args["autojudge"] = False

            tl = float(testset.find('time-limit').text)
            ml = float(testset.find('memory-limit').text)
            args["time_limit"] = tl * 0.001
            args["memory_limit"] = int(ml / (1024 * 1024))

            args["managers"] = []
            infile_param = judging.attrib['input-file']
            outfile_param = judging.attrib['output-file']

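            # Polygon checkers are written against testlib.h; the pipeline
            # below rewrites the include to point at the locally installed
            # header, then builds a statically linked checker binary.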
            checker_src = os.path.join(task_path, "files", "check.cpp")
            if os.path.exists(checker_src):
                logger.info("Checker found, compiling")
                checker_exe = os.path.join(task_path, "files", "checker")
                os.system("cat %s | \
                    sed 's$testlib.h$/usr/local/include/cms/testlib.h$' | \
                    g++ -x c++ -O2 -static -o %s -" %
                          (checker_src, checker_exe))
                digest = self.file_cacher.put_file_from_path(
                    checker_exe,
                    "Manager for task %s" % name)
                args["managers"] += [
                    Manager("checker", digest)]
                evaluation_param = "comparator"
            else:
                logger.info("Checker not found, using diff")
                evaluation_param = "diff"

            args["task_type"] = "Batch"
            args["task_type_parameters"] = \
                '["%s", ["%s", "%s"], "%s"]' % \
                ("alone", infile_param, outfile_param, evaluation_param)

            args["score_type"] = "Sum"
            total_value = 100.0
            input_value = 0.0

            testcases = int(testset.find('test-count').text)

            n_input = testcases
            if n_input != 0:
                input_value = total_value / n_input
            args["score_type_parameters"] = str(input_value)

            args["testcases"] = []

            for i in xrange(testcases):
                infile = os.path.join(task_path, testset_name,
                                      "%02d" % (i + 1))
                outfile = os.path.join(task_path, testset_name,
                                       "%02d.a" % (i + 1))
                if self.dos2unix_found:
                    os.system('dos2unix -q %s' % (infile, ))
                    os.system('dos2unix -q %s' % (outfile, ))
                input_digest = self.file_cacher.put_file_from_path(
                    infile,
                    "Input %d for task %s" % (i, name))
                output_digest = self.file_cacher.put_file_from_path(
                    outfile,
                    "Output %d for task %s" % (i, name))
                testcase = Testcase("%03d" % (i, ), False,
                                    input_digest, output_digest)
                testcase.public = True
                args["testcases"] += [testcase]

            if task_cms_conf is not None and \
               hasattr(task_cms_conf, "datasets") and \
               testset_name in task_cms_conf.datasets:
                args.update(task_cms_conf.datasets[testset_name])

            dataset = Dataset(**args)
            if testset_name == "tests":
                task.active_dataset = dataset

        os.remove(os.path.join(task_path, ".import_error"))

        logger.info("Task parameters loaded.")
        return task
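Unlike the Italian loader, the Polygon format stores limits in milliseconds
and bytes, which the testset loop above converts into the seconds and MiB that
CMS expects. A tiny illustrative sketch of that conversion (the function name
and sample values are made up):

def polygon_limits(time_limit_ms, memory_limit_bytes):
    """Convert Polygon limits (ms, bytes) into CMS limits (s, MiB)."""
    time_limit = float(time_limit_ms) * 0.001
    memory_limit = int(float(memory_limit_bytes) / (1024 * 1024))
    return time_limit, memory_limit

# A 2000 ms / 256 MiB testset becomes:
print(polygon_limits(2000, 268435456))  # -> (2.0, 256)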
Example No. 15
    def get_contest(self):
        """See docstring in class ContestLoader."""
        if not os.path.exists(os.path.join(self.path, "contest.yaml")):
            logger.critical("File missing: \"contest.yaml\"")
            return None

        conf = load_yaml_from_path(os.path.join(self.path, "contest.yaml"))

        # Here we update the time of the last import
        touch(os.path.join(self.path, ".itime_contest"))
        # If this file is not deleted, then the import failed
        touch(os.path.join(self.path, ".import_error_contest"))

        args = {}

        # Contest Information
        load(conf, args, ["name", "nome_breve"])
        logger.info("Loading parameters for contest %s.", args["name"])

        load(conf, args, ["description", "nome"])
        load(conf, args, "allowed_localizations")
        load(conf, args, ["languages", "allowed_programming_languages"])
        load(conf, args, "submissions_download_allowed")
        load(conf, args, "allow_questions")
        load(conf, args, "allow_user_tests")
        load(conf, args, ["score_precision", "score_decimal_places"])

        # Logging in
        load(conf, args, "block_hidden_participations")
        load(conf, args, "allow_password_authentication")
        load(conf, args, "allow_registration")
        load(conf, args, ["ip_restriction","ip_based_login_restriction"])
        load(conf, args, ["ip_autologin","ip_based_autologin"])

        # Tokens parameters
        # Use the new token settings format if detected.
        if "token_mode" in conf:
            load(conf, args, "token_mode")
            load(conf, args, ["token_max_number","maximum_number_of_tokens"])
            load(conf, args, ["token_min_interval","minimum_interval_between_tokens"], conv=make_timedelta)
            load(conf, args, ["token_gen_initial","initial_number_of_tokens"])
            load(conf, args, ["token_gen_number","token_generation_number"])
            load(conf, args, ["token_gen_interval","token_generation_period"], conv=make_timedelta)
            load(conf, args, ["token_gen_max","maximum_accumulated_tokens"])
        # Otherwise fall back on the old one.
        else:
            logger.warning(
                "contest.yaml uses a deprecated format for token settings "
                "which will soon stop being supported; you are advised to "
                "update it.")
            # Determine the mode.
            if conf.get("token_initial", None) is None:
                args["token_mode"] = TOKEN_MODE_DISABLED
            elif conf.get("token_gen_number", 0) > 0 and \
                    conf.get("token_gen_time", 0) == 0:
                args["token_mode"] = TOKEN_MODE_INFINITE
            else:
                args["token_mode"] = TOKEN_MODE_FINITE
            # Set the old default values.
            args["token_gen_initial"] = 0
            args["token_gen_number"] = 0
            args["token_gen_interval"] = timedelta()
            # Copy the parameters to their new names.
            load(conf, args, "token_total", "token_max_number")
            load(conf, args, "token_min_interval", conv=make_timedelta)
            load(conf, args, "token_initial", "token_gen_initial")
            load(conf, args, "token_gen_number")
            load(conf, args, "token_gen_time", "token_gen_interval",
                 conv=make_timedelta)
            load(conf, args, "token_max", "token_gen_max")
            # Remove some corner cases.
            if args["token_gen_initial"] is None:
                args["token_gen_initial"] = 0
            if args["token_gen_interval"].total_seconds() == 0:
                args["token_gen_interval"] = timedelta(minutes=1)

        # Times
        load(conf, args, ["start", "start_time", "start_time_in_utc", "inizio"], conv=make_utc_datetime)
        load(conf, args, ["stop", "end_time", "end_time_in_utc", "fine"], conv=make_utc_datetime)
        load(conf, args, ["per_user_time","length_of_the_contest"], conv=make_timedelta)
        load(conf, args, ["timezone"])

        # Limits
        load(conf, args, ["max_submission_number","maximum_number_of_submissions"])
        load(conf, args, ["max_user_test_number","maximum_number_of_user_tests"])
        load(conf, args, ["min_submission_interval","minimum_interval_between_submissions"], conv=make_timedelta)
        load(conf, args, ["min_user_test_interval","minimum_interval_between_user_tests"], conv=make_timedelta)

        # Analysis
        load(conf, args, ["analysis_enabled", "enabled"])
        load(conf, args, ["analysis_start", "analysis_mode_start_time_in_utc"], conv=make_utc_datetime)
        load(conf, args, ["analysis_stop", "analysis_mode_end_time_in_utc"], conv=make_utc_datetime)

        taskpaths = load(conf, None, ["tasks", "problemi"])

        # Read the true name of each task from its configuration, taking
        # care not to clobber the contest-level conf, which is still
        # needed below.
        tasks = []
        self.task_path_map = {}
        for taskpath in taskpaths:
            name = taskpath
            try:
                task_conf = load_yaml_from_path(
                    os.path.join(self.path, taskpath, "task.yaml"))
                name = task_conf.get("name", taskpath)
            except Exception:
                try:
                    task_conf = load_yaml_from_path(
                        os.path.join(self.path, taskpath + ".yaml"))
                    name = task_conf.get("name", taskpath)
                except Exception:
                    pass

            tasks.append(name)
            self.task_path_map[name] = os.path.join(self.path, taskpath)

        participations = load(conf, None, ["users", "participations"])
        if participations:
            for p in participations:
                p["password"] = build_password(p["password"])

        # Import was successful
        os.remove(os.path.join(self.path, ".import_error_contest"))

        logger.info("Contest parameters loaded.")

        return Contest(**args), tasks, participations
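Every loader in this collection funnels configuration through the small load
helper. Its implementation is not shown here, but its contract can be inferred
from the call sites; the sketch below is an assumption consistent with that
usage, not the actual CMS code:

def load(conf, args, src, dst=None, conv=None):
    """Copy a value from conf into args under the (possibly renamed)
    key dst.  src may be a single key or a list of alternative names
    (new and legacy); dst defaults to the first name.  If args is None
    the value is returned instead of stored.  conv, if given, is
    applied to non-None values."""
    keys = [src] if isinstance(src, str) else src
    if dst is None:
        dst = keys[0]
    for key in keys:
        if key in conf:
            value = conf[key]
            if value is not None and conv is not None:
                value = conv(value)
            if args is None:
                return value
            args[dst] = value
            return None
    return None

# Usage patterns seen above:
#     load(conf, args, "token_mode")                       # plain copy
#     load(conf, args, ["start", "inizio"], conv=make_datetime)
#     load(conf, args, "token_total", "token_max_number")   # rename
#     n = load(conf, None, "num_processes")                 # fetch only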
Example No. 16
    def get_contest(self):
        """See docstring in class ContestLoader."""
        if not os.path.exists(os.path.join(self.path, "contest.yaml")):
            logger.critical("File missing: \"contest.yaml\"")
            return None

        with io.open(os.path.join(self.path, "contest.yaml"),
                     "rt", encoding="utf-8") as f:
            conf = yaml.safe_load(f)

        # Here we update the time of the last import
        touch(os.path.join(self.path, ".itime_contest"))
        # If this file is not deleted, then the import failed
        touch(os.path.join(self.path, ".import_error_contest"))

        args = {}

        load(conf, args, ["name", "nome_breve"])
        load(conf, args, ["description", "nome"])
        load(conf, args, "presentation")
        load(conf, args, "path_to_logo")
        load(conf, args, "timezone")

        pr_temp = load(conf, None, "presentation_translations")
        if pr_temp:
            presentation_translations = {
                language: Presentation(language, pr_temp[language])
                for language in pr_temp
            }
        else:
            presentation_translations = {}
        args["presentation_translations"] = presentation_translations

        # Authentication methods
        oic_info = load(conf, None, "oic_info")
        if oic_info:
            args["auth_type"] = "OpenIDConnect"
            with open(os.path.join(self.path, oic_info), "r") as f:
                args["openidconnect_info"] = f.read()

        # To limit the interface to a few languages
        load(conf, args, "allowed_localizations")

        # Allowed programming languages
        plang = load(conf, None, "allowed_languages")
        if plang:
            args["languages"] = plang

        logger.info("Loading parameters for contest %s.", args["name"])

        # Use the new token settings format if detected.
        if "token_mode" in conf:
            load(conf, args, "token_mode")
            load(conf, args, "token_max_number")
            load(conf, args, "token_min_interval", conv=make_timedelta)
            load(conf, args, "token_gen_initial")
            load(conf, args, "token_gen_number")
            load(conf, args, "token_gen_interval", conv=make_timedelta)
            load(conf, args, "token_gen_max")
        # Otherwise fall back on the old one.
        else:
            logger.warning(
                "contest.yaml uses a deprecated format for token settings "
                "which will soon stop being supported; you are advised to "
                "update it.")
            # Determine the mode.
            if conf.get("token_initial", None) is None:
                args["token_mode"] = TOKEN_MODE_DISABLED
            elif conf.get("token_gen_number", 0) > 0 and \
                    conf.get("token_gen_time", 0) == 0:
                args["token_mode"] = TOKEN_MODE_INFINITE
            else:
                args["token_mode"] = TOKEN_MODE_FINITE
            # Set the old default values.
            args["token_gen_initial"] = 0
            args["token_gen_number"] = 0
            args["token_gen_interval"] = timedelta()
            # Copy the parameters to their new names.
            load(conf, args, "token_total", "token_max_number")
            load(conf, args, "token_min_interval", conv=make_timedelta)
            load(conf, args, "token_initial", "token_gen_initial")
            load(conf, args, "token_gen_number")
            load(conf, args, "token_gen_time", "token_gen_interval",
                 conv=make_timedelta)
            load(conf, args, "token_max", "token_gen_max")
            # Remove some corner cases.
            if args["token_gen_initial"] is None:
                args["token_gen_initial"] = 0
            if args["token_gen_interval"].total_seconds() == 0:
                args["token_gen_interval"] = timedelta(minutes=1)

        load(conf, args, ["start", "inizio"], conv=make_datetime)
        load(conf, args, ["stop", "fine"], conv=make_datetime)
        load(conf, args, ["per_user_time"], conv=make_timedelta)

        load(conf, args, "max_submission_number")
        load(conf, args, "max_user_test_number")
        load(conf, args, "min_submission_interval", conv=make_timedelta)
        load(conf, args, "min_user_test_interval", conv=make_timedelta)

        tasks = load(conf, None, ["tasks", "problemi"])
        participations = load(conf, None, ["users", "utenti"])
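        # Normalize each raw password into the stored-password format
        # that CMS expects.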
        if participations is not None:
            for p in participations:
                p["password"] = build_password(p["password"])

        # Import was successful
        os.remove(os.path.join(self.path, ".import_error_contest"))

        logger.info("Contest parameters loaded.")

        return Contest(**args), tasks, participations
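Examples 15 and 16 (and the task loader in Example 13) duplicate the same
deprecated-token fallback verbatim, so it is a natural candidate for
extraction. The helper below is a hypothetical refactoring that mirrors that
branch; it assumes the TOKEN_MODE_* constants and make_timedelta come from the
surrounding CMS modules:

from datetime import timedelta

def legacy_token_settings(conf):
    """Translate deprecated token_* keys into the new token settings."""
    # Assumes TOKEN_MODE_* and make_timedelta are imported from CMS.
    args = {
        "token_gen_initial": conf.get("token_initial") or 0,
        "token_gen_number": conf.get("token_gen_number", 0),
        "token_gen_interval": timedelta(),
    }
    # Determine the mode from the old keys.
    if conf.get("token_initial") is None:
        args["token_mode"] = TOKEN_MODE_DISABLED
    elif args["token_gen_number"] > 0 and conf.get("token_gen_time", 0) == 0:
        args["token_mode"] = TOKEN_MODE_INFINITE
    else:
        args["token_mode"] = TOKEN_MODE_FINITE
    # Copy the renamed parameters when present.
    if "token_total" in conf:
        args["token_max_number"] = conf["token_total"]
    if "token_min_interval" in conf:
        args["token_min_interval"] = \
            make_timedelta(conf["token_min_interval"])
    if "token_gen_time" in conf:
        args["token_gen_interval"] = make_timedelta(conf["token_gen_time"])
    if "token_max" in conf:
        args["token_gen_max"] = conf["token_max"]
    # Same corner case as above: never allow a zero generation interval.
    if args["token_gen_interval"].total_seconds() == 0:
        args["token_gen_interval"] = timedelta(minutes=1)
    return args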