Example #1
    def get_task(self, get_statement):
        """
        See docstring in base_loader.
        """

        # Cannot import a task with generation errors.
        if os.path.isfile(self.task_error_mark):
            raise Exception("Task has an error mark: %s" %
                            self.task_error_mark)
        if not os.path.isfile(self.task_ok_mark):
            raise Exception("Task does not have an okay mark: %s" %
                            self.task_ok_mark)

        # Mark this import as an error until we finish.
        touch(self.contest_error_mark)
        if os.path.isfile(self.contest_ok_mark):
            os.remove(self.contest_ok_mark)

        args = {}

        self.put_names(args)
        if get_statement:
            self.put_statements(args)
        self.put_score_mode(args)
        self.put_task_submission_format(args)
        self.put_attachments(args)

        task = Task(**args)
        task.active_dataset = self.create_dataset(task)

        # Success - mark this task as okay.
        touch(self.contest_ok_mark)
        os.remove(self.contest_error_mark)
        return task
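
The loader above tracks import state with marker files on disk (task_error_mark, task_ok_mark, contest_error_mark, contest_ok_mark) and a touch helper that is defined elsewhere. A minimal sketch of such a helper, assuming plain filesystem semantics (the project's real utility may differ):

import os

def touch(path):
    """Create the file if it does not exist and update its modification time."""
    with open(path, "ab"):
        os.utime(path, None)
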
Example #2
    def post(self):
        fallback_page = "/tasks/add"

        try:
            attrs = dict()

            self.get_string(attrs, "name", empty=None)
            self.get_string(attrs, "category")

            assert attrs.get("name") is not None, "No task name specified."
            attrs["title"] = attrs["name"]

            # Set default submission format as ["taskname.%l"]
            attrs["submission_format"] = \
                [SubmissionFormatElement("%s.%%l" % attrs["name"])]

            # Create the task.
            task = Task(**attrs)
            self.sql_session.add(task)

        except Exception as error:
            self.application.service.add_notification(make_datetime(),
                                                      "Invalid field(s)",
                                                      repr(error))
            self.redirect(fallback_page)
            return

        try:
            attrs = dict()

            # Create its first dataset.
            attrs["description"] = "Default"
            attrs["autojudge"] = True
            attrs["task_type"] = "Batch"
            attrs["task_type_parameters"] = '["alone", ["", ""], "diff"]'
            attrs["score_type"] = "Sum"
            attrs["score_type_parameters"] = '100'
            attrs["task"] = task
            dataset = Dataset(**attrs)
            self.sql_session.add(dataset)

            # Make the dataset active. Life works better that way.
            task.active_dataset = dataset

        except Exception as error:
            self.application.service.add_notification(make_datetime(),
                                                      "Invalid field(s)",
                                                      repr(error))
            self.redirect(fallback_page)
            return

        if self.try_commit():
            # Create the task on RWS.
            self.application.service.proxy_service.reinitialize()
            self.redirect("/task/%s" % task.id)
        else:
            self.redirect(fallback_page)
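
The handler above stores task_type_parameters as a JSON string. For CMS's Batch task type the three elements are the compilation mode ("alone" means no grader is linked in), the input and output filenames (empty strings mean stdin/stdout), and the evaluation mode ("diff" or "comparator"). A short decoding sketch, assuming that layout:

import json

params = json.loads('["alone", ["", ""], "diff"]')
compilation, (input_filename, output_filename), evaluation = params
# compilation == "alone": no grader stub is compiled with the submission
# "" filenames: the solution reads stdin and writes stdout
# evaluation == "diff": outputs are checked with a whitespace-tolerant diff
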
Example #3
    def get_task(self, get_statement=True):
        """See docstring in class Loader.

        """

        logger.info("Checking dos2unix presence")
        i = os.system('dos2unix -V 2>/dev/null')
        self.dos2unix_found = (i == 0)
        if not self.dos2unix_found:
            logger.error("dos2unix not found - tests will not be converted!")

        name = os.path.basename(self.path)
        logger.info("Loading parameters for task %s.", name)

        args = {}

        # Here we update the time of the last import.
        touch(os.path.join(self.path, ".itime"))
        # If this file is not deleted, then the import failed.
        touch(os.path.join(self.path, ".import_error"))

        # Get alphabetical task index for use in title.

        tree = ET.parse(os.path.join(self.path, "problem.xml"))
        root = tree.getroot()

        args["name"] = name
        args["title"] = root.find('names').find("name").attrib['value']

        if get_statement:
            args["statements"] = []
            args["primary_statements"] = []
            for language, language_code in LANGUAGE_MAP.iteritems():
                path = os.path.join(self.path, 'statements',
                                    '.pdf', language, 'problem.pdf')
                if os.path.exists(path):
                    lang = LANGUAGE_MAP[language]
                    digest = self.file_cacher.put_file_from_path(
                        path,
                        "Statement for task %s (lang: %s)" % (name,
                                                              language))
                    args["statements"].append(Statement(lang, digest))
                    args["primary_statements"].append(lang)
            args["primary_statements"] = json.dumps(args["primary_statements"])

        args["submission_format"] = [SubmissionFormatElement("%s.%%l" % name)]

        # These options cannot be configured in the Polygon format.
        # Uncomment the following to set specific values for them.

        # args['max_submission_number'] = 100
        # args['max_user_test_number'] = 100
        # args['min_submission_interval'] = make_timedelta(60)
        # args['min_user_test_interval'] = make_timedelta(60)

        # args['max_user_test_number'] = 10
        # args['min_user_test_interval'] = make_timedelta(60)

        # args['token_mode'] = 'infinite'
        # args['token_max_number'] = 100
        # args['token_min_interval'] = make_timedelta(60)
        # args['token_gen_initial'] = 1
        # args['token_gen_number'] = 1
        # args['token_gen_interval'] = make_timedelta(1800)
        # args['token_gen_max'] = 2

        task_cms_conf_path = os.path.join(self.path, 'files')
        task_cms_conf = None
        if os.path.exists(os.path.join(task_cms_conf_path, 'cms_conf.py')):
            sys.path.append(task_cms_conf_path)
            logger.info("Found additional CMS options for task %s.", name)
            task_cms_conf = __import__('cms_conf')
            # TODO: probably should find more clever way to get rid of caching
            task_cms_conf = reload(task_cms_conf)
            sys.path.pop()
        if task_cms_conf is not None and hasattr(task_cms_conf, "general"):
            args.update(task_cms_conf.general)

        task = Task(**args)

        judging = root.find('judging')
        testset = None
        for testset in judging:
            testset_name = testset.attrib["name"]

            args = {}
            args["task"] = task
            args["description"] = testset_name
            args["autojudge"] = False

            tl = float(testset.find('time-limit').text)
            ml = float(testset.find('memory-limit').text)
            args["time_limit"] = tl * 0.001
            args["memory_limit"] = int(ml / (1024 * 1024))

            args["managers"] = []
            infile_param = judging.attrib['input-file']
            outfile_param = judging.attrib['output-file']

            checker_src = os.path.join(self.path, "files", "check.cpp")
            if os.path.exists(checker_src):
                logger.info("Checker found, compiling")
                checker_exe = os.path.join(self.path, "files", "checker")
                testlib_path = "/usr/local/include/cms/testlib.h"
                if not config.installed:
                    testlib_path = os.path.join(os.path.dirname(__file__),
                                                "polygon", "testlib.h")
                os.system("cat %s | \
                    sed 's$testlib.h$%s$' | \
                    g++ -x c++ -O2 -static -o %s -" %
                          (checker_src, testlib_path, checker_exe))
                digest = self.file_cacher.put_file_from_path(
                    checker_exe,
                    "Manager for task %s" % name)
                args["managers"] += [
                    Manager("checker", digest)]
                evaluation_param = "comparator"
            else:
                logger.info("Checker not found, using diff")
                evaluation_param = "diff"

            args["task_type"] = "Batch"
            args["task_type_parameters"] = \
                '["%s", ["%s", "%s"], "%s"]' % \
                ("alone", infile_param, outfile_param, evaluation_param)

            args["score_type"] = "Sum"
            total_value = 100.0
            input_value = 0.0

            testcases = int(testset.find('test-count').text)

            n_input = testcases
            if n_input != 0:
                input_value = total_value / n_input
            args["score_type_parameters"] = str(input_value)

            args["testcases"] = []

            for i in xrange(testcases):
                infile = os.path.join(self.path, testset_name,
                                      "%02d" % (i + 1))
                outfile = os.path.join(self.path, testset_name,
                                       "%02d.a" % (i + 1))
                if self.dos2unix_found:
                    os.system('dos2unix -q %s' % (infile, ))
                    os.system('dos2unix -q %s' % (outfile, ))
                input_digest = self.file_cacher.put_file_from_path(
                    infile,
                    "Input %d for task %s" % (i, name))
                output_digest = self.file_cacher.put_file_from_path(
                    outfile,
                    "Output %d for task %s" % (i, name))
                testcase = Testcase("%03d" % (i, ), False,
                                    input_digest, output_digest)
                testcase.public = True
                args["testcases"] += [testcase]

            if task_cms_conf is not None and \
               hasattr(task_cms_conf, "datasets") and \
               testset_name in task_cms_conf.datasets:
                args.update(task_cms_conf.datasets[testset_name])

            dataset = Dataset(**args)
            if testset_name == "tests":
                task.active_dataset = dataset

        os.remove(os.path.join(self.path, ".import_error"))

        logger.info("Task parameters loaded.")
        return task
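
The statement loop above iterates over a LANGUAGE_MAP defined outside the snippet, which maps Polygon statement directory names to CMS language codes. A hypothetical illustration of its shape (the real module defines its own entries):

# Illustrative only; not the actual mapping used by the loader.
LANGUAGE_MAP = {
    "english": "en",
    "russian": "ru",
    "italian": "it",
}
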
Example #4
File: tps.py Project: cms-dev/cms
    def get_task(self, get_statement=True):
        """See docstring in class Loader.

        """

        json_src = os.path.join(self.path, 'problem.json')
        if not os.path.exists(json_src):
            logger.critical('No task found.')
            raise OSError('No task found at path %s' % json_src)
        with open(json_src, 'rt', encoding='utf-8') as json_file:
            data = json.load(json_file)

        name = data['code']
        logger.info("Loading parameters for task %s.", name)

        args = {}

        args["name"] = name
        args["title"] = data['name']

        # Statements
        if get_statement:
            statements_dir = os.path.join(self.path, 'statements')
            if os.path.exists(statements_dir):
                statements = [
                    filename
                    for filename in os.listdir(statements_dir)
                    if filename[-4:] == ".pdf"]
                if len(statements) > 0:
                    args['statements'] = dict()
                    logger.info('Statements found')
                for statement in statements:
                    language = statement[:-4]
                    if language == "en_US":
                        args["primary_statements"] = ["en_US"]
                    digest = self.file_cacher.put_file_from_path(
                        os.path.join(statements_dir, statement),
                        "Statement for task %s (lang: %s)" %
                        (name, language))
                    args['statements'][language] = Statement(language, digest)

        # Attachments
        args["attachments"] = dict()
        attachments_dir = os.path.join(self.path, 'attachments')
        if os.path.exists(attachments_dir):
            logger.info("Attachments found")
            for filename in os.listdir(attachments_dir):
                digest = self.file_cacher.put_file_from_path(
                    os.path.join(attachments_dir, filename),
                    "Attachment %s for task %s" % (filename, name))
                args["attachments"][filename] = Attachment(filename, digest)

        data["task_type"] = \
            data["task_type"][0].upper() + data["task_type"][1:]

        # Setting the submission format
        # Obtaining testcases' codename
        testcases_dir = os.path.join(self.path, 'tests')
        if not os.path.exists(testcases_dir):
            logger.warning('Testcase folder was not found')
            testcase_codenames = []
        else:
            testcase_codenames = sorted([
                filename[:-3]
                for filename in os.listdir(testcases_dir)
                if filename[-3:] == '.in'])
        if data["task_type"] == 'OutputOnly':
            args["submission_format"] = list()
            for codename in testcase_codenames:
                args["submission_format"].append("%s.out" % codename)
        elif data["task_type"] == 'Notice':
            args["submission_format"] = list()
        else:
            args["submission_format"] = ["%s.%%l" % name]

        # These options cannot be configured in the TPS format.
        # Uncomment the following to set specific values for them.

        # args['max_user_test_number'] = 10
        # args['min_user_test_interval'] = make_timedelta(60)

        # args['token_mode'] = 'infinite'
        # args['token_max_number'] = 100
        # args['token_min_interval'] = make_timedelta(60)
        # args['token_gen_initial'] = 1
        # args['token_gen_number'] = 1
        # args['token_gen_interval'] = make_timedelta(1800)
        # args['token_gen_max'] = 2

        if "score_precision" in data:
            args['score_precision'] = int(data["score_precision"])
        else:
            args['score_precision'] = 2
        args['max_submission_number'] = 50
        args['max_user_test_number'] = 50
        if data["task_type"] == 'OutputOnly':
            args['max_submission_number'] = 100
            args['max_user_test_number'] = 100

        args['min_submission_interval'] = make_timedelta(60)
        args['min_user_test_interval'] = make_timedelta(60)

        task = Task(**args)

        args = dict()

        args["task"] = task
        args["description"] = "Default"
        args["autojudge"] = True

        if data['task_type'] != 'OutputOnly' \
                and data['task_type'] != 'Notice':
            args["time_limit"] = float(data['time_limit'])
            args["memory_limit"] = int(data['memory_limit'])

        args["managers"] = {}

        # Checker
        checker_dir = os.path.join(self.path, "checker")
        checker_src = os.path.join(checker_dir, "checker.cpp")

        if os.path.exists(checker_src):
            logger.info("Checker found, compiling")
            checker_exe = os.path.join(checker_dir, "checker")
            subprocess.call([
                "g++", "-x", "c++", "-std=gnu++14", "-O2", "-static",
                "-o", checker_exe, checker_src
            ])
            digest = self.file_cacher.put_file_from_path(
                checker_exe,
                "Manager for task %s" % name)
            args["managers"]['checker'] = Manager("checker", digest)
            evaluation_param = "comparator"
        else:
            logger.info("Checker not found, using diff if necessary")
            evaluation_param = "diff"

        # Note that the original TPS worked with custom task type Batch2017
        # and Communication2017 instead of Batch and Communication.
        args["task_type"] = data['task_type']
        args["task_type_parameters"] = \
            self._get_task_type_parameters(
                data, data['task_type'], evaluation_param)

        # Graders
        graders_dir = os.path.join(self.path, 'graders')

        if data['task_type'] == 'TwoSteps':
            pas_manager = name + 'lib.pas'
            pas_manager_path = os.path.join(graders_dir, pas_manager)
            if not os.path.exists(pas_manager_path):
                digest = self.file_cacher.put_file_content(
                    ''.encode('utf-8'), 'Pascal manager for task %s' % name)
                args["managers"][pas_manager] = Manager(pas_manager, digest)

        if not os.path.exists(graders_dir):
            logger.warning('Grader folder was not found')
            graders_list = []
        else:
            graders_list = \
                [filename
                 for filename in os.listdir(graders_dir)
                 if filename != 'manager.cpp']
        for grader_name in graders_list:
            grader_src = os.path.join(graders_dir, grader_name)
            digest = self.file_cacher.put_file_from_path(
                grader_src,
                "Manager for task %s" % name)
            if data['task_type'] == 'Communication' \
                    and os.path.splitext(grader_name)[0] == 'grader':
                grader_name = 'stub' + os.path.splitext(grader_name)[1]
            args["managers"][grader_name] = Manager(grader_name, digest)

        # Manager
        manager_src = os.path.join(graders_dir, 'manager.cpp')

        if os.path.exists(manager_src):
            logger.info("Manager found, compiling")
            manager_exe = os.path.join(graders_dir, "manager")
            subprocess.call([
                "g++", "-x", "c++", "-O2", "-static",
                "-o", manager_exe, manager_src
            ])
            digest = self.file_cacher.put_file_from_path(
                manager_exe,
                "Manager for task %s" % name)
            args["managers"]["manager"] = Manager("manager", digest)

        # Testcases
        args["testcases"] = {}

        for codename in testcase_codenames:
            infile = os.path.join(testcases_dir, "%s.in" % codename)
            outfile = os.path.join(testcases_dir, "%s.out" % codename)
            if not os.path.exists(outfile):
                logger.critical(
                    'Could not find the output file for testcase %s', codename)
                logger.critical('Aborting...')
                return

            input_digest = self.file_cacher.put_file_from_path(
                infile,
                "Input %s for task %s" % (codename, name))
            output_digest = self.file_cacher.put_file_from_path(
                outfile,
                "Output %s for task %s" % (codename, name))
            testcase = Testcase(codename, True,
                                input_digest, output_digest)
            args["testcases"][codename] = testcase

        # Score Type
        subtasks_dir = os.path.join(self.path, 'subtasks')
        if not os.path.exists(subtasks_dir):
            logger.warning('Subtask folder was not found')
            subtasks = []
        else:
            subtasks = sorted(os.listdir(subtasks_dir))

        if len(subtasks) == 0:
            number_tests = max(len(testcase_codenames), 1)
            args["score_type"] = "Sum"
            args["score_type_parameters"] = 100 / number_tests
        else:
            args["score_type"] = "GroupMin"
            parsed_data = []
            subtask_no = -1
            add_optional_name = False
            for subtask in subtasks:
                subtask_no += 1
                with open(os.path.join(subtasks_dir, subtask), 'rt',
                          encoding='utf-8') as subtask_json:
                    subtask_data = json.load(subtask_json)
                    score = int(subtask_data["score"])
                    testcases = "|".join(
                        re.escape(testcase)
                        for testcase in subtask_data["testcases"]
                    )
                    optional_name = "Subtask %d" % subtask_no
                    if subtask_no == 0 and score == 0:
                        add_optional_name = True
                        optional_name = "Samples"
                    if add_optional_name:
                        parsed_data.append([score, testcases, optional_name])
                    else:
                        parsed_data.append([score, testcases])
            args["score_type_parameters"] = parsed_data

        dataset = Dataset(**args)
        task.active_dataset = dataset

        logger.info("Task parameters loaded.")

        return task
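
The dataset's task type parameters come from a _get_task_type_parameters helper that the snippet does not show. A hedged sketch of what such a helper could return, inferred only from how the loader uses evaluation_param and from the parameter layouts seen in the other examples here (the actual TPS loader may encode these differently):

def _get_task_type_parameters(data, task_type, evaluation_param):
    """Illustrative sketch: build task type parameters for the dataset."""
    if task_type == "Batch":
        # [compilation mode, [input file, output file], evaluation mode]
        grader = "grader" if data.get("has_grader") else "alone"  # hypothetical key
        return [grader, ["", ""], evaluation_param]
    if task_type == "OutputOnly":
        return [evaluation_param]
    if task_type == "Communication":
        # [number of user processes], as in the Communication example below
        return [int(data.get("num_processes", 1))]
    return []
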
Example #5
    def get_task(self, get_statement=True):
        """See docstring in class Loader.

        """

        logger.info("Checking dos2unix presence")
        i = os.system('dos2unix -V 2>/dev/null')
        self.dos2unix_found = (i == 0)
        if not self.dos2unix_found:
            logger.error("dos2unix not found - tests will not be converted!")

        name = os.path.basename(self.path)
        logger.info("Loading parameters for task %s.", name)

        args = {}

        # Here we update the time of the last import.
        touch(os.path.join(self.path, ".itime"))
        # If this file is not deleted, then the import failed.
        touch(os.path.join(self.path, ".import_error"))

        # Get alphabetical task index for use in title.

        tree = ET.parse(os.path.join(self.path, "problem.xml"))
        root = tree.getroot()

        args["name"] = name
        args["title"] = str(root.find('names').find("name").attrib['value'])

        if get_statement:
            args["statements"] = {}
            args["primary_statements"] = []
            for language, lang in iteritems(LANGUAGE_MAP):
                path = os.path.join(self.path, 'statements',
                                    '.pdf', language, 'problem.pdf')
                if os.path.exists(path):
                    digest = self.file_cacher.put_file_from_path(
                        path,
                        "Statement for task %s (lang: %s)" % (name,
                                                              language))
                    args["statements"][lang] = Statement(lang, digest)
                    args["primary_statements"].append(lang)

        args["submission_format"] = ["%s.%%l" % name]

        # These options cannot be configured in the Polygon format.
        # Uncomment the following to set specific values for them.

        # args['max_submission_number'] = 100
        # args['max_user_test_number'] = 100
        # args['min_submission_interval'] = make_timedelta(60)
        # args['min_user_test_interval'] = make_timedelta(60)

        # args['max_user_test_number'] = 10
        # args['min_user_test_interval'] = make_timedelta(60)

        # args['token_mode'] = 'infinite'
        # args['token_max_number'] = 100
        # args['token_min_interval'] = make_timedelta(60)
        # args['token_gen_initial'] = 1
        # args['token_gen_number'] = 1
        # args['token_gen_interval'] = make_timedelta(1800)
        # args['token_gen_max'] = 2

        task_cms_conf_path = os.path.join(self.path, 'files', 'cms_conf.py')
        task_cms_conf = None
        if os.path.exists(task_cms_conf_path):
            logger.info("Found additional CMS options for task %s.", name)
            with io.open(task_cms_conf_path, 'rb') as f:
                task_cms_conf = imp.load_module('cms_conf', f,
                                                task_cms_conf_path,
                                                ('.py', 'r', imp.PY_SOURCE))
        if task_cms_conf is not None and hasattr(task_cms_conf, "general"):
            args.update(task_cms_conf.general)

        task = Task(**args)

        judging = root.find('judging')
        testset = None
        for testset in judging:
            testset_name = testset.attrib["name"]

            args = {}
            args["task"] = task
            args["description"] = str(testset_name)
            args["autojudge"] = False

            tl = float(testset.find('time-limit').text)
            ml = int(testset.find('memory-limit').text)
            args["time_limit"] = tl * 0.001
            args["memory_limit"] = ml // (1024 * 1024)

            args["managers"] = {}
            infile_param = judging.attrib['input-file']
            outfile_param = judging.attrib['output-file']

            # Checker can be in any of these two locations.
            checker_src = os.path.join(self.path, "files", "check.cpp")
            if not os.path.exists(checker_src):
                checker_src = os.path.join(self.path, "check.cpp")

            if os.path.exists(checker_src):
                logger.info("Checker found, compiling")
                checker_exe = os.path.join(
                    os.path.dirname(checker_src), "checker")
                testlib_path = "/usr/local/include/cms"
                testlib_include = os.path.join(testlib_path, "testlib.h")
                if not config.installed:
                    testlib_path = os.path.join(os.path.dirname(__file__),
                                                "polygon")
                code = subprocess.call(["g++", "-x", "c++", "-O2", "-static",
                                        "-DCMS", "-I", testlib_path,
                                        "-include", testlib_include,
                                        "-o", checker_exe, checker_src])
                if code != 0:
                    logger.critical("Could not compile checker")
                    return None
                digest = self.file_cacher.put_file_from_path(
                    checker_exe,
                    "Manager for task %s" % name)
                args["managers"]["checker"] = Manager("checker", digest)
                evaluation_param = "comparator"
            else:
                logger.info("Checker not found, using diff")
                evaluation_param = "diff"

            args["task_type"] = "Batch"
            args["task_type_parameters"] = \
                ["alone", [infile_param, outfile_param], evaluation_param]

            args["score_type"] = "Sum"
            total_value = 100.0
            input_value = 0.0

            testcases = int(testset.find('test-count').text)

            n_input = testcases
            if n_input != 0:
                input_value = total_value / n_input
            args["score_type_parameters"] = input_value

            args["testcases"] = {}

            for i in range(testcases):
                infile = os.path.join(self.path, testset_name,
                                      "%02d" % (i + 1))
                outfile = os.path.join(self.path, testset_name,
                                       "%02d.a" % (i + 1))
                if self.dos2unix_found:
                    os.system('dos2unix -q %s' % (infile, ))
                    os.system('dos2unix -q %s' % (outfile, ))
                input_digest = self.file_cacher.put_file_from_path(
                    infile,
                    "Input %d for task %s" % (i, name))
                output_digest = self.file_cacher.put_file_from_path(
                    outfile,
                    "Output %d for task %s" % (i, name))
                testcase = Testcase("%03d" % (i, ), False,
                                    input_digest, output_digest)
                testcase.public = True
                args["testcases"][testcase.codename] = testcase

            if task_cms_conf is not None and \
               hasattr(task_cms_conf, "datasets") and \
               testset_name in task_cms_conf.datasets:
                args.update(task_cms_conf.datasets[testset_name])

            dataset = Dataset(**args)
            if testset_name == "tests":
                task.active_dataset = dataset

        os.remove(os.path.join(self.path, ".import_error"))

        logger.info("Task parameters loaded.")
        return task
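
This method is assumed to live in a loader class that provides self.path and self.file_cacher. A rough usage sketch, assuming the class is the Polygon loader shipped with CMS (module and class names are assumptions and may differ between CMS versions):

# Hypothetical driver code; import paths and the loader class name are assumptions.
from cms.db.filecacher import FileCacher
from cmscontrib.loaders.polygon_format import PolygonTaskLoader

file_cacher = FileCacher()
loader = PolygonTaskLoader("/path/to/polygon/package", file_cacher)
task = loader.get_task(get_statement=True)
if task is not None:
    print(task.name, task.title)
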
Example #6
    def get_task(self, get_statement=True):

        base_path = self.path
        cms_path = os.path.join(base_path, 'cms')
        conf_path = os.path.join(base_path, 'cms', 'task-iif.yaml')

        if not exists(conf_path):
            logger.critical("cannot find \"task-iif.yaml\"")
            return None

        conf = load_yaml(conf_path)
        name = conf['name']

        logger.info("loading parameters for task \"%s\"", name)

        # inherited default
        default_conf_paths = DEFAULT_CONF_PATHS.copy()
        env_default_conf_path = os.environ.get(ENVVAR_NAME_DEFAULT_CONF_PATH)
        if env_default_conf_path is not None:
            default_conf_paths = [env_default_conf_path]

        default_found = False

        for def_path in default_conf_paths:
            if exists(def_path):
                default_conf = load_yaml(def_path)
                default_assign(conf, default_conf, 'primary_language')
                default_assign(conf, default_conf, 'max_submission_number')
                default_assign(conf, default_conf, 'min_submission_interval')
                default_found = True
                break

        if not default_found:
            search_paths = ', '.join(default_conf_paths)
            logging.warning("cannot find default config file (search path: {})".format(search_paths))

        # default
        conf.setdefault('score_mode', SCORE_MODE_MAX_SUBTASK)
        conf.setdefault('primary_language', 'ja')
        conf.setdefault('samples', ['sample-*'])
        conf.setdefault('feedback', ['*'])
        conf.setdefault('version', 'default-dataset')

        # override
        conf['token_mode'] = TOKEN_MODE_DISABLED

        task = {}
        task_type = conf.get('task_type', 'batch').lower()
        score_type = conf.get('score_type', 'normal').lower()

        # general task config
        assign(task, conf, 'name')
        assign(task, conf, 'title')
        task['primary_statements'] = [conf['primary_language']]
        assign(task, conf, 'score_mode')
        assign(task, conf, 'token_mode')
        try_assign(task, conf, 'max_submission_number')
        try_assign(task, conf, 'max_user_test_number')
        try_assign(task, conf, 'min_submission_interval', make_timedelta)
        try_assign(task, conf, 'min_user_test_interval', make_timedelta)
        try_assign(task, conf, 'score_precision')

        sample_regexp = globlist_to_regexp(conf['samples'])
        feedback_regexp = globlist_to_regexp(conf['feedback'])

        # testcases detection
        testcases = {}
        missing_out_testcases = []

        old_input_dir = os.path.join(base_path, 'in')
        new_input_dir = os.path.join(base_path, 'gen', 'in')

        for input_dir in [old_input_dir, new_input_dir]:

            if not os.path.isdir(input_dir):
                continue

            for fname in os.listdir(input_dir):

                m = re.match(r'\A(.+)\.txt\Z', fname)

                if not m:
                    logger.warning("ignored input file: \"%s\"", fname)
                    continue

                codename = m.group(1)
                in_path = os.path.join(input_dir, fname)
                out_path = os.path.join(input_dir, '..', 'out', fname)

                if not exists(out_path):
                    missing_out_testcases.append(codename)
                    out_path = None

                if codename in testcases:
                    logger.warning("duplicated testcase name: \"%s\"", codename)

                testcases[codename] = {
                    'in_path': in_path,
                    'out_path': out_path,
                    'sample': sample_regexp.match(codename) is not None,
                    'feedback': feedback_regexp.match(codename) is not None,
                }

        # additional files detection
        headers = []
        stubs, graders, manager, checker, stub_preload = [], [], None, None, None
        manager_src, checker_src, stub_preload_src = None, None, None

        for fname in os.listdir(cms_path):

            path = os.path.join(cms_path, fname)

            if any(fname.endswith(ext) for ext in HEADER_EXTS):
                headers.append((fname, path))

            for src_ext in SOURCE_EXTS:
                if fname == ('stub%s' % src_ext):
                    stubs.append((fname, path))
                if fname == ('grader%s' % src_ext):
                    graders.append((fname, path))

            if fname == 'manager.cpp':
                manager_src = path
            if fname == 'checker.cpp':
                checker_src = path
            if fname == 'stub_preload.cpp':
                stub_preload_src = path

        # auto compilation
        if manager_src:
            logger.info("manager auto compilation")
            manager = compile_judge_program('manager', os.path.join(cms_path, 'manager'), manager_src)
            if manager is None:
                logger.critical("manager compilation failed")
                return None

        if checker_src:
            logger.info("checker auto compilation")
            checker = compile_judge_program('checker', os.path.join(cms_path, 'checker'), checker_src)
            if checker is None:
                logger.critical("checker compilation failed")
                return None

        if stub_preload_src:
            logger.info("stub_preload auto compilation")
            stub_preload = compile_judge_program('stub_preload', os.path.join(cms_path, 'stub_preload'), stub_preload_src)
            if stub_preload is None:
                logger.critical("stub_preload compilation failed")
                return None

        # statements detection & registration
        if get_statement:

            statements = {}

            primary_language = conf['primary_language']
            pdf_dir = os.path.join(base_path, 'task')
            pdf_files = [
                ('statement.pdf', primary_language),
                ('statement-ja.pdf', 'ja'),
                ('statement-en.pdf', 'en'),
            ]

            for fname, lang in pdf_files:
                path = os.path.join(pdf_dir, fname)
                if exists(path):
                    digest = self.file_cacher.put_file_from_path(path,
                        "statement (%s) for task \"%s\"" % (lang, name))
                    statements[lang] = Statement(lang, digest)

            task['statements'] = statements

            if len(statements) == 0:
                logger.warning("cannot find any task statements")

        # attachments detection
        dist_path = os.path.join(base_path, 'dist')

        zipping_files = []
        dist_files = []

        if exists(dist_path):
            for base, dirs, files in os.walk(dist_path):
                for fname in files:

                    path = os.path.join(base, fname)
                    arc_name = os.path.relpath(path, dist_path)
                    safe_arc_name = arc_name.replace(os.sep, '-')

                    if fname.endswith('.zip'):
                        dist_files.append((path, safe_arc_name))
                    else:
                        zipping_files.append((path, arc_name))

        for codename, testcase in testcases.items():

            in_path = testcase['in_path']
            out_path = testcase['out_path']

            if testcase['sample']:
                zipping_files.append((in_path, "%s-in.txt" % codename))
                if out_path:
                    zipping_files.append((out_path, "%s-out.txt" % codename))
            elif task_type == 'outputonly':
                zipping_files.append((in_path, "input_%s.txt" % codename))

        dataset = {}

        dataset['description'] = conf['version']
        dataset['autojudge'] = False

        # score type parameters
        if score_type == 'normal':

            dataset['score_type_parameters'] = [
                [st['point'], globlist_to_text(st['targets'])]
                for st in conf['subtasks']
            ]
            dataset['score_type'] = 'GroupMin'

        elif score_type == 'truncation':

            score_params = []

            for st in conf['subtasks']:

                opt = st['score_option']
                opt.setdefault('power', 1.0)

                if 'threshold' not in opt:
                    logger.critical("truncation score type requires \"threshold\" parameter")
                    return None

                param = [
                    st['point'],
                    globlist_to_text(st['targets']),
                    opt['threshold'][0],
                    opt['threshold'][1],
                    opt['power'],
                ]

                score_params.append(param)

            dataset['score_type_parameters'] = score_params
            dataset['score_type'] = 'GroupMinTruncation'

        else:

            logger.critical("unknown score type \"%s\"", score_type)
            return None

        # task_type
        grader_param = 'grader' if graders else 'alone'
        eval_param = 'comparator' if checker else 'diff'

        if task_type == 'batch':

            assign(dataset, conf, 'time_limit', float)
            assign(dataset, conf, 'memory_limit', int)

            task['submission_format'] = ["%s.%%l" % name]
            dataset['task_type'] = 'Batch'
            dataset['task_type_parameters'] = \
                [grader_param, ['', ''], eval_param]

        elif task_type == 'outputonly':

            task['submission_format'] = [
                'output_%s.txt' % codename
                for codename in sorted(testcases.keys())]
            dataset['task_type'] = 'OutputOnly'
            dataset['task_type_parameters'] = [eval_param]

        elif task_type == 'communication':

            assign(dataset, conf, 'time_limit', float)
            assign(dataset, conf, 'memory_limit', int)

            if not stubs:
                logger.critical("stub is required for communication task")
                return None
            if not manager:
                logger.critical("manager is required for communication task")
                return None

            task_params = [1]
            submission_format = ["%s.%%l" % name]

            if 'task_option' in conf:

                opt = conf['task_option']

                if 'processes' not in opt:
                    logger.critical("task_option/processes is required")
                    return None
                if 'formats' not in opt:
                    logger.critical("task_option/formats is required")
                    return None

                task_params = [opt['processes']]
                submission_format = [
                    fname for fname in opt['formats']
                ]

            task['submission_format'] = submission_format
            dataset['task_type'] = 'Communication'
            dataset['task_type_parameters'] = task_params

        else:

            logger.critical("unknown task type \"%s\"", task_type)
            return None

        # attachments registration
        attachments = {}

        for path, arc_name in dist_files:
            digest = self.file_cacher.put_file_from_path(
                path, "distribution file for task \"%s\"" % name)
            attachments[arc_name] = Attachment(arc_name, digest)

        # zipfile registration
        if zipping_files:

            # mkstemp's first positional parameter is the suffix, so use
            # keyword arguments to get the intended prefix and extension.
            zip_archive = tempfile.mkstemp(prefix='cms-iimoj-loader-',
                                           suffix='.zip')
            zip_path = zip_archive[1]

            with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_STORED) as fp:
                for path, arc_name in zipping_files:
                    new_arc_name = os.path.join(name, arc_name)
                    fp.write(path, new_arc_name)

            zip_digest = self.file_cacher.put_file_from_path(zip_path,
                "distribution archive for task \"%s\"" % name)
            zip_fname = name + '.zip'
            attachments[zip_fname] = Attachment(zip_fname, zip_digest)
            os.remove(zip_path)

        task['attachments'] = attachments

        # additional files registration
        extra_managers = {}

        extra_files = headers + stubs + graders
        if manager:
            extra_files.append(manager)
        if checker:
            extra_files.append(checker)
        if stub_preload:
            extra_files.append(stub_preload)

        for fname, path in extra_files:
            digest = self.file_cacher.put_file_from_path(path,
                    "extra file \"%s\" for task \"%s\"" % (fname, name))
            logger.info("extra file: \"%s\"", fname)
            extra_managers[fname] = Manager(fname, digest)

        dataset['managers'] = extra_managers

        # testcases registration
        logger.info("registering testcases")

        registered_testcases = {}

        for codename, testcase in testcases.items():

            in_path = testcase['in_path']
            out_path = testcase['out_path']
            feedback = testcase['feedback']

            in_digest = self.file_cacher.put_file_from_path(in_path,
                "input \"%s\" for task \"%s\"" % (codename, name))
            out_digest = None

            if out_path:
                out_digest = self.file_cacher.put_file_from_path(out_path,
                    "output \"%s\" for task \"%s\"" % (codename, name))
            else:
                out_digest = self.file_cacher.put_file_content(b'',
                    "output \"%s\" for task \"%s\"" % (codename, name))

            registered_testcases[codename] = Testcase(codename,
                feedback, in_digest, out_digest)

        logger.info("testcases registration completed")

        dataset['testcases'] = registered_testcases

        # instantiation
        db_task = Task(**task)
        dataset['task'] = db_task
        db_dataset = Dataset(**dataset)
        db_task.active_dataset = db_dataset

        # import result
        logger.info("========== task \"%s\" ==========", name)
        logger.info("tasktype  : %s", task_type)

        if task_type != 'batch':
            logger.info("headers   : [%02d files]", len(headers))
            for fname, _ in sorted(headers):
                logger.info("            * %s", fname)

        if task_type == 'communication':
            logger.info("manager   : %s", "OK" if manager else "--")
            logger.info("stub      : [%02d files]", len(stubs))
            for fname, _ in sorted(stubs):
                logger.info("            * %s", fname)

        if task_type != 'communication':
            logger.info("comparator: %s", "OK" if checker else "--")

        if task_type == 'batch':
            logger.info("grader    : [%02d files]", len(graders))
            for fname, _ in sorted(graders):
                logger.info("            * %s", fname)

        logger.info("zipped    : [%02d files]", len(zipping_files))
        for _, arc_name in sorted(zipping_files):
            logger.info("            * %s", arc_name)
        logger.info("direct    : [%02d files]", len(dist_files))
        for _, arc_name in sorted(dist_files):
            logger.info("            * %s", arc_name)

        if missing_out_testcases and task_type != 'communication':
            pretty = ", ".join(sorted(missing_out_testcases)[:4])
            remain = len(missing_out_testcases) - 4
            if remain > 0:
                pretty += (", (%d more files)" % remain)
            logger.warning("missing output: %s", pretty)

        logger.info("=================%s============", "=" * len(name))

        logger.info("task parameters loaded")

        return db_task
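
This loader depends on several small helpers defined elsewhere in its module (load_yaml, assign, try_assign, default_assign, globlist_to_regexp, globlist_to_text, compile_judge_program). Minimal sketches of a few of them, written only to make the data flow readable; the project's real implementations may differ:

import fnmatch
import re

import yaml


def load_yaml(path):
    """Read a YAML file into a Python dict."""
    with open(path, "rt", encoding="utf-8") as f:
        return yaml.safe_load(f)


def assign(dst, src, key, conv=lambda value: value):
    """Copy a required key from src into dst, applying a conversion."""
    dst[key] = conv(src[key])


def try_assign(dst, src, key, conv=lambda value: value):
    """Copy a key only when it is present in src."""
    if key in src:
        dst[key] = conv(src[key])


def default_assign(conf, default_conf, key):
    """Fill conf[key] from the default configuration when it is missing."""
    if key not in conf and key in default_conf:
        conf[key] = default_conf[key]


def globlist_to_regexp(globs):
    """Compile a list of glob patterns into a single alternation regexp."""
    return re.compile("|".join(fnmatch.translate(g) for g in globs))
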
Example #7
    def get_task(self, name):
        """See docstring in class Loader.

        """
        try:
            num = self.tasks_order[name]

        # Here we expose an undocumented behavior, so that cmsMake can
        # import a task even without the whole contest; this is not to
        # be relied upon in general
        except AttributeError:
            num = 1

        task_path = os.path.join(self.path, name)

        # We first look for the yaml file inside the task folder,
        # and eventually fallback to a yaml file in its parent folder.
        try:
            conf = yaml.safe_load(
                io.open(os.path.join(task_path, "task.yaml"),
                        "rt", encoding="utf-8"))
        except IOError:
            conf = yaml.safe_load(
                io.open(os.path.join(self.path, name + ".yaml"),
                        "rt", encoding="utf-8"))

        logger.info("Loading parameters for task %s." % name)

        # Here we update the time of the last import
        touch(os.path.join(task_path, ".itime"))
        # If this file is not deleted, then the import failed
        touch(os.path.join(task_path, ".import_error"))

        args = {}

        args["num"] = num
        load(conf, args, ["name", "nome_breve"])
        load(conf, args, ["title", "nome"])

        assert name == args["name"]

        if args["name"] == args["title"]:
            logger.warning("Short name equals long name (title). "
                           "Please check.")

        primary_language = load(conf, None, "primary_language")
        if primary_language is None:
            primary_language = 'it'
        paths = [os.path.join(task_path, "statement", "statement.pdf"),
                 os.path.join(task_path, "testo", "testo.pdf")]
        for path in paths:
            if os.path.exists(path):
                digest = self.file_cacher.put_file_from_path(
                    path,
                    "Statement for task %s (lang: %s)" % (name,
                                                          primary_language))
                break
        else:
            logger.critical("Couldn't find any task statement, aborting...")
            sys.exit(1)
        args["statements"] = [Statement(primary_language, digest)]

        args["primary_statements"] = '["%s"]' % (primary_language)

        args["attachments"] = []  # FIXME Use auxiliary

        args["submission_format"] = [
            SubmissionFormatElement("%s.%%l" % name)]

        # Use the new token settings format if detected.
        if "token_mode" in conf:
            load(conf, args, "token_mode")
            load(conf, args, "token_max_number")
            load(conf, args, "token_min_interval", conv=make_timedelta)
            load(conf, args, "token_gen_initial")
            load(conf, args, "token_gen_number")
            load(conf, args, "token_gen_interval", conv=make_timedelta)
            load(conf, args, "token_gen_max")
        # Otherwise fall back on the old one.
        else:
            logger.warning(
                "%s.yaml uses a deprecated format for token settings which "
                "will soon stop being supported, you're advised to update it.",
                name)
            # Determine the mode.
            if conf.get("token_initial", None) is None:
                args["token_mode"] = "disabled"
            elif conf.get("token_gen_number", 0) > 0 and \
                    conf.get("token_gen_time", 0) == 0:
                args["token_mode"] = "infinite"
            else:
                args["token_mode"] = "finite"
            # Set the old default values.
            args["token_gen_initial"] = 0
            args["token_gen_number"] = 0
            args["token_gen_interval"] = timedelta()
            # Copy the parameters to their new names.
            load(conf, args, "token_total", "token_max_number")
            load(conf, args, "token_min_interval", conv=make_timedelta)
            load(conf, args, "token_initial", "token_gen_initial")
            load(conf, args, "token_gen_number")
            load(conf, args, "token_gen_time", "token_gen_interval",
                 conv=make_timedelta)
            load(conf, args, "token_max", "token_gen_max")
            # Remove some corner cases.
            if args["token_gen_initial"] is None:
                args["token_gen_initial"] = 0
            if args["token_gen_interval"].total_seconds() == 0:
                args["token_gen_interval"] = timedelta(minutes=1)

        load(conf, args, "max_submission_number")
        load(conf, args, "max_user_test_number")
        load(conf, args, "min_submission_interval", conv=make_timedelta)
        load(conf, args, "min_user_test_interval", conv=make_timedelta)

        # Attachments
        args["attachments"] = []
        if os.path.exists(os.path.join(task_path, "att")):
            for filename in os.listdir(os.path.join(task_path, "att")):
                digest = self.file_cacher.put_file_from_path(
                    os.path.join(task_path, "att", filename),
                    "Attachment %s for task %s" % (filename, name))
                args["attachments"] += [Attachment(filename, digest)]

        task = Task(**args)

        args = {}
        args["task"] = task
        args["description"] = conf.get("version", "Default")
        args["autojudge"] = False

        load(conf, args, ["time_limit", "timeout"], conv=float)
        load(conf, args, ["memory_limit", "memlimit"])

        # Builds the parameters that depend on the task type
        args["managers"] = []
        infile_param = conf.get("infile", "input.txt")
        outfile_param = conf.get("outfile", "output.txt")

        # If there is sol/grader.%l for some language %l, then,
        # presuming that the task type is Batch, we retrieve graders
        # in the form sol/grader.%l
        graders = False
        for lang in LANGUAGES:
            if os.path.exists(os.path.join(
                    task_path, "sol", "grader.%s" % lang)):
                graders = True
                break
        if graders:
            # Read grader for each language
            for lang in LANGUAGES:
                grader_filename = os.path.join(
                    task_path, "sol", "grader.%s" % lang)
                if os.path.exists(grader_filename):
                    digest = self.file_cacher.put_file_from_path(
                        grader_filename,
                        "Grader for task %s and language %s" % (name, lang))
                    args["managers"] += [
                        Manager("grader.%s" % lang, digest)]
                else:
                    logger.warning("Grader for language %s not found " % lang)
            # Read managers with other known file extensions
            for other_filename in os.listdir(os.path.join(task_path, "sol")):
                if other_filename.endswith('.h') or \
                        other_filename.endswith('lib.pas'):
                    digest = self.file_cacher.put_file_from_path(
                        os.path.join(task_path, "sol", other_filename),
                        "Manager %s for task %s" % (other_filename, name))
                    args["managers"] += [
                        Manager(other_filename, digest)]
            compilation_param = "grader"
        else:
            compilation_param = "alone"

        # If there is check/checker (or equivalent), then, presuming
        # that the task type is Batch or OutputOnly, we retrieve the
        # comparator
        paths = [os.path.join(task_path, "check", "checker"),
                 os.path.join(task_path, "cor", "correttore")]
        for path in paths:
            if os.path.exists(path):
                digest = self.file_cacher.put_file_from_path(
                    path,
                    "Manager for task %s" % name)
                args["managers"] += [
                    Manager("checker", digest)]
                evaluation_param = "comparator"
                break
        else:
            evaluation_param = "diff"

        # Detect subtasks by checking GEN
        gen_filename = os.path.join(task_path, 'gen', 'GEN')
        try:
            with io.open(gen_filename, "rt", encoding="utf-8") as gen_file:
                subtasks = []
                testcases = 0
                points = None
                for line in gen_file:
                    line = line.strip()
                    splitted = line.split('#', 1)

                    if len(splitted) == 1:
                        # This line represents a testcase, otherwise it's
                        # just a blank
                        if splitted[0] != '':
                            testcases += 1

                    else:
                        testcase, comment = splitted
                        testcase_detected = False
                        subtask_detected = False
                        if testcase.strip() != '':
                            testcase_detected = True
                        comment = comment.strip()
                        if comment.startswith('ST:'):
                            subtask_detected = True

                        if testcase_detected and subtask_detected:
                            raise Exception("No testcase and subtask in the"
                                            " same line allowed")

                        # This line represents a testcase and contains a
                        # comment, but the comment doesn't start a new
                        # subtask
                        if testcase_detected:
                            testcases += 1

                        # This line starts a new subtask
                        if subtask_detected:
                            # Close the previous subtask
                            if points is None:
                                assert(testcases == 0)
                            else:
                                subtasks.append([points, testcases])
                            # Open the new one
                            testcases = 0
                            points = int(comment[3:].strip())

                # Close last subtask (if no subtasks were defined, just
                # fallback to Sum)
                if points is None:
                    args["score_type"] = "Sum"
                    total_value = float(conf.get("total_value", 100.0))
                    input_value = 0.0
                    n_input = testcases
                    if n_input != 0:
                        input_value = total_value / n_input
                    args["score_type_parameters"] = "%s" % input_value
                else:
                    subtasks.append([points, testcases])
                    assert(100 == sum([int(st[0]) for st in subtasks]))
                    n_input = sum([int(st[1]) for st in subtasks])
                    args["score_type"] = "GroupMin"
                    args["score_type_parameters"] = "%s" % subtasks

                if "n_input" in conf:
                    assert int(conf['n_input']) == n_input

        # If gen/GEN doesn't exist, just fallback to Sum
        except IOError:
            args["score_type"] = "Sum"
            total_value = float(conf.get("total_value", 100.0))
            input_value = 0.0
            n_input = int(conf['n_input'])
            if n_input != 0:
                input_value = total_value / n_input
            args["score_type_parameters"] = "%s" % input_value

        # If output_only is set, then the task type is OutputOnly
        if conf.get('output_only', False):
            args["task_type"] = "OutputOnly"
            args["time_limit"] = None
            args["memory_limit"] = None
            args["task_type_parameters"] = '["%s"]' % evaluation_param
            task.submission_format = [
                SubmissionFormatElement("output_%03d.txt" % i)
                for i in xrange(n_input)]

        # If there is check/manager (or equivalent), then the task
        # type is Communication
        else:
            paths = [os.path.join(task_path, "check", "manager"),
                     os.path.join(task_path, "cor", "manager")]
            for path in paths:
                if os.path.exists(path):
                    args["task_type"] = "Communication"
                    args["task_type_parameters"] = '[]'
                    digest = self.file_cacher.put_file_from_path(
                        path,
                        "Manager for task %s" % name)
                    args["managers"] += [
                        Manager("manager", digest)]
                    for lang in LANGUAGES:
                        stub_name = os.path.join(
                            task_path, "sol", "stub.%s" % lang)
                        if os.path.exists(stub_name):
                            digest = self.file_cacher.put_file_from_path(
                                stub_name,
                                "Stub for task %s and language %s" % (name,
                                                                      lang))
                            args["managers"] += [
                                Manager("stub.%s" % lang, digest)]
                        else:
                            logger.warning("Stub for language %s not "
                                           "found." % lang)
                    break

            # Otherwise, the task type is Batch
            else:
                args["task_type"] = "Batch"
                args["task_type_parameters"] = \
                    '["%s", ["%s", "%s"], "%s"]' % \
                    (compilation_param, infile_param, outfile_param,
                     evaluation_param)

        args["testcases"] = []
        for i in xrange(n_input):
            input_digest = self.file_cacher.put_file_from_path(
                os.path.join(task_path, "input", "input%d.txt" % i),
                "Input %d for task %s" % (i, name))
            output_digest = self.file_cacher.put_file_from_path(
                os.path.join(task_path, "output", "output%d.txt" % i),
                "Output %d for task %s" % (i, name))
            args["testcases"] += [
                Testcase("%03d" % i, False, input_digest, output_digest)]
            if args["task_type"] == "OutputOnly":
                task.attachments += [
                    Attachment("input_%03d.txt" % i, input_digest)]
        public_testcases = load(conf, None, ["public_testcases", "risultati"],
                                conv=lambda x: "" if x is None else x)
        if public_testcases != "":
            for x in public_testcases.split(","):
                args["testcases"][int(x.strip())].public = True

        dataset = Dataset(**args)
        task.active_dataset = dataset

        # Import was successful
        os.remove(os.path.join(task_path, ".import_error"))

        logger.info("Task parameters loaded.")

        return task
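Note: the gen/GEN scan above drives the choice between Sum and GroupMin scoring: a line without '#' counts as a testcase, a comment starting with 'ST:' opens a new subtask worth the given points, and mixing both on one line is rejected. A standalone sketch of the same accounting, assuming the same GEN syntax (the helper name is hypothetical):

def parse_gen(lines):
    """Return [[points, n_testcases], ...] or None when no 'ST:' markers
    are present (the caller then falls back to the Sum score type)."""
    subtasks, testcases, points = [], 0, None
    for line in lines:
        parts = line.strip().split('#', 1)
        if len(parts) == 1:
            if parts[0] != '':
                testcases += 1                 # plain testcase line
            continue
        testcase, comment = parts[0].strip(), parts[1].strip()
        if comment.startswith('ST:'):
            # the loader above additionally rejects lines that carry both
            # a testcase and an 'ST:' marker
            if points is not None:
                subtasks.append([points, testcases])   # close previous subtask
            testcases, points = 0, int(comment[3:].strip())
        elif testcase != '':
            testcases += 1                     # testcase with a trailing comment
    if points is None:
        return None
    subtasks.append([points, testcases])
    return subtasks

# parse_gen(["# ST: 0", "sample", "# ST: 40", "1 10", "2 20"])
# -> [[0, 1], [40, 2]]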
Example #10
    def get_task(self, get_statement=True):
        """See docstring in class Loader.

        """

        json_src = os.path.join(self.path, 'problem.json')
        if not os.path.exists(json_src):
            logger.error('No task found.')
            return None
        with open(json_src) as json_file:
            data = json.load(json_file)

        name = data['code']
        logger.info("Loading parameters for task %s.", name)

        args = {}

        # Here we update the time of the last import.
        touch(os.path.join(self.path, ".itime"))
        # If this file is not deleted, then the import failed.
        touch(os.path.join(self.path, ".import_error"))

        args["name"] = name
        args["title"] = data['name']

        # Statements
        if get_statement:
            statements_dir = os.path.join(self.path, 'statements')
            if os.path.exists(statements_dir):
                statements = [
                    filename for filename in os.listdir(statements_dir)
                    if filename[-4:] == ".pdf"
                ]
                if len(statements) > 0:
                    args['statements'] = dict()
                    logger.info('Statements found')
                for statement in statements:
                    language = statement[:-4]
                    if language == "en_US":
                        args["primary_statements"] = '["en_US"]'
                    digest = self.file_cacher.put_file_from_path(
                        os.path.join(statements_dir, statement),
                        "Statement for task %s (lang: %s)" % (name, language))
                    args['statements'][language] = Statement(language, digest)

        # Attachments
        args["attachments"] = dict()
        attachments_dir = os.path.join(self.path, 'attachments')
        if os.path.exists(attachments_dir):
            logger.info("Attachments found")
            for filename in os.listdir(attachments_dir):
                digest = self.file_cacher.put_file_from_path(
                    os.path.join(attachments_dir, filename),
                    "Attachment %s for task %s" % (filename, name))
                args["attachments"][filename] = Attachment(filename, digest)

        data["task_type"] = data["task_type"][0].upper(
        ) + data["task_type"][1:]

        # Setting the submission format
        # Obtaining testcases' codename
        testcases_dir = os.path.join(self.path, 'tests')
        if not os.path.exists(testcases_dir):
            logger.warning('Testcase folder was not found')
            testcase_codenames = []
        else:
            testcase_codenames = sorted([
                filename[:-3] for filename in os.listdir(testcases_dir)
                if filename[-3:] == '.in'
            ])
        if data["task_type"] == 'OutputOnly':
            args["submission_format"] = list()
            for codename in testcase_codenames:
                args["submission_format"].append(
                    SubmissionFormatElement("%s.out" % codename))
        elif data["task_type"] == 'Notice':
            args["submission_format"] = list()
        else:
            args["submission_format"] = [
                SubmissionFormatElement("%s.%%l" % name)
            ]

        # These options cannot be configured in the CPS format.
        # Uncomment the following to set specific values for them.

        # args['max_submission_number'] = 100
        # args['max_user_test_number'] = 100
        # args['min_submission_interval'] = make_timedelta(60)
        # args['min_user_test_interval'] = make_timedelta(60)

        # args['max_user_test_number'] = 10
        # args['min_user_test_interval'] = make_timedelta(60)

        # args['token_mode'] = 'infinite'
        # args['token_max_number'] = 100
        # args['token_min_interval'] = make_timedelta(60)
        # args['token_gen_initial'] = 1
        # args['token_gen_number'] = 1
        # args['token_gen_interval'] = make_timedelta(1800)
        # args['token_gen_max'] = 2
        if "score_precision" in data:
            args['score_precision'] = int(data["score_precision"])
        else:
            args['score_precision'] = 2
        args['max_submission_number'] = 50
        args['max_user_test_number'] = 50
        if data["task_type"] == 'OutputOnly':
            args['max_submission_number'] = 100
            args['max_user_test_number'] = 100

        args['min_submission_interval'] = make_timedelta(60)
        args['min_user_test_interval'] = make_timedelta(60)

        task = Task(**args)

        args = dict()

        args["task"] = task
        args["description"] = "Default"
        args["autojudge"] = True

        if data['task_type'] != 'OutputOnly' and data['task_type'] != 'Notice':
            args["time_limit"] = float(data['time_limit'])
            args["memory_limit"] = int(data['memory_limit'])

        args["managers"] = {}

        # Checker
        checker_dir = os.path.join(self.path, "checker")
        checker_src = os.path.join(checker_dir, "checker.cpp")

        if os.path.exists(checker_src):
            logger.info("Checker found, compiling")
            checker_exe = os.path.join(checker_dir, "checker")
            os.system("g++ -x c++ -std=gnu++14 -O2 -static -o %s %s" %
                      (checker_exe, checker_src))
            digest = self.file_cacher.put_file_from_path(
                checker_exe, "Manager for task %s" % name)
            args["managers"]['checker'] = Manager("checker", digest)
            evaluation_param = "comparator"
        else:
            logger.info("Checker not found, using diff if neccessary")
            evaluation_param = "diff"

        args["task_type"] = data['task_type']
        if data['task_type'] != 'Notice':
            args["task_type"] += '2017'
        args["task_type_parameters"] = \
            self._get_task_type_parameters(data, data['task_type'], evaluation_param)

        # Graders
        graders_dir = os.path.join(self.path, 'graders')

        if data['task_type'] == 'TwoSteps':
            pas_manager = name + 'lib.pas'
            pas_manager_path = os.path.join(graders_dir, pas_manager)
            if not os.path.exists(pas_manager_path):
                digest = self.file_cacher.put_file_content(
                    ''.encode('utf-8'), 'Pascal manager for task %s' % name)
                args["managers"][pas_manager] = Manager(pas_manager, digest)

        if not os.path.exists(graders_dir):
            logger.warning('Grader folder was not found')
            graders_list = []
        else:
            graders_list = \
                [filename for filename in os.listdir(graders_dir) if filename != 'manager.cpp']
        for grader_name in graders_list:
            grader_src = os.path.join(graders_dir, grader_name)
            digest = self.file_cacher.put_file_from_path(
                grader_src, "Manager for task %s" % name)
            args["managers"][grader_name] = Manager(grader_name, digest)

        # Manager
        manager_src = os.path.join(graders_dir, 'manager.cpp')

        if os.path.exists(manager_src):
            logger.info("Manager found, compiling")
            manager_exe = os.path.join(graders_dir, "manager")
            os.system("cat %s | \
                            g++ -x c++ -O2 -static -o %s -" %
                      (manager_src, manager_exe))
            digest = self.file_cacher.put_file_from_path(
                manager_exe, "Manager for task %s" % name)
            args["managers"]["manager"] = Manager("manager", digest)

        # Testcases
        args["testcases"] = {}

        for codename in testcase_codenames:
            infile = os.path.join(testcases_dir, "%s.in" % codename)
            outfile = os.path.join(testcases_dir, "%s.out" % codename)
            if not os.path.exists(outfile):
                logger.critical(
                    'Could not find the output file for testcase %s',
                    codename)
                logger.critical('Aborting...')
                return

            input_digest = self.file_cacher.put_file_from_path(
                infile, "Input %s for task %s" % (codename, name))
            output_digest = self.file_cacher.put_file_from_path(
                outfile, "Output %s for task %s" % (codename, name))
            testcase = Testcase(codename, True, input_digest, output_digest)
            args["testcases"][codename] = testcase

        # Score Type
        subtasks_dir = os.path.join(self.path, 'subtasks')
        if not os.path.exists(subtasks_dir):
            logger.warning('Subtask folder was not found')
            subtasks = []
        else:
            subtasks = sorted(os.listdir(subtasks_dir))

        if len(subtasks) == 0:
            number_tests = max(len(testcase_codenames), 1)
            args["score_type"] = "Sum"
            args["score_type_parameters"] = str(100 / number_tests)
        else:
            args["score_type"] = "GroupMinWithMaxScore"
            parsed_data = [
                100,
            ]
            subtask_no = -1
            add_optional_name = False
            for subtask in subtasks:
                subtask_no += 1
                with open(os.path.join(subtasks_dir, subtask)) as subtask_json:
                    subtask_data = json.load(subtask_json)
                    score = int(subtask_data["score"])
                    testcases = "|".join(
                        re.escape(testcase)
                        for testcase in subtask_data["testcases"])
                    optional_name = "Subtask %d" % subtask_no
                    if subtask_no == 0 and score == 0:
                        add_optional_name = True
                        optional_name = "Samples"
                    if add_optional_name:
                        parsed_data.append([score, testcases, optional_name])
                    else:
                        parsed_data.append([score, testcases])
            args["score_type_parameters"] = json.dumps(parsed_data)
        args["description"] = datetime.utcnow()\
            .strftime("%Y-%m-%d %H:%M:%S %Z%z")

        dataset = Dataset(**args)
        task.active_dataset = dataset

        os.remove(os.path.join(self.path, ".import_error"))

        logger.info("Task parameters loaded.")

        return task
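Note: the subtasks folder above is turned into GroupMinWithMaxScore parameters of the form [max_total, [score, testcase_regex(, optional_name)], ...], with each subtask's testcase codenames joined into an escaped regex alternation. A hedged sketch of that step, assuming per-subtask JSON files shaped like {"score": ..., "testcases": [...]} as in the loop above (the function name and flag are illustrative):

import json
import re

def build_group_params(subtask_paths, max_score=100, add_names=False):
    """Build [max_score, [score, regex(, name)], ...] as in the loader above."""
    params = [max_score]
    for number, path in enumerate(sorted(subtask_paths)):
        with open(path) as subtask_json:
            subtask_data = json.load(subtask_json)
        score = int(subtask_data["score"])
        regex = "|".join(re.escape(t) for t in subtask_data["testcases"])
        name = "Samples" if number == 0 and score == 0 else "Subtask %d" % number
        params.append([score, regex, name] if add_names else [score, regex])
    return json.dumps(params)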
Example #11
    def get_task(self, name):
        """See docstring in class Loader.

        """
        try:
            num = self.tasks_order[name]

        # Here we expose an undocumented behavior, so that cmsMake can
        # import a task even without the whole contest; this is not to
        # be relied upon in general
        except AttributeError:
            num = 1

        task_path = os.path.join(self.path, "problems", name)

        logger.info("Loading parameters for task %s." % name)

        args = {}

        # Here we update the time of the last import
        touch(os.path.join(task_path, ".itime"))
        # If this file is not deleted, then the import failed
        touch(os.path.join(task_path, ".import_error"))

        args["num"] = num

        # get alphabetical task index for use in title

        index = None
        contest_tree = ET.parse(os.path.join(self.path, "contest.xml"))
        contest_root = contest_tree.getroot()
        for problem in contest_root.find('problems'):
            if os.path.basename(problem.attrib['url']) == name:
                index = problem.attrib['index']

        tree = ET.parse(os.path.join(task_path, "problem.xml"))
        root = tree.getroot()

        args["name"] = name
        if index is not None:
            args["title"] = index.upper() + '. '
        else:
            args["title"] = ''
        args["title"] += root.find('names') \
            .find("name[@language='%s']" % self.primary_language) \
            .attrib['value']

        args["statements"] = []
        args["primary_statements"] = []
        for language in self.languages:
            path = os.path.join(task_path, 'statements',
                                '.pdf', language, 'problem.pdf')
            if os.path.exists(path):
                lang = LANGUAGE_MAP[language]
                digest = self.file_cacher.put_file_from_path(
                    path,
                    "Statement for task %s (lang: %s)" % (name,
                                                          language))
                args["statements"].append(Statement(lang, digest))
                args["primary_statements"].append(lang)
        args["primary_statements"] = '["%s"]' % \
            '","'.join(args["primary_statements"])
        args["submission_format"] = [SubmissionFormatElement("%s.%%l" % name)]

#        args['max_submission_number'] = 100
#        args['max_user_test_number'] = 100
#        args['min_submission_interval'] = make_timedelta(60)
#        args['min_user_test_interval'] = make_timedelta(60)

#        args['max_user_test_number'] = 10
#        args['min_user_test_interval'] = make_timedelta(60)

#        args['token_mode'] = 'infinite'
#        args['token_max_number'] = 100
#        args['token_min_interval'] = make_timedelta(60)
#        args['token_gen_initial'] = 1
#        args['token_gen_number'] = 1
#        args['token_gen_interval'] = make_timedelta(1800)
#        args['token_gen_max'] = 2

        task_cms_conf_path = os.path.join(task_path, 'files')
        task_cms_conf = None
        if os.path.exists(os.path.join(task_cms_conf_path, 'cms_conf.py')):
            sys.path.append(task_cms_conf_path)
            logger.info("Found additional CMS options for task %s." % name)
            task_cms_conf = __import__('cms_conf')
            # TODO: probably should find more clever way to get rid of caching
            task_cms_conf = reload(task_cms_conf)
            sys.path.pop()
        if task_cms_conf is not None and hasattr(task_cms_conf, "general"):
            args.update(task_cms_conf.general)

        task = Task(**args)

        judging = root.find('judging')
        testset = None
        for testset in judging:
            testset_name = testset.attrib["name"]

            args = {}
            args["task"] = task
            args["description"] = testset_name
            args["autojudge"] = False

            tl = float(testset.find('time-limit').text)
            ml = float(testset.find('memory-limit').text)
            args["time_limit"] = tl * 0.001
            args["memory_limit"] = int(ml / (1024 * 1024))

            args["managers"] = []
            infile_param = judging.attrib['input-file']
            outfile_param = judging.attrib['output-file']

            checker_src = os.path.join(task_path, "files", "check.cpp")
            if os.path.exists(checker_src):
                logger.info("Checker found, compiling")
                checker_exe = os.path.join(task_path, "files", "checker")
                os.system("cat %s | \
                    sed 's$testlib.h$/usr/local/include/cms/testlib.h$' | \
                    g++ -x c++ -O2 -static -o %s -" %
                          (checker_src, checker_exe))
                digest = self.file_cacher.put_file_from_path(
                    checker_exe,
                    "Manager for task %s" % name)
                args["managers"] += [
                    Manager("checker", digest)]
                evaluation_param = "comparator"
            else:
                logger.info("Checker not found, using diff")
                evaluation_param = "diff"

            args["task_type"] = "Batch"
            args["task_type_parameters"] = \
                '["%s", ["%s", "%s"], "%s"]' % \
                ("alone", infile_param, outfile_param, evaluation_param)

            args["score_type"] = "Sum"
            total_value = 100.0
            input_value = 0.0

            testcases = int(testset.find('test-count').text)

            n_input = testcases
            if n_input != 0:
                input_value = total_value / n_input
            args["score_type_parameters"] = str(input_value)

            args["testcases"] = []

            for i in xrange(testcases):
                infile = os.path.join(task_path, testset_name,
                                      "%02d" % (i + 1))
                outfile = os.path.join(task_path, testset_name,
                                       "%02d.a" % (i + 1))
                if self.dos2unix_found:
                    os.system('dos2unix -q %s' % (infile, ))
                    os.system('dos2unix -q %s' % (outfile, ))
                input_digest = self.file_cacher.put_file_from_path(
                    infile,
                    "Input %d for task %s" % (i, name))
                output_digest = self.file_cacher.put_file_from_path(
                    outfile,
                    "Output %d for task %s" % (i, name))
                testcase = Testcase("%03d" % (i, ), False,
                                    input_digest, output_digest)
                testcase.public = True
                args["testcases"] += [testcase]

            if task_cms_conf is not None and \
               hasattr(task_cms_conf, "datasets") and \
               testset_name in task_cms_conf.datasets:
                args.update(task_cms_conf.datasets[testset_name])

            dataset = Dataset(**args)
            if testset_name == "tests":
                task.active_dataset = dataset

        os.remove(os.path.join(task_path, ".import_error"))

        logger.info("Task parameters loaded.")
        return task
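Note: Polygon's problem.xml expresses the time limit in milliseconds and the memory limit in bytes, which is why the loader multiplies by 0.001 and divides by 1024 * 1024. A small sketch of that conversion, assuming a testset element like the one iterated over above:

def polygon_limits(testset):
    """Convert Polygon limits (ms, bytes) into CMS limits (s, MiB)."""
    tl = float(testset.find('time-limit').text)       # e.g. "2000" ms
    ml = float(testset.find('memory-limit').text)     # e.g. "268435456" bytes
    return tl * 0.001, int(ml / (1024 * 1024))        # -> (2.0, 256)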
Example #12
 def get_task(self, get_statement=True):
     # Name
     name = os.path.split(self.path)[1]
     # Check for required files
     if not self.__require_file("problem.json"):
         return None
     # Load JSON
     problem_json = json.loads(
         open(os.path.join(self.path, 'problem.json'), 'r').read())
     problem = problem_json['problem']
     # Load info
     args = {}
     args['name'] = name
     args['title'] = problem['name']
     logger.info("Loading parameters for task %s.", name)
     # Load statement
     if get_statement:
         language = 'ru'
         path = os.path.join(self.path, '..', '..', 'statements',
                             name + '.pdf')
         if os.path.exists(path):
             digest = self.file_cacher.put_file_from_path(
                 path,
                 "Statement for task %s (lang: %s)" % (name, language))
             args['statements'] = [Statement(language, digest)]
             args['primary_statements'] = '["%s"]' % (language)
         else:
             logger.error('No statements found for problem "%s"' % (name))
     # Load other properties
     args['submission_format'] = [SubmissionFormatElement('%s.%%l' % name)]
     self.__load_token_submission_info(os.path.join(self.path, '..', '..'),
                                       args)
     args['score_mode'] = SCORE_MODE_MAX_TOKENED_LAST
     contest_mode = self.__get_contest_mode(
         os.path.join(self.path, '..', '..'))
     if contest_mode != 'running' and contest_mode != 'final':
         logger.critical('Invalid contest mode')
         return None
     task = Task(**args)
     # Load dataset info
     args = {}
     args['task'] = task
     args['description'] = ''
     args['autojudge'] = False
     args['time_limit'] = problem['timeLimit']
     args['memory_limit'] = problem['memoryLimit']
     args['managers'] = []
     # Add checker
     checker_src = os.path.join(self.path, 'checker.cpp')
     checker_exe = os.path.join(self.path, 'checker')
     if os.path.exists(checker_src):
         logger.info("Checker found, compiling")
         os.system("g++ -x c++ -O2 -static -DCMS -o %s %s" %
                   (checker_exe, checker_src))
         digest = self.file_cacher.put_file_from_path(
             checker_exe, "Manager for task %s" % name)
         args['managers'] += [Manager('checker', digest)]
         evaluation_param = 'comparator'
     else:
         logger.info("Checker not found, using diff")
         evaluation_param = 'diff'
     # Add testcases
     args['testcases'] = []
     pretest_cnt = self.__add_tests('pretests', task, args, 0, True,
                                    contest_mode)
     self.__add_tests('tests', task, args, pretest_cnt, False, contest_mode)
     # Add input/output
     infile_param = problem['input']
     outfile_param = problem['output']
     args["task_type"] = "Batch"
     args["task_type_parameters"] = \
         '["%s", ["%s", "%s"], "%s"]' % \
         ("alone", infile_param, outfile_param, evaluation_param)
     if problem['scoreType'] == 'subtask':
         subtasks = problem['subtasks']
         if contest_mode == 'running':
             subtasks = [[1, 1]] * pretest_cnt + subtasks
         else:
             subtasks = [[0, pretest_cnt]] + subtasks
         args['score_type'] = 'GroupMin'
         args['score_type_parameters'] = str(subtasks)
     elif problem['scoreType'] == 'byTest':
         args['score_type'] = 'Sum'
         args['score_type_parameters'] = str(problem['cost'])
     else:
         logger.critical('Unknown scoring type: %s' % problem['scoreType'])
     # Finalize dataset
     dataset = Dataset(**args)
     task.active_dataset = dataset
     # Import was successful
     logger.info("Task parameters loaded.")
     return task
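Note: several of the loaders above build the Batch task type parameters by hand-formatting a JSON string such as '["alone", ["input.txt", "output.txt"], "diff"]'. A sketch of the same structure produced with json.dumps instead of manual interpolation (the defaults mirror the values used above):

import json

def batch_parameters(compilation="alone", infile="", outfile="",
                     evaluation="diff"):
    """Return the Batch task_type_parameters string used above."""
    return json.dumps([compilation, [infile, outfile], evaluation])

# batch_parameters("alone", "input.txt", "output.txt", "comparator")
# -> '["alone", ["input.txt", "output.txt"], "comparator"]'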
Example #13
    def post(self):
        fallback_page = "/tasks/add"

        try:
            attrs = dict()

            self.get_string(attrs, "name", empty=None)
            self.get_string(attrs, "title")

            assert attrs.get("name") is not None, "No task name specified."

            self.get_string(attrs, "primary_statements")

            self.get_submission_format(attrs)

            self.get_string(attrs, "token_mode")
            self.get_int(attrs, "token_max_number")
            self.get_timedelta_sec(attrs, "token_min_interval")
            self.get_int(attrs, "token_gen_initial")
            self.get_int(attrs, "token_gen_number")
            self.get_timedelta_min(attrs, "token_gen_interval")
            self.get_int(attrs, "token_gen_max")

            self.get_int(attrs, "max_submission_number")
            self.get_int(attrs, "max_user_test_number")
            self.get_timedelta_sec(attrs, "min_submission_interval")
            self.get_timedelta_sec(attrs, "min_user_test_interval")

            self.get_int(attrs, "score_precision")

            self.get_string(attrs, "score_mode")

            # Create the task.
            task = Task(**attrs)
            self.sql_session.add(task)

        except Exception as error:
            self.application.service.add_notification(
                make_datetime(), "Invalid field(s)", repr(error))
            self.redirect(fallback_page)
            return

        try:
            attrs = dict()

            self.get_time_limit(attrs, "time_limit")
            self.get_memory_limit(attrs, "memory_limit")
            self.get_task_type(attrs, "task_type", "TaskTypeOptions_")
            self.get_score_type(attrs, "score_type", "score_type_parameters")

            # Create its first dataset.
            attrs["description"] = "Default"
            attrs["autojudge"] = True
            attrs["task"] = task
            dataset = Dataset(**attrs)
            self.sql_session.add(dataset)

            # Make the dataset active. Life works better that way.
            task.active_dataset = dataset

        except Exception as error:
            self.application.service.add_notification(
                make_datetime(), "Invalid field(s)", repr(error))
            self.redirect(fallback_page)
            return

        if self.try_commit():
            # Create the task on RWS.
            self.application.service.proxy_service.reinitialize()
            self.redirect("/task/%s" % task.id)
        else:
            self.redirect(fallback_page)
Example #14
    def get_task(self, get_statement=True):
        """See docstring in class Loader.

        """

        json_src = os.path.join(self.path, 'problem.json')
        if not os.path.exists(json_src):
            logger.critical('No task found.')
            raise IOError('No task found at path %s' % json_src)
        with io.open(json_src, 'rt', encoding='utf-8') as json_file:
            data = json.load(json_file)

        name = data['name']
        logger.info("Loading parameters for task %s.", name)

        args = {}

        args["name"] = name
        if 'problem_label' in data:
            args['title'] = '{}. {}'.format(data['problem_label'],
                                            data['title'])
        else:
            args['title'] = data['title']

        # Statements
        if get_statement:
            statements_dir = os.path.join(self.path, 'statement')
            if os.path.exists(statements_dir):
                statements = [
                    filename for filename in os.listdir(statements_dir)
                    if filename[-4:] == ".pdf"
                ]
                if len(statements) > 0:
                    args['statements'] = dict()
                    logger.info('Statements found')
                for statement in statements:
                    language = statement[:-4]
                    if language == "en_US":
                        args["primary_statements"] = ["en_US"]
                    digest = self.file_cacher.put_file_from_path(
                        os.path.join(statements_dir, statement),
                        "Statement for task %s (lang: %s)" % (name, language))
                    args['statements'][language] = Statement(language, digest)

        # Attachments
        if get_statement:
            args["attachments"] = dict()
            attachments_dir = os.path.join(self.path, 'attachments')
            if os.path.exists(attachments_dir):
                logger.info("Attachments found")
                for filename in os.listdir(attachments_dir):
                    digest = self.file_cacher.put_file_from_path(
                        os.path.join(attachments_dir, filename),
                        "Attachment %s for task %s" % (filename, name))
                    args["attachments"][filename] = Attachment(
                        filename, digest)

        data["task_type"] = \
            data["task_type"][0].upper() + data["task_type"][1:]

        # Setting the submission format
        # Obtaining testcases' codename
        testcases_dir = os.path.join(self.path, 'tests')
        if not os.path.exists(testcases_dir):
            logger.warning('Testcase folder was not found')
            testcase_codenames = []
        else:
            testcase_codenames = sorted([
                filename[:-3] for filename in os.listdir(testcases_dir)
                if filename[-3:] == '.in'
            ])
        if data["task_type"] == 'OutputOnly':
            args["submission_format"] = list()
            for codename in testcase_codenames:
                args["submission_format"].append("%s.out" % codename)
        elif data["task_type"] == 'Notice':
            args["submission_format"] = list()
        else:
            args["submission_format"] = ["%s.%%l" % name]

        # Task information
        if 'feedback_level' in data:
            args['feedback_level'] = data['feedback_level']

        # Tokens parameters

        # Limits
        if 'max_submission_number' in data:
            args['max_submission_number'] = data['max_submission_number']
        if 'max_user_test_number' in data:
            args['max_user_test_number'] = data['max_user_test_number']
        if 'min_submission_interval' in data:
            if data['min_submission_interval'] is None:
                args['min_submission_interval'] = None
            else:
                args['min_submission_interval'] = make_timedelta(
                    data['min_submission_interval'])
        if 'min_user_test_interval' in data:
            if data['min_user_test_interval'] is None:
                args['min_user_test_interval'] = None
            else:
                args['min_user_test_interval'] = make_timedelta(
                    data['min_user_test_interval'])

        # Score options
        if 'score_precision' in data:
            args['score_precision'] = int(data['score_precision'])
        if 'score_mode' in data:
            args['score_mode'] = data['score_mode']

        task = Task(**args)

        ignore_datasets = data.get('ignore_datasets', False)

        if ignore_datasets:
            logger.info("Task parameters loaded.")
            logger.info("Dataset loading skipped.")

            return task

        args = dict()

        args["task"] = task
        args["description"] = "Default"
        args["autojudge"] = True

        if data['task_type'] != 'OutputOnly' \
                and data['task_type'] != 'Notice':
            args["time_limit"] = float(data['time_limit'])
            args["memory_limit"] = int(data['memory_limit'])

        args["managers"] = {}

        # Checker
        checker_dir = os.path.join(self.path, "checker")
        checker_src = os.path.join(checker_dir, "checker.cpp")
        checker_py = os.path.join(checker_dir, "checker.py")

        has_checker = data.get('has_checker', False)

        if not has_checker:
            logger.info("Checker is ignored, using diff if necessary")
            evaluation_param = "diff"
        elif os.path.exists(checker_src):
            logger.info("Checker found, compiling")
            checker_exe = os.path.join(checker_dir, "checker")
            subprocess.call([
                "g++", "-x", "c++", "-std=gnu++14", "-O2", "-static", "-o",
                checker_exe, checker_src
            ])
            digest = self.file_cacher.put_file_from_path(
                checker_exe, "Manager for task %s" % name)
            args["managers"]['checker'] = Manager("checker", digest)
            evaluation_param = "comparator"
        elif os.path.exists(checker_py):
            logger.info("Checker found, compiling")
            checker_exe = os.path.join(checker_dir, "checker")
            subprocess.call(["cp", checker_py, checker_exe])
            subprocess.call(["chmod", "+x", checker_exe])
            digest = self.file_cacher.put_file_from_path(
                checker_exe, "Manager for task %s" % name)
            args["managers"]['checker'] = Manager("checker", digest)
            evaluation_param = "comparator"
        else:
            logger.info("Checker not found, using diff if necessary")
            evaluation_param = "diff"

        # Note that the original TPS worked with custom task type Batch2017
        # and Communication2017 instead of Batch and Communication.
        args["task_type"] = data['task_type']
        args["task_type_parameters"] = \
            self._get_task_type_parameters(
                data, data['task_type'], evaluation_param)

        # Graders
        graders_dir = os.path.join(self.path, 'graders')

        if data['task_type'] == 'TwoSteps':
            pas_manager = name + 'lib.pas'
            pas_manager_path = os.path.join(graders_dir, pas_manager)
            if not os.path.exists(pas_manager_path):
                digest = self.file_cacher.put_file_content(
                    ''.encode('utf-8'), 'Pascal manager for task %s' % name)
                args["managers"][pas_manager] = Manager(pas_manager, digest)

        if not os.path.exists(graders_dir):
            logger.warning('Grader folder was not found')
            graders_list = []
        else:
            graders_list = \
                [filename
                 for filename in os.listdir(graders_dir)
                 if filename != 'manager.cpp']
        for grader_name in graders_list:
            grader_src = os.path.join(graders_dir, grader_name)
            digest = self.file_cacher.put_file_from_path(
                grader_src, "Manager for task %s" % name)
            if data['task_type'] == 'Communication' \
                    and os.path.splitext(grader_name)[0] == 'grader':
                grader_name = 'stub' + os.path.splitext(grader_name)[1]
            args["managers"][grader_name] = Manager(grader_name, digest)

        # Manager
        manager_src = os.path.join(graders_dir, 'manager.cpp')

        if os.path.exists(manager_src):
            logger.info("Manager found, compiling")
            manager_exe = os.path.join(graders_dir, "manager")
            subprocess.call([
                "g++", "-x", "c++", "-O2", "-static", "-o", manager_exe,
                manager_src
            ])
            digest = self.file_cacher.put_file_from_path(
                manager_exe, "Manager for task %s" % name)
            args["managers"]["manager"] = Manager("manager", digest)

        # Testcases
        args["testcases"] = {}

        for codename in testcase_codenames:
            infile = os.path.join(testcases_dir, "%s.in" % codename)
            outfile = os.path.join(testcases_dir, "%s.out" % codename)
            if not os.path.exists(outfile):
                logger.critical(
                    'Could not find the output file for testcase %s', codename)
                logger.critical('Aborting...')
                return

            input_digest = self.file_cacher.put_file_from_path(
                infile, "Input %s for task %s" % (codename, name))
            output_digest = self.file_cacher.put_file_from_path(
                outfile, "Output %s for task %s" % (codename, name))
            testcase = Testcase(codename, True, input_digest, output_digest)
            args["testcases"][codename] = testcase

        # Score Type
        subtasks_json_src = os.path.join(self.path, 'subtasks.json')
        if not os.path.exists(subtasks_json_src):
            number_tests = max(len(testcase_codenames), 1)
            args["score_type"] = "Sum"
            args["score_type_parameters"] = 100 / number_tests
        else:
            args["score_type"] = "GroupMin"
            parsed_data = []
            subtask_no = -1
            mapping_src = os.path.join(self.path, 'tests', 'mapping')
            with open(subtasks_json_src, 'rt', encoding='utf-8') as json_file:
                subtasks_data = json.load(json_file)

            use_mapping = os.path.exists(mapping_src)
            if use_mapping:
                mapping_data = {}
                for subtask in subtasks_data['subtasks']:
                    mapping_data[subtask] = []
                with open(mapping_src, 'rt', encoding='utf-8') as mapping_file:
                    for row in mapping_file:
                        row = row.strip().split(' ')
                        if len(row) == 2:
                            mapping_data[row[0]].append(row[1])

            add_optional_name = data.get('add_optional_name', False)

            for subtask, subtask_data in subtasks_data['subtasks'].items():
                subtask_no += 1
                score = int(subtask_data["score"])
                if use_mapping:
                    testcases = "|".join(
                        re.escape(testcase)
                        for testcase in mapping_data[subtask])
                    if testcases == '':
                        testcases = '|NO_TESTCASES_AVAILABLE'
                else:
                    testcases = subtask_data["regex"]
                optional_name = "Subtask %d" % subtask_no
                if subtask_no == 0 and score == 0:
                    optional_name = "Samples"
                if add_optional_name:
                    parsed_data.append([score, testcases, optional_name])
                else:
                    parsed_data.append([score, testcases])
            args["score_type_parameters"] = parsed_data

        dataset = Dataset(**args)
        task.active_dataset = dataset

        logger.info("Task parameters loaded.")

        return task
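Note: when a tests/mapping file is present, the loader above reads one "subtask testcase" pair per line and joins each subtask's codenames into an escaped regex alternation for GroupMin. A standalone sketch of that step, assuming the same file layout (the helper name is hypothetical):

import re

def read_mapping(path, subtask_names):
    """Return {subtask: 'tc1|tc2|...'} from a 'subtask testcase' per-line file."""
    groups = {name: [] for name in subtask_names}
    with open(path, 'rt', encoding='utf-8') as mapping_file:
        for row in mapping_file:
            row = row.strip().split(' ')
            if len(row) == 2:
                groups[row[0]].append(row[1])
    return {
        name: "|".join(re.escape(t) for t in tests) or "|NO_TESTCASES_AVAILABLE"
        for name, tests in groups.items()
    }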
Example #15
    def get_task(self, get_statement=True):
        """See docstring in class TaskLoader."""
        name = os.path.split(self.path)[1]

        if (not os.path.exists(os.path.join(self.path, "task.yaml"))) and \
           (not os.path.exists(os.path.join(self.path, "..", name + ".yaml"))):
            logger.critical("File missing: \"task.yaml\"")
            return None

        # We first look for the yaml file inside the task folder,
        # and eventually fallback to a yaml file in its parent folder.
        try:
            conf = load_yaml_from_path(os.path.join(self.path, "task.yaml"))
        except OSError as err:
            try:
                deprecated_path = os.path.join(self.path, "..", name + ".yaml")
                conf = load_yaml_from_path(deprecated_path)

                logger.warning("You're using a deprecated location for the "
                               "task.yaml file. You're advised to move %s to "
                               "%s.", deprecated_path,
                               os.path.join(self.path, "task.yaml"))
            except OSError:
                # Since both task.yaml and the (deprecated) "../taskname.yaml"
                # are missing, we will only warn the user that task.yaml is
                # missing (to avoid encouraging the use of the deprecated one)
                raise err

        # Here we update the time of the last import
        touch(os.path.join(self.path, ".itime"))
        # If this file is not deleted, then the import failed
        touch(os.path.join(self.path, ".import_error"))

        args = {}

        load(conf, args, ["name", "nome_breve"])
        load(conf, args, ["title", "nome"])

        if name != args["name"]:
            logger.info("The task name (%s) and the directory name (%s) are "
                        "different. The former will be used.", args["name"],
                        name)

        if args["name"] == args["title"]:
            logger.warning("Short name equals long name (title). "
                           "Please check.")

        name = args["name"]

        logger.info("Loading parameters for task %s.", name)

        if get_statement:
            primary_language = load(conf, None, "primary_language")
            if primary_language is None:
                primary_language = 'it'
            paths = [os.path.join(self.path, "statement", "statement.pdf"),
                     os.path.join(self.path, "testo", "testo.pdf")]
            for path in paths:
                if os.path.exists(path):
                    digest = self.file_cacher.put_file_from_path(
                        path,
                        "Statement for task %s (lang: %s)" %
                        (name, primary_language))
                    break
            else:
                logger.critical("Couldn't find any task statement, aborting.")
                sys.exit(1)
            args["statements"] = {
                primary_language: Statement(primary_language, digest)
            }

            args["primary_statements"] = [primary_language]

        args["submission_format"] = ["%s.%%l" % name]

        # Import the feedback level when explicitly set to full
        # (default behaviour is restricted)
        if conf.get("feedback_level", None) == FEEDBACK_LEVEL_FULL:
            args["feedback_level"] = FEEDBACK_LEVEL_FULL
        elif conf.get("feedback_level", None) == FEEDBACK_LEVEL_RESTRICTED:
            args["feedback_level"] = FEEDBACK_LEVEL_RESTRICTED

        if conf.get("score_mode", None) == SCORE_MODE_MAX:
            args["score_mode"] = SCORE_MODE_MAX
        elif conf.get("score_mode", None) == SCORE_MODE_MAX_SUBTASK:
            args["score_mode"] = SCORE_MODE_MAX_SUBTASK
        elif conf.get("score_mode", None) == SCORE_MODE_MAX_TOKENED_LAST:
            args["score_mode"] = SCORE_MODE_MAX_TOKENED_LAST

        # Use the new token settings format if detected.
        if "token_mode" in conf:
            load(conf, args, "token_mode")
            load(conf, args, "token_max_number")
            load(conf, args, "token_min_interval", conv=make_timedelta)
            load(conf, args, "token_gen_initial")
            load(conf, args, "token_gen_number")
            load(conf, args, "token_gen_interval", conv=make_timedelta)
            load(conf, args, "token_gen_max")
        # Otherwise fall back on the old one.
        else:
            logger.warning(
                "task.yaml uses a deprecated format for token settings which "
                "will soon stop being supported, you're advised to update it.")
            # Determine the mode.
            if conf.get("token_initial", None) is None:
                args["token_mode"] = TOKEN_MODE_DISABLED
            elif conf.get("token_gen_number", 0) > 0 and \
                    conf.get("token_gen_time", 0) == 0:
                args["token_mode"] = TOKEN_MODE_INFINITE
            else:
                args["token_mode"] = TOKEN_MODE_FINITE
            # Set the old default values.
            args["token_gen_initial"] = 0
            args["token_gen_number"] = 0
            args["token_gen_interval"] = timedelta()
            # Copy the parameters to their new names.
            load(conf, args, "token_total", "token_max_number")
            load(conf, args, "token_min_interval", conv=make_timedelta)
            load(conf, args, "token_initial", "token_gen_initial")
            load(conf, args, "token_gen_number")
            load(conf, args, "token_gen_time", "token_gen_interval",
                 conv=make_timedelta)
            load(conf, args, "token_max", "token_gen_max")
            # Remove some corner cases.
            if args["token_gen_initial"] is None:
                args["token_gen_initial"] = 0
            if args["token_gen_interval"].total_seconds() == 0:
                args["token_gen_interval"] = timedelta(minutes=1)

        load(conf, args, "max_submission_number")
        load(conf, args, "max_user_test_number")
        load(conf, args, "min_submission_interval", conv=make_timedelta)
        load(conf, args, "min_user_test_interval", conv=make_timedelta)

        # Attachments
        args["attachments"] = dict()
        if os.path.exists(os.path.join(self.path, "att")):
            for filename in os.listdir(os.path.join(self.path, "att")):
                digest = self.file_cacher.put_file_from_path(
                    os.path.join(self.path, "att", filename),
                    "Attachment %s for task %s" % (filename, name))
                args["attachments"][filename] = Attachment(filename, digest)

        task = Task(**args)

        args = {}
        args["task"] = task
        args["description"] = conf.get("version", "Default")
        args["autojudge"] = False

        load(conf, args, ["time_limit", "timeout"], conv=float)
        # The Italian YAML format specifies memory limits in MiB.
        load(conf, args, ["memory_limit", "memlimit"],
             conv=lambda mb: mb * 1024 * 1024)

        # Builds the parameters that depend on the task type
        args["managers"] = []
        infile_param = conf.get("infile", "input.txt")
        outfile_param = conf.get("outfile", "output.txt")

        # If there is sol/grader.%l for some language %l, then,
        # presuming that the task type is Batch, we retrieve graders
        # in the form sol/grader.%l
        graders = False
        for lang in LANGUAGES:
            if os.path.exists(os.path.join(
                    self.path, "sol", "grader%s" % lang.source_extension)):
                graders = True
                break
        if graders:
            # Read grader for each language
            for lang in LANGUAGES:
                extension = lang.source_extension
                grader_filename = os.path.join(
                    self.path, "sol", "grader%s" % extension)
                if os.path.exists(grader_filename):
                    digest = self.file_cacher.put_file_from_path(
                        grader_filename,
                        "Grader for task %s and language %s" %
                        (task.name, lang))
                    args["managers"] += [
                        Manager("grader%s" % extension, digest)]
                else:
                    logger.warning("Grader for language %s not found ", lang)
            # Read managers with other known file extensions
            for other_filename in os.listdir(os.path.join(self.path, "sol")):
                if any(other_filename.endswith(header)
                       for header in HEADER_EXTS):
                    digest = self.file_cacher.put_file_from_path(
                        os.path.join(self.path, "sol", other_filename),
                        "Manager %s for task %s" % (other_filename, task.name))
                    args["managers"] += [
                        Manager(other_filename, digest)]
            compilation_param = "grader"
        else:
            compilation_param = "alone"

        # If there is check/checker (or equivalent), then, presuming
        # that the task type is Batch or OutputOnly, we retrieve the
        # comparator
        paths = [os.path.join(self.path, "check", "checker"),
                 os.path.join(self.path, "cor", "correttore")]
        for path in paths:
            if os.path.exists(path):
                digest = self.file_cacher.put_file_from_path(
                    path,
                    "Manager for task %s" % task.name)
                args["managers"] += [
                    Manager("checker", digest)]
                evaluation_param = "comparator"
                break
        else:
            evaluation_param = "diff"

        # Detect subtasks by checking GEN
        gen_filename = os.path.join(self.path, 'gen', 'GEN')
        try:
            with open(gen_filename, "rt", encoding="utf-8") as gen_file:
                subtasks = []
                testcases = 0
                points = None
                for line in gen_file:
                    line = line.strip()
                    splitted = line.split('#', 1)

                    if len(splitted) == 1:
                        # This line represents a testcase, otherwise
                        # it's just a blank
                        if splitted[0] != '':
                            testcases += 1

                    else:
                        testcase, comment = splitted
                        testcase = testcase.strip()
                        comment = comment.strip()
                        testcase_detected = len(testcase) > 0
                        copy_testcase_detected = comment.startswith("COPY:")
                        subtask_detected = comment.startswith('ST:')

                        flags = [testcase_detected,
                                 copy_testcase_detected,
                                 subtask_detected]
                        if len([x for x in flags if x]) > 1:
                            raise Exception("No testcase and command in"
                                            " the same line allowed")

                        # This line represents a testcase and contains a
                        # comment, but the comment doesn't start a new
                        # subtask
                        if testcase_detected or copy_testcase_detected:
                            testcases += 1

                        # This line starts a new subtask
                        if subtask_detected:
                            # Close the previous subtask
                            if points is None:
                                assert(testcases == 0)
                            else:
                                subtasks.append([points, testcases])
                            # Open the new one
                            testcases = 0
                            points = int(comment[3:].strip())

                # Close last subtask (if no subtasks were defined, just
                # fallback to Sum)
                if points is None:
                    args["score_type"] = "Sum"
                    total_value = float(conf.get("total_value", 100.0))
                    input_value = 0.0
                    n_input = testcases
                    if n_input != 0:
                        input_value = total_value / n_input
                    args["score_type_parameters"] = input_value
                else:
                    subtasks.append([points, testcases])
                    assert(100 == sum([int(st[0]) for st in subtasks]))
                    n_input = sum([int(st[1]) for st in subtasks])
                    args["score_type"] = "GroupMin"
                    args["score_type_parameters"] = subtasks

                if "n_input" in conf:
                    assert int(conf['n_input']) == n_input

        # If gen/GEN doesn't exist, just fallback to Sum
        except OSError:
            args["score_type"] = "Sum"
            total_value = float(conf.get("total_value", 100.0))
            input_value = 0.0
            n_input = int(conf['n_input'])
            if n_input != 0:
                input_value = total_value / n_input
            args["score_type_parameters"] = input_value

        # Override score_type if explicitly specified
        if "score_type" in conf and "score_type_parameters" in conf:
            logger.info("Overriding 'score_type' and 'score_type_parameters' "
                        "as per task.yaml")
            load(conf, args, "score_type")
            load(conf, args, "score_type_parameters")
        elif "score_type" in conf or "score_type_parameters" in conf:
            logger.warning("To override score type data, task.yaml must "
                           "specify both 'score_type' and "
                           "'score_type_parameters'.")

        # If output_only is set, then the task type is OutputOnly
        if conf.get('output_only', False):
            args["task_type"] = "OutputOnly"
            args["time_limit"] = None
            args["memory_limit"] = None
            args["task_type_parameters"] = [evaluation_param]
            task.submission_format = \
                ["output_%03d.txt" % i for i in range(n_input)]

        # If there is check/manager (or equivalent), then the task
        # type is Communication
        else:
            paths = [os.path.join(self.path, "check", "manager"),
                     os.path.join(self.path, "cor", "manager")]
            for path in paths:
                if os.path.exists(path):
                    num_processes = load(conf, None, "num_processes")
                    if num_processes is None:
                        num_processes = 1
                    logger.info("Task type Communication")
                    args["task_type"] = "Communication"
                    args["task_type_parameters"] = \
                        [num_processes, "stub", "fifo_io"]
                    digest = self.file_cacher.put_file_from_path(
                        path,
                        "Manager for task %s" % task.name)
                    args["managers"] += [
                        Manager("manager", digest)]
                    for lang in LANGUAGES:
                        stub_name = os.path.join(
                            self.path, "sol", "stub%s" % lang.source_extension)
                        if os.path.exists(stub_name):
                            digest = self.file_cacher.put_file_from_path(
                                stub_name,
                                "Stub for task %s and language %s" % (
                                    task.name, lang.name))
                            args["managers"] += [
                                Manager(
                                    "stub%s" % lang.source_extension, digest)]
                        else:
                            logger.warning("Stub for language %s not "
                                           "found.", lang.name)
                    for other_filename in os.listdir(os.path.join(self.path,
                                                                  "sol")):
                        if any(other_filename.endswith(header)
                               for header in HEADER_EXTS):
                            digest = self.file_cacher.put_file_from_path(
                                os.path.join(self.path, "sol", other_filename),
                                "Stub %s for task %s" % (other_filename,
                                                         task.name))
                            args["managers"] += [
                                Manager(other_filename, digest)]
                    break

            # Otherwise, the task type is Batch
            else:
                args["task_type"] = "Batch"
                args["task_type_parameters"] = \
                    [compilation_param, [infile_param, outfile_param],
                     evaluation_param]

        args["testcases"] = []
        for i in range(n_input):
            input_digest = self.file_cacher.put_file_from_path(
                os.path.join(self.path, "input", "input%d.txt" % i),
                "Input %d for task %s" % (i, task.name))
            output_digest = self.file_cacher.put_file_from_path(
                os.path.join(self.path, "output", "output%d.txt" % i),
                "Output %d for task %s" % (i, task.name))
            args["testcases"] += [
                Testcase("%03d" % i, False, input_digest, output_digest)]
            if args["task_type"] == "OutputOnly":
                task.attachments.set(
                    Attachment("input_%03d.txt" % i, input_digest))
        public_testcases = load(conf, None, ["public_testcases", "risultati"],
                                conv=lambda x: "" if x is None else x)
        if public_testcases == "all":
            for t in args["testcases"]:
                t.public = True
        elif len(public_testcases) > 0:
            for x in public_testcases.split(","):
                args["testcases"][int(x.strip())].public = True
        args["testcases"] = dict((tc.codename, tc) for tc in args["testcases"])
        args["managers"] = dict((mg.filename, mg) for mg in args["managers"])

        dataset = Dataset(**args)
        task.active_dataset = dataset

        # Import was successful
        os.remove(os.path.join(self.path, ".import_error"))

        logger.info("Task parameters loaded.")

        return task
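
A note on the gen/GEN convention relied upon above: every non-empty, non-comment line counts as a testcase, a "# ST: <points>" comment opens a new subtask, and a "# COPY:" comment marks a copied testcase. Below is a minimal standalone sketch of that parsing, for illustration only (it is not the loader's code, and unlike the loader it does not reject lines that mix a testcase with a command):

def parse_gen(text):
    """Return (subtasks, n_testcases); subtasks is None when no ST: lines exist."""
    subtasks = []
    testcases = 0
    points = None
    for line in text.splitlines():
        parts = line.strip().split('#', 1)
        if len(parts) == 1:
            # No comment: a non-empty line is a testcase.
            if parts[0] != '':
                testcases += 1
            continue
        testcase, comment = parts[0].strip(), parts[1].strip()
        if testcase != '' or comment.startswith("COPY:"):
            testcases += 1
        if comment.startswith("ST:"):
            # Close the previous subtask and open a new one.
            if points is not None:
                subtasks.append([points, testcases])
            testcases = 0
            points = int(comment[3:].strip())
    if points is None:
        return None, testcases  # no subtasks: the loader falls back to Sum
    subtasks.append([points, testcases])
    return subtasks, sum(tc for _, tc in subtasks)

# Two subtasks worth 40 and 60 points, three testcases overall.
assert parse_gen("# ST: 40\n1 10\n2 10\n# ST: 60\n3 100\n") == ([[40, 2], [60, 1]], 3)
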
Пример #16
0
    def get_task(self, name):
        """See docstring in class Loader.

        """
        try:
            num = self.tasks_order[name]

        # Here we expose an undocumented behavior, so that cmsMake can
        # import a task even without the whole contest; this is not to
        # be relied upon in general.
        except AttributeError:
            num = 1

        task_path = os.path.join(self.path, "problems", name)

        logger.info("Loading parameters for task %s.", name)

        args = {}

        # Here we update the time of the last import.
        touch(os.path.join(task_path, ".itime"))
        # If this file is not deleted, then the import failed.
        touch(os.path.join(task_path, ".import_error"))

        args["num"] = num

        # Get alphabetical task index for use in title.

        index = None
        contest_tree = ET.parse(os.path.join(self.path, "contest.xml"))
        contest_root = contest_tree.getroot()
        for problem in contest_root.find('problems'):
            if os.path.basename(problem.attrib['url']) == name:
                index = problem.attrib['index']

        tree = ET.parse(os.path.join(task_path, "problem.xml"))
        root = tree.getroot()

        args["name"] = name
        if index is not None:
            args["title"] = index.upper() + '. '
        else:
            args["title"] = ''
        args["title"] += root.find('names') \
            .find("name[@language='%s']" % self.primary_language) \
            .attrib['value']

        args["statements"] = []
        args["primary_statements"] = []
        for language in self.languages:
            path = os.path.join(task_path, 'statements', '.pdf', language,
                                'problem.pdf')
            if os.path.exists(path):
                lang = LANGUAGE_MAP[language]
                digest = self.file_cacher.put_file_from_path(
                    path,
                    "Statement for task %s (lang: %s)" % (name, language))
                args["statements"].append(Statement(lang, digest))
                args["primary_statements"].append(lang)
        args["primary_statements"] = '["%s"]' % \
            '","'.join(args["primary_statements"])
        args["submission_format"] = [SubmissionFormatElement("%s.%%l" % name)]

        # These options cannot be configured in the Polygon format.
        # Uncomment the following to set specific values for them.

        # args['max_submission_number'] = 100
        # args['max_user_test_number'] = 100
        # args['min_submission_interval'] = make_timedelta(60)
        # args['min_user_test_interval'] = make_timedelta(60)

        # args['max_user_test_number'] = 10
        # args['min_user_test_interval'] = make_timedelta(60)

        # args['token_mode'] = 'infinite'
        # args['token_max_number'] = 100
        # args['token_min_interval'] = make_timedelta(60)
        # args['token_gen_initial'] = 1
        # args['token_gen_number'] = 1
        # args['token_gen_interval'] = make_timedelta(1800)
        # args['token_gen_max'] = 2

        task_cms_conf_path = os.path.join(task_path, 'files')
        task_cms_conf = None
        if os.path.exists(os.path.join(task_cms_conf_path, 'cms_conf.py')):
            sys.path.append(task_cms_conf_path)
            logger.info("Found additional CMS options for task %s.", name)
            task_cms_conf = __import__('cms_conf')
            # TODO: probably should find more clever way to get rid of caching
            task_cms_conf = reload(task_cms_conf)
            sys.path.pop()
        if task_cms_conf is not None and hasattr(task_cms_conf, "general"):
            args.update(task_cms_conf.general)

        task = Task(**args)

        judging = root.find('judging')
        testset = None
        for testset in judging:
            testset_name = testset.attrib["name"]

            args = {}
            args["task"] = task
            args["description"] = testset_name
            args["autojudge"] = False

            tl = float(testset.find('time-limit').text)
            ml = float(testset.find('memory-limit').text)
            args["time_limit"] = tl * 0.001
            args["memory_limit"] = int(ml / (1024 * 1024))

            args["managers"] = []
            infile_param = judging.attrib['input-file']
            outfile_param = judging.attrib['output-file']

            checker_src = os.path.join(task_path, "files", "check.cpp")
            if os.path.exists(checker_src):
                logger.info("Checker found, compiling")
                checker_exe = os.path.join(task_path, "files", "checker")
                testlib_path = "/usr/local/include/cms/testlib.h"
                if not config.installed:
                    testlib_path = os.path.join(os.path.dirname(__file__),
                                                "polygon", "testlib.h")
                os.system("cat %s | \
                    sed 's$testlib.h$%s$' | \
                    g++ -x c++ -O2 -static -o %s -" %
                          (checker_src, testlib_path, checker_exe))
                digest = self.file_cacher.put_file_from_path(
                    checker_exe, "Manager for task %s" % name)
                args["managers"] += [Manager("checker", digest)]
                evaluation_param = "comparator"
            else:
                logger.info("Checker not found, using diff")
                evaluation_param = "diff"

            args["task_type"] = "Batch"
            args["task_type_parameters"] = \
                '["%s", ["%s", "%s"], "%s"]' % \
                ("alone", infile_param, outfile_param, evaluation_param)

            args["score_type"] = "Sum"
            total_value = 100.0
            input_value = 0.0

            testcases = int(testset.find('test-count').text)

            n_input = testcases
            if n_input != 0:
                input_value = total_value / n_input
            args["score_type_parameters"] = str(input_value)

            args["testcases"] = []

            for i in xrange(testcases):
                infile = os.path.join(task_path, testset_name,
                                      "%02d" % (i + 1))
                outfile = os.path.join(task_path, testset_name,
                                       "%02d.a" % (i + 1))
                if self.dos2unix_found:
                    os.system('dos2unix -q %s' % (infile, ))
                    os.system('dos2unix -q %s' % (outfile, ))
                input_digest = self.file_cacher.put_file_from_path(
                    infile, "Input %d for task %s" % (i, name))
                output_digest = self.file_cacher.put_file_from_path(
                    outfile, "Output %d for task %s" % (i, name))
                testcase = Testcase("%03d" % (i, ), False, input_digest,
                                    output_digest)
                testcase.public = True
                args["testcases"] += [testcase]

            if task_cms_conf is not None and \
               hasattr(task_cms_conf, "datasets") and \
               testset_name in task_cms_conf.datasets:
                args.update(task_cms_conf.datasets[testset_name])

            dataset = Dataset(**args)
            if testset_name == "tests":
                task.active_dataset = dataset

        os.remove(os.path.join(task_path, ".import_error"))

        logger.info("Task parameters loaded.")
        return task
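
The Polygon checker above is compiled by piping the source through sed and g++ via os.system. For reference, a rough Python 3 equivalent of that same step using subprocess (a sketch assuming g++ is on PATH; not the loader's actual code):

import subprocess

def compile_checker(checker_src, testlib_path, checker_exe):
    # Read the checker source and point its testlib.h include at the
    # local copy, mirroring the sed substitution used above.
    with open(checker_src, "rt", encoding="utf-8") as f:
        source = f.read().replace("testlib.h", testlib_path)
    # Feed the patched source to g++ on stdin ("-x c++ ... -").
    subprocess.run(
        ["g++", "-x", "c++", "-O2", "-static", "-o", checker_exe, "-"],
        input=source.encode("utf-8"), check=True)
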
Пример #17
0
    def get_task(self, get_statement=True):
        """See docstring in class TaskLoader."""
        name = os.path.split(self.path)[1]

        if (not os.path.exists(os.path.join(self.path, "task.yaml"))) and \
           (not os.path.exists(os.path.join(self.path, "..", name + ".yaml"))):
            logger.critical("File missing: \"task.yaml\"")
            return None

        # We first look for the yaml file inside the task folder,
        # and eventually fallback to a yaml file in its parent folder.
        try:
            conf = yaml.safe_load(
                io.open(os.path.join(self.path, "task.yaml"),
                        "rt", encoding="utf-8"))
        except IOError as err:
            try:
                deprecated_path = os.path.join(self.path, "..", name + ".yaml")
                conf = yaml.safe_load(io.open(deprecated_path, "rt",
                                              encoding="utf-8"))

                logger.warning("You're using a deprecated location for the "
                               "task.yaml file. You're advised to move %s to "
                               "%s.", deprecated_path,
                               os.path.join(self.path, "task.yaml"))
            except IOError:
                # Since both task.yaml and the (deprecated) "../taskname.yaml"
                # are missing, we will only warn the user that task.yaml is
                # missing (to avoid encouraging the use of the deprecated one)
                raise err

        # Here we update the time of the last import
        touch(os.path.join(self.path, ".itime"))
        # If this file is not deleted, then the import failed
        touch(os.path.join(self.path, ".import_error"))

        args = {}

        load(conf, args, ["name", "nome_breve"])
        load(conf, args, ["title", "nome"])

        if name != args["name"]:
            logger.info("The task name (%s) and the directory name (%s) are "
                        "different. The former will be used.", args["name"],
                        name)

        if args["name"] == args["title"]:
            logger.warning("Short name equals long name (title). "
                           "Please check.")

        name = args["name"]

        logger.info("Loading parameters for task %s.", name)

        if get_statement:
            primary_language = load(conf, None, "primary_language")
            if primary_language is None:
                primary_language = 'it'
            paths = [os.path.join(self.path, "statement", "statement.pdf"),
                     os.path.join(self.path, "testo", "testo.pdf")]
            for path in paths:
                if os.path.exists(path):
                    digest = self.file_cacher.put_file_from_path(
                        path,
                        "Statement for task %s (lang: %s)" %
                        (name, primary_language))
                    break
            else:
                logger.critical("Couldn't find any task statement, aborting.")
                sys.exit(1)
            args["statements"] = {
                primary_language: Statement(primary_language, digest)
            }

            args["primary_statements"] = [primary_language]

        args["submission_format"] = ["%s.%%l" % name]

        if conf.get("score_mode", None) == SCORE_MODE_MAX:
            args["score_mode"] = SCORE_MODE_MAX
        elif conf.get("score_mode", None) == SCORE_MODE_MAX_TOKENED_LAST:
            args["score_mode"] = SCORE_MODE_MAX_TOKENED_LAST

        # Use the new token settings format if detected.
        if "token_mode" in conf:
            load(conf, args, "token_mode")
            load(conf, args, "token_max_number")
            load(conf, args, "token_min_interval", conv=make_timedelta)
            load(conf, args, "token_gen_initial")
            load(conf, args, "token_gen_number")
            load(conf, args, "token_gen_interval", conv=make_timedelta)
            load(conf, args, "token_gen_max")
        # Otherwise fall back on the old one.
        else:
            logger.warning(
                "task.yaml uses a deprecated format for token settings which "
                "will soon stop being supported, you're advised to update it.")
            # Determine the mode.
            if conf.get("token_initial", None) is None:
                args["token_mode"] = TOKEN_MODE_DISABLED
            elif conf.get("token_gen_number", 0) > 0 and \
                    conf.get("token_gen_time", 0) == 0:
                args["token_mode"] = TOKEN_MODE_INFINITE
            else:
                args["token_mode"] = TOKEN_MODE_FINITE
            # Set the old default values.
            args["token_gen_initial"] = 0
            args["token_gen_number"] = 0
            args["token_gen_interval"] = timedelta()
            # Copy the parameters to their new names.
            load(conf, args, "token_total", "token_max_number")
            load(conf, args, "token_min_interval", conv=make_timedelta)
            load(conf, args, "token_initial", "token_gen_initial")
            load(conf, args, "token_gen_number")
            load(conf, args, "token_gen_time", "token_gen_interval",
                 conv=make_timedelta)
            load(conf, args, "token_max", "token_gen_max")
            # Remove some corner cases.
            if args["token_gen_initial"] is None:
                args["token_gen_initial"] = 0
            if args["token_gen_interval"].total_seconds() == 0:
                args["token_gen_interval"] = timedelta(minutes=1)

        load(conf, args, "max_submission_number")
        load(conf, args, "max_user_test_number")
        load(conf, args, "min_submission_interval", conv=make_timedelta)
        load(conf, args, "min_user_test_interval", conv=make_timedelta)

        # Attachments
        args["attachments"] = dict()
        if os.path.exists(os.path.join(self.path, "att")):
            for filename in os.listdir(os.path.join(self.path, "att")):
                digest = self.file_cacher.put_file_from_path(
                    os.path.join(self.path, "att", filename),
                    "Attachment %s for task %s" % (filename, name))
                args["attachments"][filename] = Attachment(filename, digest)

        task = Task(**args)

        args = {}
        args["task"] = task
        args["description"] = conf.get("version", "Default")
        args["autojudge"] = False

        load(conf, args, ["time_limit", "timeout"], conv=float)
        load(conf, args, ["memory_limit", "memlimit"])

        # Builds the parameters that depend on the task type
        args["managers"] = []
        infile_param = conf.get("infile", "input.txt")
        outfile_param = conf.get("outfile", "output.txt")

        # If there is sol/grader.%l for some language %l, then,
        # presuming that the task type is Batch, we retrieve graders
        # in the form sol/grader.%l
        graders = False
        for lang in LANGUAGES:
            if os.path.exists(os.path.join(
                    self.path, "sol", "grader%s" % lang.source_extension)):
                graders = True
                break
        if graders:
            # Read grader for each language
            for lang in LANGUAGES:
                extension = lang.source_extension
                grader_filename = os.path.join(
                    self.path, "sol", "grader%s" % extension)
                if os.path.exists(grader_filename):
                    digest = self.file_cacher.put_file_from_path(
                        grader_filename,
                        "Grader for task %s and language %s" %
                        (task.name, lang))
                    args["managers"] += [
                        Manager("grader%s" % extension, digest)]
                else:
                    logger.warning("Grader for language %s not found ", lang)
            # Read managers with other known file extensions
            for other_filename in os.listdir(os.path.join(self.path, "sol")):
                if any(other_filename.endswith(header)
                       for header in HEADER_EXTS):
                    digest = self.file_cacher.put_file_from_path(
                        os.path.join(self.path, "sol", other_filename),
                        "Manager %s for task %s" % (other_filename, task.name))
                    args["managers"] += [
                        Manager(other_filename, digest)]
            compilation_param = "grader"
        else:
            compilation_param = "alone"

        # If there is check/checker (or equivalent), then, presuming
        # that the task type is Batch or OutputOnly, we retrieve the
        # comparator
        paths = [os.path.join(self.path, "check", "checker"),
                 os.path.join(self.path, "cor", "correttore")]
        for path in paths:
            if os.path.exists(path):
                digest = self.file_cacher.put_file_from_path(
                    path,
                    "Manager for task %s" % task.name)
                args["managers"] += [
                    Manager("checker", digest)]
                evaluation_param = "comparator"
                break
        else:
            evaluation_param = "diff"

        # Detect subtasks by checking GEN
        gen_filename = os.path.join(self.path, 'gen', 'GEN')
        try:
            with io.open(gen_filename, "rt", encoding="utf-8") as gen_file:
                subtasks = []
                testcases = 0
                points = None
                for line in gen_file:
                    line = line.strip()
                    splitted = line.split('#', 1)

                    if len(splitted) == 1:
                        # This line represents a testcase, otherwise
                        # it's just a blank
                        if splitted[0] != '':
                            testcases += 1

                    else:
                        testcase, comment = splitted
                        testcase = testcase.strip()
                        comment = comment.strip()
                        testcase_detected = len(testcase) > 0
                        copy_testcase_detected = comment.startswith("COPY:")
                        subtask_detected = comment.startswith('ST:')

                        flags = [testcase_detected,
                                 copy_testcase_detected,
                                 subtask_detected]
                        if len([x for x in flags if x]) > 1:
                            raise Exception("No testcase and command in"
                                            " the same line allowed")

                        # This line represents a testcase and contains a
                        # comment, but the comment doesn't start a new
                        # subtask
                        if testcase_detected or copy_testcase_detected:
                            testcases += 1

                        # This line starts a new subtask
                        if subtask_detected:
                            # Close the previous subtask
                            if points is None:
                                assert(testcases == 0)
                            else:
                                subtasks.append([points, testcases])
                            # Open the new one
                            testcases = 0
                            points = int(comment[3:].strip())

                # Close last subtask (if no subtasks were defined, just
                # fallback to Sum)
                if points is None:
                    args["score_type"] = "Sum"
                    total_value = float(conf.get("total_value", 100.0))
                    input_value = 0.0
                    n_input = testcases
                    if n_input != 0:
                        input_value = total_value / n_input
                    args["score_type_parameters"] = input_value
                else:
                    subtasks.append([points, testcases])
                    assert(100 == sum([int(st[0]) for st in subtasks]))
                    n_input = sum([int(st[1]) for st in subtasks])
                    args["score_type"] = "GroupMin"
                    args["score_type_parameters"] = subtasks

                if "n_input" in conf:
                    assert int(conf['n_input']) == n_input

        # If gen/GEN doesn't exist, just fallback to Sum
        except IOError:
            args["score_type"] = "Sum"
            total_value = float(conf.get("total_value", 100.0))
            input_value = 0.0
            n_input = int(conf['n_input'])
            if n_input != 0:
                input_value = total_value / n_input
            args["score_type_parameters"] = input_value

        # Override score_type if explicitly specified
        if "score_type" in conf and "score_type_parameters" in conf:
            logger.info("Overriding 'score_type' and 'score_type_parameters' "
                        "as per task.yaml")
            load(conf, args, "score_type")
            load(conf, args, "score_type_parameters")
        elif "score_type" in conf or "score_type_parameters" in conf:
            logger.warning("To override score type data, task.yaml must "
                           "specify both 'score_type' and "
                           "'score_type_parameters'.")

        # If output_only is set, then the task type is OutputOnly
        if conf.get('output_only', False):
            args["task_type"] = "OutputOnly"
            args["time_limit"] = None
            args["memory_limit"] = None
            args["task_type_parameters"] = [evaluation_param]
            task.submission_format = \
                ["output_%03d.txt" % i for i in range(n_input)]

        # If there is check/manager (or equivalent), then the task
        # type is Communication
        else:
            paths = [os.path.join(self.path, "check", "manager"),
                     os.path.join(self.path, "cor", "manager")]
            for path in paths:
                if os.path.exists(path):
                    num_processes = load(conf, None, "num_processes")
                    if num_processes is None:
                        num_processes = 1
                    logger.info("Task type Communication")
                    args["task_type"] = "Communication"
                    args["task_type_parameters"] = [num_processes]
                    digest = self.file_cacher.put_file_from_path(
                        path,
                        "Manager for task %s" % task.name)
                    args["managers"] += [
                        Manager("manager", digest)]
                    for lang in LANGUAGES:
                        stub_name = os.path.join(
                            self.path, "sol", "stub%s" % lang.source_extension)
                        if os.path.exists(stub_name):
                            digest = self.file_cacher.put_file_from_path(
                                stub_name,
                                "Stub for task %s and language %s" % (
                                    task.name, lang.name))
                            args["managers"] += [
                                Manager(
                                    "stub%s" % lang.source_extension, digest)]
                        else:
                            logger.warning("Stub for language %s not "
                                           "found.", lang.name)
                    for other_filename in os.listdir(os.path.join(self.path,
                                                                  "sol")):
                        if any(other_filename.endswith(header)
                               for header in HEADER_EXTS):
                            digest = self.file_cacher.put_file_from_path(
                                os.path.join(self.path, "sol", other_filename),
                                "Stub %s for task %s" % (other_filename,
                                                         task.name))
                            args["managers"] += [
                                Manager(other_filename, digest)]
                    break

            # Otherwise, the task type is Batch
            else:
                args["task_type"] = "Batch"
                args["task_type_parameters"] = \
                    [compilation_param, [infile_param, outfile_param],
                     evaluation_param]

        args["testcases"] = []
        for i in range(n_input):
            input_digest = self.file_cacher.put_file_from_path(
                os.path.join(self.path, "input", "input%d.txt" % i),
                "Input %d for task %s" % (i, task.name))
            output_digest = self.file_cacher.put_file_from_path(
                os.path.join(self.path, "output", "output%d.txt" % i),
                "Output %d for task %s" % (i, task.name))
            args["testcases"] += [
                Testcase("%03d" % i, False, input_digest, output_digest)]
            if args["task_type"] == "OutputOnly":
                task.attachments.set(
                    Attachment("input_%03d.txt" % i, input_digest))
        public_testcases = load(conf, None, ["public_testcases", "risultati"],
                                conv=lambda x: "" if x is None else x)
        if public_testcases == "all":
            for t in args["testcases"]:
                t.public = True
        elif len(public_testcases) > 0:
            for x in public_testcases.split(","):
                args["testcases"][int(x.strip())].public = True
        args["testcases"] = dict((tc.codename, tc) for tc in args["testcases"])
        args["managers"] = dict((mg.filename, mg) for mg in args["managers"])

        dataset = Dataset(**args)
        task.active_dataset = dataset

        # Import was successful
        os.remove(os.path.join(self.path, ".import_error"))

        logger.info("Task parameters loaded.")

        return task
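
The deprecated token handling repeated in these loaders boils down to a small piece of mode detection on the old task.yaml keys. A condensed sketch of just that decision (illustration only, not part of CMS itself):

def detect_token_mode(conf):
    # Mirrors the branch above: no token_initial means tokens are disabled;
    # a positive generation number with zero generation time means infinite.
    if conf.get("token_initial") is None:
        return "disabled"
    if conf.get("token_gen_number", 0) > 0 and conf.get("token_gen_time", 0) == 0:
        return "infinite"
    return "finite"

assert detect_token_mode({}) == "disabled"
assert detect_token_mode({"token_initial": 2, "token_gen_number": 1}) == "infinite"
assert detect_token_mode({"token_initial": 2, "token_gen_number": 1,
                          "token_gen_time": 30}) == "finite"
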
Пример #18
0
    def post(self):
        fallback_page = "/tasks/add"

        try:
            attrs = dict()

            self.get_string(attrs, "name", empty=None)
            self.get_string(attrs, "title")

            assert attrs.get("name") is not None, "No task name specified."

            self.get_string(attrs, "primary_statements")

            self.get_submission_format(attrs)

            self.get_string(attrs, "token_mode")
            self.get_int(attrs, "token_max_number")
            self.get_timedelta_sec(attrs, "token_min_interval")
            self.get_int(attrs, "token_gen_initial")
            self.get_int(attrs, "token_gen_number")
            self.get_timedelta_min(attrs, "token_gen_interval")
            self.get_int(attrs, "token_gen_max")

            self.get_int(attrs, "max_submission_number")
            self.get_int(attrs, "max_user_test_number")
            self.get_timedelta_sec(attrs, "min_submission_interval")
            self.get_timedelta_sec(attrs, "min_user_test_interval")

            self.get_int(attrs, "score_precision")

            self.get_string(attrs, "score_mode")

            # Create the task.
            task = Task(**attrs)
            self.sql_session.add(task)

        except Exception as error:
            self.application.service.add_notification(make_datetime(),
                                                      "Invalid field(s)",
                                                      repr(error))
            self.redirect(fallback_page)
            return

        try:
            attrs = dict()

            self.get_time_limit(attrs, "time_limit")
            self.get_memory_limit(attrs, "memory_limit")
            self.get_task_type(attrs, "task_type", "TaskTypeOptions_")
            self.get_score_type(attrs, "score_type", "score_type_parameters")

            # Create its first dataset.
            attrs["description"] = "Default"
            attrs["autojudge"] = True
            attrs["task"] = task
            dataset = Dataset(**attrs)
            self.sql_session.add(dataset)

            # Make the dataset active. Life works better that way.
            task.active_dataset = dataset

        except Exception as error:
            self.application.service.add_notification(make_datetime(),
                                                      "Invalid field(s)",
                                                      repr(error))
            self.redirect(fallback_page)
            return

        if self.try_commit():
            # Create the task on RWS.
            self.application.service.proxy_service.reinitialize()
            self.redirect("/task/%s" % task.id)
        else:
            self.redirect(fallback_page)
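
The loaders on this page encode task_type_parameters for Batch tasks as [compilation, [infile, outfile], evaluation], sometimes as a plain list and sometimes serialized to a JSON string depending on the CMS version. A small helper illustrating both shapes (a sketch, not a CMS API):

import json

def batch_parameters(compilation="alone", infile="", outfile="",
                     evaluation="diff", as_json=False):
    # Build the three-element Batch parameter structure; optionally
    # serialize it to the JSON-string form used by older CMS versions.
    params = [compilation, [infile, outfile], evaluation]
    return json.dumps(params) if as_json else params

# e.g. '["alone", ["", ""], "diff"]' for a stand-alone, diff-checked task
print(batch_parameters(as_json=True))
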
Пример #19
0
    def get_task(self, get_statement=True):
        """See docstring in class TaskLoader."""
        name = os.path.split(self.path)[1]

        if (not os.path.exists(os.path.join(self.path, "task.yaml"))) and \
           (not os.path.exists(os.path.join(self.path, "problema.yaml"))) and \
           (not os.path.exists(os.path.join(self.path, "..", name + ".yaml"))):
            logger.critical("File missing: \"task.yaml\"")
            return None

        # We first look for the yaml file inside the task folder,
        # and eventually fallback to a yaml file in its parent folder.
        try:
            conf = yaml.safe_load(
                io.open(os.path.join(self.path, "task.yaml"),
                        "rt",
                        encoding="utf-8"))
        except IOError as err:
            try:
                conf = yaml.safe_load(
                    io.open(os.path.join(self.path, "problema.yaml"),
                            "rt",
                            encoding="utf-8"))
            except IOError:
                try:
                    deprecated_path = os.path.join(self.path, "..",
                                                   name + ".yaml")
                    conf = yaml.safe_load(
                        io.open(deprecated_path, "rt", encoding="utf-8"))

                    logger.warning(
                        "You're using a deprecated location for the "
                        "task.yaml file. You're advised to move %s to "
                        "%s.", deprecated_path,
                        os.path.join(self.path, "task.yaml"))
                except IOError:
                    # Since both task.yaml and the (deprecated) "../taskname.yaml"
                    # are missing, we will only warn the user that task.yaml is
                    # missing (to avoid encouraging the use of the deprecated one)
                    raise err

        # Here we update the time of the last import
        touch(os.path.join(self.path, ".itime"))
        # If this file is not deleted, then the import failed
        touch(os.path.join(self.path, ".import_error"))

        args = {}

        load(conf, args, ["name", "nome_breve"])
        load(conf, args, ["title", "nome"])
        load(conf, args, "hide_task_prefix")
        load(conf, args, "category")
        load(conf, args, "level")
        if "level" in args:
            args["level"] = unicode(args["level"])

        if name != args["name"]:
            logger.info(
                "The task name (%s) and the directory name (%s) are "
                "different. The former will be used.", args["name"], name)

        if args["name"] == args["title"]:
            logger.warning("Short name equals long name (title). "
                           "Please check.")

        name = args["name"]

        logger.info("Loading parameters for task %s.", name)

        if get_statement:
            primary_language = load(conf, None, "primary_language")
            if primary_language is None:
                primary_language = 'it'
            paths = [
                os.path.join(self.path, "statement", "statement.pdf"),
                os.path.join(self.path, "statement.pdf"),
                os.path.join(self.path, "enunciado.pdf"),
                os.path.join(self.path, args["name"] + ".pdf"),
                os.path.join(self.path, "testo", "testo.pdf")
            ]
            for path in paths:
                if os.path.exists(path):
                    digest = self.file_cacher.put_file_from_path(
                        path, "Statement for task %s (lang: %s)" %
                        (name, primary_language))
                    break
            else:
                logger.critical("Couldn't find any task statement, aborting.")
                sys.exit(1)
            args["statements"] = [Statement(primary_language, digest)]

            args["primary_statements"] = '["%s"]' % (primary_language)

        args["attachments"] = []  # FIXME Use auxiliary

        args["submission_format"] = [SubmissionFormatElement("%s.%%l" % name)]

        if conf.get("score_mode", None) == SCORE_MODE_MAX:
            args["score_mode"] = SCORE_MODE_MAX
        elif conf.get("score_mode", None) == SCORE_MODE_MAX_TOKENED_LAST:
            args["score_mode"] = SCORE_MODE_MAX_TOKENED_LAST

        # Use the new token settings format if detected.
        if "token_mode" in conf:
            load(conf, args, "token_mode")
            load(conf, args, "token_max_number")
            load(conf, args, "token_min_interval", conv=make_timedelta)
            load(conf, args, "token_gen_initial")
            load(conf, args, "token_gen_number")
            load(conf, args, "token_gen_interval", conv=make_timedelta)
            load(conf, args, "token_gen_max")
        # Otherwise fall back on the old one.
        else:
            logger.warning(
                "task.yaml uses a deprecated format for token settings which "
                "will soon stop being supported, you're advised to update it.")
            # Determine the mode.
            if conf.get("token_initial", None) is None:
                args["token_mode"] = "disabled"
            elif conf.get("token_gen_number", 0) > 0 and \
                    conf.get("token_gen_time", 0) == 0:
                args["token_mode"] = "infinite"
            else:
                args["token_mode"] = "finite"
            # Set the old default values.
            args["token_gen_initial"] = 0
            args["token_gen_number"] = 0
            args["token_gen_interval"] = timedelta()
            # Copy the parameters to their new names.
            load(conf, args, "token_total", "token_max_number")
            load(conf, args, "token_min_interval", conv=make_timedelta)
            load(conf, args, "token_initial", "token_gen_initial")
            load(conf, args, "token_gen_number")
            load(conf,
                 args,
                 "token_gen_time",
                 "token_gen_interval",
                 conv=make_timedelta)
            load(conf, args, "token_max", "token_gen_max")
            # Remove some corner cases.
            if args["token_gen_initial"] is None:
                args["token_gen_initial"] = 0
            if args["token_gen_interval"].total_seconds() == 0:
                args["token_gen_interval"] = timedelta(minutes=1)

        load(conf, args, "max_submission_number")
        load(conf, args, "max_user_test_number")
        load(conf, args, "min_submission_interval", conv=make_timedelta)
        load(conf, args, "min_user_test_interval", conv=make_timedelta)

        # Attachments
        args["attachments"] = []
        if os.path.exists(os.path.join(self.path, "att")):
            for filename in os.listdir(os.path.join(self.path, "att")):
                digest = self.file_cacher.put_file_from_path(
                    os.path.join(self.path, "att", filename),
                    "Attachment %s for task %s" % (filename, name))
                args["attachments"] += [Attachment(filename, digest)]

        task = Task(**args)

        args = {}
        args["task"] = task
        args["description"] = conf.get("version", "Default")
        args["autojudge"] = False

        load(conf, args, ["time_limit", "timeout"], conv=float)
        load(conf, args, ["memory_limit", "memlimit"])

        # Builds the parameters that depend on the task type
        args["managers"] = []
        infile_param = conf.get("infile", "input.txt")
        outfile_param = conf.get("outfile", "output.txt")

        # If there is sol/grader.%l for some language %l, then,
        # presuming that the task type is Batch, we retrieve graders
        # in the form sol/grader.%l
        graders = False
        for lang in LANGUAGES:
            if os.path.exists(
                    os.path.join(self.path, "sol",
                                 "grader%s" % lang.source_extension)):
                graders = True
                break
        if graders:
            # Read grader for each language
            for lang in LANGUAGES:
                extension = lang.source_extension
                grader_filename = os.path.join(self.path, "sol",
                                               "grader%s" % extension)
                if os.path.exists(grader_filename):
                    digest = self.file_cacher.put_file_from_path(
                        grader_filename, "Grader for task %s and language %s" %
                        (task.name, lang))
                    args["managers"] += [
                        Manager("grader%s" % extension, digest)
                    ]
                else:
                    logger.warning("Grader for language %s not found ", lang)
            # Read managers with other known file extensions
            for other_filename in os.listdir(os.path.join(self.path, "sol")):
                if any(
                        other_filename.endswith(header)
                        for header in HEADER_EXTS):
                    digest = self.file_cacher.put_file_from_path(
                        os.path.join(self.path, "sol", other_filename),
                        "Manager %s for task %s" % (other_filename, task.name))
                    args["managers"] += [Manager(other_filename, digest)]
            compilation_param = "grader"
        else:
            compilation_param = "alone"

        # If there is check/checker (or equivalent), then, presuming
        # that the task type is Batch or OutputOnly, we retrieve the
        # comparator
        paths = [
            os.path.join(self.path, "check", "checker"),
            os.path.join(self.path, "corrector.exe"),
            os.path.join(self.path, "cor", "correttore")
        ]
        for path in paths:
            if os.path.exists(path):
                digest = self.file_cacher.put_file_from_path(
                    path, "Manager for task %s" % task.name)
                args["managers"] += [Manager("checker", digest)]
                evaluation_param = "comparator"
                break
        else:
            evaluation_param = "diff"

        # Detect subtasks by checking GEN
        gen_filename = os.path.join(self.path, 'gen', 'GEN')
        try:
            with io.open(gen_filename, "rt", encoding="utf-8") as gen_file:
                subtasks = []
                testcases = 0
                points = None
                for line in gen_file:
                    line = line.strip()
                    splitted = line.split('#', 1)

                    if len(splitted) == 1:
                        # This line represents a testcase, otherwise
                        # it's just a blank
                        if splitted[0] != '':
                            testcases += 1

                    else:
                        testcase, comment = splitted
                        testcase = testcase.strip()
                        comment = comment.strip()
                        testcase_detected = testcase != ''
                        copy_testcase_detected = comment.startswith("COPY:")
                        subtask_detected = comment.startswith('ST:')

                        flags = [
                            testcase_detected, copy_testcase_detected,
                            subtask_detected
                        ]
                        if len([x for x in flags if x]) > 1:
                            raise Exception("No testcase and command in"
                                            " the same line allowed")

                        # This line represents a testcase and contains a
                        # comment, but the comment doesn't start a new
                        # subtask
                        if testcase_detected or copy_testcase_detected:
                            testcases += 1

                        # This line starts a new subtask
                        if subtask_detected:
                            # Close the previous subtask
                            if points is None:
                                assert (testcases == 0)
                            else:
                                subtasks.append([points, testcases])
                            # Open the new one
                            testcases = 0
                            points = int(comment[3:].strip())

                # Close last subtask (if no subtasks were defined, just
                # fallback to Sum)
                if points is None:
                    args["score_type"] = "Sum"
                    total_value = float(conf.get("total_value", 100.0))
                    input_value = 0.0
                    n_input = testcases
                    if n_input != 0:
                        input_value = total_value / n_input
                    args["score_type_parameters"] = "%s" % input_value
                else:
                    subtasks.append([points, testcases])
                    assert (100 == sum([int(st[0]) for st in subtasks]))
                    n_input = sum([int(st[1]) for st in subtasks])
                    args["score_type"] = "GroupMin"
                    args["score_type_parameters"] = "%s" % subtasks

                if "n_input" in conf:
                    assert int(conf['n_input']) == n_input

        # If gen/GEN doesn't exist, just fallback to Sum
        except IOError:
            if 'n_input' not in conf:
                conf['n_input'] = 0
            n_input = int(conf['n_input'])
            if "score_type" in conf:
                args["score_type"] = conf["score_type"]
                if "score_type_parameters" in conf:
                    args["score_type_parameters"] = (
                        "%s" % conf["score_type_parameters"])
                    args["score_type_parameters"] = re.sub(
                        r'u\'([^\']+)\'', '\"\g<1>\"',
                        args["score_type_parameters"])
            else:
                args["score_type"] = "Sum"
                total_value = float(conf.get("total_value", 100.0))
                input_value = 0.0

                def count_testcases(folder):
                    c = 0
                    if os.path.isdir(folder):
                        for filename in sorted(os.listdir(folder)):
                            nombre, ext = os.path.splitext(filename)
                            if ext == ".in":
                                c += 1
                    return c

                casos = n_input + count_testcases(
                    os.path.join(self.path, "casos")) + count_testcases(
                        os.path.join(self.path, "casos", "generados"))
                if casos != 0:
                    input_value = total_value / casos
                args["score_type_parameters"] = "%s" % input_value

        # If output_only is set, then the task type is OutputOnly
        if conf.get('output_only', False):
            args["task_type"] = "OutputOnly"
            args["time_limit"] = None
            args["memory_limit"] = None
            args["task_type_parameters"] = '["%s"]' % evaluation_param
            task.submission_format = [
                SubmissionFormatElement("output_%03d.txt" % i)
                for i in xrange(n_input)
            ]

        # If there is check/manager (or equivalent), then the task
        # type is Communication
        else:
            paths = [
                os.path.join(self.path, "check", "manager"),
                os.path.join(self.path, "cor", "manager")
            ]
            for path in paths:
                if os.path.exists(path):
                    num_processes = load(conf, None, "num_processes")
                    if num_processes is None:
                        num_processes = 1
                    logger.info("Task type Communication")
                    args["task_type"] = "Communication"
                    args["task_type_parameters"] = '[%d]' % num_processes
                    digest = self.file_cacher.put_file_from_path(
                        path, "Manager for task %s" % task.name)
                    args["managers"] += [Manager("manager", digest)]
                    for lang in LANGUAGES:
                        stub_name = os.path.join(
                            self.path, "sol", "stub%s" % lang.source_extension)
                        if os.path.exists(stub_name):
                            digest = self.file_cacher.put_file_from_path(
                                stub_name, "Stub for task %s and language %s" %
                                (task.name, lang.name))
                            args["managers"] += [
                                Manager("stub%s" % lang.source_extension,
                                        digest)
                            ]
                        else:
                            logger.warning(
                                "Stub for language %s not "
                                "found.", lang.name)
                    for other_filename in os.listdir(
                            os.path.join(self.path, "sol")):
                        if any(
                                other_filename.endswith(header)
                                for header in HEADER_EXTS):
                            digest = self.file_cacher.put_file_from_path(
                                os.path.join(self.path, "sol", other_filename),
                                "Stub %s for task %s" %
                                (other_filename, task.name))
                            args["managers"] += [
                                Manager(other_filename, digest)
                            ]
                    break

            # Otherwise, the task type is Batch
            else:
                args["task_type"] = "Batch"
                args["task_type_parameters"] = \
                    '["%s", ["%s", "%s"], "%s"]' % \
                    (compilation_param, infile_param, outfile_param,
                     evaluation_param)
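                # Illustration (parameter values are examples only): with a
                # grader, explicit file names and a custom checker this yields
                # '["grader", ["input.txt", "output.txt"], "comparator"]';
                # the simplest configuration is '["alone", ["", ""], "diff"]'.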

        args["testcases"] = []
        for i in xrange(n_input):
            input_digest = self.file_cacher.put_file_from_path(
                os.path.join(self.path, "input", "input%d.txt" % i),
                "Input %d for task %s" % (i, task.name))
            output_digest = self.file_cacher.put_file_from_path(
                os.path.join(self.path, "output", "output%d.txt" % i),
                "Output %d for task %s" % (i, task.name))
            args["testcases"] += [
                Testcase("%03d" % i, False, input_digest, output_digest)
            ]
            if args["task_type"] == "OutputOnly":
                task.attachments += [
                    Attachment("input_%03d.txt" % i, input_digest)
                ]

        def add_testcases_dir(folder):
            if os.path.isdir(folder):
                for filename in sorted(os.listdir(folder)):
                    nombre, ext = os.path.splitext(filename)
                    if ext == ".in":
                        input_digest = self.file_cacher.put_file_from_path(
                            os.path.join(folder, filename),
                            "Input %s for task %s" % (nombre, task.name))
                        output_digest = self.file_cacher.put_file_from_path(
                            os.path.join(folder, nombre + ".dat"),
                            "Output %s for task %s" % (nombre, task.name))
                        args["testcases"] += [
                            Testcase(nombre, False, input_digest,
                                     output_digest)
                        ]
                        if args["task_type"] == "OutputOnly":
                            task.attachments += [
                                Attachment(filename, input_digest)
                            ]

        add_testcases_dir(os.path.join(self.path, "casos"))
        add_testcases_dir(os.path.join(self.path, "casos", "generados"))

        public_testcases = load(conf,
                                None, ["public_testcases", "risultati"],
                                conv=lambda x: "" if x is None else x)
        if public_testcases == "all":
            for t in args["testcases"]:
                t.public = True
        elif public_testcases != "":
            for x in public_testcases.split(","):
                args["testcases"][int(x.strip())].public = True

        dataset = Dataset(**args)
        task.active_dataset = dataset

        # Import was successful
        os.remove(os.path.join(self.path, ".import_error"))

        logger.info("Task parameters loaded.")

        return task
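For orientation, this is the on-disk layout the loader above walks, reconstructed from the paths it touches (only entries the code actually references; the tree itself is illustrative):

    <task directory>/
        input/input0.txt, input1.txt, ...      numbered testcase inputs
        output/output0.txt, output1.txt, ...   matching outputs
        casos/*.in with matching *.dat         extra testcases
        casos/generados/*.in, *.dat            generated testcases
        sol/stub.<ext>, header files           stubs for Communication tasks
        check/manager or cor/manager           switches the task type to Communication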
Example #20
0
    def get_task(self, get_statement=True):
        """See docstring in class Loader.

        """

        logger.info("Checking dos2unix presence")
        i = os.system('dos2unix -V 2>/dev/null')
        self.dos2unix_found = (i == 0)
        if not self.dos2unix_found:
            logger.error("dos2unix not found - tests will not be converted!")

        name = os.path.basename(self.path)
        logger.info("Loading parameters for task %s.", name)

        args = {}

        # Here we update the time of the last import.
        touch(os.path.join(self.path, ".itime"))
        # If this file is not deleted, then the import failed.
        touch(os.path.join(self.path, ".import_error"))

        # Read the task name and title from the Polygon problem.xml descriptor.

        tree = ET.parse(os.path.join(self.path, "problem.xml"))
        root = tree.getroot()

        args["name"] = name
        args["title"] = str(root.find('names').find("name").attrib['value'])

        if get_statement:
            args["statements"] = {}
            args["primary_statements"] = []
            for language, lang in iteritems(LANGUAGE_MAP):
                path = os.path.join(self.path, 'statements', '.pdf', language,
                                    'problem.pdf')
                if os.path.exists(path):
                    digest = self.file_cacher.put_file_from_path(
                        path,
                        "Statement for task %s (lang: %s)" % (name, language))
                    args["statements"][lang] = Statement(lang, digest)
                    args["primary_statements"].append(lang)

        args["submission_format"] = ["%s.%%l" % name]

        # These options cannot be configured in the Polygon format.
        # Uncomment the following to set specific values for them.

        # args['max_submission_number'] = 100
        # args['max_user_test_number'] = 100
        # args['min_submission_interval'] = make_timedelta(60)
        # args['min_user_test_interval'] = make_timedelta(60)

        # args['max_user_test_number'] = 10
        # args['min_user_test_interval'] = make_timedelta(60)

        # args['token_mode'] = 'infinite'
        # args['token_max_number'] = 100
        # args['token_min_interval'] = make_timedelta(60)
        # args['token_gen_initial'] = 1
        # args['token_gen_number'] = 1
        # args['token_gen_interval'] = make_timedelta(1800)
        # args['token_gen_max'] = 2

        task_cms_conf_path = os.path.join(self.path, 'files', 'cms_conf.py')
        task_cms_conf = None
        if os.path.exists(task_cms_conf_path):
            logger.info("Found additional CMS options for task %s.", name)
            with io.open(task_cms_conf_path, 'rb') as f:
                task_cms_conf = imp.load_module('cms_conf', f,
                                                task_cms_conf_path,
                                                ('.py', 'r', imp.PY_SOURCE))
        if task_cms_conf is not None and hasattr(task_cms_conf, "general"):
            args.update(task_cms_conf.general)
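        # A hypothetical cms_conf.py, for illustration only.  The loader uses
        # its "general" dict here (extra Task arguments) and its "datasets"
        # dict further below (per-testset Dataset overrides):
        #
        #     general = {"max_submission_number": 100}
        #     datasets = {"tests": {"time_limit": 3.0}}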

        task = Task(**args)

        judging = root.find('judging')
        testset = None
        for testset in judging:
            testset_name = testset.attrib["name"]

            args = {}
            args["task"] = task
            args["description"] = str(testset_name)
            args["autojudge"] = False

            tl = float(testset.find('time-limit').text)
            ml = int(testset.find('memory-limit').text)
            args["time_limit"] = tl * 0.001
            args["memory_limit"] = ml // (1024 * 1024)

            args["managers"] = {}
            infile_param = judging.attrib['input-file']
            outfile_param = judging.attrib['output-file']

            # Checker can be in any of these two locations.
            checker_src = os.path.join(self.path, "files", "check.cpp")
            if not os.path.exists(checker_src):
                checker_src = os.path.join(self.path, "check.cpp")

            if os.path.exists(checker_src):
                logger.info("Checker found, compiling")
                checker_exe = os.path.join(os.path.dirname(checker_src),
                                           "checker")
                testlib_path = "/usr/local/include/cms"
                testlib_include = os.path.join(testlib_path, "testlib.h")
                if not config.installed:
                    testlib_path = os.path.join(os.path.dirname(__file__),
                                                "polygon")
                code = subprocess.call([
                    "g++", "-x", "c++", "-O2", "-static", "-DCMS", "-I",
                    testlib_path, "-include", testlib_include, "-o",
                    checker_exe, checker_src
                ])
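                # For illustration, this assembles a command line roughly like
                # (paths are examples; testlib_include points at the installed
                # testlib.h):
                #   g++ -x c++ -O2 -static -DCMS -I /usr/local/include/cms \
                #       -include /usr/local/include/cms/testlib.h \
                #       -o <task>/files/checker <task>/files/check.cpp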
                if code != 0:
                    logger.critical("Could not compile checker")
                    return None
                digest = self.file_cacher.put_file_from_path(
                    checker_exe, "Manager for task %s" % name)
                args["managers"]["checker"] = Manager("checker", digest)
                evaluation_param = "comparator"
            else:
                logger.info("Checker not found, using diff")
                evaluation_param = "diff"

            args["task_type"] = "Batch"
            args["task_type_parameters"] = \
                ["alone", [infile_param, outfile_param], evaluation_param]

            args["score_type"] = "Sum"
            total_value = 100.0
            input_value = 0.0

            testcases = int(testset.find('test-count').text)

            n_input = testcases
            if n_input != 0:
                input_value = total_value / n_input
            args["score_type_parameters"] = input_value

            args["testcases"] = {}

            for i in range(testcases):
                infile = os.path.join(self.path, testset_name,
                                      "%02d" % (i + 1))
                outfile = os.path.join(self.path, testset_name,
                                       "%02d.a" % (i + 1))
                if self.dos2unix_found:
                    os.system('dos2unix -q %s' % (infile, ))
                    os.system('dos2unix -q %s' % (outfile, ))
                input_digest = self.file_cacher.put_file_from_path(
                    infile, "Input %d for task %s" % (i, name))
                output_digest = self.file_cacher.put_file_from_path(
                    outfile, "Output %d for task %s" % (i, name))
                testcase = Testcase("%03d" % (i, ), False, input_digest,
                                    output_digest)
                testcase.public = True
                args["testcases"][testcase.codename] = testcase

            if task_cms_conf is not None and \
               hasattr(task_cms_conf, "datasets") and \
               testset_name in task_cms_conf.datasets:
                args.update(task_cms_conf.datasets[testset_name])

            dataset = Dataset(**args)
            if testset_name == "tests":
                task.active_dataset = dataset

        os.remove(os.path.join(self.path, ".import_error"))

        logger.info("Task parameters loaded.")
        return task
Example #21
0
    def get_task(self, name):
        """See docstring in class Loader.

        """
        try:
            num = self.tasks_order[name]
        # Here we expose an undocumented behavior, so that cmsMake can
        # import a task even without the whole contest; this is not to
        # be relied upon in general
        except AttributeError:
            num = 1
        logger.info("Load task %s" % name)

        task_path = os.path.join(self.path, name)
        conf = {}
        try:
            conf = yaml.safe_load(
                io.open(os.path.join(task_path, "task.yaml"),
                        "rt", encoding="utf-8"))
        except IOError:
            if os.path.exists(os.path.join(task_path, name + ".yaml")):
                conf = yaml.safe_load(
                    io.open(os.path.join(task_path, name + ".yaml"),
                            "rt", encoding="utf-8"))
        args = {}

        args["num"] = num
        args["name"] = name
        args["title"] = name.title()
        primary_language = conf.get("task", {}).get("primary_language", "en")
        for path in os.listdir(os.path.join(task_path, "statement")):
            digest = self.file_cacher.put_file_from_path(
                os.path.join(task_path, "statement", path),
                "Statement for task %s (lang: %s)" % (name,
                                                      primary_language))
            break
        else:
            logger.critical("Couldn't find any task statement, aborting...")
            sys.exit(1)
        args["statements"] = [Statement(primary_language, digest)]
        args["primary_statements"] = '["%s"]' % (primary_language)
        args["submission_format"] = [
            SubmissionFormatElement("%s.%%l" % name)]
        args["token_mode"] = "disabled"
        
        args.update(self.token_mode)

        # Load attachments
        args["attachments"] = []
        if os.path.exists(os.path.join(task_path, "attachments")):
            for filename in os.listdir(os.path.join(task_path, "attachments")):
                digest = self.file_cacher.put_file_from_path(
                    os.path.join(task_path, "attachments", filename),
                    "Attachment %s for task %s" % (filename, name))
                args["attachments"] += [Attachment(filename, digest)]
        
        args.update(conf.get("task", {}))
        task = Task(**args)

        args = {}
        args["task"] = task
        args["description"] = "Default"
        args["autojudge"] = False
        args["time_limit"] = 2.0
        args["memory_limit"] = 256
        args["task_type"] = "Batch"
        args["score_type"] = "Sum"
        input_file = ""
        output_file = ""
        args["managers"] = []

        # Overwrite parameters
        for key, param in conf.iteritems():
            if key == "input":
                input_file = param
            elif key == "output":
                output_file = param
            elif key == "time_limit":
                args[key] = float(param)
            elif key in self.timedelta_params:
                args[key] = timedelta(seconds=param)
            elif key != "subtasks_parameters" and key != "subtasks" and key != "task":
                args[key] = param
        
        # Intelligent tests format detector
        # Load all tests recursively
        def load_tests(tests_path, name):
            if os.path.isfile(os.path.join(tests_path, name)):
                return [name]
            elif os.path.isdir(os.path.join(tests_path, name)):
                l = []
                for path in os.listdir(os.path.join(tests_path, name)):
                    l += load_tests(tests_path, os.path.join(name, path))
                return l
            else:
                return []
        full_names = load_tests(os.path.join(task_path, "tests"), "")
        tests_dict = dict((os.path.split(test)[-1], test)
                          for test in full_names)
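        # Example of the resulting structures (hypothetical file tree): given
        # tests/sub1/01 and tests/sub1/01.a, the recursive walk returns
        # full_names == ["sub1/01", "sub1/01.a"] and tests_dict keys the bare
        # file names, e.g. {"01": "sub1/01", "01.a": "sub1/01.a"}.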
        tests = []
        detected = False

        if not detected:
            # * / *.a format
            detected = True
            for test in tests_dict.keys():
                if test.endswith(".a"):
                    if test[:-2] not in tests_dict.keys():
                        detected = False
                else:
                    if test + ".a" not in tests_dict.keys():
                        detected = False
            if detected:
                logger.info("Tests format * / *.a detected")
                idx = 0
                for (short_name, test) in sorted(tests_dict.items()):
                    if not short_name.endswith(".a"):
                        tests.append({"idx": idx,
                                      "input": test,
                                      "output": tests_dict[short_name + ".a"],
                                      "public": False })
                        idx += 1

        if not detected:
            # *.in* / *.out* format
            detected = True
            for test in tests_dict.keys():
                if test.find(".in") != -1:
                    if test.replace(".in", ".out") not in tests_dict.keys():
                        detected = False
                elif test.find(".out") != -1:
                    if test.replace(".out", ".in") not in tests_dict.keys():
                        detected = False
                else:
                    detected = False
            if detected:
                logger.info("Tests format *.in* / *.out* detected")
                idx = 0
                for (short_name, test) in sorted(tests_dict.items()):
                    if short_name.find(".in") != -1:
                        tests.append({"idx": idx,
                                      "input": test,
                                      "output": tests_dict[short_name.replace(".in", ".out")],
                                      "public": False })
                        idx += 1

        if not detected:
            # *.in* / *.sol* format
            detected = True
            for test in tests_dict.keys():
                if test.find(".in") != -1:
                    if test.replace(".in", ".sol") not in tests_dict.keys():
                        detected = False
                elif test.find(".sol") != -1:
                    if test.replace(".sol", ".in") not in tests_dict.keys():
                        detected = False
                else:
                    detected = False
            if detected:
                logger.info("Tests format *.in* / *.sol* detected")
                idx = 0
                for (short_name, test) in sorted(tests_dict.items()):
                    if short_name.find(".in") != -1:
                        tests.append({"idx": idx,
                                      "input": test,
                                      "output": tests_dict[short_name.replace(".in", ".sol")],
                                      "public": False })
                        idx += 1

        if not detected:
            # *.in* / *.res* format
            detected = True
            for test in tests_dict.keys():
                if test.find(".in") != -1:
                    if test.replace(".in", ".res") not in tests_dict.keys():
                        detected = False
                elif test.find(".res") != -1:
                    if test.replace(".res", ".in") not in tests_dict.keys():
                        detected = False
                else:
                    detected = False
            if detected:
                logger.info("Tests format *.in* / *.res* detected")
                idx = 0
                for (short_name, test) in sorted(tests_dict.items()):
                    if short_name.find(".in") != -1:
                        tests.append({"idx": idx,
                                      "input": test,
                                      "output": tests_dict[short_name.replace(".in", ".res")],
                                      "public": False })
                        idx += 1

        if not detected:
            # *.in* / *.ans* format
            detected = True
            for test in tests_dict.keys():
                if test.find(".in") != -1:
                    if test.replace(".in", ".ans") not in tests_dict.keys():
                        detected = False
                elif test.find(".ans") != -1:
                    if test.replace(".ans", ".in") not in tests_dict.keys():
                        detected = False
                else:
                    detected = False
            if detected:
                logger.info("Tests format *.in* / *.ans* detected")
                idx = 0
                for (short_name, test) in sorted(tests_dict.items()):
                    if short_name.find(".in") != -1:
                        tests.append({"idx": idx,
                                      "input": test,
                                      "output": tests_dict[short_name.replace(".in", ".ans")],
                                      "public": False })
                        idx += 1

        if not detected:
            # *input* / *output* format
            detected = True
            for test in tests_dict.keys():
                if test.find("input") != -1:
                    if test.replace("input", "output") not in tests_dict.keys():
                        detected = False
                elif test.find("output") != -1:
                    if test.replace("output", "input") not in tests_dict.keys():
                        detected = False
                else:
                    detected = False
            if detected:
                logger.info("Tests format *input* / *output* detected")
                idx = 0
                for (short_name, test) in sorted(tests_dict.items()):
                    if short_name.find("input") != -1:
                        tests.append({"idx": idx,
                                      "input": test,
                                      "output": tests_dict[short_name.replace("input", "output")],
                                      "public": False })
                        idx += 1

        if not detected:
            # in* out* format using full paths
            detected = True
            for test in full_names:
                if test.startswith("in"):
                    if "out" + test[2:] not in full_names:
                        detected = False
                elif test.startswith("out"):
                    if "in" + test[3:] not in full_names:
                        detected = False
                else:
                    detected = False
            if detected:
                logger.info("Tests format in* / out* with full paths detected")
                idx = 0
                for test in sorted(full_names):
                    if test.startswith("in"):
                        tests.append({"idx": idx,
                                      "input": test,
                                      "output": "out" + test[2:],
                                      "public": False })
                        idx += 1

        if not detected:
            # Need more intelligence
            logger.critical("Sorry, I can't recognize tests format")
            sys.exit(1)
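        # A possible refactor, sketched here only as a comment (not part of
        # the original loader): every detection block above follows the same
        # pattern and could be driven by a single helper, e.g.
        #
        #     def pair_matches(in_mark, out_mark):
        #         for t in tests_dict:
        #             if in_mark in t:
        #                 if t.replace(in_mark, out_mark) not in tests_dict:
        #                     return False
        #             elif out_mark in t:
        #                 if t.replace(out_mark, in_mark) not in tests_dict:
        #                     return False
        #             else:
        #                 return False
        #         return True
        #
        # called once for each candidate pair such as (".in", ".out"),
        # (".in", ".sol"), (".in", ".res"), (".in", ".ans") and
        # ("input", "output").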

        # Detect subtasks
        if "subtasks_parameters" in conf:
            logger.info("Detected simple subtask description")
            args["score_type"] = "NamedGroup"
            subtasks = conf["subtasks_parameters"]
            total_value = float(subtasks.get("total_value", 100))
            is_public = subtasks.get("public_tests", False)
            if is_public:
                for test in tests:
                    test["public"] = True
            sample_tests = subtasks.get("sample_tests", "").strip()
            # An empty "sample_tests" entry would make int("") fail below.
            samples = ([int(test.strip()) - 1
                        for test in sample_tests.split(",")]
                       if sample_tests else [])
            for i in samples:
                tests[i]["public"] = True
            samples_group = {
                "score": 0,
                "type": "sum",
                "public": rebuild_list(samples),
                "private": [],
                "hidden": [] }
            tests_group = {
                "score": total_value,
                "type": "sum",
                "public": [],
                "private": [],
                "hidden": [] }
            for i in xrange(len(tests)):
                if i not in samples:
                    if is_public:
                        tests_group["public"].append(i)
                    else:
                        tests_group["private"].append(i)
            tests_group["public"] = rebuild_list(tests_group["public"])
            tests_group["private"] = rebuild_list(tests_group["private"])
            if len(samples) == 0:
                args["score_type_parameters"] = json.dumps([tests_group])
            else:
                args["score_type_parameters"] = json.dumps([samples_group, tests_group])
        elif "subtasks" in conf:

            logger.info("Detected full subtask description")
            args["score_type"] = "NamedGroup"
            subtasks = conf.get("subtasks")
            for subtask in subtasks:
                if not "score" in subtask:
                    subtask["score"] = 0
                if not "type" in subtask:
                    subtask["type"] = "sum"
                if subtask["type"] != "sum" and subtask["type"] != "min":
                    # Custom evaluator parameter
                    with open(os.path.join(task_path, subtask["type"]), "r") as prog_file:
                        prog = prog_file.read()
                    subtask["type"] = prog
                subtask["public"] = rebuild_list(subtask.get("public", []), test_list = tests, delta = 1)
                subtask["private"] = rebuild_list(subtask.get("private", []), test_list = tests, delta = 1)
                subtask["hidden"] = rebuild_list(subtask.get("hidden", []), test_list = tests, delta = 1)
                for i in subtask["public"]:
                    tests[int(i)]["public"] = True
            args["score_type_parameters"] = json.dumps(conf.get("subtasks"))
        else:

            logger.info("Subtask description was not detected")
            args["score_type"] = "NamedGroup"
            # Autodetect samples
            samples = []
            for test in tests:
                if test["input"].find("dummy") != -1 or test["input"].find("sample") != -1:
                    samples.append(test["idx"])
            for i in samples:
                tests[i]["public"] = True
            samples_group = {
                "score": 0,
                "type": "sum",
                "public": rebuild_list(samples),
                "private": [],
                "hidden": [] }
            tests_group = {
                "score": 100,
                "type": "sum",
                "public": [],
                "private": [],
                "hidden": [] }
            for i in xrange(len(tests)):
                if i not in samples:
                    tests_group["private"].append(i)
            tests_group["public"] = rebuild_list(tests_group["public"])
            tests_group["private"] = rebuild_list(tests_group["private"])
            if len(samples) == 0:
                args["score_type_parameters"] = json.dumps([tests_group])
            else:
                args["score_type_parameters"] = json.dumps([samples_group, tests_group])

        # Load testcases
        args["testcases"] = []
        for test in tests:
            i = test["idx"]
            input_digest = self.file_cacher.put_file_from_path(
                os.path.join(task_path, "tests", test["input"]),
                "Input %d for task %s" % (i, name))
            output_digest = self.file_cacher.put_file_from_path(
                os.path.join(task_path, "tests", test["output"]),
                "Output %d for task %s" % (i, name))
            args["testcases"] += [
                Testcase("%03d" % i, test["public"], input_digest, output_digest)]

        # Load graders (and stubs if any)
        if os.path.isdir(os.path.join(task_path, "graders")):
            for filename in os.listdir(os.path.join(task_path, "graders")):
                digest = self.file_cacher.put_file_from_path(
                    os.path.join(task_path, "graders", filename),
                    "Grader %s for task %s" % (filename, name))
                args["managers"] += [
                    Manager(filename, digest)]
            compilation_param = "grader"
        else:
            compilation_param = "alone"

        # Load checker
        paths = [os.path.join(task_path, "checker"),
                 os.path.join(task_path, "check"),
                 os.path.join(task_path, "check.exe")]
        for path in paths:
            if os.path.isfile(path):
                digest = self.file_cacher.put_file_from_path(
                    path,
                    "Checker for task %s" % name)
                args["managers"] += [
                    Manager("checker", digest)]
                evaluation_param = "comparator"
                break
        else:
            evaluation_param = "diff"

        # If the task type is Communication, try to load manager
        path = os.path.join(task_path, "manager")
        if os.path.isfile(path):
            args["task_type"] = "Communication"
            args["task_type_parameters"] = '[]'
            digest = self.file_cacher.put_file_from_path(
                path,
                "Manager for task %s" % name)
            args["managers"] += [
                Manager("manager", digest)]

        # Set task type parameters
        if args["task_type"] == "OutputOnly":
            args["time_limit"] = None
            args["memory_limit"] = None
            args["task_type_parameters"] = '["%s"]' % evaluation_param
            task.submission_format = [
                SubmissionFormatElement("%03d.out" % (i + 1))
                for i in xrange(len(tests))]
        elif args["task_type"] == "Batch":
            args["task_type_parameters"] = \
                '["%s", ["%s", "%s"], "%s"]' % \
                (compilation_param, input_file, output_file,
                 evaluation_param)

        logger.info("Task type is %s" % args["task_type"])
        dataset = Dataset(**args)
        task.active_dataset = dataset
        logger.info("Task parameters loaded.")
        return task