Example no. 1
    def from_user_test(user_test):
        job = EvaluationJob()

        # Job
        job.task_type = user_test.task.task_type
        job.task_type_parameters = json.loads(
            user_test.task.task_type_parameters)

        # EvaluationJob
        job.executables = user_test.executables
        job.testcases = [Testcase(input=user_test.input,
                                  output=None)]
        job.time_limit = user_test.task.time_limit
        job.memory_limit = user_test.task.memory_limit
        job.managers = dict(user_test.managers)
        job.files = user_test.files
        job.info = "evaluate user test %d" % (user_test.id)

        # Add the managers that have to be fetched from the Task;
        # get_task_type must be imported here to avoid circular
        # dependencies
        from cms.grading.tasktypes import get_task_type
        task_type = get_task_type(task=user_test.task)
        auto_managers = task_type.get_auto_managers()
        if auto_managers is not None:
            for manager_filename in auto_managers:
                job.managers[manager_filename] = \
                    user_test.task.managers[manager_filename]
        else:
            for manager_filename in user_test.task.managers:
                if manager_filename not in job.managers:
                    job.managers[manager_filename] = \
                        user_test.task.managers[manager_filename]

        return job
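A minimal sketch of how a caller might use this factory, assuming EvaluationJob also exposes an export_to_dict counterpart to the import_from_dict shown in the following examples (the transport to the worker is left out; user_test is assumed to come from the database layer):

import json

# Build the job from a user test and turn it into a plain dict.
# export_to_dict is assumed here; see import_from_dict below.
job = EvaluationJob.from_user_test(user_test)
payload = json.dumps(job.export_to_dict())

# On the receiving side, rebuild the job from the decoded JSON object.
received = EvaluationJob.import_from_dict(json.loads(payload))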
Example no. 2
File: Job.py Project: bblackham/cms
 def import_from_dict(cls, data):
     data['executables'] = [
         Executable.import_from_dict(executable_data)
         for executable_data in data['executables']
     ]
     data['executables'] = dict([(executable.filename, executable)
                                 for executable in data['executables']])
     data['testcases'] = [
         Testcase.import_from_dict(testcase_data)
         for testcase_data in data['testcases']
     ]
     data['managers'] = [
         Manager.import_from_dict(manager_data)
         for manager_data in data['managers']
     ]
     data['managers'] = dict([(manager.filename, manager)
                              for manager in data['managers']])
     data['files'] = [
         File.import_from_dict(file_data) for file_data in data['files']
     ]
     data['files'] = dict([(file_.filename, file_)
                           for file_ in data['files']])
     # XXX We convert the key from str to int because it was the key
     # of a JSON object.
     data['evaluations'] = dict(
         (int(k), v) for k, v in data['evaluations'].iteritems())
     return cls(**data)
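The str-to-int conversion on data['evaluations'] is needed because JSON object keys are always strings. A quick round trip illustrates the effect:

import json

original = {0: "ok", 1: "wrong answer"}
restored = json.loads(json.dumps(original))
print(restored)   # {'0': 'ok', '1': 'wrong answer'} -- keys became str
restored = dict((int(k), v) for k, v in restored.items())
print(restored)   # {0: 'ok', 1: 'wrong answer'}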
Example no. 3
File: Job.py Project: VittGam/cms
 def import_from_dict(cls, data):
     data["executables"] = [Executable.import_from_dict(executable_data) for executable_data in data["executables"]]
     data["executables"] = dict([(executable.filename, executable) for executable in data["executables"]])
     data["testcases"] = [Testcase.import_from_dict(testcase_data) for testcase_data in data["testcases"]]
     data["managers"] = [Manager.import_from_dict(manager_data) for manager_data in data["managers"]]
     data["managers"] = dict([(manager.filename, manager) for manager in data["managers"]])
     data["files"] = [File.import_from_dict(file_data) for file_data in data["files"]]
     data["files"] = dict([(file_.filename, file_) for file_ in data["files"]])
     # XXX We convert the key from str to int because it was the key
     # of a JSON object.
     data["evaluations"] = dict((int(k), v) for k, v in data["evaluations"].iteritems())
     return cls(**data)
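Both variants follow the same pattern: each nested dict is turned back into its object via import_from_dict, and the resulting lists of executables, managers and files are re-keyed by filename. A self-contained sketch of that pattern with a toy class (the class and its fields are illustrative only, not the real CMS models):

class Executable(object):
    # Toy stand-in: just enough structure to show the pattern.
    def __init__(self, filename, digest):
        self.filename = filename
        self.digest = digest

    @classmethod
    def import_from_dict(cls, data):
        return cls(**data)

raw = {"executables": [{"filename": "a.out", "digest": "abc123"}]}
objs = [Executable.import_from_dict(d) for d in raw["executables"]]
by_name = dict((e.filename, e) for e in objs)
print(by_name["a.out"].digest)   # abc123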
Example no. 4
File: Job.py Project: bblackham/cms
    def from_user_test(user_test):
        job = EvaluationJob()

        # Job
        job.task_type = user_test.task.active_dataset.task_type
        job.task_type_parameters = json.loads(
            user_test.task.active_dataset.task_type_parameters)

        # EvaluationJob
        job.executables = user_test.executables
        # FIXME This is not a proper way to use Testcases!
        testcase = Testcase(num=0, input=user_test.input, output='')
        testcase.num = None
        testcase.output = None
        job.testcases = [testcase]
        job.time_limit = user_test.task.active_dataset.time_limit
        job.memory_limit = user_test.task.active_dataset.memory_limit
        job.managers = dict(user_test.managers)
        job.files = user_test.files
        job.info = "evaluate user test %d" % (user_test.id)

        # Add the managers that have to be fetched from the Task;
        # get_task_type must be imported here to avoid circular
        # dependencies
        from cms.grading.tasktypes import get_task_type
        task_type = get_task_type(dataset=user_test.task.active_dataset)
        auto_managers = task_type.get_auto_managers()
        if auto_managers is not None:
            for manager_filename in auto_managers:
                job.managers[manager_filename] = \
                    user_test.task.active_dataset.managers[manager_filename]
        else:
            for manager_filename in user_test.task.active_dataset.managers:
                if manager_filename not in job.managers:
                    job.managers[manager_filename] = \
                        user_test.task.active_dataset.managers[
                            manager_filename]

        return job
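Apart from the Testcase workaround flagged by the FIXME, the main difference from Example no. 1 is that task-wide settings (task type, limits, managers) are now read from task.active_dataset instead of the task itself. A toy illustration of that indirection (hypothetical minimal classes, not the CMS database models):

class Dataset(object):
    def __init__(self, task_type, time_limit, memory_limit, managers):
        self.task_type = task_type
        self.time_limit = time_limit
        self.memory_limit = memory_limit
        self.managers = managers

class Task(object):
    def __init__(self, datasets, active_index=0):
        self.datasets = datasets
        self.active_dataset = datasets[active_index]

task = Task([Dataset("Batch", 1.0, 256 * 1024 * 1024, {})])
print(task.active_dataset.task_type)   # Batch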
Example no. 5
 def import_from_dict(cls, data):
     data['executables'] = [Executable.import_from_dict(executable_data)
                            for executable_data in data['executables']]
     data['executables'] = dict([(executable.filename, executable)
                                 for executable in data['executables']])
     data['testcases'] = [Testcase.import_from_dict(testcase_data)
                          for testcase_data in data['testcases']]
     data['managers'] = [Manager.import_from_dict(manager_data)
                         for manager_data in data['managers']]
     data['managers'] = dict([(manager.filename, manager)
                              for manager in data['managers']])
     data['files'] = [File.import_from_dict(file_data)
                      for file_data in data['files']]
     data['files'] = dict([(file_.filename, file_)
                           for file_ in data['files']])
     return cls(**data)
Example no. 6
    def get_params_for_task(self, path, num):
        """Given the path of a task, this function put all needed data
        into FS, and fills the dictionary of parameters required by
        Task.import_from_dict().

        path (string): path of the task.
        num (int): number of the task in the contest task ordering.

        return (dict): info of the task.

        """
        path = os.path.realpath(path)
        super_path, name = os.path.split(path)
        conf = yaml.load(
            codecs.open(os.path.join(super_path, name + ".yaml"), "r",
                        "utf-8"))

        logger.info("Loading parameters for task %s." % name)

        params = {"name": name}
        assert name == conf["nome_breve"]
        params["title"] = conf["nome"]
        if name == params["title"]:
            logger.warning("Short name equals long name (title). "
                           "Is this intended?")
        params["num"] = num
        params["time_limit"] = conf.get("timeout", None)
        params["memory_limit"] = conf.get("memlimit", None)
        params["attachments"] = []  # FIXME - Use auxiliary
        params["statements"] = [
            Statement(
                self.file_cacher.put_file(
                    path=os.path.join(path, "testo", "testo.pdf"),
                    description="Statement for task %s (lang: )" % name),
                "").export_to_dict()
        ]
        params["official_language"] = \
            conf.get("official_language", "en_official")

        params["submission_format"] = [
            SubmissionFormatElement("%s.%%l" % name).export_to_dict()
        ]

        # Builds the parameters that depend on the task type
        params["managers"] = []
        infile_param = conf.get("infile", "input.txt")
        outfile_param = conf.get("outfile", "output.txt")

        # If there is sol/grader.%l for some language %l, then,
        # presuming that the task type is Batch, we retrieve graders
        # in the form sol/grader.%l
        graders = False
        for lang in Submission.LANGUAGES:
            if os.path.exists(os.path.join(path, "sol", "grader.%s" % (lang))):
                graders = True
                break
        if graders:
            # Read grader for each language
            for lang in Submission.LANGUAGES:
                grader_filename = os.path.join(path, "sol",
                                               "grader.%s" % (lang))
                if os.path.exists(grader_filename):
                    params["managers"].append(
                        Manager(
                            self.file_cacher.put_file(
                                path=grader_filename,
                                description="Grader for task %s and "
                                "language %s" % (name, lang)),
                            "grader.%s" % (lang)).export_to_dict())
                else:
                    logger.warning("Could not find grader for "
                                   "language %s" % (lang))
            # Read managers with other known file extensions
            for other_filename in os.listdir(os.path.join(path, "sol")):
                if other_filename.endswith('.h') or \
                        other_filename.endswith('lib.pas'):
                    params["managers"].append(
                        Manager(
                            self.file_cacher.put_file(
                                path=os.path.join(path, "sol", other_filename),
                                description="Manager %s for task %s" %
                                (other_filename, name)),
                            other_filename).export_to_dict())
            compilation_param = "grader"
        else:
            compilation_param = "alone"

        # If there is cor/correttore, then, presuming that the task
        # type is Batch or OutputOnly, we retrieve the comparator
        if os.path.exists(os.path.join(path, "cor", "correttore")):
            params["managers"] += [
                Manager(
                    self.file_cacher.put_file(
                        path=os.path.join(path, "cor", "correttore"),
                        description="Manager for task %s" % (name)),
                    "checker").export_to_dict()
            ]
            evaluation_parameter = "comparator"
        else:
            evaluation_parameter = "diff"

        # Detect subtasks by checking GEN
        gen_filename = os.path.join(path, 'gen', 'GEN')
        with open(gen_filename) as gen_file:
            subtasks = []
            testcases = 0
            points = None
            for line in gen_file:
                line = line.strip()
                splitted = line.split('#', 1)

                if len(splitted) == 1:
                    # This line either represents a testcase or is just
                    # a blank line
                    if splitted[0] != '':
                        testcases += 1

                else:
                    testcase, comment = splitted
                    testcase_detected = False
                    subtask_detected = False
                    if testcase.strip() != '':
                        testcase_detected = True
                    comment = comment.strip()
                    if comment.startswith('ST:'):
                        subtask_detected = True

                    if testcase_detected and subtask_detected:
                        raise Exception("No testcase and subtask in the"
                                        " same line allowed")

                    # This line represents a testcase and contains a
                    # comment, but the comment doesn't start a new
                    # subtask
                    if testcase_detected:
                        testcases += 1

                    # This line starts a new subtask
                    if subtask_detected:
                        # Close the previous subtask
                        if points is None:
                            assert (testcases == 0)
                        else:
                            subtasks.append([points, testcases])
                        # Open the new one
                        testcases = 0
                        points = int(comment[3:].strip())

            # Close last subtask (if no subtasks were defined, just
            # fallback to Sum)
            if points is None:
                params["score_type"] = "Sum"
                input_value = 0.0
                if testcases != 0:
                    input_value = 100.0 / float(testcases)
                params["score_parameters"] = str(input_value)
            else:
                subtasks.append([points, testcases])
                assert (100 == sum([int(st[0]) for st in subtasks]))
                assert (int(conf['n_input']) == sum(
                    [int(st[1]) for st in subtasks]))
                params["score_type"] = "GroupMin"
                params["score_parameters"] = str(subtasks)

        # If output_only is set, then the task type is OutputOnly
        if conf.get('output_only', False):
            params["task_type"] = "OutputOnly"
            params["time_limit"] = None
            params["memory_limit"] = None
            params["task_type_parameters"] = '["%s"]' % (evaluation_parameter)
            params["submission_format"] = [
                SubmissionFormatElement("output_%03d.txt" %
                                        (i)).export_to_dict()
                for i in xrange(int(conf["n_input"]))
            ]

        # If there is cor/manager, then the task type is Communication
        elif os.path.exists(os.path.join(path, "cor", "manager")):
            params["task_type"] = "Communication"
            params["task_type_parameters"] = '[]'
            params["managers"] += [
                Manager(
                    self.file_cacher.put_file(
                        path=os.path.join(path, "cor", "manager"),
                        description="Manager for task %s" % (name)),
                    "manager").export_to_dict()
            ]
            for lang in Submission.LANGUAGES:
                stub_name = os.path.join(path, "sol", "stub.%s" % lang)
                if os.path.exists(stub_name):
                    params["managers"].append(
                        Manager(
                            self.file_cacher.put_file(
                                path=stub_name,
                                description="Stub for task %s and language %s"
                                % (name, lang)),
                            "stub.%s" % lang).export_to_dict())
                else:
                    logger.warning("Stub for language %s not found." % lang)

        # Otherwise, the task type is Batch
        else:
            params["task_type"] = "Batch"
            params["task_type_parameters"] = \
                '["%s", ["%s", "%s"], "%s"]' % \
                (compilation_param, infile_param, outfile_param,
                 evaluation_parameter)

        public_testcases = conf.get("risultati", "").strip()
        if public_testcases != "":
            public_testcases = [
                int(x.strip()) for x in public_testcases.split(",")
            ]
        else:
            public_testcases = []
        params["testcases"] = []
        for i in xrange(int(conf["n_input"])):
            _input = os.path.join(path, "input", "input%d.txt" % i)
            output = os.path.join(path, "output", "output%d.txt" % i)
            input_digest = self.file_cacher.put_file(
                path=_input, description="Input %d for task %s" % (i, name))
            output_digest = self.file_cacher.put_file(
                path=output, description="Output %d for task %s" % (i, name))
            params["testcases"].append(
                Testcase(input_digest,
                         output_digest,
                         public=(i in public_testcases)).export_to_dict())
            if params["task_type"] == "OutputOnly":
                params["attachments"].append(
                    Attachment(input_digest,
                               "input_%03d.txt" % (i)).export_to_dict())
        params["token_initial"] = conf.get("token_initial", None)
        params["token_max"] = conf.get("token_max", None)
        params["token_total"] = conf.get("token_total", None)
        params["token_min_interval"] = conf.get("token_min_interval", 0)
        params["token_gen_time"] = conf.get("token_gen_time", 0)
        params["token_gen_number"] = conf.get("token_gen_number", 0)

        params["max_submission_number"] = \
            conf.get("max_submission_number", None)
        params["max_user_test_number"] = \
            conf.get("max_user_test_number", None)
        params["min_submission_interval"] = \
            conf.get("min_submission_interval", None)
        params["min_user_test_interval"] = \
            conf.get("min_user_test_interval", None)

        logger.info("Task parameters loaded.")

        return params
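The gen/GEN parsing above counts one testcase for every line whose part before '#' is non-empty, starts a new subtask at every comment of the form '# ST: <points>', and finally checks that the points add up to 100 and the testcase count matches n_input. A simplified standalone sketch of the same convention (the GEN sample is made up; error handling is omitted):

GEN_SAMPLE = """\
# ST: 0
10 sample
# ST: 40
1000 random
1000 random
# ST: 60
100000 worst_case
"""

def parse_gen(text):
    # Return [[points, testcases], ...] following the loader above.
    subtasks, testcases, points = [], 0, None
    for line in text.splitlines():
        testcase, _, comment = line.strip().partition('#')
        comment = comment.strip()
        if comment.startswith('ST:'):
            if points is not None:
                subtasks.append([points, testcases])
            testcases, points = 0, int(comment[3:].strip())
        elif testcase.strip() != '':
            testcases += 1
    if points is not None:
        subtasks.append([points, testcases])
    return subtasks

print(parse_gen(GEN_SAMPLE))   # [[0, 1], [40, 2], [60, 1]]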
Example no. 7
    def get_params_for_task(self, path, num):
        """Given the path of a task, this function put all needed data
        into FS, and fills the dictionary of parameters required by
        Task.import_from_dict().

        path (string): path of the task.
        num (int): number of the task in the contest task ordering.

        return (dict): info of the task.

        """
        path = os.path.realpath(path)
        super_path, name = os.path.split(path)
        conf = yaml.load(
            codecs.open(os.path.join(super_path, name + ".yaml"), "r",
                        "utf-8"))

        logger.info("Loading parameters for task %s." % name)

        params = {"name": name}
        assert name == conf["nome_breve"]
        params["title"] = conf["nome"]
        if name == params["title"]:
            logger.warning("Short name equals long name (title). "
                           "Is this intended?")
        params["num"] = num
        params["time_limit"] = conf.get("timeout", None)
        params["memory_limit"] = conf.get("memlimit", None)
        params["attachments"] = []  # FIXME - Use auxiliary
        params["statements"] = [
            Statement(
                self.file_cacher.put_file(
                    path=os.path.join(path, "testo", "testo.pdf"),
                    description="Statement for task %s (lang: )" % name),
                "").export_to_dict()
        ]
        params["official_language"] = ""

        params["submission_format"] = [
            SubmissionFormatElement("%s.%%l" % name).export_to_dict()
        ]

        # Builds the parameters that depend on the task type
        params["managers"] = []
        infile_param = conf.get("infile", "input.txt")
        outfile_param = conf.get("outfile", "output.txt")

        # If there is cor/grader.%l for some language %l, then,
        # presuming that the task type is Batch, we retrieve graders
        # in the form cor/grader.%l
        graders = False
        for lang in Submission.LANGUAGES:
            if os.path.exists(os.path.join(path, "cor", "grader.%s" % (lang))):
                graders = True
                break
        if graders:
            for lang in Submission.LANGUAGES:
                grader_filename = os.path.join(path, "cor",
                                               "grader.%s" % (lang))
                if os.path.exists(grader_filename):
                    params["managers"].append(
                        Manager(
                            self.file_cacher.put_file(
                                path=grader_filename,
                                description="Grader for task %s and "
                                "language %s" % (name, lang)),
                            "grader.%s" % (lang)).export_to_dict())
                else:
                    logger.warning("Could not find grader for "
                                   "language %s" % (lang))
            compilation_param = "grader"
        else:
            compilation_param = "alone"

        # If there is cor/correttore, then, presuming that the task
        # type is Batch or OutputOnly, we retrieve the comparator
        if os.path.exists(os.path.join(path, "cor", "correttore")):
            params["managers"] += [
                Manager(
                    self.file_cacher.put_file(
                        path=os.path.join(path, "cor", "correttore"),
                        description="Manager for task %s" % (name)),
                    "checker").export_to_dict()
            ]
            evaluation_parameter = "comparator"
        else:
            evaluation_parameter = "diff"

        # If there is no sol/ directory, then the task type is
        # OutputOnly
        if not os.path.exists(os.path.join(path, "sol")):
            params["task_type"] = "OutputOnly"
            params["task_type_parameters"] = '["%s"]' % (evaluation_parameter)
            params["submission_format"] = [
                SubmissionFormatElement("output_%03d.txt" %
                                        (i)).export_to_dict()
                for i in xrange(int(conf["n_input"]))
            ]

        # If there is cor/manager, then the task type is Communication
        elif os.path.exists(os.path.join(path, "cor", "manager")):
            params["task_type"] = "Communication"
            params["task_type_parameters"] = '[]'
            params["managers"] += [
                Manager(
                    self.file_cacher.put_file(
                        path=os.path.join(path, "cor", "manager"),
                        description="Manager for task %s" % (name)),
                    "manager").export_to_dict()
            ]
            for lang in Submission.LANGUAGES:
                stub_name = os.path.join(path, "sol", "stub.%s" % lang)
                if os.path.exists(stub_name):
                    params["managers"].append(
                        Manager(
                            self.file_cacher.put_file(
                                path=stub_name,
                                description="Stub for task %s and language %s"
                                % (name, lang)),
                            "stub.%s" % lang).export_to_dict())
                else:
                    logger.warning("Stub for language %s not found." % lang)

        # Otherwise, the task type is Batch
        else:
            params["task_type"] = "Batch"
            params["task_type_parameters"] = \
                '["%s", ["%s", "%s"], "%s"]' % \
                (compilation_param, infile_param, outfile_param,
                 evaluation_parameter)

        params["score_type"] = conf.get("score_type", "Sum")
        params["score_parameters"] = conf.get(
            "score_parameters", str(100.0 / float(conf["n_input"])))
        public_testcases = conf.get("risultati", "").strip()
        if public_testcases != "":
            public_testcases = [
                int(x.strip()) for x in public_testcases.split(",")
            ]
        else:
            public_testcases = []
        params["testcases"] = []
        for i in xrange(int(conf["n_input"])):
            _input = os.path.join(path, "input", "input%d.txt" % i)
            output = os.path.join(path, "output", "output%d.txt" % i)
            input_digest = self.file_cacher.put_file(
                path=_input, description="Input %d for task %s" % (i, name))
            output_digest = self.file_cacher.put_file(
                path=output, description="Output %d for task %s" % (i, name))
            params["testcases"].append(
                Testcase(input_digest,
                         output_digest,
                         public=(i in public_testcases)).export_to_dict())
            if params["task_type"] == "OutputOnly":
                params["attachments"].append(
                    Attachment(input_digest,
                               "input_%03d.txt" % (i)).export_to_dict())
        params["token_initial"] = conf.get("token_initial", None)
        params["token_max"] = conf.get("token_max", None)
        params["token_total"] = conf.get("token_total", None)
        params["token_min_interval"] = conf.get("token_min_interval", 0)
        params["token_gen_time"] = conf.get("token_gen_time", 0)
        params["token_gen_number"] = conf.get("token_gen_number", 0)

        logger.info("Task parameters loaded.")

        return params
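For reference, the keys these loaders read from the task's .yaml file include nome_breve, nome, timeout, memlimit, n_input and risultati, plus the optional infile, outfile and output_only. A hedged sketch of such a configuration, loaded inline so the snippet stays runnable (all values are made up):

import yaml

sample_conf = yaml.safe_load("""
nome_breve: sommatoria
nome: La grande sommatoria
timeout: 1.0
memlimit: 256
n_input: 20
risultati: "0, 1, 2"
infile: input.txt
outfile: output.txt
""")

# Same handling of "risultati" (public testcases) as in the loader above.
public_testcases = [int(x.strip())
                    for x in sample_conf["risultati"].split(",")]
print(public_testcases)   # [0, 1, 2]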